gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
from __future__ import unicode_literals
from rest_framework import status as http_status
import logging
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.db import connection
from django.db import transaction
from flask import request
from framework.auth import Auth
from framework.sessions import get_session
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_signed, must_be_logged_in
from osf.exceptions import InvalidTagError, TagNotFoundError
from osf.models import FileVersion, Node, OSFUser
from osf.utils.permissions import WRITE
from osf.utils.requests import check_select_for_update
from website.project.decorators import (
must_not_be_registration, must_have_permission
)
from website.project.model import has_anonymous_link
from website.files import exceptions
from website.settings import StorageLimits
from addons.osfstorage import utils
from addons.osfstorage import decorators
from addons.osfstorage.models import OsfStorageFolder
from addons.osfstorage import settings as osf_storage_settings
logger = logging.getLogger(__name__)
def make_error(code, message_short=None, message_long=None):
    """Build an HTTPError carrying optional short/long messages in its data payload."""
    data = {}
    for key, value in (('message_short', message_short), ('message_long', message_long)):
        if value:
            data[key] = value
    return HTTPError(code, data=data)
@must_be_signed
def osfstorage_update_metadata(payload, **kwargs):
    """Metadata received from WaterButler, is built incrementally via latent task calls to this endpoint.
    The basic metadata response looks like::
    {
        "metadata": {
            # file upload
            "name": "file.name",
            "md5": "d41d8cd98f00b204e9800998ecf8427e",
            "path": "...",
            "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            "version": "2",
            "downloads": "1",
            "checkout": "...",
            "latestVersionSeen": {"userId": "abc12", "seen": true},
            "modified": "a date",
            "modified_utc": "a date in utc",
            # glacier vault (optional)
            "archive": "glacier_key",
            "vault": "glacier_vault_name",
            # parity files
            "parity": {
                "redundancy": "5",
                "files": [
                    {"name": "foo.txt.par2","sha256": "abc123"},
                    {"name": "foo.txt.vol00+01.par2","sha256": "xyz321"},
                ]
            }
        },
    }
    """
    # Both keys are mandatory; anything else is a malformed WaterButler callback.
    try:
        version_id = payload['version']
        metadata = payload['metadata']
    except KeyError:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)

    # Row-lock the version while it is updated when the database backend
    # supports it: several latent tasks may report metadata for the same
    # version concurrently.
    if check_select_for_update():
        version = FileVersion.objects.filter(_id=version_id).select_for_update().first()
    else:
        version = FileVersion.objects.filter(_id=version_id).first()

    if version is None:
        raise HTTPError(http_status.HTTP_404_NOT_FOUND)

    version.update_metadata(metadata)
    return {'status': 'success'}
@must_be_signed
@decorators.load_guid_as_target
def osfstorage_get_storage_quota_status(target, **kwargs):
    """Report whether ``target`` has exceeded its storage cap."""
    # Storage caps only restrict Nodes; every other target is never over quota.
    if not isinstance(target, Node):
        return {'over_quota': False}

    # Usage has not been computed yet: the calculation runs asynchronously,
    # so ask the caller to retry (202 Accepted).
    if target.storage_limit_status is StorageLimits.NOT_CALCULATED:
        raise HTTPError(http_status.HTTP_202_ACCEPTED)

    # Public and private nodes have different caps.
    threshold = StorageLimits.OVER_PUBLIC if target.is_public else StorageLimits.OVER_PRIVATE
    return {'over_quota': target.storage_limit_status >= threshold}
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_get_revisions(file_node, payload, target, **kwargs):
    """Serialize every version of ``file_node`` (newest first) with its download count."""
    from osf.models import PageCounter, FileVersion  # TODO Fix me onces django works
    # Anonymized view-only links must hide contributor identity in the output.
    is_anon = has_anonymous_link(target, Auth(private_key=request.args.get('view_only')))
    counter_prefix = 'download:{}:{}:'.format(file_node.target._id, file_node._id)
    version_count = file_node.versions.count()
    # Map PageCounter _id ('download:<target>:<file>:<index>') -> total downloads.
    counts = dict(PageCounter.objects.filter(resource=file_node.target.guids.first().id, file=file_node, action='download').values_list('_id', 'total'))
    qs = FileVersion.includable_objects.filter(basefilenode__id=file_node.id).include('creator__guids').order_by('-created')
    # qs is newest-first, so item i corresponds to version index version_count - i - 1.
    # The queryset is evaluated (and cached) here, so the enumerate below sees
    # the same annotated objects.
    for i, version in enumerate(qs):
        version._download_count = counts.get('{}{}'.format(counter_prefix, version_count - i - 1), 0)
    # Return revisions in descending order
    return {
        'revisions': [
            utils.serialize_revision(target, file_node, version, index=version_count - idx - 1, anon=is_anon)
            for idx, version in enumerate(qs)
        ]
    }
@decorators.waterbutler_opt_hook
def osfstorage_copy_hook(source, destination, name=None, **kwargs):
    """Copy ``source`` under ``destination`` and return (serialized copy, 201)."""
    copied = source.copy_under(destination, name=name)
    return copied.serialize(), http_status.HTTP_201_CREATED
@decorators.waterbutler_opt_hook
def osfstorage_move_hook(source, destination, name=None, **kwargs):
    """Move ``source`` under ``destination``, translating domain errors to HTTP ones."""
    try:
        moved = source.move_under(destination, name=name).serialize()
    except exceptions.FileNodeCheckedOutError:
        # Checked-out files are locked against moves.
        raise HTTPError(http_status.HTTP_405_METHOD_NOT_ALLOWED, data={
            'message_long': 'Cannot move file as it is checked out.'
        })
    except exceptions.FileNodeIsPrimaryFile:
        # A preprint's primary file must stay where it is.
        raise HTTPError(http_status.HTTP_403_FORBIDDEN, data={
            'message_long': 'Cannot move file as it is the primary file of preprint.'
        })
    return moved, http_status.HTTP_200_OK
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_lineage(file_node, **kwargs):
    """Serialize ``file_node`` and each ancestor, child first, root last."""
    lineage = []
    current = file_node
    while current:
        lineage.append(current.serialize())
        current = current.parent
    return {'data': lineage}
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_metadata(file_node, **kwargs):
    """Serialize ``file_node``, optionally pinned to the ``?revision=`` query arg."""
    raw_revision = request.args.get('revision')
    try:
        # TODO This should change to version as its internal it can be changed anytime
        version = int(raw_revision)
    except (ValueError, TypeError):
        # Missing or non-numeric revision -> serialize the latest version.
        version = None
    return file_node.serialize(version=version, include_full=True)
@must_be_signed
@decorators.autoload_filenode(must_be='folder')
def osfstorage_get_children(file_node, **kwargs):
    """Return a JSON list describing every non-trashed child of ``file_node``.

    Implemented as one raw SQL query: each child row is laterally joined with
    its latest/earliest version, checkout guid, download count, and per-user
    "seen" flags, then aggregated with json_agg.
    """
    from django.contrib.contenttypes.models import ContentType

    # Optional guid of the viewing user; drives the latestVersionSeen field.
    user_id = request.args.get('user_id')
    user_content_type_id = ContentType.objects.get_for_model(OSFUser).id
    # Resolve the user's guid to a primary key for the seen-version subqueries.
    user_pk = OSFUser.objects.filter(guids___id=user_id, guids___id__isnull=False).values_list('pk', flat=True).first()

    with connection.cursor() as cursor:
        # Read the documentation on FileVersion's fields before reading this code
        cursor.execute("""
        SELECT json_agg(CASE
        WHEN F.type = 'osf.osfstoragefile' THEN
        json_build_object(
        'id', F._id
        , 'path', '/' || F._id
        , 'name', F.name
        , 'kind', 'file'
        , 'size', LATEST_VERSION.size
        , 'downloads', COALESCE(DOWNLOAD_COUNT, 0)
        , 'version', (SELECT COUNT(*) FROM osf_basefileversionsthrough WHERE osf_basefileversionsthrough.basefilenode_id = F.id)
        , 'contentType', LATEST_VERSION.content_type
        , 'modified', LATEST_VERSION.created
        , 'created', EARLIEST_VERSION.created
        , 'checkout', CHECKOUT_GUID
        , 'md5', LATEST_VERSION.metadata ->> 'md5'
        , 'sha256', LATEST_VERSION.metadata ->> 'sha256'
        , 'latestVersionSeen', SEEN_LATEST_VERSION.case
        )
        ELSE
        json_build_object(
        'id', F._id
        , 'path', '/' || F._id || '/'
        , 'name', F.name
        , 'kind', 'folder'
        )
        END
        )
        FROM osf_basefilenode AS F
        LEFT JOIN LATERAL (
        SELECT * FROM osf_fileversion
        JOIN osf_basefileversionsthrough ON osf_fileversion.id = osf_basefileversionsthrough.fileversion_id
        WHERE osf_basefileversionsthrough.basefilenode_id = F.id
        ORDER BY created DESC
        LIMIT 1
        ) LATEST_VERSION ON TRUE
        LEFT JOIN LATERAL (
        SELECT * FROM osf_fileversion
        JOIN osf_basefileversionsthrough ON osf_fileversion.id = osf_basefileversionsthrough.fileversion_id
        WHERE osf_basefileversionsthrough.basefilenode_id = F.id
        ORDER BY created ASC
        LIMIT 1
        ) EARLIEST_VERSION ON TRUE
        LEFT JOIN LATERAL (
        SELECT _id from osf_guid
        WHERE object_id = F.checkout_id
        AND content_type_id = %s
        LIMIT 1
        ) CHECKOUT_GUID ON TRUE
        LEFT JOIN LATERAL (
        SELECT P.total AS DOWNLOAD_COUNT FROM osf_pagecounter AS P
        WHERE P.resource_id = %s
        AND P.file_id = F.id
        AND P.action = 'download'
        AND P.version ISNULL
        LIMIT 1
        ) DOWNLOAD_COUNT ON TRUE
        LEFT JOIN LATERAL (
        SELECT EXISTS(
        SELECT (1) FROM osf_fileversionusermetadata
        INNER JOIN osf_fileversion ON osf_fileversionusermetadata.file_version_id = osf_fileversion.id
        INNER JOIN osf_basefileversionsthrough ON osf_fileversion.id = osf_basefileversionsthrough.fileversion_id
        WHERE osf_fileversionusermetadata.user_id = %s
        AND osf_basefileversionsthrough.basefilenode_id = F.id
        LIMIT 1
        )
        ) SEEN_FILE ON TRUE
        LEFT JOIN LATERAL (
        SELECT CASE WHEN SEEN_FILE.exists
        THEN
        CASE WHEN EXISTS(
        SELECT (1) FROM osf_fileversionusermetadata
        WHERE osf_fileversionusermetadata.file_version_id = LATEST_VERSION.fileversion_id
        AND osf_fileversionusermetadata.user_id = %s
        LIMIT 1
        )
        THEN
        json_build_object('user', %s, 'seen', TRUE)
        ELSE
        json_build_object('user', %s, 'seen', FALSE)
        END
        ELSE
        NULL
        END
        ) SEEN_LATEST_VERSION ON TRUE
        WHERE parent_id = %s
        AND (NOT F.type IN ('osf.trashedfilenode', 'osf.trashedfile', 'osf.trashedfolder'))
        """, [
            user_content_type_id,
            file_node.target.guids.first().id,
            user_pk,
            user_pk,
            user_id,
            user_id,
            file_node.id
        ])
        # json_agg yields NULL (-> None) for an empty folder; normalize to [].
        return cursor.fetchone()[0] or []
@must_be_signed
@decorators.autoload_filenode(must_be='folder')
def osfstorage_create_child(file_node, payload, **kwargs):
    """Create a child file or folder under ``file_node`` from a WaterButler payload.

    Returns the serialized child (plus, for files, the new version id) with
    201 when a new node was created or 200 when an existing file was updated.
    Raises HTTPError 400 on bad input, 403 on checkout conflicts, and 409 when
    a folder name collides with an existing sibling.
    """
    parent = file_node  # Just for clarity
    name = payload.get('name')
    user = OSFUser.load(payload.get('user'))
    is_folder = payload.get('kind') == 'folder'

    # Registrations are immutable except while they are being archived.
    if getattr(file_node.target, 'is_registration', False) and not getattr(file_node.target, 'archiving', False):
        raise HTTPError(
            http_status.HTTP_400_BAD_REQUEST,
            data={
                'message_short': 'Registered Nodes are immutable',
                'message_long': "The operation you're trying to do cannot be applied to registered Nodes, which are immutable",
            }
        )

    # BUGFIX: this used to read `not (name or user)`, which let a missing name
    # (with a valid user) fall through to `'/' in name` and crash with a
    # TypeError. Both a name and a user are required, and names may not
    # contain path separators.
    if not name or not user or '/' in name:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)

    if getattr(file_node.target, 'is_quickfiles', False) and is_folder:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={'message_long': 'You may not create a folder for QuickFiles'})

    try:
        # Create a save point so that we can rollback and unlock
        # the parent record
        with transaction.atomic():
            if is_folder:
                created, file_node = True, parent.append_folder(name)
            else:
                created, file_node = True, parent.append_file(name)
    except (ValidationError, IntegrityError):
        # A sibling with this name already exists; fall back to it.
        created, file_node = False, parent.find_child_by_name(name, kind=int(not is_folder))

    if not created and is_folder:
        raise HTTPError(http_status.HTTP_409_CONFLICT, data={
            'message_long': 'Cannot create folder "{name}" because a file or folder already exists at path "{path}"'.format(
                name=file_node.name,
                path=file_node.materialized_path,
            )
        })

    # Only the user who checked the file out may update it.
    if file_node.checkout and file_node.checkout._id != user._id:
        raise HTTPError(http_status.HTTP_403_FORBIDDEN, data={
            'message_long': 'File cannot be updated due to checkout status.'
        })

    if not is_folder:
        # Files get a new FileVersion built from the upload metadata/hashes
        # and the storage worker's location settings.
        try:
            metadata = dict(payload['metadata'], **payload['hashes'])
            location = dict(payload['settings'], **dict(
                payload['worker'], **{
                    'object': payload['metadata']['name'],
                    'service': payload['metadata']['provider'],
                }
            ))
        except KeyError:
            raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
        new_version = file_node.create_version(user, location, metadata)
        version_id = new_version._id
        archive_exists = new_version.archive is not None
    else:
        version_id = None
        archive_exists = False

    return {
        'status': 'success',
        'archive': not archive_exists,  # Should waterbutler also archive this file
        'data': file_node.serialize(),
        'version': version_id,
    }, http_status.HTTP_201_CREATED if created else http_status.HTTP_200_OK
@must_be_signed
@must_not_be_registration
@decorators.autoload_filenode()
def osfstorage_delete(file_node, payload, target, **kwargs):
    """Delete ``file_node`` on behalf of the user named in the payload."""
    user = OSFUser.load(payload['user'])
    auth = Auth(user)
    #TODO Auth check?
    if not auth:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)

    # The storage root itself may never be deleted.
    root = OsfStorageFolder.objects.get_root(target=target)
    if file_node == root:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)

    try:
        file_node.delete(user=user)
    except exceptions.FileNodeCheckedOutError:
        raise HTTPError(http_status.HTTP_403_FORBIDDEN)
    except exceptions.FileNodeIsPrimaryFile:
        raise HTTPError(http_status.HTTP_403_FORBIDDEN, data={
            'message_long': 'Cannot delete file as it is the primary file of preprint.'
        })

    return {'status': 'success'}
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_download(file_node, payload, **kwargs):
    """Resolve a WaterButler download request to a stored version's location.

    Returns the object's display name and location hash, plus the
    WaterButler resource setting needed to fetch the blob.
    """
    # Set user ID in session data for checking if user is contributor
    # to project.
    user_id = payload.get('user')
    if user_id:
        current_session = get_session()
        current_session.data['auth_user_id'] = user_id
        current_session.save()

    # Absent ?version means "latest"; a present value must parse as an int.
    if not request.args.get('version'):
        version_id = None
    else:
        try:
            version_id = int(request.args['version'])
        except ValueError:
            # BUGFIX: message previously read "if not specified" — this branch
            # fires precisely when a version IS specified but is not an integer.
            raise make_error(http_status.HTTP_400_BAD_REQUEST, message_short='Version must be an integer if specified')

    version = file_node.get_version(version_id, required=True)
    file_version_thru = version.get_basefilenode_version(file_node)
    # Prefer the name recorded on the through-model (preserves rename history)
    # over the node's current name.
    name = file_version_thru.version_name if file_version_thru else file_node.name

    return {
        'data': {
            'name': name,
            'path': version.location_hash,
        },
        'settings': {
            osf_storage_settings.WATERBUTLER_RESOURCE: version.location[osf_storage_settings.WATERBUTLER_RESOURCE],
        },
    }
@must_have_permission(WRITE)
@decorators.autoload_filenode(must_be='file')
def osfstorage_add_tag(file_node, **kwargs):
    """Attach the tag named in the JSON body to ``file_node``."""
    body = request.get_json()
    added = file_node.add_tag(body['tag'], kwargs['auth'])
    if added:
        return {'status': 'success'}, http_status.HTTP_200_OK
    return {'status': 'failure'}, http_status.HTTP_400_BAD_REQUEST
@must_have_permission(WRITE)
@decorators.autoload_filenode(must_be='file')
def osfstorage_remove_tag(file_node, **kwargs):
    """Remove the tag named in the JSON body from ``file_node``."""
    body = request.get_json()
    try:
        file_node.remove_tag(body['tag'], kwargs['auth'])
    except TagNotFoundError:
        # Tag exists as a concept but is not on this file.
        return {'status': 'failure'}, http_status.HTTP_409_CONFLICT
    except InvalidTagError:
        return {'status': 'failure'}, http_status.HTTP_400_BAD_REQUEST
    return {'status': 'success'}, http_status.HTTP_200_OK
@must_be_logged_in
def update_region(auth, **kwargs):
    """Set the authenticated user's default osfstorage region.

    Expects a JSON body containing ``region_id``; raises 400 when the key is
    missing and 404 when no region with that id exists.
    """
    user = auth.user
    user_settings = user.get_addon('osfstorage')

    data = request.get_json()
    try:
        region_id = data['region_id']
    except KeyError:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)

    try:
        user_settings.set_region(region_id)
    except ValueError:
        # Use the named status constant for consistency with the rest of this
        # module (was a bare 404 literal).
        raise HTTPError(http_status.HTTP_404_NOT_FOUND, data=dict(
            message_short='Region not found',
            message_long='A storage region with this id does not exist'))
    return {'message': 'User region updated.'}
| |
#!/usr/bin/env python3
"""Simple tool to work with protobuf in pyatv."""
import argparse
import binascii
from collections import namedtuple
import difflib
import glob
from io import BytesIO
import os
import re
import stat
import subprocess
import sys
import zipfile
import cryptography
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
from google.protobuf.text_format import MessageToString
import requests
# New messages re-using inner message of another type
REUSED_MESSAGES = {"DEVICE_INFO_MESSAGE": "DEVICE_INFO_UPDATE_MESSAGE"}
BASE_PATH = os.path.join("pyatv", "protocols", "mrp", "protobuf")
OUTPUT_TEMPLATE = """\"\"\"Simplified extension handling for protobuf messages.
THIS CODE IS AUTO-GENERATED - DO NOT EDIT!!!
\"\"\"
from .ProtocolMessage_pb2 import ProtocolMessage
{packages}
{messages}
_EXTENSION_LOOKUP = {{
{extensions}
}}
{constants}
def _inner_message(self):
extension = _EXTENSION_LOOKUP.get(self.type, None)
if extension:
return self.Extensions[extension]
raise Exception('unknown type: ' + str(self.type))
ProtocolMessage.inner = _inner_message # type: ignore
"""
MessageInfo = namedtuple("MessageInfo", ["module", "title", "accessor", "const"])
def _protobuf_url(version):
base_url = (
"https://github.com/protocolbuffers/protobuf/"
+ "releases/download/v{version}/protoc-{version}-{platform}.zip"
)
platforms = {
"linux": "linux-x86_64",
"darwin": "osx-x86_64",
"win32": "win64",
}
platform = platforms.get(sys.platform)
if not platform:
print("Unsupported platform: " + sys.platform, file=sys.stderr)
sys.exit(1)
return base_url.format(version=version, platform=platform)
def _get_protobuf_version():
with open("base_versions.txt", encoding="utf-8") as file:
for line in file:
match = re.match(r"protobuf==(\d+\.\d+\.\d+)[^0-9,]*", line)
if match:
return match.group(1)
raise Exception("failed to determine protobuf version")
def _download_protoc(force=False):
    """Download the protoc release matching base_versions.txt into bin/.

    No-op if the binary is already present unless *force* is True.
    """
    if os.path.exists(protoc_path()) and not force:
        print("Not downloading protoc (already exists)")
        return

    version = _get_protobuf_version()
    url = _protobuf_url(version)
    print("Downloading", url)
    resp = requests.get(url)

    # Extract only the protoc binary from the release zip, renaming it to
    # include the version so it matches protoc_path().
    with zipfile.ZipFile(BytesIO(resp.content)) as zip_file:
        for zip_info in zip_file.infolist():
            if zip_info.filename.startswith("bin/protoc"):
                print("Extracting", zip_info.filename)
                basename, extension = os.path.splitext(zip_info.filename)
                zip_info.filename = f"{basename}-{version}{extension}"
                zip_file.extract(zip_info)
                break

    if not os.path.exists(protoc_path()):
        print(protoc_path(), "was not downloaded correctly", file=sys.stderr)
        sys.exit(1)

    # Zip extraction does not preserve permissions; make the binary executable.
    file_stat = os.stat(protoc_path())
    os.chmod(protoc_path(), file_stat.st_mode | stat.S_IEXEC)
def _verify_protoc_version():
    """Exit unless the installed protoc matches the pinned protobuf version."""
    expected_version = _get_protobuf_version()
    try:
        ret = subprocess.run(
            [protoc_path(), "--version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            check=False,
        )
        # Output looks like "libprotoc X.Y.Z".
        installed_version = ret.stdout.decode("utf-8").split(" ")[1].rstrip()
        if installed_version != expected_version:
            print(
                "Expected protobuf",
                expected_version,
                "but found",
                installed_version,
                file=sys.stderr,
            )
            sys.exit(1)
    except FileNotFoundError:
        # Typo fix in the user-facing message: "Protbuf" -> "Protobuf".
        print(
            "Protobuf compiler (protoc) not found. Re-run with --download",
            file=sys.stderr,
        )
        sys.exit(1)
    else:
        print(f"Using protobuf version {expected_version}")
def protoc_path():
    """Return path to protoc binary."""
    suffix = ".exe" if sys.platform == "win32" else ""
    binary = f"protoc-{_get_protobuf_version()}{suffix}"
    return os.path.join("bin", binary)
def extract_message_info():
    """Get information about all messages of interest.

    Parses the ``enum Type`` block in ProtocolMessage.proto and yields a
    MessageInfo for every constant that has a matching .proto file.
    """
    filename = os.path.join(BASE_PATH, "ProtocolMessage.proto")
    with open(filename, encoding="utf-8", mode="r") as file:
        types_found = False
        for line in file:
            stripped = line.lstrip().rstrip()

            # Look for the Type enum
            if stripped == "enum Type {":
                types_found = True
                continue
            # Closing brace ends the enum; stop scanning.
            if types_found and stripped == "}":
                break
            if not types_found:
                continue

            constant = stripped.split(" ")[0]
            # SOME_MESSAGE -> SomeMessage, keeping "HID" upper-case.
            title = constant.title().replace("_", "").replace("Hid", "HID")  # Hack...
            accessor = title[0].lower() + title[1:]

            # Skip enum constants without a corresponding .proto definition.
            if not os.path.exists(os.path.join(BASE_PATH, title + ".proto")):
                continue

            yield MessageInfo(title + "_pb2", title, accessor, constant)
def extract_unreferenced_messages():
    """Get messages not referenced anywhere."""
    for filename in os.listdir(BASE_PATH):
        stem, ext = os.path.splitext(filename)
        # Only stand-alone .proto files, excluding the root message itself.
        if ext != ".proto" or stem == "ProtocolMessage":
            continue
        with open(os.path.join(BASE_PATH, filename), encoding="utf-8") as file:
            for line in file:
                if line.startswith("message"):
                    yield stem + "_pb2", line.split(" ")[1]
def generate_module_code():
    """Generate protobuf message wrappercode."""
    message_names = set()  # Titles already handled (avoid duplicate imports)
    packages = []          # "from . import X_pb2" lines
    messages = []          # "from .X_pb2 import X" lines
    extensions = []        # ProtocolMessage type -> extension accessor entries
    constants = []         # Re-exported type constants

    # Extract everything needed to generate output file
    for info in extract_message_info():
        message_names.add(info.title)
        packages.append("from . import " + info.module)
        messages.append(f"from .{info.module} import {info.title}")
        extensions.append(
            f"ProtocolMessage.{info.const}: {info.module}.{info.accessor},"
        )
        constants.append(f"{info.const} = ProtocolMessage.{info.const}")

        # Some types re-use the extension accessor of another message type.
        reused = REUSED_MESSAGES.get(info.const)
        if reused:
            extensions.append(
                f"ProtocolMessage.{reused}: {info.module}.{info.accessor},"
            )
            constants.append(f"{reused} = ProtocolMessage.{reused}")

    # Look for remaining messages
    for module_name, message_name in extract_unreferenced_messages():
        if message_name not in message_names:
            message_names.add(message_name)
            messages.append(f"from .{module_name} import {message_name}")

    return OUTPUT_TEMPLATE.format(
        packages="\n".join(sorted(packages)),
        messages="\n".join(sorted(messages)),
        extensions="\n ".join(sorted(extensions)),
        constants="\n".join(sorted(constants)),
    )
def update_auto_generated_code():
    """Generate and update auto-generated wrapper code."""
    # Compile every .proto file to _pb2 modules (plus mypy stubs).
    proto_files = glob.glob(os.path.join(BASE_PATH, "*.proto"))
    subprocess.run(
        [protoc_path(), "--proto_path=.", "--python_out=.", "--mypy_out=."]
        + proto_files,
        check=False,
    )

    # Regenerate the wrapper __init__.py from the compiled messages.
    module_code = generate_module_code()
    with open(os.path.join(BASE_PATH, "__init__.py"), encoding="utf-8", mode="w") as f:
        f.write(module_code)
    return 0
def verify_generated_code():
    """Verify that generated code is up-to-date.

    Returns 0 when the on-disk __init__.py matches what generate_module_code()
    would produce, 1 (with a printed diff) otherwise.
    """
    generated_code = generate_module_code().splitlines(True)
    with open(os.path.join(BASE_PATH, "__init__.py"), encoding="utf-8", mode="r") as f:
        actual = f.readlines()

    # Diff what is on disk against what would be generated right now.
    diff = list(
        difflib.unified_diff(
            actual, generated_code, fromfile="current", tofile="updated"
        )
    )
    if diff:
        print("Generated code is NOT up-to-date!", file=sys.stderr)
        print(15 * "*", file=sys.stderr)
        print("".join(diff), file=sys.stderr)
        print(15 * "*", file=sys.stderr)
        print("Re-run with generate to update code.", file=sys.stderr)
        return 1

    print("Generated code is up-to-date!")
    return 0
def _print_single_message(data, unknown_fields):
    """Parse *data* as a ProtocolMessage and print it in text format."""
    # Import here to allow other parts of script, e.g. message generation to run
    # without having pyatv installed
    # pylint: disable=import-outside-toplevel
    from pyatv.protocols.mrp.protobuf import ProtocolMessage

    # pylint: enable=import-outside-toplevel
    parsed = ProtocolMessage()
    parsed.ParseFromString(data)

    # The print_unknown_fields is only available in newer versions of protobuf
    # (from 3.8 or so). This script is generally only run with newer versions than
    # that, so we can disable pylint here.
    output = MessageToString(  # pylint: disable=unexpected-keyword-arg
        parsed, print_unknown_fields=unknown_fields
    )
    print(output)
def decode_and_print_message(args):
    """Decode and print protobuf messages."""
    # Import here to allow other parts of script, e.g. message generation to run
    # without having pyatv installed
    from pyatv.support import variant  # pylint: disable=import-outside-toplevel

    buf = binascii.unhexlify(args.message)
    if not args.stream:
        # A single message has no length prefix; synthesize one so the
        # stream-decoding loop below handles both cases uniformly.
        buf = variant.write_variant(len(buf)) + buf

    # Each iteration consumes one varint-prefixed message from the buffer.
    while buf:
        length, raw = variant.read_variant(buf)
        data = raw[:length]
        buf = raw[length:]
        _print_single_message(data, args.unknown_fields)
    return 0
def _decrypt_chacha20poly1305(data, nounce, key):
    """Decrypt data with specified key and nounce.

    Prints the decoded message and returns True on success; returns False when
    the key/nonce pair fails authentication.
    """
    data = binascii.unhexlify(data)
    input_key = binascii.unhexlify(key)
    # 12-byte nonce: four zero bytes followed by the counter as a
    # little-endian 64-bit integer.
    input_nonce = b"\x00\x00\x00\x00" + nounce.to_bytes(length=8, byteorder="little")
    chacha = ChaCha20Poly1305(input_key)
    try:
        print(f"Trying key {input_key} with nounce {input_nonce}")
        decrypted_data = chacha.decrypt(input_nonce, data, None)
        print(
            "Data decrypted!"
            f"\n - Nonce : {binascii.hexlify(input_nonce).decode()}"
            f"\n - Key : {binascii.hexlify(input_key).decode()}"
            f"\n - Data : {binascii.hexlify(decrypted_data).decode()}\n"
        )
        _print_single_message(decrypted_data, True)
        return True
    except cryptography.exceptions.InvalidTag:
        # Wrong key/nonce combination; caller will try the next candidate.
        pass
    return False
def decrypt_and_print_message(args):
    """Brute-force decrypt a message over all key/nonce candidates."""
    # Stop at the first key/nonce combination that authenticates.
    for key in args.keys:
        for counter in range(args.nounce_lower, args.nounce_upper):
            decrypted = _decrypt_chacha20poly1305(args.message, counter, key)
            if decrypted:
                return 0
    return 1
def main():  # pylint: disable=too-many-return-statements
    """Script starts here."""
    # Relative paths used throughout (bin/, pyatv/...) assume the repo root
    # as working directory.
    if not os.path.exists(".git"):
        print("Run this script from repo root", file=sys.stderr)
        return 1

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d",
        "--download",
        action="store_true",
        help="download protobuf compiler",
    )
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="force download if already downloaded",
    )

    subparsers = parser.add_subparsers(help="sub commands", dest="command")
    subparsers.add_parser("generate", help="generate protobuf wrapper")
    subparsers.add_parser("verify", help="verify wrapper is up-to-date")

    decode = subparsers.add_parser("decode", help="decode protobuf message(s)")
    decode.add_argument("message", help="message in hex to decode")
    decode.add_argument(
        "-u",
        "--unknown-fields",
        action="store_true",
        help="include unknown fields",
    )
    decode.add_argument(
        "-s",
        "--stream",
        action="store_true",
        help="decode protocol stream of messages",
    )

    decrypt = subparsers.add_parser("decrypt", help="decrypt protobuf message")
    decrypt.add_argument("message", help="message in hex to decrypt")
    decrypt.add_argument("keys", nargs="+", help="keys to decrypt with")
    decrypt.add_argument(
        "-l",
        "--nounce-lower",
        type=int,
        default=0,
        help="start value for nounce",
    )
    decrypt.add_argument(
        "-u",
        "--nounce-upper",
        type=int,
        default=128,
        help="upper value for nounce",
    )

    args = parser.parse_args()
    if not args.command:
        parser.error("No command specified")
        return 1

    # generate/verify need a working protoc; decode/decrypt do not.
    if args.command == "generate":
        if args.download:
            _download_protoc(args.force)
        _verify_protoc_version()
        return update_auto_generated_code()
    if args.command == "verify":
        if args.download:
            _download_protoc(args.force)
        _verify_protoc_version()
        return verify_generated_code()
    if args.command == "decode":
        return decode_and_print_message(args)
    if args.command == "decrypt":
        return decrypt_and_print_message(args)
    return 1
if __name__ == "__main__":
sys.exit(main())
| |
"""Stops an Internal LB vm."""
from baseCmd import *
from baseResponse import *
class stopInternalLoadBalancerVMCmd (baseCmd):
    """Auto-generated API command wrapper: stops an Internal LB vm."""
    typeInfo = {}

    def __init__(self):
        # Executes asynchronously on the management server.
        self.isAsync = "true"
        """the ID of the internal lb vm"""
        """Required"""
        self.id = None
        self.typeInfo['id'] = 'uuid'
        """Force stop the VM. The caller knows the VM is stopped."""
        self.forced = None
        self.typeInfo['forced'] = 'boolean'
        self.required = ["id", ]
class stopInternalLoadBalancerVMResponse (baseResponse):
    """Auto-generated API response: attributes of the stopped internal LB router."""
    typeInfo = {}

    def __init__(self):
        """the id of the router"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """the account associated with the router"""
        self.account = None
        self.typeInfo['account'] = 'string'
        """the date and time the router was created"""
        self.created = None
        self.typeInfo['created'] = 'date'
        """the first DNS for the router"""
        self.dns1 = None
        self.typeInfo['dns1'] = 'string'
        """the second DNS for the router"""
        self.dns2 = None
        self.typeInfo['dns2'] = 'string'
        """the domain associated with the router"""
        self.domain = None
        self.typeInfo['domain'] = 'string'
        """the domain ID associated with the router"""
        self.domainid = None
        self.typeInfo['domainid'] = 'string'
        """the gateway for the router"""
        self.gateway = None
        self.typeInfo['gateway'] = 'string'
        """the guest IP address for the router"""
        self.guestipaddress = None
        self.typeInfo['guestipaddress'] = 'string'
        """the guest MAC address for the router"""
        self.guestmacaddress = None
        self.typeInfo['guestmacaddress'] = 'string'
        """the guest netmask for the router"""
        self.guestnetmask = None
        self.typeInfo['guestnetmask'] = 'string'
        """the ID of the corresponding guest network"""
        self.guestnetworkid = None
        self.typeInfo['guestnetworkid'] = 'string'
        """the name of the corresponding guest network"""
        self.guestnetworkname = None
        self.typeInfo['guestnetworkname'] = 'string'
        """the host ID for the router"""
        self.hostid = None
        self.typeInfo['hostid'] = 'string'
        """the hostname for the router"""
        self.hostname = None
        self.typeInfo['hostname'] = 'string'
        """the hypervisor on which the template runs"""
        self.hypervisor = None
        self.typeInfo['hypervisor'] = 'string'
        """the first IPv6 DNS for the router"""
        self.ip6dns1 = None
        self.typeInfo['ip6dns1'] = 'string'
        """the second IPv6 DNS for the router"""
        self.ip6dns2 = None
        self.typeInfo['ip6dns2'] = 'string'
        """if this router is an redundant virtual router"""
        self.isredundantrouter = None
        self.typeInfo['isredundantrouter'] = 'boolean'
        """the link local IP address for the router"""
        self.linklocalip = None
        self.typeInfo['linklocalip'] = 'string'
        """the link local MAC address for the router"""
        self.linklocalmacaddress = None
        self.typeInfo['linklocalmacaddress'] = 'string'
        """the link local netmask for the router"""
        self.linklocalnetmask = None
        self.typeInfo['linklocalnetmask'] = 'string'
        """the ID of the corresponding link local network"""
        self.linklocalnetworkid = None
        self.typeInfo['linklocalnetworkid'] = 'string'
        """the name of the router"""
        self.name = None
        self.typeInfo['name'] = 'string'
        """the network domain for the router"""
        self.networkdomain = None
        self.typeInfo['networkdomain'] = 'string'
        """the Pod ID for the router"""
        self.podid = None
        self.typeInfo['podid'] = 'string'
        """the project name of the address"""
        self.project = None
        self.typeInfo['project'] = 'string'
        """the project id of the ipaddress"""
        self.projectid = None
        self.typeInfo['projectid'] = 'string'
        """the public IP address for the router"""
        self.publicip = None
        self.typeInfo['publicip'] = 'string'
        """the public MAC address for the router"""
        self.publicmacaddress = None
        self.typeInfo['publicmacaddress'] = 'string'
        """the public netmask for the router"""
        self.publicnetmask = None
        self.typeInfo['publicnetmask'] = 'string'
        """the ID of the corresponding public network"""
        self.publicnetworkid = None
        self.typeInfo['publicnetworkid'] = 'string'
        """the state of redundant virtual router"""
        self.redundantstate = None
        self.typeInfo['redundantstate'] = 'string'
        """true if the router template requires upgrader"""
        self.requiresupgrade = None
        self.typeInfo['requiresupgrade'] = 'boolean'
        """role of the domain router"""
        self.role = None
        self.typeInfo['role'] = 'string'
        """the version of scripts"""
        self.scriptsversion = None
        self.typeInfo['scriptsversion'] = 'string'
        """the ID of the service offering of the virtual machine"""
        self.serviceofferingid = None
        self.typeInfo['serviceofferingid'] = 'string'
        """the name of the service offering of the virtual machine"""
        self.serviceofferingname = None
        self.typeInfo['serviceofferingname'] = 'string'
        """the state of the router"""
        self.state = None
        self.typeInfo['state'] = 'state'
        """the template ID for the router"""
        self.templateid = None
        self.typeInfo['templateid'] = 'string'
        """the version of template"""
        self.version = None
        self.typeInfo['version'] = 'string'
        """VPC the router belongs to"""
        self.vpcid = None
        self.typeInfo['vpcid'] = 'string'
        """the name of VPC the router belongs to"""
        self.vpcname = None
        self.typeInfo['vpcname'] = 'string'
        """the Zone ID for the router"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'string'
        """the Zone name for the router"""
        self.zonename = None
        self.typeInfo['zonename'] = 'string'
        """the list of nics associated with the router"""
        self.nic = []
class nic:
    """Auto-generated response sub-entity describing one router NIC."""
    def __init__(self):
        """"the ID of the nic"""
        self.id = None
        """"the broadcast uri of the nic"""
        self.broadcasturi = None
        """"device id for the network when plugged into the virtual machine"""
        self.deviceid = None
        """"the gateway of the nic"""
        self.gateway = None
        """"the IPv6 address of network"""
        self.ip6address = None
        """"the cidr of IPv6 network"""
        self.ip6cidr = None
        """"the gateway of IPv6 network"""
        self.ip6gateway = None
        """"the ip address of the nic"""
        self.ipaddress = None
        """"true if nic is default, false otherwise"""
        self.isdefault = None
        """"the isolation uri of the nic"""
        self.isolationuri = None
        """"true if nic is default, false otherwise"""
        self.macaddress = None
        """"the netmask of the nic"""
        self.netmask = None
        """"the ID of the corresponding network"""
        self.networkid = None
        """"the name of the corresponding network"""
        self.networkname = None
        """"the Secondary ipv4 addr of nic"""
        self.secondaryip = None
        """"the traffic type of the nic"""
        self.traffictype = None
        """"the type of the nic"""
        self.type = None
        """"Id of the vm to which the nic belongs"""
        self.virtualmachineid = None
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators."""
# pylint: disable=g-bad-name
import threading
# Used by py_util.cc to get tracebacks.
import traceback # pylint: disable=unused-import
import weakref
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape as tape_lib
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.lib.core import _pywrap_py_func
from tensorflow.python.ops import gen_script_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily rather than imported at the top of the file — presumably to
# avoid an import cycle with the autograph implementation; confirm before
# converting to a regular import.
autograph = lazy_loader.LazyLoader(
    "autograph", globals(),
    "tensorflow.python.autograph.impl.api")

# Map from EagerPyFunc token to tuple (tape, eager args, eager outputs);
# used for differentiation.
# Entries are stashed by EagerFunc.__call__ and consumed (popped) by the
# EagerPyFunc gradient function registered below.
tape_cache = {}
def _maybe_copy_to_context_device(tensor, device_name):
"""Copy an EagerTensor to the current device if it's not on `device_name`."""
in_device = tensor.backing_device
if device_name == in_device:
return tensor
else:
# Note that EagerTensor._copy bypasses the placer and copies to the context
# device, which means e.g. int32 Tensors which would normally be forced onto
# the CPU can instead be placed on the GPU. This is necessary so that the
# PyFunc kernel always returns Tensors on the device it's executing on.
return tensor._copy() # pylint: disable=protected-access
class EagerFunc:
  """A wrapper for a function owned by an EagerPyFunc."""

  def __init__(self, func, Tout, is_grad_func):
    """Constructs an EagerFunc.

    Args:
      func: The function to wrap.
      Tout: A list of datatypes for the output; an empty list if the output is
        None.
      is_grad_func: Whether this EagerFunc is the gradient of another
        EagerPyFunc.
    """
    self._func = func
    self._out_dtypes = Tout
    self._is_grad_func = is_grad_func
    # Flipped on by set_support_graph_mode_gradient() when a graph-mode
    # gradient is requested; forces tape recording in __call__.
    self._support_graph_mode_gradient = False

  def set_support_graph_mode_gradient(self):
    """Indicates the object shall support gradient ops.

    This function is internally used by _EagerPyFuncGrad to support
    graph mode gradient of EagerFunc via tf.gradient().
    """
    self._support_graph_mode_gradient = True

  def _convert(self, value, dtype):
    """Converts `value` to a tensor of type `dtype`, with error checking.

    Args:
      value: The tensor to convert.
      dtype: The desired dtype.

    Returns:
      A tensor of type `dtype`, or a zeros tensor if value is None and
      this function is in fact a gradient function.

    Raises:
      RuntimeError: if `value` is a variable.
    """
    if isinstance(value, resource_variable_ops.ResourceVariable):
      raise RuntimeError(
          "Attempting to return a variable from an eagerly executed py_func. "
          "Only numeric data structures like Tensors or NumPy arrays should "
          "be returned; to return the value of a variable, make sure to obtain "
          "the Tensor backing it by calling `.read_value()` on the variable in "
          f"question: {value}")
    if value is None and self._is_grad_func:
      # Gradient functions may legitimately return a list that contains
      # both Tensors and Python Nones. Unfortunately this breaks the
      # OpKernel, so for now we replace None objects with zeros, which is
      # mathematically correct but will prevent short-circuiting gradient
      # computations.
      #
      # TODO(akshayka): Make it possible to return a list of both Tensors and
      # Nones from an EagerPyFunc.
      return constant_op.constant(0.0, dtype=dtype)
    return ops.convert_to_tensor(value, dtype=dtype)

  def __call__(self, device, token, args):
    """Calls `self._func` in eager mode, recording the tape if needed."""
    # Record on a tape when a graph-mode gradient was explicitly requested or
    # an ambient tape might be watching (eager backprop).
    use_tape_cache = (
        self._support_graph_mode_gradient or tape_lib.could_possibly_record())
    if use_tape_cache:
      with backprop.GradientTape() as tape:
        # Watch every trainable tensor in the (possibly nested) arguments so
        # the stashed tape can later produce gradients w.r.t. all of them.
        for tensor in args:
          for t in nest.flatten(tensor):
            if backprop_util.IsTrainable(t):
              tape.watch(t)
        outputs = self._call(device, args)
      # Stash (tape, inputs, outputs) keyed by the op token; consumed by the
      # EagerPyFunc gradient function.
      tape_cache[compat.as_bytes(token)] = (tape, args, outputs)
    else:
      outputs = self._call(device, args)
    return outputs

  def _call(self, device, args):
    """Passes `args` to `self._func`, which is executed eagerly."""
    with context.eager_mode():
      ret = self._func(*args)
      # copy the returned tensors to the PyFunc op's device if necessary.
      device_name = device
      if device_name is None:
        # "None" here means "CPU", from the nullptr convention with C++ device
        # pointers.
        device_name = "/job:localhost/replica:0/task:0/device:CPU:0"
      # NOTE: the placement scope uses the original `device` (possibly None)
      # while copies target the resolved `device_name` above.
      with ops.device(device):
        if isinstance(ret, (tuple, list)):
          outputs = [
              _maybe_copy_to_context_device(self._convert(x, dtype=dtype),
                                            device_name)
              for (x, dtype) in zip(ret, self._out_dtypes)
          ]
        elif ret is None:
          outputs = None
        else:
          outputs = _maybe_copy_to_context_device(
              self._convert(ret, dtype=self._out_dtypes[0]), device_name)
      return outputs
class FuncRegistry:
  """A helper class to keep track of registered py functions.

  FuncRegistry keeps a map from unique tokens (string) to python
  functions, which takes numpy arrays and outputs numpy arrays.
  """

  def __init__(self):
    self._lock = threading.Lock()
    self._unique_id = 0  # GUARDED_BY(self._lock)
    # Only store weakrefs to the functions. The strong reference is stored in
    # the graph.
    self._funcs = weakref.WeakValueDictionary()

  @property
  def _ctx(self):
    # N.B. This is needed to support calling py_func with GPU tensors,
    # which must be transferred to CPU if used in any of the NumPy APIs.
    context.ensure_initialized()
    return context.context()._handle  # pylint: disable=protected-access

  def insert(self, func):
    """Registers `func` and returns a unique token for this entry."""
    token = self._next_unique_token()
    # Store a weakref to the function
    self._funcs[token] = func
    return token

  def remove(self, token):
    """Removes the registered function corresponding to `token`."""
    # pop with a default: removing an unknown or already-collected token is a
    # no-op instead of a KeyError.
    self._funcs.pop(token, None)

  def get(self, token, default=None):
    """Gets the registered function corresponding to `token`."""
    return self._funcs.get(token, default)

  @staticmethod
  def _convert(value, dtype=None):
    """Converts an arg to numpy, avoiding dangerous string and unicode dtypes.

    Numpy pads with zeros when using string and unicode dtypes if different
    components of a tensor have different lengths. This is bad: ignoring the
    padding is wrong for text data, and removing the padding is wrong for binary
    data. To avoid this bug, we redo the conversion using an object dtype.
    Additionally, we convert unicode strings to (byte-)strings for
    compatibility.

    Args:
      value: Value to convert to a numpy array.
      dtype: (Optional.) Desired NumPy type for the returned value.

    Returns:
      A numpy array.
    """
    result = np.asarray(value, dtype=dtype, order="C")
    # dtype.char "S" = byte strings, "U" = unicode. `result is not value`
    # means asarray actually performed a conversion (so zero-padding may have
    # been introduced); redo it with object dtype to preserve exact lengths.
    if result.dtype.char == "S" and result is not value:
      return np.asarray(value, order="C", dtype=object)
    elif result.dtype.char == "U" and result is not value:
      value = np.vectorize(lambda x: x.encode("utf8"))(value)
      return np.asarray(value, order="C", dtype=object)
    elif result.dtype.char == "U":
      # `value` was already a unicode ndarray: encode elementwise to bytes.
      return result.astype(np.bytes_)
    else:
      return result

  def __call__(self, token, device, args):
    """Calls the registered function for `token` with args.

    Args:
      token: A key into this `FuncRegistry` identifying which function to call.
      device: Name of the device on which outputs of `token`'s corresponding
        operation should be placed. Used iff the function registered for
        `token` is an EagerPyFunc.
      args: The arguments to pass to the function registered for `token`.

    Returns:
      The output of the function registered for `token`.

    Raises:
      ValueError: if no function is registered for `token`.
    """
    func = self.get(token, None)
    if func is None:
      raise ValueError(f"Could not find callback with key={token} in the "
                       "registry.")
    if isinstance(func, EagerFunc):
      # NB: Different invocations of the same py_func will share the same
      # token, and the entries they stash in the tape_cache will collide.
      # In practice, when executing a graph, this should only happen if
      # the py_func is in a while_loop whose iterations are run in parallel
      # or if the graph is being driven by concurrent session.run() calls.
      #
      # TODO(akshayka): Key the tape cache in a thread-safe way.
      return func(device, token, args)
    else:
      ret = func(*args)
      # Strings seem to lead to a memory leak here if they're not wrapped in a
      # list.
      if isinstance(ret, bytes):
        ret = [ret]
      # Ensures that we return either a single numpy array or a list of numpy
      # arrays.
      if isinstance(ret, (tuple, list)):
        return [self._convert(x) for x in ret]
      else:
        return self._convert(ret)

  def size(self):
    """Returns how many functions are currently registered."""
    return len(self._funcs)

  def _next_unique_token(self):
    """Returns a unique token."""
    # Lock only around the counter bump; formatting happens outside the lock.
    with self._lock:
      uid = self._unique_id
      self._unique_id += 1
    return "pyfunc_%d" % uid
# Global registry for py functions.
_py_funcs = FuncRegistry()

# Hand the registry to the native trampoline so PyFunc kernels implemented in
# C++ can look up and invoke registered Python callables by token.
_pywrap_py_func.initialize_py_trampoline(_py_funcs)
def _internal_py_func(func,
                      inp,
                      Tout,
                      stateful=None,
                      use_eager_py_func=False,
                      is_grad_func=False,
                      name=None):
  """See documentation for py_func and eager_py_func.

  Shared implementation: validates and normalizes the inputs, registers
  `func` in the global registry, ties the registration's lifetime to the
  outermost graph, and emits the appropriate PyFunc op.

  NOTE(review): `stateful` defaults to None, which falls through to the
  *stateless* kernel in the non-eager branch below — callers are expected to
  pass it explicitly for graph-mode py_func; confirm before relying on the
  default.
  """
  if not callable(func):
    raise ValueError(
        f"Expected func to be callable. Received func={func} of type "
        f"{type(func)}.")
  original_func = func
  func = autograph.do_not_convert(func)
  inp = list(inp)
  # Normalize Tout.
  is_list_or_tuple = isinstance(Tout, (list, tuple))
  Tout = Tout if is_list_or_tuple else [Tout]
  Tout = [_as_dtype_or_type_spec(t) for t in Tout]
  # Check if we need to handle CompositeTensor inputs or outputs.
  handle_composite_tensors = (
      use_eager_py_func and
      (any(isinstance(v, composite_tensor.CompositeTensor) for v in inp) or
       any(isinstance(t, type_spec.TypeSpec) for t in Tout)))
  if handle_composite_tensors:
    func, inp, Tout, out_structure = _wrap_for_composites(func, inp, Tout)
  if use_eager_py_func:
    func = EagerFunc(func, Tout, is_grad_func)
  # Tying the registered function's lifetime with the current default graph is
  # not reliable. For example, Estimator-based binaries may switch graphs in
  # between model training and evaluation, via saved_model. Those binaries work
  # because the original function is global, and break once the registered
  # function is an anonymous lambda, like the one produced by do_not_convert.
  # To avoid breaking those cases, we attach the wrapper to the original
  # function so that their lifetime is connected.
  # TODO(b/144286616): Remove this.
  if tf_inspect.isfunction(original_func):
    # Note: this check is needed because original_func may be a descriptor
    # (https://docs.python.org/3/howto/descriptor.html)
    # and we can't attach attributes to those.
    original_func.ag_dnc_wrapper__ = func
  token = _py_funcs.insert(func)
  # We tie the registered function's lifetime with the current default graph,
  # i.e., when the current graph is destroyed, we remove its py funcs.
  graph = ops.get_default_graph()
  # Walk out of any nested FuncGraphs to the outermost graph, which owns the
  # registered functions' lifetimes.
  while True:
    current_graph = graph
    if isinstance(graph, function._FuncGraph):  # pylint: disable=protected-access
      graph = graph._outer_graph  # pylint: disable=protected-access
    elif isinstance(graph, func_graph.FuncGraph):
      graph = graph.outer_graph
    if graph is current_graph:
      break
  # TODO(zhifengc): Consider adding a Graph method to collect
  # `cleanup` objects in one of its member.
  if not hasattr(graph, "_py_funcs_used_in_graph"):
    graph._py_funcs_used_in_graph = []  # pylint: disable=protected-access
  # Store a reference to the function in the graph to ensure it stays alive
  # as long as the graph lives. When the graph is destroyed, the function
  # is left to the garbage collector for destruction as well.
  graph._py_funcs_used_in_graph.append(func)  # pylint: disable=protected-access
  if use_eager_py_func:
    result = gen_script_ops.eager_py_func(
        input=inp,
        token=token,
        is_async=context.is_async(),
        Tout=Tout,
        name=name)
  else:
    if stateful:
      result = gen_script_ops.py_func(
          input=inp, token=token, Tout=Tout, name=name)
    else:
      result = gen_script_ops.py_func_stateless(
          input=inp, token=token, Tout=Tout, name=name)
  if handle_composite_tensors and Tout:
    # Re-pack flat outputs into the composite structure requested by Tout.
    result = nest.pack_sequence_as(
        out_structure, result, expand_composites=True)
  # Undo the Tout normalization: single-dtype callers get a single result.
  return result if is_list_or_tuple else result[0]
# TODO(akshayka): Implement higher-order derivatives.
@ops.RegisterGradient("EagerPyFunc")
def _EagerPyFuncGrad(op, *dy):
  """Computes the gradient of an EagerPyFunc."""
  token = op.get_attr("token")

  def eagerly_executed_grad(*dy):
    # The (tape, inputs, outputs) triple was stashed by EagerFunc.__call__
    # under this op's token; pop so each entry is consumed exactly once.
    tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
    return tape.gradient(eager_outputs, eager_inputs, output_gradients=dy)

  with ops.control_dependencies(op.outputs):
    # The gradient is itself an eager py_func, marked is_grad_func so Nones
    # returned by the tape are replaced with zeros.
    gradient_op = _internal_py_func(
        func=eagerly_executed_grad,
        inp=dy,
        Tout=[tensor.dtype for tensor in op.inputs],
        use_eager_py_func=True,
        is_grad_func=True)
    if not context.executing_eagerly():
      # In graph mode, we find the func object from its token and
      # notify the eager func object it needs to support the gradients.
      func = _py_funcs.get(token.decode())
      assert isinstance(func, EagerFunc), (
          f"EagerPyFuncGrad called on a non-EagerFunc object: {func}.")
      func.set_support_graph_mode_gradient()
    return gradient_op
@tf_export("py_function")
@dispatch.add_dispatch_support
def eager_py_func(func, inp, Tout, name=None):
  """Wraps a python function into a TensorFlow op that executes it eagerly.

  This function allows expressing computations in a TensorFlow graph as
  Python functions. In particular, it wraps a Python function `func`
  in a once-differentiable TensorFlow operation that executes it with eager
  execution enabled. As a consequence, `tf.py_function` makes it
  possible to express control flow using Python constructs (`if`, `while`,
  `for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`,
  `tf.while_loop`). For example, you might use `tf.py_function` to
  implement the log huber function:

  ```python
  def log_huber(x, m):
    if tf.abs(x) <= m:
      return x**2
    else:
      return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))

  x = tf.constant(1.0)
  m = tf.constant(2.0)

  with tf.GradientTape() as t:
    t.watch([x, m])
    y = tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32)

  dy_dx = t.gradient(y, x)
  assert dy_dx.numpy() == 2.0
  ```

  You can also use `tf.py_function` to debug your models at runtime
  using Python tools, i.e., you can isolate portions of your code that
  you want to debug, wrap them in Python functions and insert `pdb` tracepoints
  or print statements as desired, and wrap those functions in
  `tf.py_function`.

  For more information on eager execution, see the
  [Eager guide](https://tensorflow.org/guide/eager).

  `tf.py_function` is similar in spirit to `tf.compat.v1.py_func`, but unlike
  the latter, the former lets you use TensorFlow operations in the wrapped
  Python function. In particular, while `tf.compat.v1.py_func` only runs on CPUs
  and wraps functions that take NumPy arrays as inputs and return NumPy arrays
  as outputs, `tf.py_function` can be placed on GPUs and wraps functions
  that take Tensors as inputs, execute TensorFlow operations in their bodies,
  and return Tensors as outputs.

  Note: We recommend to avoid using `tf.py_function` outside of prototyping
  and experimentation due to the following known limitations:

  * Calling `tf.py_function` will acquire the Python Global Interpreter Lock
    (GIL) that allows only one thread to run at any point in time. This will
    preclude efficient parallelization and distribution of the execution of the
    program.

  * The body of the function (i.e. `func`) will not be serialized in a
    `GraphDef`. Therefore, you should not use this function if you need to
    serialize your model and restore it in a different environment.

  * The operation must run in the same address space as the Python program
    that calls `tf.py_function()`. If you are using distributed
    TensorFlow, you must run a `tf.distribute.Server` in the same process as the
    program that calls `tf.py_function()` and you must pin the created
    operation to a device in that server (e.g. using `with tf.device():`).

  * Currently `tf.py_function` is not compatible with XLA. Calling
    `tf.py_function` inside `tf.function(jit_compile=True)` will raise an
    error.

  Args:
    func: A Python function that accepts `inp` as arguments, and returns a
      value (or list of values) whose type is described by `Tout`.
    inp: Input arguments for `func`. A list whose elements are `Tensor`s or
      `CompositeTensors` (such as `tf.RaggedTensor`); or a single `Tensor` or
      `CompositeTensor`.
    Tout: The type(s) of the value(s) returned by `func`. One of the
      following.
      * If `func` returns a `Tensor` (or a value that can be converted to a
        Tensor): the `tf.DType` for that value.
      * If `func` returns a `CompositeTensor`: The `tf.TypeSpec` for that value.
      * If `func` returns `None`: the empty list (`[]`).
      * If `func` returns a list of `Tensor` and `CompositeTensor` values:
        a corresponding list of `tf.DType`s and `tf.TypeSpec`s for each value.
    name: A name for the operation (optional).

  Returns:
    The value(s) computed by `func`: a `Tensor`, `CompositeTensor`, or list of
    `Tensor` and `CompositeTensor`; or an empty list if `func` returns `None`.
  """
  if ops.executing_eagerly_outside_functions():
    # Scope the op to the context's host address space; presumably so the
    # Python-executed kernel and its inputs/outputs stay host-reachable even
    # when surrounding code targets an accelerator — confirm against the
    # context documentation.
    with ops.device(context.context().host_address_space()):
      return _internal_py_func(
          func=func, inp=inp, Tout=Tout, use_eager_py_func=True, name=name)

  return _internal_py_func(
      func=func, inp=inp, Tout=Tout, use_eager_py_func=True, name=name)
def py_func_common(func, inp, Tout, stateful=True, name=None):
  """Wraps a python function and uses it as a TensorFlow op.

  Given a python function `func`, which takes numpy arrays as its
  arguments and returns numpy arrays as its outputs, wrap this function as an
  operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
  in the graph:

  ```python
  def my_func(x):
    # x will be a numpy array with the contents of the placeholder below
    return np.sinh(x)
  input = tf.compat.v1.placeholder(tf.float32)
  y = tf.compat.v1.py_func(my_func, [input], tf.float32)
  ```

  **N.B.** The `tf.compat.v1.py_func()` operation has the following known
  limitations:

  * The body of the function (i.e. `func`) will not be serialized in a
    `GraphDef`. Therefore, you should not use this function if you need to
    serialize your model and restore it in a different environment.

  * The operation must run in the same address space as the Python program
    that calls `tf.compat.v1.py_func()`. If you are using distributed
    TensorFlow, you
    must run a `tf.distribute.Server` in the same process as the program that
    calls
    `tf.compat.v1.py_func()` and you must pin the created operation to a device
    in that
    server (e.g. using `with tf.device():`).

  Note: It produces tensors of unknown shape and rank as shape inference
    does not work on arbitrary Python code.
  If you need the shape, you need to set it based on statically
  available information.

  E.g.
  ```python
  import tensorflow as tf
  import numpy as np

  def make_synthetic_data(i):
      return np.cast[np.uint8](i) * np.ones([20,256,256,3],
              dtype=np.float32) / 10.

  def preprocess_fn(i):
      ones = tf.py_function(make_synthetic_data,[i],tf.float32)
      ones.set_shape(tf.TensorShape([None, None, None, None]))
      ones = tf.image.resize(ones, [224,224])
      return ones

  ds = tf.data.Dataset.range(10)
  ds = ds.map(preprocess_fn)
  ```

  Args:
    func: A Python function, which accepts `ndarray` objects as arguments and
      returns a list of `ndarray` objects (or a single `ndarray`). This function
      must accept as many arguments as there are tensors in `inp`, and these
      argument types will match the corresponding `tf.Tensor` objects in `inp`.
      The returned `ndarray`s must match the number and types defined by
      `Tout`.
      Important Note: Input and output numpy `ndarray`s of `func` are not
      guaranteed to be copies. In some cases their underlying memory will be
      shared with the corresponding TensorFlow tensors. In-place modification
      or storing `func` input or return values in python datastructures
      without explicit (np.)copy can have non-deterministic consequences.
    inp: A list of `Tensor` objects.
    Tout: A list or tuple of tensorflow data types or a single tensorflow data
      type if there is only one, indicating what `func` returns.
    stateful: (Boolean.) If True, the function should be considered stateful. If
      a function is stateless, when given the same input it will return the same
      output and have no observable side effects. Optimizations such as common
      subexpression elimination are only performed on stateless operations.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` or a single `Tensor` which `func` computes.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but `tf.numpy_function` is a
  near-exact replacement, just drop the `stateful` argument (all
  `tf.numpy_function` calls are considered stateful). It is compatible with
  eager execution and `tf.function`.

  `tf.py_function` is a close but not an exact replacement, passing TensorFlow
  tensors to the wrapped function instead of NumPy arrays, which provides
  gradients and can take advantage of accelerators.

  Before:

  >>> def fn_using_numpy(x):
  ...   x[0] = 0.
  ...   return x
  >>> tf.compat.v1.py_func(fn_using_numpy, inp=[tf.constant([1., 2.])],
  ...     Tout=tf.float32, stateful=False)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 2.], dtype=float32)>

  After:

  >>> tf.numpy_function(fn_using_numpy, inp=[tf.constant([1., 2.])],
  ...     Tout=tf.float32)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 2.], dtype=float32)>

  @end_compatibility
  """
  if context.executing_eagerly():
    # Eager fast path: call `func` directly on numpy copies of the inputs and
    # convert the results back to tensors.
    result = func(*[np.array(x) for x in inp])
    result = nest.flatten(result)
    result = [x if x is None else ops.convert_to_tensor(x) for x in result]
    if len(result) == 1:
      # Mimic the automatic unwrapping in graph-mode py_func
      result, = result
    return result

  if ops.executing_eagerly_outside_functions():
    with ops.device(context.context().host_address_space()):
      return _internal_py_func(
          func=func,
          inp=inp,
          Tout=Tout,
          stateful=stateful,
          use_eager_py_func=False,
          name=name)

  return _internal_py_func(
      func=func,
      inp=inp,
      Tout=Tout,
      stateful=stateful,
      use_eager_py_func=False,
      name=name)
@deprecation.deprecated(
    date=None,
    instructions="""tf.py_func is deprecated in TF V2. Instead, there are two
    options available in V2.
    - tf.py_function takes a python function which manipulates tf eager
    tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
    an ndarray (just call tensor.numpy()) but having access to eager tensors
    means `tf.py_function`s can use accelerators such as GPUs as well as
    being differentiable using a gradient tape.
    - tf.numpy_function maintains the semantics of the deprecated tf.py_func
    (it is not differentiable, and manipulates numpy arrays). It drops the
    stateful argument making all functions stateful.
    """)
@tf_export(v1=["py_func"])
@dispatch.add_dispatch_support
def py_func(func, inp, Tout, stateful=True, name=None):
  # Thin deprecated v1 entry point; all behavior lives in py_func_common.
  return py_func_common(func, inp, Tout, stateful, name=name)

# Copy the shared implementation's docstring so help(tf.compat.v1.py_func)
# shows the full documentation.
py_func.__doc__ = "%s" % py_func_common.__doc__
@tf_export("numpy_function")
@dispatch.add_dispatch_support
def numpy_function(func, inp, Tout, stateful=True, name=None):
  """Wraps a python function and uses it as a TensorFlow op.

  Given a python function `func` wrap this function as an operation in a
  TensorFlow function. `func` must take numpy arrays as its arguments and
  return numpy arrays as its outputs.

  The following example creates a TensorFlow graph with `np.sinh()` as an
  operation in the graph:

  >>> def my_numpy_func(x):
  ...   # x will be a numpy array with the contents of the input to the
  ...   # tf.function
  ...   return np.sinh(x)
  >>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
  ... def tf_function(input):
  ...   y = tf.numpy_function(my_numpy_func, [input], tf.float32)
  ...   return y * y
  >>> tf_function(tf.constant(1.))
  <tf.Tensor: shape=(), dtype=float32, numpy=1.3810978>

  Comparison to `tf.py_function`:
  `tf.py_function` and `tf.numpy_function` are very similar, except that
  `tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. If you want the
  function to contain `tf.Tensors`, and have any TensorFlow operations executed
  in the function be differentiable, please use `tf.py_function`.

  Note: We recommend to avoid using `tf.numpy_function` outside of
  prototyping and experimentation due to the following known limitations:

  * Calling `tf.numpy_function` will acquire the Python Global Interpreter Lock
    (GIL) that allows only one thread to run at any point in time. This will
    preclude efficient parallelization and distribution of the execution of the
    program. Therefore, you are discouraged to use `tf.numpy_function` outside
    of prototyping and experimentation.

  * The body of the function (i.e. `func`) will not be serialized in a
    `tf.SavedModel`. Therefore, you should not use this function if you need to
    serialize your model and restore it in a different environment.

  * The operation must run in the same address space as the Python program
    that calls `tf.numpy_function()`. If you are using distributed
    TensorFlow, you must run a `tf.distribute.Server` in the same process as the
    program that calls `tf.numpy_function` and you must pin the created
    operation to a device in that server (e.g. using `with tf.device():`).

  * Currently `tf.numpy_function` is not compatible with XLA. Calling
    `tf.numpy_function` inside `tf.function(jit_compile=True)` will raise an
    error.

  * Since the function takes numpy arrays, you cannot take gradients
    through a numpy_function. If you require something that is differentiable,
    please consider using tf.py_function.

  Args:
    func: A Python function, which accepts `numpy.ndarray` objects as arguments
      and returns a list of `numpy.ndarray` objects (or a single
      `numpy.ndarray`). This function must accept as many arguments as there are
      tensors in `inp`, and these argument types will match the corresponding
      `tf.Tensor` objects in `inp`. The returned `numpy.ndarray`s must match the
      number and types defined by `Tout`.
      Important Note: Input and output `numpy.ndarray`s of `func` are not
      guaranteed to be copies. In some cases their underlying memory will be
      shared with the corresponding TensorFlow tensors. In-place modification
      or storing `func` input or return values in python datastructures
      without explicit (np.)copy can have non-deterministic consequences.
    inp: A list of `tf.Tensor` objects.
    Tout: A list or tuple of tensorflow data types or a single tensorflow data
      type if there is only one, indicating what `func` returns.
    stateful: (Boolean.) Setting this argument to False tells the runtime to
      treat the function as stateless, which enables certain optimizations.
      A function is stateless when given the same input it will return the
      same output and have no side effects; its only purpose is to have a
      return value.
      The behavior for a stateful function with the `stateful` argument False
      is undefined. In particular, caution should be taken when
      mutating the input arguments as this is a stateful operation.
    name: (Optional) A name for the operation.

  Returns:
    Single or list of `tf.Tensor` which `func` computes.
  """
  # Delegates to the shared v1 implementation; unlike the deprecated py_func
  # entry point, `stateful` here is a supported public argument.
  return py_func_common(func, inp, Tout, stateful=stateful, name=name)
def _as_dtype_or_type_spec(t):
  """Pass `TypeSpec`s through unchanged; coerce anything else to a `DType`."""
  if isinstance(t, type_spec.TypeSpec):
    return t
  return dtypes.as_dtype(t)
def _wrap_for_composites(func, inp, Tout):
  """Wraps user inputs to support composite tensors for `py_function`.

  1. Flattens `inp` to a list of Tensors (by flattening any composite tensors).
  2. Creates a wrapper function for `func` that expects flat inputs and:
     - Packs the inputs into the input structure expected by `func`.
     - Calls `func` with the packed inputs.
     - Checks that `func`'s output matches `Tout`.
     - Flattens `func`'s output to a list of Tensors (flattening any composite
       tensors).

  Args:
    func: The function to wrap (`func` argument to `py_function`).
    inp: The input arguments for func (`inp` argument to `py_function`).
    Tout: The expected output types for func (`Tout` argument to
      `py_function`).

  Returns:
    A tuple `(func, inp, Tout, out_structure)`, where `func` is the wrapped
    function, `inp` is the flattened inputs, `Tout` is the list of expected
    dtypes for the flattened outputs, and `out_structure` is the expected
    output structure (which can be used to pack the output tensors).
  """
  # Keep composite inputs as structure markers; plain tensors become the
  # placeholder value 1 (only the structure matters for pack/flatten).
  in_structure = [
      v if isinstance(v, composite_tensor.CompositeTensor) else 1 for v in inp
  ]
  inp = nest.flatten_up_to(in_structure, inp, expand_composites=True)
  out_structure = Tout
  Tout = [
      v.dtype if isinstance(v, tensor_spec.TensorSpec) else v
      for v in nest.flatten(Tout, expand_composites=True)
  ]

  def wrapped_func(*flat_inp):
    # Rebuild the caller-visible (possibly composite) argument structure.
    structured_inp = nest.pack_sequence_as(
        in_structure, flat_inp, expand_composites=True)
    out = func(*structured_inp)
    if not out_structure:
      return []  # Ignore return value if none is requested/expected.
    if not isinstance(out, (list, tuple)):
      out = [out]  # func may return a single value instead of a list.
    flat_out = []
    for elt, expected_type in zip(out, out_structure):
      if (isinstance(expected_type, type_spec.TypeSpec) and
          not isinstance(expected_type, tensor_spec.TensorSpec)):
        # Composite output expected: validate against the TypeSpec and flatten.
        if not expected_type.is_compatible_with(elt):
          # pylint: disable=protected-access
          raise ValueError(
              f"py_function: func={func} returned {out!r}, "
              f"which did not match Tout={out_structure!r}.\nIn particular, "
              f"{elt!r} is not compatible with {expected_type!r}.")
        flat_out.extend(nest.flatten(elt, expand_composites=True))
      else:
        # Pro-actively check if the return value is a composite tensor when
        # we expect a Tensor.  We would catch this later (when we call
        # convert_to_tensor), but checking it here lets us give a better
        # error message.
        if isinstance(elt, composite_tensor.CompositeTensor):
          raise ValueError(
              f"py_function: func={func} returned {out!r}, "
              f"which did not match Tout={out_structure!r}.\nIn particular, "
              f"{elt!r} is not a Tensor.")
        flat_out.append(elt)
    return flat_out

  return wrapped_func, inp, Tout, out_structure
# A gradient is registered only for "EagerPyFunc" above; the numpy-based
# graph-mode kernels are explicitly non-differentiable.
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
| |
"""Copy number detection using read counts, with cn.mops.
http://www.bioconductor.org/packages/release/bioc/html/cn.mops.html
"""
from contextlib import closing
import os
import re
import shutil
import subprocess
import pysam
import toolz as tz
from bcbio import bam, install, utils
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils, shared
from bcbio.provenance import do
from bcbio.structural import shared as sshared
from bcbio.variation import bedutils, vcfutils
def run(items, background=None):
    """Detect copy number variations from batched set of samples using cn.mops.

    Args:
        items: list of sample data dictionaries in the batch to call CNVs on.
        background: optional list of additional sample dictionaries used as
            background; combined with `items` for the multi-sample model.

    Returns:
        The input `items`, each annotated with an "sv" entry recording the
        cn.mops call ("variantcaller") and the per-sample BED of calls
        ("vrn_file").

    Raises:
        ValueError: if fewer than two total samples are supplied, since
            cn.mops requires multiple samples.
    """
    if not background: background = []
    names = [tz.get_in(["rgnames", "sample"], x) for x in items + background]
    work_bams = [x["align_bam"] for x in items + background]
    if len(items + background) < 2:
        raise ValueError("cn.mops only works on batches with multiple samples")
    data = items[0]
    # Work space is keyed off the first sample in the batch.
    work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", names[0],
                                               "cn_mops"))
    # NOTE(review): "progs" lists "delly" rather than cn.mops -- looks copied
    # from another structural caller; confirm which program's resource
    # configuration is intended here.
    parallel = {"type": "local", "cores": data["config"]["algorithm"].get("num_cores", 1),
                "progs": ["delly"]}
    # Parallelize per chromosome unless a regional BED file restricts the
    # analysis, in which case a single run (chrom=None) covers everything.
    with closing(pysam.Samfile(work_bams[0], "rb")) as pysam_work_bam:
        chroms = [None] if _get_regional_bed_file(items[0]) else pysam_work_bam.references
        out_files = run_multicore(_run_on_chrom, [(chrom, work_bams, names, work_dir, items)
                                                  for chrom in chroms],
                                  data["config"], parallel)
        out_file = _combine_out_files(out_files, work_dir, data)
    out = []
    for data in items:
        if "sv" not in data:
            data["sv"] = []
        data["sv"].append({"variantcaller": "cn_mops",
                           "vrn_file": _prep_sample_cnvs(out_file, data)})
        out.append(data)
    return out
def _combine_out_files(chr_files, work_dir, data):
    """Merge the per-chromosome CNV BED files into one combined output file.

    Chromosome files whose first line is the 'track name=empty' placeholder
    (written when cn.mops found no CNVs) are skipped.
    """
    out_file = "%s.bed" % sshared.outname_from_inputs(chr_files)
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for cur_file in chr_files:
                # Peek at the first line to detect the "no CNVs" placeholder.
                with open(cur_file) as check_handle:
                    is_empty = check_handle.readline().startswith("track name=empty")
                if is_empty:
                    continue
                with open(cur_file) as in_handle:
                    shutil.copyfileobj(in_handle, out_handle)
    return out_file
def _prep_sample_cnvs(cnv_file, data):
    """Convert a multiple sample CNV file into a single BED file for a sample.

    Handles matching and fixing names where R converts numerical IDs (1234) into
    strings by adding an X (X1234), and converts other characters into '.'s.
    http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html
    """
    import pybedtools
    sample_name = tz.get_in(["rgnames", "sample"], data)
    def make_names(name):
        # Mirror R's make.names(): any character that is not alphanumeric,
        # underscore or dot becomes a '.'.  Raw string avoids the invalid
        # "\w" escape that a plain literal produces on modern Python.
        return re.sub(r"[^\w.]", '.', name)
    def matches_sample_name(feat):
        # Accept the exact name plus both R make.names() manglings.
        return (feat.name == sample_name or feat.name == "X%s" % sample_name or
                feat.name == make_names(sample_name))
    def update_sample_name(feat):
        feat.name = sample_name
        return feat
    sample_file = os.path.join(os.path.dirname(cnv_file), "%s-cnv.bed" % sample_name)
    if not utils.file_exists(sample_file):
        with file_transaction(data, sample_file) as tx_out_file:
            with shared.bedtools_tmpdir(data):
                pybedtools.BedTool(cnv_file).filter(matches_sample_name).each(update_sample_name).saveas(tx_out_file)
    return sample_file
@utils.map_wrap
@zeromq_aware_logging
def _run_on_chrom(chrom, work_bams, names, work_dir, items):
    """Run cn.mops on work BAMs for a specific chromosome.

    Writes a generated R script next to the output file, runs it with
    Rscript and returns the single-element list of the output BED path.
    A chrom of None/"" means "all regions" (targeted runs).
    """
    local_sitelib = os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
                                 "lib", "R", "site-library")
    batch = sshared.get_cur_batch(items)
    ext = "-%s-cnv" % batch if batch else "-cnv"
    out_file = os.path.join(work_dir, "%s%s-%s.bed" % (os.path.splitext(os.path.basename(work_bams[0]))[0],
                                                       ext, chrom if chrom else "all"))
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            rcode = "%s-run.R" % os.path.splitext(out_file)[0]
            with open(rcode, "w") as out_handle:
                out_handle.write(_script.format(prep_str=_prep_load_script(work_bams, names, chrom, items),
                                                out_file=tx_out_file,
                                                local_sitelib=local_sitelib))
            rscript = config_utils.get_program("Rscript", items[0]["config"])
            try:
                do.run([rscript, rcode], "cn.mops CNV detection", items[0], log_error=False)
            # "except E as e" replaces the Python 2-only "except E, e" syntax.
            except subprocess.CalledProcessError as msg:
                # cn.mops errors out if no CNVs found. Just write an empty file.
                if _allowed_cnmops_errorstates(str(msg)):
                    with open(tx_out_file, "w") as out_handle:
                        out_handle.write('track name=empty description="No CNVs found"\n')
                else:
                    # logger.exception requires a message argument; include the
                    # underlying R failure for debugging.
                    logger.exception("cn.mops failed: %s" % msg)
                    raise
    return [out_file]
def _allowed_cnmops_errorstates(msg):
    """Report whether a cn.mops failure message is an expected "no result" case.

    cn.mops raises errors for conditions treated upstream as "no CNVs found"
    rather than real failures; any of these substrings marks the run benign.
    """
    benign_messages = [
        "No CNV regions in result object. Rerun cn.mops with different parameters",
        "Normalization might not be applicable for this small number of segments",
        "Error in if (is.finite(mv2m)) { : argument is of length zero",
    ]
    return any(text in msg for text in benign_messages)
def _prep_load_script(work_bams, names, chrom, items):
    """Build the R snippet that loads read counts for this batch.

    Uses the paired tumor/normal loader for two-sample paired batches,
    otherwise the population loader.
    """
    chrom = chrom if chrom else ""
    pairmode = "paired" if bam.is_paired(work_bams[0]) else "unpaired"
    use_paired = len(items) == 2 and vcfutils.get_paired_phenotype(items[0])
    loader = _paired_load_script if use_paired else _population_load_script
    return loader(work_bams, names, chrom, pairmode, items)
def _get_regional_bed_file(data):
    """If we are running a non-genome analysis, pull the regional file for analysis.

    Returns the merged variant-regions BED for targeted runs; returns None
    (implicitly) for whole-genome runs or when no usable file is configured.
    """
    regions = tz.get_in(["config", "algorithm", "variant_regions"], data)
    merged_regions = bedutils.merge_overlaps(regions, data)
    coverage_interval = data["config"]["algorithm"].get("coverage_interval", "exome")
    if coverage_interval.lower() in ["genome"]:
        return None
    if merged_regions and utils.file_exists(merged_regions):
        return merged_regions
def _population_load_script(work_bams, names, chrom, pairmode, items):
    """Prepare BAMs for assessing CNVs in a population.
    """
    bams_joined = ",".join(work_bams)
    names_joined = ",".join(names)
    bed_file = _get_regional_bed_file(items[0])
    if bed_file:
        # Targeted analysis: restrict counting to the regional BED file.
        return _population_prep_targeted.format(bam_file_str=bams_joined, names_str=names_joined,
                                                chrom=chrom, num_cores=0, pairmode=pairmode,
                                                bed_file=bed_file)
    return _population_prep.format(bam_file_str=bams_joined, names_str=names_joined,
                                   chrom=chrom, num_cores=0, pairmode=pairmode)
def _paired_load_script(work_bams, names, chrom, pairmode, items):
    """Prepare BAMs for assessing CNVs in a paired tumor/normal setup.
    """
    paired = vcfutils.get_paired_bams(work_bams, items)
    bed_file = _get_regional_bed_file(items[0])
    template = _paired_prep_targeted if bed_file else _paired_prep
    format_args = {"case_file": paired.tumor_bam, "case_name": paired.tumor_name,
                   "ctrl_file": paired.normal_bam, "ctrl_name": paired.normal_name,
                   "num_cores": 0, "chrom": chrom, "pairmode": pairmode}
    if bed_file:
        # Only the targeted template takes a bed_file placeholder.
        format_args["bed_file"] = bed_file
    return template.format(**format_args)
_script = """
.libPaths(c("{local_sitelib}"))
library(cn.mops)
library(rtracklayer)
{prep_str}
calc_cnvs <- cnvs(cnv_out)
strcn_to_cn <- function(x) {{
as.numeric(substring(x, 3, 20))}}
calc_cnvs$score <- strcn_to_cn(calc_cnvs$CN)
calc_cnvs$name <- calc_cnvs$sampleName
export.bed(calc_cnvs, "{out_file}")
"""
_population_prep = """
bam_files <- strsplit("{bam_file_str}", ",")[[1]]
sample_names <- strsplit("{names_str}", ",")[[1]]
count_drs <- getReadCountsFromBAM(bam_files, sampleNames=sample_names, mode="{pairmode}",
refSeqName="{chrom}", parallel={num_cores})
prep_counts <- cn.mops(count_drs, parallel={num_cores})
cnv_out <- calcIntegerCopyNumbers(prep_counts)
"""
_paired_prep = """
case_count <- getReadCountsFromBAM(c("{case_file}"), sampleNames=c("{case_name}"), mode="{pairmode}",
refSeqName="{chrom}", parallel={num_cores})
ctrl_count <- getReadCountsFromBAM(c("{ctrl_file}"), sampleNames=c("{ctrl_name}"), mode="{pairmode}",
refSeqName="{chrom}", parallel={num_cores},
WL=width(case_count)[[1]])
prep_counts <- referencecn.mops(case_count, ctrl_count, parallel={num_cores})
cnv_out <- calcIntegerCopyNumbers(prep_counts)
"""
_population_prep_targeted = """
bam_files <- strsplit("{bam_file_str}", ",")[[1]]
sample_names <- strsplit("{names_str}", ",")[[1]]
my_gr <- import.bed(c("{bed_file}"), trackLine=FALSE, asRangedData=FALSE)
if ("{chrom}" != "") my_gr = subset(my_gr, seqnames(my_gr) == "{chrom}")
if (length(my_gr) < 1) stop("No CNV regions in result object. Rerun cn.mops with different parameters!")
count_drs <- getSegmentReadCountsFromBAM(bam_files, sampleNames=sample_names, mode="{pairmode}",
GR=my_gr, parallel={num_cores})
prep_counts <- cn.mops(count_drs, parallel={num_cores})
cnv_out <- calcIntegerCopyNumbers(prep_counts)
"""
# Loader: targeted (BED-restricted) paired tumor/normal counts.
# Fixed: the control read counts previously reused {case_name} as the
# sampleNames argument, mislabeling the normal sample with the tumor name;
# it now uses {ctrl_name}, which the caller (_paired_load_script) already
# supplies.
_paired_prep_targeted = """
my_gr <- import.bed(c("{bed_file}"), trackLine=FALSE, asRangedData=FALSE)
if ("{chrom}" != "") my_gr = subset(my_gr, seqnames(my_gr) == "{chrom}")
if (length(my_gr) < 1) stop("No CNV regions in result object. Rerun cn.mops with different parameters!")
case_count <- getSegmentReadCountsFromBAM(c("{case_file}"), GR=my_gr,
                                          sampleNames=c("{case_name}"),
                                          mode="{pairmode}", parallel={num_cores})
ctrl_count <- getSegmentReadCountsFromBAM(c("{ctrl_file}"), GR=my_gr,
                                          sampleNames=c("{ctrl_name}"),
                                          mode="{pairmode}", parallel={num_cores})
prep_counts <- referencecn.mops(case_count, ctrl_count, parallel={num_cores})
cnv_out <- calcIntegerCopyNumbers(prep_counts)
"""
| |
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from nova.api.openstack.compute.contrib import flavor_access
from nova.api.openstack.compute.contrib import flavormanage
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
    """Stub for flavors.get_flavor_by_flavor_id used by the tests below.

    'failtest' simulates a missing flavor; any id other than '1234' flags a
    test programming error; callers are expected to pass read_deleted='no'.
    """
    if flavorid == 'failtest':
        raise exception.FlavorNotFound(flavor_id=flavorid)
    if str(flavorid) != '1234':
        raise Exception("This test expects flavorid 1234, not %s" % flavorid)
    if read_deleted != 'no':
        raise test.TestingException("Should not be reading deleted")
    frob_flavor = {
        'root_gb': 1,
        'ephemeral_gb': 1,
        'name': u'frob',
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
        'updated_at': None,
        'memory_mb': 256,
        'vcpus': 1,
        'flavorid': flavorid,
        'swap': 0,
        'rxtx_factor': 1.0,
        'extra_specs': {},
        'deleted_at': None,
        'vcpu_weight': None,
        'id': 7,
        'is_public': True,
        'disabled': False,
    }
    return frob_flavor
def fake_destroy(flavorname):
    """No-op stand-in for flavors.destroy; deletion always "succeeds"."""
    return None
def fake_create(context, kwargs):
    """Stub for db.flavor_create; echo the requested flavor back as a dict."""
    flavorid = kwargs.get('flavorid')
    if flavorid is None:
        # Creation without an explicit id falls back to the canonical test id.
        flavorid = 1234
    return {
        'flavorid': flavorid,
        'name': kwargs.get('name'),
        'memory_mb': int(kwargs.get('memory_mb')),
        'vcpus': int(kwargs.get('vcpus')),
        'root_gb': int(kwargs.get('root_gb')),
        'ephemeral_gb': int(kwargs.get('ephemeral_gb')),
        'swap': kwargs.get('swap'),
        'rxtx_factor': float(kwargs.get('rxtx_factor')),
        'is_public': bool(kwargs.get('is_public')),
    }
class FlavorManageTest(test.NoDBTestCase):
    """Exercise the flavormanage extension's create and delete endpoints.

    flavors/db calls are replaced with the fake_* helpers above, so no real
    database is needed (NoDBTestCase).  Success cases go through the full
    WSGI app; bad-request cases unset the stubs to hit real validation.
    """
    def setUp(self):
        super(FlavorManageTest, self).setUp()
        self.stubs.Set(flavors,
                       "get_flavor_by_flavor_id",
                       fake_get_flavor_by_flavor_id)
        self.stubs.Set(flavors, "destroy", fake_destroy)
        self.stubs.Set(db, "flavor_create", fake_create)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
                'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
        self.controller = flavormanage.FlavorManageController()
        self.app = fakes.wsgi_app(init_only=('flavors',))
        # Baseline valid request; individual tests mutate a copy-by-reference
        # of this dict before posting it.
        # NOTE(review): unicode() is Python 2 only.
        self.request_body = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        self.expected_flavor = self.request_body
    def test_delete(self):
        """Deleting an existing flavor returns 202; a missing one 404s."""
        req = fakes.HTTPRequest.blank('/v2/123/flavors/1234')
        res = self.controller._delete(req, 1234)
        self.assertEqual(res.status_int, 202)
        # subsequent delete should fail
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._delete, req, "failtest")
    def _test_create_missing_parameter(self, parameter):
        """Posting a flavor without the given required field must 400."""
        body = {
            "flavor": {
                "name": "azAZ09. -_",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        del body['flavor'][parameter]
        req = fakes.HTTPRequest.blank('/v2/123/flavors')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create,
                          req, body)
    def test_create_missing_name(self):
        self._test_create_missing_parameter('name')
    def test_create_missing_ram(self):
        self._test_create_missing_parameter('ram')
    def test_create_missing_vcpus(self):
        self._test_create_missing_parameter('vcpus')
    def test_create_missing_disk(self):
        self._test_create_missing_parameter('disk')
    def _create_flavor_success_case(self, body):
        """POST the body through the WSGI app, expect 200, return the JSON."""
        url = '/v2/fake/flavors'
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_code)
        return jsonutils.loads(res.body)
    def test_create(self):
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])
    def test_create_public_default(self):
        # is_public omitted: the created flavor should still match the rest.
        del self.request_body['flavor']['os-flavor-access:is_public']
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])
    def test_create_flavor_name_with_leading_trailing_whitespace(self):
        # Names are stripped before storage.
        self.request_body['flavor']['name'] = " test "
        body = self._create_flavor_success_case(self.request_body)
        self.assertEqual("test", body["flavor"]["name"])
    def test_create_without_flavorid(self):
        del self.request_body['flavor']['id']
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])
    def _create_flavor_bad_request_case(self, body):
        """POST an invalid body with stubs removed; expect a 400 response."""
        self.stubs.UnsetAll()
        url = '/v2/fake/flavors'
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(res.status_code, 400)
    def test_create_invalid_name(self):
        self.request_body['flavor']['name'] = 'bad !@#!$% name'
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_flavor_name_is_whitespace(self):
        self.request_body['flavor']['name'] = ' '
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_name_too_long(self):
        self.request_body['flavor']['name'] = 'a' * 256
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_without_flavorname(self):
        del self.request_body['flavor']['name']
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_empty_body(self):
        body = {
            "flavor": {}
        }
        self._create_flavor_bad_request_case(body)
    def test_create_no_body(self):
        body = {}
        self._create_flavor_bad_request_case(body)
    def test_create_invalid_format_body(self):
        body = {
            "flavor": []
        }
        self._create_flavor_bad_request_case(body)
    def test_create_invalid_flavorid(self):
        self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_check_flavor_id_length(self):
        MAX_LENGTH = 255
        self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
        self.request_body['flavor']['id'] = " bad_id "
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_without_ram(self):
        del self.request_body['flavor']['ram']
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_0_ram(self):
        self.request_body['flavor']['ram'] = 0
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_without_vcpus(self):
        del self.request_body['flavor']['vcpus']
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_0_vcpus(self):
        self.request_body['flavor']['vcpus'] = 0
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_without_disk(self):
        del self.request_body['flavor']['disk']
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_minus_disk(self):
        self.request_body['flavor']['disk'] = -1
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_minus_ephemeral(self):
        self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_minus_swap(self):
        self.request_body['flavor']['swap'] = -1
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_minus_rxtx_factor(self):
        self.request_body['flavor']['rxtx_factor'] = -1
        self._create_flavor_bad_request_case(self.request_body)
    def test_create_with_non_boolean_is_public(self):
        self.request_body['flavor']['os-flavor-access:is_public'] = 123
        self._create_flavor_bad_request_case(self.request_body)
    def test_flavor_exists_exception_returns_409(self):
        """A duplicate flavor name surfaces as an HTTP 409 Conflict."""
        expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": 1235,
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
                        flavorid, swap, rxtx_factor, is_public):
            raise exception.FlavorExists(name=name)
        self.stubs.Set(flavors, "create", fake_create)
        url = '/v2/fake/flavors'
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(expected)
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 409)
    def test_invalid_memory_mb(self):
        """Check negative and decimal number can't be accepted."""
        self.stubs.UnsetAll()
        self.assertRaises(exception.InvalidInput, flavors.create, "abc",
                          -512, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
                          512.2, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
                          None, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
                          512, 2, None, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
                          "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
class FakeRequest(object):
    """Minimal request stand-in exposing only an admin nova.context environ."""
    environ = {"nova.context": context.get_admin_context()}
class PrivateFlavorManageTest(test.TestCase):
    """Check flavor-access side effects when creating private/public flavors."""
    def setUp(self):
        super(PrivateFlavorManageTest, self).setUp()
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
                'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
        self.controller = flavormanage.FlavorManageController()
        self.flavor_access_controller = flavor_access.FlavorAccessController()
        self.ctxt = context.RequestContext('fake', 'fake',
                                           is_admin=True, auth_token=True)
        self.app = fakes.wsgi_app(init_only=('flavors',),
                                  fake_auth_context=self.ctxt)
        # Baseline flavor body without an explicit is_public flag; tests
        # set it before posting.
        self.expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "swap": 512,
                "rxtx_factor": 1
            }
        }
    def _get_response(self):
        """POST self.expected through the WSGI app and return parsed JSON."""
        url = '/v2/fake/flavors'
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(self.expected)
        res = req.get_response(self.app)
        return jsonutils.loads(res.body)
    def test_create_private_flavor_should_not_grant_flavor_access(self):
        self.expected["flavor"]["os-flavor-access:is_public"] = False
        body = self._get_response()
        for key in self.expected["flavor"]:
            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
        # The creating tenant must not be auto-granted access.
        flavor_access_body = self.flavor_access_controller.index(
            FakeRequest(), body["flavor"]["id"])
        expected_flavor_access_body = {
            "tenant_id": "%s" % self.ctxt.project_id,
            "flavor_id": "%s" % body["flavor"]["id"]
        }
        self.assertNotIn(expected_flavor_access_body,
                         flavor_access_body["flavor_access"])
    def test_create_public_flavor_should_not_create_flavor_access(self):
        self.expected["flavor"]["os-flavor-access:is_public"] = True
        # Stub add_flavor_access with no expected calls: ReplayAll makes any
        # invocation fail the test.
        self.mox.StubOutWithMock(flavors, "add_flavor_access")
        self.mox.ReplayAll()
        body = self._get_response()
        for key in self.expected["flavor"]:
            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
| |
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
#
# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
# * Updated to no longer generate special -dbg package, instead use the
# single system -dbg
# * Update version with ".1" to indicate this change
import os
import sys
import time
VERSION = "2.7.2"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20110222.2"
class MakefileMaker:
    """Collects python sub-package definitions and emits BitBake packaging
    metadata (PROVIDES/PACKAGES/SUMMARY/RDEPENDS/FILES lines).

    NOTE(review): uses dict.iteritems() -- this class is Python 2 only.
    """
    def __init__( self, outfile ):
        """initialize"""
        # name -> (description, dependencies, expanded filenames)
        self.packages = {}
        # VERSION[:3] yields e.g. "2.7" from "2.7.2".
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
    #
    # helper functions
    #
    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )
    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix
    def doProlog( self ):
        # Emits a single near-empty line; kept for symmetry with doEpilog.
        self.out( """ """ )
        self.out( "" )
    def addPackage( self, name, description, dependencies, filenames ):
        """add a package to the Makefile"""
        if type( filenames ) == type( "" ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            # Paths starting with "$" are BitBake variables and are kept
            # verbatim; everything else gets the target prefix prepended.
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames
    def doBody( self ):
        """generate body of Makefile"""
        global VERSION
        #
        # generate provides line
        #
        provideLine = 'PROVIDES+="'
        for name in sorted(self.packages):
            provideLine += "%s " % name
        provideLine += '"'
        self.out( provideLine )
        self.out( "" )
        #
        # generate package line
        #
        packageLine = 'PACKAGES="${PN}-dbg '
        for name in sorted(self.packages):
            if name.startswith("${PN}-distutils"):
                if name == "${PN}-distutils":
                    packageLine += "%s-staticdev %s " % (name, name)
            elif name != '${PN}-dbg':
                packageLine += "%s " % name
        packageLine += '${PN}-modules"'
        self.out( packageLine )
        self.out( "" )
        #
        # generate package variables
        #
        for name, data in sorted(self.packages.iteritems()):
            desc, deps, files = data
            #
            # write out the description, revision and dependencies
            #
            self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
            line = 'FILES_%s="' % name
            #
            # check which directories to make in the temporary directory
            #
            # NOTE(review): dirset is populated but never read in this
            # method -- looks like a leftover from older copy logic.
            dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
            for target in files:
                dirset[os.path.dirname( target )] = True
            #
            # generate which files to copy for the target (-dfR because whole directories are also allowed)
            #
            for target in files:
                line += "%s " % target
            line += '"'
            self.out( line )
            self.out( "" )
        # Umbrella package depending on every sub-package except -dev and
        # the static distutils libraries.
        self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
        line = 'RDEPENDS_${PN}-modules="'
        for name, data in sorted(self.packages.iteritems()):
            if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
                line += "%s " % name
        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
    def doEpilog( self ):
        self.out( """""" )
        self.out( "" )
    def make( self ):
        # Full generation: prolog, package body, epilog.
        self.doProlog()
        self.doBody()
        self.doEpilog()
if __name__ == "__main__":
    # With a filename argument, regenerate that file; otherwise write to stdout.
    if len( sys.argv ) > 1:
        try:
            os.unlink(sys.argv[1])
        except OSError:
            # Target did not exist (or could not be removed); either way we
            # recreate it below.  sys.exc_clear() was removed in Python 3 and
            # clearing the handled exception is unnecessary.
            pass
        # file() was removed in Python 3; open() behaves identically here.
        outfile = open( sys.argv[1], "w" )
    else:
        outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
# Parameters: revision, name, description, dependencies, filenames
#
m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re",
"__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
"genericpath.* getopt.* linecache.* new.* " +
"os.* posixpath.* struct.* " +
"warnings.* site.* stat.* " +
"UserDict.* UserList.* UserString.* " +
"lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
"lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " +
"_weakrefset.* sysconfig.* config/Makefile " +
"${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
"${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
m.addPackage( "${PN}-dev", "Python development package", "${PN}-core",
"${includedir} " +
"${libdir}/lib*${SOLIBSDEV} " +
"${libdir}/*.la " +
"${libdir}/*.a " +
"${libdir}/*.o " +
"${libdir}/pkgconfig " +
"${base_libdir}/*.a " +
"${base_libdir}/*.o " +
"${datadir}/aclocal " +
"${datadir}/pkgconfig " )
m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
"${bindir}/2to3 lib2to3" ) # package
m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
"${bindir}/idle idlelib" ) # package
m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
"${bindir}/pydoc pydoc.* pydoc_data" )
m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
"${bindir}/smtpd.* smtpd.*" )
m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" )
m.addPackage( "${PN}-bsddb", "Python bindings for the Berkeley Database", "${PN}-core",
"bsddb lib-dynload/_bsddb.so" ) # package
m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core",
"py_compile.* compileall.*" )
m.addPackage( "${PN}-compiler", "Python compiler support", "${PN}-core",
"compiler" ) # package
m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-zlib",
"gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core",
"hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
"lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core",
"curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core",
"ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module
m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs",
"_strptime.* calendar.* lib-dynload/datetime.so" )
m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core",
"anydbm.* dumbdbm.* whichdb.* " )
m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
"bdb.* pdb.*" )
m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re",
"difflib.*" )
m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils",
"config/lib*.a" ) # package
m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
"config distutils" ) # package
m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
"doctest.*" )
# FIXME consider adding to some higher level package
m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
"lib-dynload/_elementtree.so" )
m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
"imaplib.* email" ) # package
m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core",
"lib-dynload/fcntl.so" )
m.addPackage( "${PN}-hotshot", "Python hotshot performance profiler", "${PN}-core",
"hotshot lib-dynload/_hotshot.so" )
m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core",
"lib-dynload/gdbm.so" )
m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core",
"colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math ${PN}-textutils",
"lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
"pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re",
"json lib-dynload/_json.so" ) # package
m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core",
"lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
"lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
"atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
"tokenize.* traceback.* weakref.*" )
m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
"logging" ) # package
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.addPackage( "${PN}-math", "Python math support", "${PN}-core ${PN}-crypt",
"lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io",
"mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )
m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io",
"lib-dynload/mmap.so " )
m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap",
"lib-dynload/_multiprocessing.so multiprocessing" ) # package
m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient",
"cgi.* *HTTPServer.* SocketServer.*" )
m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re",
"decimal.* numbers.*" )
m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
"pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" )
m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core",
"pkgutil.*")
m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io",
"pprint.*" )
m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils",
"profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core",
"lib-dynload/readline.so rlcompleter.*" )
m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core",
"lib-dynload/resource.so" )
m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
"robotparser.*")
m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
"subprocess.*" )
m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
"lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3",
"sqlite3/test" )
m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re",
"lib-dynload/strop.so string.* stringold.*" )
m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core",
"lib-dynload/syslog.so" )
m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io",
"pty.* tty.*" )
m.addPackage( "${PN}-tests", "Python tests", "${PN}-core",
"test" ) # package
m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
"lib-dynload/_tkinter.so lib-tk" ) # package
m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell",
"unittest/" )
m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core",
"lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-elementtree ${PN}-re",
"lib-dynload/pyexpat.so xml xmllib.*" ) # package
m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
"xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" )
m.addPackage( "${PN}-zlib", "Python zlib compression support", "${PN}-core",
"lib-dynload/zlib.so" )
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.make()
| |
r"""
Uncertainty propagation class for arithmetic, log and exp.
Based on scalars or numpy vectors, this class allows you to store and
manipulate values+uncertainties, with propagation of gaussian error for
addition, subtraction, multiplication, division, power, exp and log.
Storage properties are determined by the numbers used to set the value
and uncertainty. Be sure to use floating point uncertainty vectors
for inplace operations since numpy does not do automatic type conversion.
Normal operations can use mixed integer and floating point. In place
operations such as *a \*= b* create at most one extra copy for each operation.
By contrast, *c = a\*b* uses four intermediate vectors, so shouldn't be used
for huge arrays.
"""
from __future__ import division
import numpy as np
import err1d
from formatnum import format_uncertainty
__all__ = ['Uncertainty']
# TODO: rename to Measurement and add support for units?
# TODO: C implementation of *,/,**?
class Uncertainty(object):
    """
    Value (scalar or numpy vector) with gaussian uncertainty.

    The value is stored in ``x`` and the squared uncertainty in ``variance``;
    the standard deviation is exposed through the ``dx`` property.
    Arithmetic, ``log`` and ``exp`` propagate gaussian error via the err1d
    routines.  In-place operators mutate ``x``/``variance`` directly, so use
    floating point storage for those (numpy will not convert automatically).
    """
    # Standard deviation is derived from (and stored as) the variance.
    def _getdx(self):
        return np.sqrt(self.variance)
    def _setdx(self, dx):
        # Fill then square in place (rather than variance = dx**2) to avoid
        # allocating a temporary vector.
        self.variance[:] = dx
        self.variance **= 2
    dx = property(_getdx, _setdx, doc="standard deviation")

    def __init__(self, x, variance=None):
        """Store value *x* and its *variance* (squared uncertainty)."""
        self.x, self.variance = x, variance

    # Numpy array slicing operations; value and variance slice together.
    def __len__(self):
        return len(self.x)
    def __getitem__(self, key):
        return Uncertainty(self.x[key], self.variance[key])
    def __setitem__(self, key, value):
        self.x[key] = value.x
        self.variance[key] = value.variance
    def __delitem__(self, key):
        del self.x[key]
        del self.variance[key]
    #def __iter__(self): pass # Not sure we need iter

    # Normal (copying) operations: operands may be of mixed type.
    def __add__(self, other):
        if isinstance(other, Uncertainty):
            return Uncertainty(*err1d.add(self.x, self.variance,
                                          other.x, other.variance))
        else:
            # Adding a constant shifts the value; variance is unchanged.
            return Uncertainty(self.x+other, self.variance+0)  # Force copy
    def __sub__(self, other):
        if isinstance(other, Uncertainty):
            return Uncertainty(*err1d.sub(self.x, self.variance,
                                          other.x, other.variance))
        else:
            return Uncertainty(self.x-other, self.variance+0)  # Force copy
    def __mul__(self, other):
        if isinstance(other, Uncertainty):
            return Uncertainty(*err1d.mul(self.x, self.variance,
                                          other.x, other.variance))
        else:
            return Uncertainty(self.x*other, self.variance*other**2)
    def __truediv__(self, other):
        if isinstance(other, Uncertainty):
            return Uncertainty(*err1d.div(self.x, self.variance,
                                          other.x, other.variance))
        else:
            return Uncertainty(self.x/other, self.variance/other**2)
    def __pow__(self, other):
        if isinstance(other, Uncertainty):
            # Haven't calculated variance in (a+/-da) ** (b+/-db)
            return NotImplemented
        else:
            return Uncertainty(*err1d.pow(self.x, self.variance, other))

    # Reverse operations (scalar <op> Uncertainty).
    def __radd__(self, other):
        return Uncertainty(self.x+other, self.variance+0)  # Force copy
    def __rsub__(self, other):
        return Uncertainty(other-self.x, self.variance+0)
    def __rmul__(self, other):
        return Uncertainty(self.x*other, self.variance*other**2)
    def __rtruediv__(self, other):
        # other/self computed as other * self**-1
        x, variance = err1d.pow(self.x, self.variance, -1)
        return Uncertainty(x*other, variance*other**2)
    def __rpow__(self, other):
        return NotImplemented

    # In-place operations: may be of mixed type.
    def __iadd__(self, other):
        if isinstance(other, Uncertainty):
            self.x, self.variance \
                = err1d.add_inplace(self.x, self.variance, other.x, other.variance)
        else:
            self.x += other
        return self
    def __isub__(self, other):
        if isinstance(other, Uncertainty):
            self.x, self.variance \
                = err1d.sub_inplace(self.x, self.variance, other.x, other.variance)
        else:
            self.x -= other
        return self
    def __imul__(self, other):
        if isinstance(other, Uncertainty):
            self.x, self.variance \
                = err1d.mul_inplace(self.x, self.variance, other.x, other.variance)
        else:
            self.x *= other
            self.variance *= other**2
        return self
    def __itruediv__(self, other):
        if isinstance(other, Uncertainty):
            self.x, self.variance \
                = err1d.div_inplace(self.x, self.variance, other.x, other.variance)
        else:
            self.x /= other
            self.variance /= other**2
        return self
    def __ipow__(self, other):
        if isinstance(other, Uncertainty):
            # Haven't calculated variance in (a+/-da) ** (b+/-db)
            return NotImplemented
        else:
            self.x, self.variance = err1d.pow_inplace(self.x, self.variance, other)
        return self

    # Use true division even under python 2 integer division.
    def __div__(self, other): return self.__truediv__(other)
    def __rdiv__(self, other): return self.__rtruediv__(other)
    def __idiv__(self, other): return self.__itruediv__(other)

    # Unary ops.
    def __neg__(self):
        return Uncertainty(-self.x, self.variance)
    def __pos__(self):
        return self
    def __abs__(self):
        return Uncertainty(np.abs(self.x), self.variance)

    def __str__(self):
        #return str(self.x)+" +/- "+str(np.sqrt(self.variance))
        if np.isscalar(self.x):
            return format_uncertainty(self.x, np.sqrt(self.variance))
        else:
            return [format_uncertainty(v, dv)
                    for v, dv in zip(self.x, np.sqrt(self.variance))]
    def __repr__(self):
        return "Uncertainty(%s,%s)" % (str(self.x), str(self.variance))

    # Integer/bitwise/conversion operations are not meaningful for values
    # with gaussian uncertainty.  (Bug fix: the original spelled several of
    # these 'NotImplmented' — a NameError at call time — and defined
    # __mod__/__rmod__/__imod__ twice each.)
    def __floordiv__(self, other): return NotImplemented
    def __mod__(self, other): return NotImplemented
    def __divmod__(self, other): return NotImplemented
    def __lshift__(self, other): return NotImplemented
    def __rshift__(self, other): return NotImplemented
    def __and__(self, other): return NotImplemented
    def __xor__(self, other): return NotImplemented
    def __or__(self, other): return NotImplemented
    def __rfloordiv__(self, other): return NotImplemented
    def __rmod__(self, other): return NotImplemented
    def __rdivmod__(self, other): return NotImplemented
    def __rlshift__(self, other): return NotImplemented
    def __rrshift__(self, other): return NotImplemented
    def __rand__(self, other): return NotImplemented
    def __rxor__(self, other): return NotImplemented
    def __ror__(self, other): return NotImplemented
    def __ifloordiv__(self, other): return NotImplemented
    def __imod__(self, other): return NotImplemented
    def __idivmod__(self, other): return NotImplemented  # not a real protocol method; kept for symmetry
    def __ilshift__(self, other): return NotImplemented
    def __irshift__(self, other): return NotImplemented
    def __iand__(self, other): return NotImplemented
    def __ixor__(self, other): return NotImplemented
    def __ior__(self, other): return NotImplemented
    def __invert__(self): return NotImplemented  # For ~x
    def __complex__(self): return NotImplemented
    def __int__(self): return NotImplemented
    def __long__(self): return NotImplemented
    def __float__(self): return NotImplemented
    def __oct__(self): return NotImplemented
    def __hex__(self): return NotImplemented
    def __index__(self): return NotImplemented
    def __coerce__(self): return NotImplemented

    def log(self):
        """Natural log with gaussian error propagation."""
        return Uncertainty(*err1d.log(self.x, self.variance))

    def exp(self):
        """Exponential with gaussian error propagation."""
        return Uncertainty(*err1d.exp(self.x, self.variance))
def log(val):
    """Module-level convenience: natural log of *val* with error propagation.

    Bug fix: the original returned ``self.log()``, but ``self`` is undefined
    at module scope (NameError); delegate to the argument instead.
    """
    return val.log()
def exp(val):
    """Module-level convenience: exponential of *val* with error propagation.

    Bug fix: the original returned ``self.exp()``, but ``self`` is undefined
    at module scope (NameError); delegate to the argument instead.
    """
    return val.exp()
def test():
    """Self-test exercising scalar and vector uncertainty propagation.

    Verifies copy, reverse, in-place and power operations against values
    computed by hand, plus slicing and formatted printing.  Raises
    AssertionError on the first mismatch.
    """
    a = Uncertainty(5,3)
    b = Uncertainty(4,2)

    # Scalar operations
    z = a+4
    assert z.x == 5+4 and z.variance == 3
    z = a-4
    assert z.x == 5-4 and z.variance == 3
    z = a*4
    assert z.x == 5*4 and z.variance == 3*4**2
    z = a/4
    assert z.x == 5./4 and z.variance == 3./4**2

    # Reverse scalar operations
    z = 4+a
    assert z.x == 4+5 and z.variance == 3
    z = 4-a
    assert z.x == 4-5 and z.variance == 3
    z = 4*a
    assert z.x == 4*5 and z.variance == 3*4**2
    z = 4/a
    assert z.x == 4./5 and abs(z.variance - 3./5**4 * 4**2) < 1e-15

    # Power operations
    z = a**2
    assert z.x == 5**2 and z.variance == 4*3*5**2
    z = a**1
    assert z.x == 5**1 and z.variance == 3
    z = a**0
    assert z.x == 5**0 and z.variance == 0
    z = a**-1
    assert z.x == 5**-1 and abs(z.variance - 3./5**4) < 1e-15

    # Binary operations
    z = a+b
    assert z.x == 5+4 and z.variance == 3+2
    z = a-b
    assert z.x == 5-4 and z.variance == 3+2
    z = a*b
    assert z.x == 5*4 and z.variance == (5**2*2 + 4**2*3)
    z = a/b
    assert z.x == 5./4 and abs(z.variance - (3./5**2 + 2./4**2)*(5./4)**2) < 1e-15

    # ===== Inplace operations =====
    # Scalar operations
    y = a+0; y += 4
    z = a+4
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15
    y = a+0; y -= 4
    z = a-4
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15
    y = a+0; y *= 4
    z = a*4
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15
    y = a+0; y /= 4
    z = a/4
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15

    # Power operations
    y = a+0; y **= 4
    z = a**4
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15

    # Binary operations
    y = a+0; y += b
    z = a+b
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15
    y = a+0; y -= b
    z = a-b
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15
    y = a+0; y *= b
    z = a*b
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15
    y = a+0; y /= b
    z = a/b
    assert y.x == z.x and abs(y.variance-z.variance) < 1e-15

    # =============== vector operations ================
    # Slicing
    z = Uncertainty(np.array([1,2,3,4,5]),np.array([2,1,2,3,2]))
    assert z[2].x == 3 and z[2].variance == 2
    assert (z[2:4].x == [3,4]).all()
    assert (z[2:4].variance == [2,3]).all()
    z[2:4] = Uncertainty(np.array([8,7]),np.array([4,5]))
    assert z[2].x == 8 and z[2].variance == 4
    A = Uncertainty(np.array([a.x]*2),np.array([a.variance]*2))
    B = Uncertainty(np.array([b.x]*2),np.array([b.variance]*2))
    # TODO complete tests of copy and inplace operations for vectors and slices.

    # Binary operations
    z = A+B
    assert (z.x == 5+4).all() and (z.variance == 3+2).all()
    z = A-B
    assert (z.x == 5-4).all() and (z.variance == 3+2).all()
    z = A*B
    assert (z.x == 5*4).all() and (z.variance == (5**2*2 + 4**2*3)).all()
    z = A/B
    assert (z.x == 5./4).all()
    assert (abs(z.variance - (3./5**2 + 2./4**2)*(5./4)**2) < 1e-15).all()

    # printing; note that sqrt(3) ~ 1.7
    assert str(Uncertainty(5,3)) == "5.0(17)"
    assert str(Uncertainty(15,3)) == "15.0(17)"
    assert str(Uncertainty(151.23356,0.324185**2)) == "151.23(32)"
# Run the self-test when this module is executed directly.
if __name__ == "__main__": test()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import uuid
from keystone.auth import controllers as auth_controllers
from keystone.common import dependency
from keystone.common import serializer
from keystone import config
from keystone.contrib.federation import controllers as federation_controllers
from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.tests import mapping_fixtures
from keystone.tests import test_v3
CONF = config.CONF
LOG = log.getLogger(__name__)
def dummy_validator(*args, **kwargs):
    """No-op entity validator: accepts any arguments and checks nothing.

    Used where assertValidResponse/assertValidListResponse require a
    validator callback but the test only cares about specific keys.
    """
    return None
# The federation_api backend must be loaded for these tests.
@dependency.requires('federation_api')
class FederationTests(test_v3.RestfulTestCase):
    """Base class for OS-FEDERATION extension tests."""

    # Extension name and the config entry used to enable it in the harness.
    EXTENSION_NAME = 'federation'
    EXTENSION_TO_ADD = 'federation_extension'
class FederatedIdentityProviderTests(FederationTests):
    """A test class for Identity Providers."""

    # Keys expected in every IdP entity returned by the API.
    idp_keys = ['description', 'enabled']
    # Default request body used when a test does not supply its own.
    default_body = {'description': None, 'enabled': True}

    def base_url(self, suffix=None):
        """Return the identity-providers URL, optionally with a suffix."""
        if suffix is not None:
            return '/OS-FEDERATION/identity_providers/' + str(suffix)
        return '/OS-FEDERATION/identity_providers'

    def _fetch_attribute_from_response(self, resp, parameter,
                                       assert_is_not_none=True):
        """Fetch single attribute from TestResponse object."""
        result = resp.result.get(parameter)
        if assert_is_not_none:
            self.assertIsNotNone(result)
        return result

    def _create_and_decapsulate_response(self, body=None):
        """Create IdP and fetch its random id along with entity."""
        default_resp = self._create_default_idp(body=body)
        idp = self._fetch_attribute_from_response(default_resp,
                                                  'identity_provider')
        self.assertIsNotNone(idp)
        idp_id = idp.get('id')
        return (idp_id, idp)

    def _get_idp(self, idp_id):
        """Fetch IdP entity based on its id."""
        url = self.base_url(suffix=idp_id)
        resp = self.get(url)
        return resp

    def _create_default_idp(self, body=None):
        """Create default IdP."""
        # PUT to a random suffix; the server uses it as the new IdP id.
        url = self.base_url(suffix=uuid.uuid4().hex)
        if body is None:
            body = self._http_idp_input()
        resp = self.put(url, body={'identity_provider': body},
                        expected_status=201)
        return resp

    def _http_idp_input(self, **kwargs):
        """Create default input for IdP data."""
        body = None
        if 'body' not in kwargs:
            body = self.default_body.copy()
            body['description'] = uuid.uuid4().hex
        else:
            body = kwargs['body']
        return body

    def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
                                mapping_id=None, validate=True, **kwargs):
        """Assign a protocol to an IdP, creating missing pieces on the fly.

        IdP, protocol name and mapping id are generated when not supplied;
        extra kwargs (e.g. expected_status) are forwarded to the PUT call.
        """
        if url is None:
            url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        if idp_id is None:
            idp_id, _ = self._create_and_decapsulate_response()
        if proto is None:
            proto = uuid.uuid4().hex
        if mapping_id is None:
            mapping_id = uuid.uuid4().hex
        body = {'mapping_id': mapping_id}
        url = url % {'idp_id': idp_id, 'protocol_id': proto}
        resp = self.put(url, body={'protocol': body}, **kwargs)
        if validate:
            self.assertValidResponse(resp, 'protocol', dummy_validator,
                                     keys_to_check=['id', 'mapping_id'],
                                     ref={'id': proto,
                                          'mapping_id': mapping_id})
        return (resp, idp_id, proto)

    def _get_protocol(self, idp_id, protocol_id):
        """GET a single protocol entity attached to an IdP."""
        url = "%s/protocols/%s" % (idp_id, protocol_id)
        url = self.base_url(suffix=url)
        r = self.get(url)
        return r

    def test_create_idp(self):
        """Creates the IdentityProvider entity."""
        keys_to_check = self.idp_keys
        body = self._http_idp_input()
        resp = self._create_default_idp(body=body)
        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
                                 keys_to_check=keys_to_check,
                                 ref=body)

    def test_list_idps(self, iterations=5):
        """Lists all available IdentityProviders.

        This test collects ids of created IdPs and
        intersects it with the list of all available IdPs.
        List of all IdPs can be a superset of IdPs created in this test,
        because other tests also create IdPs.

        """
        def get_id(resp):
            r = self._fetch_attribute_from_response(resp,
                                                    'identity_provider')
            return r.get('id')

        ids = []
        for _ in range(iterations):
            id = get_id(self._create_default_idp())
            ids.append(id)
        ids = set(ids)

        keys_to_check = self.idp_keys
        url = self.base_url()
        resp = self.get(url)
        self.assertValidListResponse(resp, 'identity_providers',
                                     dummy_validator,
                                     keys_to_check=keys_to_check)
        entities = self._fetch_attribute_from_response(resp,
                                                       'identity_providers')
        entities_ids = set([e['id'] for e in entities])
        ids_intersection = entities_ids.intersection(ids)
        self.assertEqual(ids_intersection, ids)

    def test_check_idp_uniqueness(self):
        """Add same IdP twice.

        Expect HTTP 409 code for the latter call.

        """
        url = self.base_url(suffix=uuid.uuid4().hex)
        body = self._http_idp_input()
        self.put(url, body={'identity_provider': body},
                 expected_status=201)
        self.put(url, body={'identity_provider': body},
                 expected_status=409)

    def test_get_idp(self):
        """Create and later fetch IdP."""
        body = self._http_idp_input()
        default_resp = self._create_default_idp(body=body)
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        resp = self.get(url)
        self.assertValidResponse(resp, 'identity_provider',
                                 dummy_validator, keys_to_check=body.keys(),
                                 ref=body)

    def test_get_nonexisting_idp(self):
        """Fetch nonexisting IdP entity.

        Expected HTTP 404 status code.

        """
        idp_id = uuid.uuid4().hex
        self.assertIsNotNone(idp_id)
        url = self.base_url(suffix=idp_id)
        self.get(url, expected_status=404)

    def test_delete_existing_idp(self):
        """Create and later delete IdP.

        Expect HTTP 404 for the GET IdP call.
        """
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        self.assertIsNotNone(idp_id)
        url = self.base_url(suffix=idp_id)
        self.delete(url)
        self.get(url, expected_status=404)

    def test_delete_nonexisting_idp(self):
        """Delete nonexisting IdP.

        Expect HTTP 404 for the GET IdP call.
        """
        idp_id = uuid.uuid4().hex
        url = self.base_url(suffix=idp_id)
        self.delete(url, expected_status=404)

    def test_update_idp_mutable_attributes(self):
        """Update IdP's mutable parameters."""
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        self.assertIsNotNone(idp_id)

        # Flip 'enabled' and change the description, then PATCH.
        _enabled = not default_idp.get('enabled')
        body = {'description': uuid.uuid4().hex, 'enabled': _enabled}
        body = {'identity_provider': body}
        resp = self.patch(url, body=body)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        body = body['identity_provider']
        for key in body.keys():
            self.assertEqual(body[key], updated_idp.get(key))

        # A subsequent GET must show the same updated values.
        resp = self.get(url)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        for key in body.keys():
            self.assertEqual(body[key], updated_idp.get(key))

    def test_update_idp_immutable_attributes(self):
        """Update IdP's immutable parameters.

        Expect HTTP 403 code.

        """
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        self.assertIsNotNone(idp_id)

        body = self._http_idp_input()
        body['id'] = uuid.uuid4().hex
        body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]

        url = self.base_url(suffix=idp_id)
        self.patch(url, body={'identity_provider': body}, expected_status=403)

    def test_update_nonexistent_idp(self):
        """Update nonexistent IdP.

        Expect HTTP 404 code.

        """
        idp_id = uuid.uuid4().hex
        url = self.base_url(suffix=idp_id)
        body = self._http_idp_input()
        body['enabled'] = False
        body = {'identity_provider': body}
        self.patch(url, body=body, expected_status=404)

    def test_assign_protocol_to_idp(self):
        """Assign a protocol to existing IdP."""
        self._assign_protocol_to_idp(expected_status=201)

    def test_protocol_composite_pk(self):
        """Test that identically named protocols may attach to different IdPs.

        1. Add IdP and assign it protocol with predefined name
        2. Add another IdP and assign it a protocol with same name.

        Expect HTTP 201 code

        """
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        kwargs = {'expected_status': 201}
        self._assign_protocol_to_idp(proto='saml2',
                                     url=url, **kwargs)
        self._assign_protocol_to_idp(proto='saml2',
                                     url=url, **kwargs)

    def test_protocol_idp_pk_uniqueness(self):
        """Test whether Keystone checks for unique idp/protocol values.

        Add same protocol twice, expect Keystone to reject a latter call and
        return HTTP 409 code.

        """
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        kwargs = {'expected_status': 201}
        resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
                                                           url=url, **kwargs)
        kwargs = {'expected_status': 409}
        resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
                                                           proto='saml2',
                                                           validate=False,
                                                           url=url, **kwargs)

    def test_assign_protocol_to_nonexistent_idp(self):
        """Assign protocol to IdP that doesn't exist.

        Expect HTTP 404 code.

        """
        idp_id = uuid.uuid4().hex
        kwargs = {'expected_status': 404}
        self._assign_protocol_to_idp(proto='saml2',
                                     idp_id=idp_id,
                                     validate=False,
                                     **kwargs)

    def test_get_protocol(self):
        """Create and later fetch protocol tied to IdP."""
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
        url = "%s/protocols/%s" % (idp_id, proto_id)
        url = self.base_url(suffix=url)
        resp = self.get(url)
        reference = {'id': proto_id}
        self.assertValidResponse(resp, 'protocol',
                                 dummy_validator,
                                 keys_to_check=reference.keys(),
                                 ref=reference)

    def test_list_protocols(self):
        """Create set of protocols and later list them.

        Compare input and output id sets.

        """
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        iterations = random.randint(0, 16)
        protocol_ids = []
        for _ in range(iterations):
            resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
                                                          expected_status=201)
            proto_id = self._fetch_attribute_from_response(resp, 'protocol')
            proto_id = proto_id['id']
            protocol_ids.append(proto_id)

        url = "%s/protocols" % idp_id
        url = self.base_url(suffix=url)
        resp = self.get(url)
        self.assertValidListResponse(resp, 'protocols',
                                     dummy_validator,
                                     keys_to_check=['id'])
        entities = self._fetch_attribute_from_response(resp, 'protocols')
        entities = set([entity['id'] for entity in entities])
        protocols_intersection = entities.intersection(protocol_ids)
        self.assertEqual(protocols_intersection, set(protocol_ids))

    def test_update_protocols_attribute(self):
        """Update protocol's attribute."""
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        new_mapping_id = uuid.uuid4().hex

        url = "%s/protocols/%s" % (idp_id, proto)
        url = self.base_url(suffix=url)
        body = {'mapping_id': new_mapping_id}
        resp = self.patch(url, body={'protocol': body})
        self.assertValidResponse(resp, 'protocol', dummy_validator,
                                 keys_to_check=['id', 'mapping_id'],
                                 ref={'id': proto,
                                      'mapping_id': new_mapping_id}
                                 )

    def test_delete_protocol(self):
        """Delete protocol.

        Expect HTTP 404 code for the GET call after the protocol is deleted.

        """
        url = self.base_url(suffix='/%(idp_id)s/'
                                   'protocols/%(protocol_id)s')
        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
        url = url % {'idp_id': idp_id,
                     'protocol_id': proto}
        self.delete(url)
        self.get(url, expected_status=404)
class MappingCRUDTests(FederationTests):
    """A class for testing CRUD operations for Mappings."""

    MAPPING_URL = '/OS-FEDERATION/mappings/'

    def assertValidMappingListResponse(self, resp, *args, **kwargs):
        """Validate a list-of-mappings response."""
        return self.assertValidListResponse(
            resp,
            'mappings',
            self.assertValidMapping,
            keys_to_check=[],
            *args,
            **kwargs)

    def assertValidMappingResponse(self, resp, *args, **kwargs):
        """Validate a single-mapping response."""
        return self.assertValidResponse(
            resp,
            'mapping',
            self.assertValidMapping,
            keys_to_check=[],
            *args,
            **kwargs)

    def assertValidMapping(self, entity, ref=None):
        """Check a mapping entity has id and rules; compare rules to *ref*."""
        self.assertIsNotNone(entity.get('id'))
        self.assertIsNotNone(entity.get('rules'))
        if ref:
            self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
        return entity

    def _create_default_mapping_entry(self):
        """PUT the MAPPING_LARGE fixture under a random mapping id."""
        url = self.MAPPING_URL + uuid.uuid4().hex
        resp = self.put(url,
                        body={'mapping': mapping_fixtures.MAPPING_LARGE},
                        expected_status=201)
        return resp

    def _get_id_from_response(self, resp):
        """Extract the mapping id from a response."""
        r = resp.result.get('mapping')
        return r.get('id')

    def test_mapping_create(self):
        resp = self._create_default_mapping_entry()
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)

    def test_mapping_list(self):
        url = self.MAPPING_URL
        self._create_default_mapping_entry()
        resp = self.get(url)
        entities = resp.result.get('mappings')
        self.assertIsNotNone(entities)
        self.assertResponseStatus(resp, 200)
        self.assertValidListLinks(resp.result.get('links'))
        self.assertEqual(len(entities), 1)

    def test_mapping_delete(self):
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': str(mapping_id)}
        resp = self.delete(url)
        self.assertResponseStatus(resp, 204)
        self.get(url, expected_status=404)

    def test_mapping_get(self):
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': mapping_id}
        resp = self.get(url)
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)

    def test_mapping_update(self):
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': mapping_id}
        resp = self.patch(url,
                          body={'mapping': mapping_fixtures.MAPPING_SMALL})
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
        resp = self.get(url)
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)

    def test_delete_mapping_dne(self):
        # 'dne' == does not exist; expect 404.
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.delete(url, expected_status=404)

    def test_get_mapping_dne(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.get(url, expected_status=404)

    # The following tests exercise schema validation of mapping creation:
    # each malformed fixture must be rejected with HTTP 400.
    def test_create_mapping_bad_requirements(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})

    def test_create_mapping_no_rules(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_NO_RULES})

    def test_create_mapping_no_remote_objects(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})

    def test_create_mapping_bad_value(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})

    def test_create_mapping_missing_local(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})

    def test_create_mapping_missing_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})

    def test_create_mapping_wrong_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})

    def test_create_mapping_extra_remote_properties_not_any_of(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
        self.put(url, expected_status=400, body={'mapping': mapping})

    def test_create_mapping_extra_remote_properties_any_one_of(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
        self.put(url, expected_status=400, body={'mapping': mapping})

    def test_create_mapping_extra_remote_properties_just_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
        self.put(url, expected_status=400, body={'mapping': mapping})

    def test_create_mapping_empty_map(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': {}})

    def test_create_mapping_extra_rules_properties(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=400,
                 body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
class MappingRuleEngineTests(FederationTests):
    """A class for testing the mapping rule engine."""

    def test_rule_engine_any_one_of_and_direct_mapping(self):
        """Should return user's name and group id EMPLOYEE_GROUP_ID.

        The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
        This will test the case where `any_one_of` is valid, and there is
        a direct mapping for the users name.

        """
        mapping = mapping_fixtures.MAPPING_LARGE
        assertion = mapping_fixtures.ADMIN_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        fn = assertion.get('FirstName')
        ln = assertion.get('LastName')
        full_name = '%s %s' % (fn, ln)

        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
        self.assertEqual(name, full_name)

    def test_rule_engine_no_regex_match(self):
        """Should deny authorization, the email of the tester won't match.

        This will not match since the email in the assertion will fail
        the regex test. It is set to match any @example.com address.
        But the incoming value is set to eviltester@example.org.
        RuleProcessor should raise exception.Unauthorized exception.

        """
        mapping = mapping_fixtures.MAPPING_LARGE
        assertion = mapping_fixtures.BAD_TESTER_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        self.assertRaises(exception.Unauthorized,
                          rp.process, assertion)

    def test_rule_engine_regex_many_groups(self):
        """Should return group CONTRACTOR_GROUP_ID.

        The TESTER_ASSERTION should successfully have a match in
        MAPPING_TESTER_REGEX. This will test the case where many groups
        are in the assertion, and a regex value is used to try and find
        a match.

        """
        mapping = mapping_fixtures.MAPPING_TESTER_REGEX
        assertion = mapping_fixtures.TESTER_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(user_name, name)
        self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)

    def test_rule_engine_any_one_of_many_rules(self):
        """Should return group CONTRACTOR_GROUP_ID.

        The CONTRACTOR_ASSERTION should successfully have a match in
        MAPPING_SMALL. This will test the case where many rules
        must be matched, including an `any_one_of`, and a direct
        mapping.

        """
        mapping = mapping_fixtures.MAPPING_SMALL
        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(user_name, name)
        self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)

    def test_rule_engine_not_any_of_and_direct_mapping(self):
        """Should return user's name and email.

        The CUSTOMER_ASSERTION should successfully have a match in
        MAPPING_LARGE. This will test the case where a requirement
        has `not_any_of`, and direct mapping to a username, no group.

        """
        mapping = mapping_fixtures.MAPPING_LARGE
        assertion = mapping_fixtures.CUSTOMER_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(name, user_name)
        self.assertEqual(group_ids, [])

    def test_rule_engine_not_any_of_many_rules(self):
        """Should return group EMPLOYEE_GROUP_ID.

        The EMPLOYEE_ASSERTION should successfully have a match in
        MAPPING_SMALL. This will test the case where many remote
        rules must be matched, including a `not_any_of`.

        """
        mapping = mapping_fixtures.MAPPING_SMALL
        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(name, user_name)
        self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)

    def _rule_engine_regex_match_and_many_groups(self, assertion):
        """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.

        A helper function injecting assertion passed as an argument.
        Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.

        """
        mapping = mapping_fixtures.MAPPING_LARGE
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        values = rp.process(assertion)

        user_name = assertion.get('UserName')
        group_ids = values.get('group_ids')
        name = values.get('name')

        self.assertEqual(user_name, name)
        self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
        self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)

    def test_rule_engine_regex_match_and_many_groups(self):
        """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.

        The TESTER_ASSERTION should successfully have a match in
        MAPPING_LARGE. This will test a successful regex match
        for an `any_one_of` evaluation type, and will have many
        groups returned.

        """
        self._rule_engine_regex_match_and_many_groups(
            mapping_fixtures.TESTER_ASSERTION)

    def test_rule_engine_discards_nonstring_objects(self):
        """Check whether RuleProcessor discards non string objects.

        Despite the fact that assertion is malformed and contains
        non string objects, RuleProcessor should correctly discard them and
        successfully have a match in MAPPING_LARGE.

        """
        self._rule_engine_regex_match_and_many_groups(
            mapping_fixtures.MALFORMED_TESTER_ASSERTION)

    def test_rule_engine_fails_after_discarding_nonstring(self):
        """Check whether RuleProcessor discards non string objects.

        Expect RuleProcessor to discard non string object, which
        is required for a correct rule match. Since no rules are
        matched expect RuleProcessor to raise exception.Unauthorized
        exception.

        """
        mapping = mapping_fixtures.MAPPING_SMALL
        rp = mapping_utils.RuleProcessor(mapping['rules'])
        assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
        self.assertRaises(exception.Unauthorized,
                          rp.process, assertion)
class FederatedTokenTests(FederationTests):
    """Tests for issuing and scoping federated (saml2) tokens."""
    # Identity provider, protocol and auth method used throughout the tests.
    IDP = 'ORG_IDP'
    PROTOCOL = 'saml2'
    AUTH_METHOD = 'saml2'
    USER = 'user@ORGANIZATION'
    # Prefix exercised by the `assertion_prefix` filtering tests.
    ASSERTION_PREFIX = 'PREFIX_'
    # Request body used to issue an unscoped federated token.
    UNSCOPED_V3_SAML2_REQ = {
        "identity": {
            "methods": [AUTH_METHOD],
            AUTH_METHOD: {
                "identity_provider": IDP,
                "protocol": PROTOCOL
            }
        }
    }
    def load_fixtures(self, fixtures):
        # NOTE(review): super() is anchored at FederationTests, so a
        # load_fixtures() defined on FederationTests itself would be
        # skipped -- confirm this is intentional.
        super(FederationTests, self).load_fixtures(fixtures)
        self.load_federation_sample_data()
    def idp_ref(self, id=None):
        """Return a reference dict for an enabled identity provider."""
        idp = {
            'id': id or uuid.uuid4().hex,
            'enabled': True,
            'description': uuid.uuid4().hex
        }
        return idp
    def proto_ref(self, mapping_id=None):
        """Return a reference dict for a federation protocol."""
        proto = {
            'id': uuid.uuid4().hex,
            'mapping_id': mapping_id or uuid.uuid4().hex
        }
        return proto
    def mapping_ref(self, rules=None):
        """Return a reference dict for a mapping (defaults to self.rules)."""
        return {
            'id': uuid.uuid4().hex,
            'rules': rules or self.rules['rules']
        }
    def _assertSerializeToXML(self, json_body):
        """Serialize JSON body to XML.
        Serialize JSON body to XML, then deserialize to JSON
        again. Expect both JSON dictionaries to be equal.
        """
        xml_body = serializer.to_xml(json_body)
        json_deserialized = serializer.from_xml(xml_body)
        self.assertDictEqual(json_deserialized, json_body)
    def _scope_request(self, unscoped_token_id, scope, scope_id):
        """Build a request scoping an unscoped token to a target.

        `scope` is either 'project' or 'domain'; `scope_id` is the id of
        the target project/domain.
        """
        return {
            'auth': {
                'identity': {
                    'methods': [
                        self.AUTH_METHOD
                    ],
                    self.AUTH_METHOD: {
                        'id': unscoped_token_id
                    }
                },
                'scope': {
                    scope: {
                        'id': scope_id
                    }
                }
            }
        }
    def _project(self, project):
        # Comparable (id, name) tuple for a project reference.
        return (project['id'], project['name'])
    def _roles(self, roles):
        # Comparable set of (id, name) tuples for a list of role references.
        return set([(r['id'], r['name']) for r in roles])
    def _check_projects_and_roles(self, token, roles, projects):
        """Check whether the projects and the roles match."""
        token_roles = token.get('roles')
        if token_roles is None:
            raise AssertionError('Roles not found in the token')
        token_roles = self._roles(token_roles)
        roles_ref = self._roles(roles)
        self.assertEqual(token_roles, roles_ref)
        token_projects = token.get('project')
        if token_projects is None:
            raise AssertionError('Projects not found in the token')
        token_projects = self._project(token_projects)
        projects_ref = self._project(projects)
        self.assertEqual(token_projects, projects_ref)
    def _check_scoped_token_attributes(self, token):
        """Check that a scoped token carries all mandatory attributes."""
        def xor_project_domain(iterable):
            # Truthy iff exactly one of 'project'/'domain' is present.
            return sum(('project' in iterable, 'domain' in iterable)) % 2
        for obj in ('user', 'catalog', 'expires_at', 'issued_at',
                    'methods', 'roles'):
            self.assertIn(obj, token)
        # Check for either project or domain
        # NOTE(review): the message below renders as
        # "You must specify eitherproject or domain." (missing space).
        if not xor_project_domain(token.keys()):
            raise AssertionError("You must specify either"
                                 "project or domain.")
        self.assertIn('OS-FEDERATION', token['user'])
        os_federation = token['user']['OS-FEDERATION']
        self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
        self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
    def _issue_unscoped_token(self,
                              assertion='EMPLOYEE_ASSERTION',
                              environment=None):
        """Issue an unscoped token for the given assertion fixture name."""
        api = federation_controllers.Auth()
        context = {'environment': environment or {}}
        self._inject_assertion(context, assertion)
        r = api.federated_authentication(context, self.IDP, self.PROTOCOL)
        return r
    def test_issue_unscoped_token(self):
        r = self._issue_unscoped_token()
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_issue_unscoped_token_with_remote_user_as_empty_string(self):
        # make sure that REMOTE_USER set as the empty string won't interfere
        r = self._issue_unscoped_token(environment={'REMOTE_USER': ''})
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_issue_unscoped_token_serialize_to_xml(self):
        """Issue unscoped token and serialize to XML.
        Make sure common.serializer doesn't complain about
        the response structure and tag names.
        """
        r = self._issue_unscoped_token()
        token_resp = r.json_body
        # Remove 'extras' if empty or None,
        # as JSON and XML (de)serializers treat
        # them differently, making dictionaries
        # comparisons fail.
        # NOTE(review): pop() without a default raises KeyError if
        # 'extras' is missing entirely -- assumes the key is always present.
        if not token_resp['token'].get('extras'):
            token_resp['token'].pop('extras')
        self._assertSerializeToXML(token_resp)
    def test_issue_unscoped_token_no_groups(self):
        self.assertRaises(exception.Unauthorized,
                          self._issue_unscoped_token,
                          assertion='BAD_TESTER_ASSERTION')
    def test_issue_unscoped_token_malformed_environment(self):
        """Test whether non string objects are filtered out.
        Put non string objects into the environment, inject
        correct assertion and try to get an unscoped token.
        Expect server not to fail on using split() method on
        non string objects and return token id in the HTTP header.
        """
        api = auth_controllers.Auth()
        context = {
            'environment': {
                'malformed_object': object(),
                'another_bad_idea': tuple(xrange(10)),
                'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
                                                  range(32)))
            }
        }
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
        r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_scope_to_project_once(self):
        r = self.v3_authenticate_token(
            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
        token_resp = r.result['token']
        project_id = token_resp['project']['id']
        self.assertEqual(project_id, self.proj_employees['id'])
        self._check_scoped_token_attributes(token_resp)
        roles_ref = [self.role_employee]
        projects_ref = self.proj_employees
        self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
    def test_scope_to_bad_project(self):
        """Scope unscoped token with a project we don't have access to."""
        self.v3_authenticate_token(
            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
            expected_status=401)
    def test_scope_to_project_multiple_times(self):
        """Try to scope the unscoped token multiple times.
        The new tokens should be scoped to:
        * Customers' project
        * Employees' project
        """
        bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
                  self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
        project_ids = (self.proj_employees['id'],
                       self.proj_customers['id'])
        for body, project_id_ref in zip(bodies, project_ids):
            r = self.v3_authenticate_token(body)
            token_resp = r.result['token']
            project_id = token_resp['project']['id']
            self.assertEqual(project_id, project_id_ref)
            self._check_scoped_token_attributes(token_resp)
    def test_scope_token_from_nonexistent_unscoped_token(self):
        """Try to scope token from non-existent unscoped token."""
        self.v3_authenticate_token(
            self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
            expected_status=404)
    def test_issue_token_from_rules_without_user(self):
        api = auth_controllers.Auth()
        context = {'environment': {}}
        self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
        self.assertRaises(exception.Unauthorized,
                          api.authenticate_for_token,
                          context, self.UNSCOPED_V3_SAML2_REQ)
    def test_issue_token_with_nonexistent_group(self):
        """Inject assertion that matches rule issuing bad group id.
        Expect server to find out that some groups are missing in the
        backend and raise exception.MappedGroupNotFound exception.
        """
        self.assertRaises(exception.MappedGroupNotFound,
                          self._issue_unscoped_token,
                          assertion='CONTRACTOR_ASSERTION')
    def test_scope_to_domain_once(self):
        r = self.v3_authenticate_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
        token_resp = r.result['token']
        domain_id = token_resp['domain']['id']
        self.assertEqual(domain_id, self.domainA['id'])
        self._check_scoped_token_attributes(token_resp)
    def test_scope_to_domain_multiple_tokens(self):
        """Issue multiple tokens scoping to different domains.
        The new tokens should be scoped to:
        * domainA
        * domainB
        * domainC
        """
        bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
                  self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
                  self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
        domain_ids = (self.domainA['id'],
                      self.domainB['id'],
                      self.domainC['id'])
        for body, domain_id_ref in zip(bodies, domain_ids):
            r = self.v3_authenticate_token(body)
            token_resp = r.result['token']
            domain_id = token_resp['domain']['id']
            self.assertEqual(domain_id, domain_id_ref)
            self._check_scoped_token_attributes(token_resp)
    def test_list_projects(self):
        url = '/OS-FEDERATION/projects'
        # NOTE(review): the tuple `token` is shadowed by the loop variable
        # of the same name below; consider renaming it to `tokens`.
        token = (self.tokens['CUSTOMER_ASSERTION'],
                 self.tokens['EMPLOYEE_ASSERTION'],
                 self.tokens['ADMIN_ASSERTION'])
        projects_refs = (set([self.proj_customers['id']]),
                         set([self.proj_employees['id'],
                              self.project_all['id']]),
                         set([self.proj_employees['id'],
                              self.project_all['id'],
                              self.proj_customers['id']]))
        for token, projects_ref in zip(token, projects_refs):
            r = self.get(url, token=token)
            projects_resp = r.result['projects']
            projects = set(p['id'] for p in projects_resp)
            self.assertEqual(projects, projects_ref)
    def test_list_domains(self):
        url = '/OS-FEDERATION/domains'
        tokens = (self.tokens['CUSTOMER_ASSERTION'],
                  self.tokens['EMPLOYEE_ASSERTION'],
                  self.tokens['ADMIN_ASSERTION'])
        domain_refs = (set([self.domainA['id']]),
                       set([self.domainA['id'],
                            self.domainB['id']]),
                       set([self.domainA['id'],
                            self.domainB['id'],
                            self.domainC['id']]))
        for token, domains_ref in zip(tokens, domain_refs):
            r = self.get(url, token=token)
            domains_resp = r.result['domains']
            domains = set(p['id'] for p in domains_resp)
            self.assertEqual(domains, domains_ref)
    def test_full_workflow(self):
        """Test 'standard' workflow for granting access tokens.
        * Issue unscoped token
        * List available projects based on groups
        * Scope token to a one of available projects
        """
        r = self._issue_unscoped_token()
        employee_unscoped_token_id = r.headers.get('X-Subject-Token')
        r = self.get('/OS-FEDERATION/projects',
                     token=employee_unscoped_token_id)
        projects = r.result['projects']
        # NOTE(review): randint's upper bound is inclusive, so this yields
        # -1..len(projects)-1. Since -1 indexes the last element, every
        # value is a valid index, but `randint(0, len(projects) - 1)` was
        # probably intended.
        random_project = random.randint(0, len(projects)) - 1
        project = projects[random_project]
        v3_scope_request = self._scope_request(employee_unscoped_token_id,
                                               'project', project['id'])
        r = self.v3_authenticate_token(v3_scope_request)
        token_resp = r.result['token']
        project_id = token_resp['project']['id']
        self.assertEqual(project_id, project['id'])
        self._check_scoped_token_attributes(token_resp)
    def test_workflow_with_groups_deletion(self):
        """Test full workflow with groups deletion before token scoping.
        The test scenario is as follows:
        - Create group ``group``
        - Create and assign roles to ``group`` and ``project_all``
        - Patch mapping rules for existing IdP so it issues group id
        - Issue unscoped token with ``group``'s id
        - Delete group ``group``
        - Scope token to ``project_all``
        - Expect HTTP 500 response
        """
        # create group and role
        group = self.new_group_ref(
            domain_id=self.domainA['id'])
        group = self.identity_api.create_group(group)
        role = self.new_role_ref()
        self.assignment_api.create_role(role['id'],
                                        role)
        # assign role to group and project_admins
        self.assignment_api.create_grant(role['id'],
                                         group_id=group['id'],
                                         project_id=self.project_all['id'])
        # Mapping rule that issues the soon-to-be-deleted group's id for
        # any assertion with LastName == 'Account'.
        rules = {
            'rules': [
                {
                    'local': [
                        {
                            'group': {
                                'id': group['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'LastName',
                            'any_one_of': [
                                'Account'
                            ]
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
        token_id = r.headers.get('X-Subject-Token')
        # delete group
        self.identity_api.delete_group(group['id'])
        # scope token to project_all, expect HTTP 500
        scoped_token = self._scope_request(
            token_id, 'project',
            self.project_all['id'])
        self.v3_authenticate_token(scoped_token, expected_status=500)
    def test_assertion_prefix_parameter(self):
        """Test parameters filtering based on the prefix.
        With ``assertion_prefix`` set to fixed, non-default value,
        issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
        Expect server to return unscoped token.
        """
        self.config_fixture.config(group='federation',
                                   assertion_prefix=self.ASSERTION_PREFIX)
        r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_assertion_prefix_parameter_expect_fail(self):
        """Test parameters filtering based on the prefix.
        With ``assertion_prefix`` default value set to empty string
        issue an unscoped token from assertion EMPLOYEE_ASSERTION.
        Next, configure ``assertion_prefix`` to value ``UserName``.
        Try issuing unscoped token with EMPLOYEE_ASSERTION.
        Expect server to raise exception.Unauthorized exception.
        """
        r = self._issue_unscoped_token()
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
        self.config_fixture.config(group='federation',
                                   assertion_prefix='UserName')
        self.assertRaises(exception.Unauthorized,
                          self._issue_unscoped_token)
    def load_federation_sample_data(self):
        """Inject additional data."""
        # Create and add domains
        self.domainA = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainA['id'],
                                          self.domainA)
        self.domainB = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainB['id'],
                                          self.domainB)
        self.domainC = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainC['id'],
                                          self.domainC)
        # Create and add projects
        self.proj_employees = self.new_project_ref(
            domain_id=self.domainA['id'])
        self.assignment_api.create_project(self.proj_employees['id'],
                                           self.proj_employees)
        self.proj_customers = self.new_project_ref(
            domain_id=self.domainA['id'])
        self.assignment_api.create_project(self.proj_customers['id'],
                                           self.proj_customers)
        self.project_all = self.new_project_ref(
            domain_id=self.domainA['id'])
        self.assignment_api.create_project(self.project_all['id'],
                                           self.project_all)
        # Create and add groups
        self.group_employees = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.group_employees = (
            self.identity_api.create_group(self.group_employees))
        self.group_customers = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.group_customers = (
            self.identity_api.create_group(self.group_customers))
        self.group_admins = self.new_group_ref(
            domain_id=self.domainA['id'])
        self.group_admins = self.identity_api.create_group(self.group_admins)
        # Create and add roles
        self.role_employee = self.new_role_ref()
        self.assignment_api.create_role(self.role_employee['id'],
                                        self.role_employee)
        self.role_customer = self.new_role_ref()
        self.assignment_api.create_role(self.role_customer['id'],
                                        self.role_customer)
        self.role_admin = self.new_role_ref()
        self.assignment_api.create_role(self.role_admin['id'],
                                        self.role_admin)
        # Employees can access
        # * proj_employees
        # * project_all
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         project_id=self.proj_employees['id'])
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         project_id=self.project_all['id'])
        # Customers can access
        # * proj_customers
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         project_id=self.proj_customers['id'])
        # Admins can access:
        # * proj_customers
        # * proj_employees
        # * project_all
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.proj_customers['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.proj_employees['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.project_all['id'])
        # NOTE(review): this grant is identical to the customer/domainA
        # grant a few lines below -- presumably redundant; verify that
        # create_grant tolerates duplicates.
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainA['id'])
        # Customers can access:
        # * domain A
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainA['id'])
        # Employees can access:
        # * domain A
        # * domain B
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         domain_id=self.domainB['id'])
        # Admins can access:
        # * domain A
        # * domain B
        # * domain C
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainB['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainC['id'])
        # Mapping rules used by the tests: employees, prefixed employees,
        # customers, admins/chiefs, a rule issuing one bogus group id
        # (Jill Smith), and a rule issuing a non-existent group (Tester).
        self.rules = {
            'rules': [
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Employee'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': self.ASSERTION_PREFIX + 'UserName'
                        },
                        {
                            'type': self.ASSERTION_PREFIX + 'orgPersonType',
                            'any_one_of': [
                                'SuperEmployee'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Customer'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_admins['id']
                            }
                        },
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Admin',
                                'Chief'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': uuid.uuid4().hex
                            }
                        },
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'FirstName',
                            'any_one_of': [
                                'Jill'
                            ]
                        },
                        {
                            'type': 'LastName',
                            'any_one_of': [
                                'Smith'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': 'this_group_no_longer_exists'
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'Email',
                            'any_one_of': [
                                'testacct@example.com'
                            ]
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Tester'
                            ]
                        }
                    ]
                },
            ]
        }
        # Add IDP
        self.idp = self.idp_ref(id=self.IDP)
        self.federation_api.create_idp(self.idp['id'],
                                       self.idp)
        # Add a mapping
        self.mapping = self.mapping_ref()
        self.federation_api.create_mapping(self.mapping['id'],
                                           self.mapping)
        # Add protocols
        self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
        self.proto_saml['id'] = self.PROTOCOL
        self.federation_api.create_protocol(self.idp['id'],
                                            self.proto_saml['id'],
                                            self.proto_saml)
        # Generate fake tokens
        context = {'environment': {}}
        self.tokens = {}
        VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
                    'ADMIN_ASSERTION')
        api = auth_controllers.Auth()
        for variant in VARIANTS:
            self._inject_assertion(context, variant)
            r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
            self.tokens[variant] = r.headers.get('X-Subject-Token')
        # Pre-built scope requests reused by the individual tests.
        self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
            uuid.uuid4().hex, 'project', self.proj_customers['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
            self.tokens['EMPLOYEE_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'project',
            self.proj_customers['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
        self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainB['id'])
        # NOTE(review): re-assigns TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER with
        # an identical request; the assignment above is dead code.
        self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain',
            self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
        self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain',
            self.domainC['id'])
    def _inject_assertion(self, context, variant):
        """Copy the named assertion fixture into the request environment."""
        assertion = getattr(mapping_fixtures, variant)
        context['environment'].update(assertion)
        context['query_string'] = []
# --- concatenation artifact: boundary between two unrelated source files ---
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 14:22:31 2016
@author: Tobias Jachowski
"""
import collections
import collections.abc
from abc import ABCMeta, abstractmethod

import matplotlib.pyplot as plt
import numpy as np

from .. import gui
from .. import helpers as hp
from .. import traces as tc
from ..evaluate import signal as sn
from ..graph import GraphMember
from ..picklable import InteractiveAttributes
class GraphicalMod(object):
    """
    Graphical representation/adjustment of a `Modification`.

    Subclasses should implement `_figure()` and `_update_fig()`, which
    return and update a matplotlib figure, respectively. The figure is
    created lazily and can be accessed by `self.figure`.

    Parameters
    ----------
    modification : Modification, optional
        The `Modification` whose parameters should be adjusted graphically.
    **kwargs
        Ignored; accepted for cooperative subclassing.
    """
    def __init__(self, modification=None, **kwargs):
        # Register the modification which should be graphically adjusted
        self.modification = modification
        # Initialize figure to None, which effectively disables
        # `self.update_fig()` and Co. and prevents them from throwing an
        # error before any figure has been created.
        self._fig = None

    def _set_plot_params(self, plot_params=None):
        """Set (default) matplotlib plot parameters via the `gui` module."""
        if plot_params is None:
            plot_params = {}
        gui.set_plot_params(plot_params=plot_params)

    def display(self, plot_params=None):
        """(Re)create and show the interactive figure."""
        self.init_fig(plot_params=plot_params)

    def init_fig(self, show=True, plot_params=None):
        """
        Create the interactive figure.

        Calls `self._figure()` to create an interactive figure and interact
        with the user to determine the parameters necessary to calculate
        the modification (see `Modification._recalculate()`), and
        `self._close_fig()` to release all references to the actors of the
        figure. `self._figure()` and `self._close_fig()` should be
        (over)written by subclasses.

        Parameters
        ----------
        show : bool
            Show the figure after creating it.
        plot_params : dict, optional
            Plot parameters forwarded to `self._set_plot_params()`.
        """
        # Only create a figure if `self._figure()` is implemented by the
        # subclass.
        if not hasattr(self, '_figure'):
            return
        # Close a possibly existing figure first. The nbagg backend needs
        # the figure to be closed and recreated whenever the code of the
        # cell displaying the figure is executed; a simple update would let
        # it disappear, and even `self.figure.show()` would not work
        # anymore. For other backends this only costs a bit of extra
        # calculation.
        self.close_fig()
        # Set default plot parameters; these can be recalled / overwritten
        # in `self._figure()`.
        self._set_plot_params(plot_params=plot_params)
        # Create, update and optionally show the figure.
        self.figure = self._figure()
        self.update_fig()
        if show:
            self.figure.show()

    def update(self, **kwargs):
        """Update the figure (alias for `self.update_fig()`)."""
        self.update_fig(**kwargs)

    def update_fig(self, **kwargs):
        """Update and redraw the figure, if one exists."""
        if self._fig is not None:
            self._update_fig(**kwargs)
            self._figure_canvas_draw()

    def _update_fig(self, **kwargs):
        """Hook to be overwritten by subclasses; updates the figure."""
        pass

    def close_fig(self):
        """Close the figure, if one exists."""
        if self._fig is not None:
            self._pre_close_fig()
            self._close_fig()
            self._post_close_fig()

    def _pre_close_fig(self):
        """Method to be overwritten by subclasses."""
        pass

    def _close_fig(self):
        # Force a redraw of the figure before closing it.
        self._figure_canvas_draw()
        plt.close(self.figure)
        # Release the reference and with it the memory.
        self.figure = None

    def _post_close_fig(self):
        """Method to be overwritten by subclasses."""
        pass

    def _figure_canvas_draw(self):
        # Some matplotlib backends throw an error when trying to draw the
        # canvas. Ignoring that error here prevents the figure from being
        # left open and blocking the next figure; the worst consequence is
        # that the figure produced by the backend upon closing is not
        # updated. Catch `Exception` (not a bare `except:`) so that
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            # redraw the figure, before closing it
            self.figure.canvas.draw()
        except Exception:
            pass

    @property
    def figure(self):
        """
        The matplotlib figure that represents and/or adjusts the parameters
        of `self.modification`. Created lazily on first access.
        """
        # Automatically initialize a figure
        if self._fig is None:
            self.init_fig(show=False)
        # Return a previously initialized figure
        return self._fig

    @figure.setter
    def figure(self, figure):
        self._fig = figure
class Modification(GraphMember, metaclass=ABCMeta):
"""
Modification is an abstract class, that implements methods to modify the
data of a `View` (`view_apply`) and adjust the parameters which control the
behaviour of the modifications applied.
Whenever one of the parameters needed to calculate the modification is
changed, the view, this modification is applied to, is informed.
`self.set_changed()` Has to be called upon any change of the modification
that influences the behaviour of `self.modify()`. In essence, these are all
parameters that are used to determine the modification. Therefore, this
should be called by all setters of the parameters/attributes.
Every subclass of Modification has to implement a constructor method
`self.__init__(self, **kwargs)`, which calls the superclasses' constructor
and sets the traces, the modification is applied to with the keyword
parameter `traces_apply`. An example could be:
super().__init__(traces_apply=['psdX', 'psdZ'], **kwargs)
"""
# set a graphical modification, which will, per default, do nothing
GRAPHICALMOD = GraphicalMod
    def __init__(self, traces_apply=None, view_apply=None, view_based=None,
                 automatic_switch=False, datapoints=-1, **kwargs):
        """
        Parameters
        ----------
        traces_apply : list of str, optional
            Traces that are modified by this `Modification`.
        view_apply : View
            The view whose data is going to be modified (required).
        view_based : View, optional
            The view the parameters of this modification are calculated
            from.
        automatic_switch : bool
            Add a checkbox widget to switch automatic parameter
            determination on/off.
        datapoints : int
            If > 0, add a widget holding the number of datapoints used to
            calculate/visualize the modification.
        **kwargs
            Forwarded to `GraphMember.__init__()`.

        Raises
        ------
        TypeError
            If `view_apply` is None.
        """
        # Call the constructor of the superclass `GraphMember` and set the
        # maximum allowed number of parents (`view_based`) and childs
        # (`view_apply`) to one.
        super().__init__(max_children=1, max_parents=1, **kwargs)
        # A `Modification` has to be applied to a `View`!
        if view_apply is None:
            raise TypeError("Modification missing required positional argument"
                            " `view_apply`.")
        # Set the view, from where the parameters for the modification are
        # calculated from
        if view_based is not None:
            self.view_based = view_based
        # Set the view, whose data is going to be modified
        self.view_apply = view_apply
        # Set the traces, which are modified by this `Modification`
        self.traces_apply = traces_apply
        # Initialize InteractiveAttributes object, which will hold all the
        # parameters that the user should interact with.
        self.iattributes = InteractiveAttributes()
        # A checkbox to switch on/off the automatic determination of the
        # parameters that are used to calculate the modification in the method
        # `self.recalculate()`. The attribute `self.automatic` is checked in
        # the method `self.recalculate()`. If `automatic` is True, the
        # parameters are recalculated, otherwise the parameters are left
        # unchanged. Whenever `automatic` is changed (by the user or
        # automatically), `self.evaluate()` is called.
        if automatic_switch:
            self.add_iattribute('automatic', description='Automatic mode',
                                value=True, unset_automatic=False,
                                set_changed=False,
                                callback_functions=[self.evaluate])
        # A checkbox to de-/activate this `Modification`. This attribute gets
        # evaluated by `self.modify()`. If the `Modification` is active, it
        # modifies data, otherwise not, i.e. modify() returns modified or
        # unmodified original data, respectively.
        desc = "".join((self.__class__.__name__, " active"))
        self.add_iattribute('active', description=desc, value=True,
                            unset_automatic=False)
        # Datapoints is used to calculate and/or present modification. The
        # attribute `datapoints` is used to calculate a decimating factor and
        # speed up the calculations and/or plot commands.
        if datapoints > 0:
            desc = "Datapoints to calculate/visualize modification"
            self.add_iattribute('datapoints', description=desc,
                                value=datapoints, unset_automatic=False)
        # Add a Button to manually call the method `self.evaluate()`.
        self.add_iattribute('evaluate', description='Evaluate',
                            unset_automatic=False, set_changed=False,
                            callback_functions=[self.evaluate])
def add_iattribute(self, key, description=None, value=None,
unset_automatic=True, set_changed=True,
callback_functions=None, **kwargs):
"""
Add logic for automatic checkbox.
Register widget with unset_automatic=True
(-> Upon change of widget, unset automatic mode).
Change default behaviour by setting kwarg: unset_automatic = False
Add logic for triggering changed (calling self.set_changed).
Register widget with set_changed=True.
"""
if callback_functions is None:
callback_functions = []
if unset_automatic:
callback_functions.append(self._unset_automatic)
if set_changed:
callback_functions.append(self.set_changed)
self.iattributes.add(key, description=description, value=value,
callback_functions=callback_functions, **kwargs)
def _unset_automatic(self, leave_automatic=False, **kwargs):
"""
Add the logic for the automatic checkbox. If the value of an attribute
is changed and the attribute was created with `unset_automatic=True`,
deactivate the automatic mode (see `self.add_iattribute()`). To
temporarily leave the automatic mode status untouched when changing the
value of an attribute, i.e. not unset the automatic mode, set the value
of the attribute with the keyword argument `leave_automatic=True`
(see method `self.iattributes.set_value()`)
"""
if not leave_automatic:
self.iattributes.set_value('automatic', False, callback=False)
def evaluate(self):
"""
Implement the (re)calculation for the values necessary to calculate the
modification in the subclass and call recalculate() of the superclass
(this class).
"""
if self.updated:
# This method makes sure the modification is calculated with the
# current values of the View this modification is based on. It is
# called by self.modify().
# When a View requests data, it calls modify(), which in turn calls
# recalculate(). Recalculate(), if necessary, calls
# get_data_modified() from the View it is based on, which again
# triggers a call of modify() and a subsequent recalcaulte() of all
# modifications associated with this View.
# Modification need update, because view, this mod is based on,
# was changed.
# self._view_based.evaluate()is not needed, it is called via:
# recalculate() -> get_data_based() -> _view_based.get_data() ->
# get_modified_data() -> super().evaluate()
return
# Recalculate and print info of recalculated values if in automatic
# mode
if self.recalculate():
self.print_info()
# Update figure after recalculation has taken place
self.graphicalmod.update()
    def recalculate(self):
        """
        Recalculate the parameters of this modification, if necessary.

        Returns
        -------
        bool
            True if the parameters were recalculated (or, in manual mode,
            confirmed as is); False if the modification was already up to
            date.
        """
        # Check if recalculation of parameters is necessary
        if self.updated:
            return False
        # Check the attribute self.automatic, whether the parameters needed for
        # the calculation of the modification should be determined
        # automatically or not. If values are set manually, no recalculation is
        # necessary, and `self` is therefore up to date.
        if not self.automatic:
            self.updated = True
            return True
        # Recalculate the parameters, inform the view this `Modification`
        # is applied to about the change, and set `self` to be updated.
        self._recalculate()
        self.set_changed(updated=True)
        return True
def _recalculate(self):
"""
This method should be overwritten by subclasses and perform the
recalculation necessary to determine the parameters used by this
Modification to modify the data in `self._modify()`.
"""
pass
def print_info(self):
print("Values for Modification of class %s:"
% self.__class__.__name__)
if not self.automatic:
print(" Parameters set manually!")
for key, widget in self.iattributes._widgets.items():
if hasattr(widget, 'value'):
if isinstance(widget.value, float):
print(" %s: %.5f" % (widget.description, widget.value))
if isinstance(widget.value, collections.Iterable):
print(" %s: %s" % (widget.description, widget.value))
self._print_info()
def _print_info(self):
"""
This method should be overwritten by subclasses, which want to print
extra info additionally to the info of the calculated paremeters.
"""
pass
def modify(self, data, samples, traces_idx):
"""
Modifies data and returns the modified array.
Parameters
----------
data : 2D numpy.ndarray of type float
`data` holds the data to be modified
samples : index array or slice
`samples` is the index of the samples that was used to get the
`data`
traces : index array or slice
`traces` is the index of the traces that was used to get the `data`
"""
# Modification is active.
if self.active:
# Check if traces contained in data are modified by this
# modification.
data_traces = self.view_apply.idx_to_traces(traces_idx)
mod_traces = self.traces_apply
# Calculate the indices of traces contained in data and
# modification. First, calculate indices of modification traces.
mod_index = hp.overlap_index(mod_traces, data_traces)
if len(mod_index) > 0:
# At least one trace exists in both data and modification.
# Therefore, the data needs to be modified...
mod_index = hp.slicify(mod_index)
# Calculate indices of traces of the data in such a way that
# `data[:, data_index]` indexes the same traces as
# `self.traces_apply[mod_index]`
data_index = np.array([data_traces.index(trace)
for trace
in np.array(mod_traces)[mod_index]])
data_index = hp.slicify(data_index)
# Trigger a recalculation of the parameters for the
# modification (if necessary) before modifying the data.
self.evaluate()
# Modify and return the modified data
return self._modify(data=data,
samples=samples,
data_traces=data_traces,
data_index=data_index,
mod_index=mod_index)
# Return unmodified data
return data
@abstractmethod
def _modify(self, data, samples, data_traces, data_index, mod_index):
"""
Is called by self.modify() whenever data is requested and needs to be
modified.
Parameters
----------
data : 2D numpy.array()
Contains the data, indexed by samples and data_traces
samples : slice or 1D numpy.array()
Is the index of the samples contained in data, which was
given/asked by the user/process who called _get_data().
data_traces : list of str
Contains a list of traces (str) existent in data, which
was given/asked by the user/process who called _get_data().
data_index : slice or 1D numpy.array()
data[:, data_index] gives the data, which is modified by
this modification
mod_index : slice or 1D numpy.array()
np.array(self.traces_apply)[mod_index] gives the traces,
which are existent in data and also modified by this modfication.
Returns
-------
2D numpy.array()
The modified data.
"""
# modify data here, like so:
# data[:,data_index] -= modification[:,mod_index]
return data
@property
def updated(self):
return self._updated
@updated.setter
def updated(self, value):
"""
Gets set to True, after all `Views`, this `Modification` is based on,
have been updated and after this `Modification` has been recalculated.
This is automatically taken care of by `self.evaluate()` ->
`self.recalculate()`.
Gets called by a `View`, this `Modification` is based on, whenever the
`View` (a `Modification` of the `View`) has been changed. It
automatically informs its own `View`, that there was a change, by
calling `self.set_changed()`.
"""
self._updated = value
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
# If a change of an ancestor View or a MultiRegion was triggered by an
# index_shift, the modification needs to recalculate itself, i.e.
# the modification will alter its changeing behaviour. Because an
# index_shift change is only transmitted to `level=1`, inform the
# descendants of the change itself. A change of descendants is ignored.
if index_shift is not None and not calledfromself and ancestor:
self.set_changed(includeself=False)
# Update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
def _get_data(self, based=True, samples=None, traces=None, window=False,
decimate=False, copy=True):
if based:
view = self.view_based
else:
view = self.view_apply
if not isinstance(window, bool) and isinstance(window, int):
window = window
elif window:
window = self.decimate
else:
window = 1
if not isinstance(decimate, bool) and isinstance(decimate, int):
decimate = decimate
elif decimate:
decimate = self.decimate
else:
decimate = 1
if not based:
old_active = self.iattributes.active
self.iattributes.set_value('active', False, callback=False)
data = view.get_data(traces=traces, samples=samples,
moving_filter='mean', window=window,
decimate=decimate, copy=copy)
if not based:
self.iattributes.set_value('active', old_active, callback=False)
return data
def _get_data_based(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
decimate is False per default. If decimate is True, it only gets used,
if samples are set to None (step information in samples precedes over
decimate).
"""
return self._get_data(based=True, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def _get_data_apply(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
Get data of view apply with all modifications applied, except self.
This is achieved by setting the self.__active flag to False.
self.__active is intentionally set directly by accessing the attribute
and not using the property/set_active() method, to prevent firing the
self.set_changed() method within the set_active() method.
decimate is False per default. If decimate is True, it only gets used,
if samples are set to None (step information in samples precedes over
decimate).
"""
return self._get_data(based=False, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def calculate_bin_means(self, data=None, traces=None, bins=None,
datapoints_per_bin=None, sorttrace=0):
"""
Calculates binned means based on the data to be fitted. The binned
means are usually used by data fitting routines.
Parameters
----------
data : 2D numpy.ndarray of type float, optional
Defaults to `self._get_data_based(traces=traces, decimate=True)`.
traces : str or list of str, optional
Defaults to `self.traces_apply`.
bins : int, optional
Number of bins that contain the datapoints to be averaged. If
possible, it defaults to (`self.iattributes.datapoints` /
`datapoints_per_bin`), otherwise bins defaults to
(`self.view_based.datapoints` / `datapoints_per_bin`).
datapoints_per_bin : int, optional
Average number of datapoints to be averaged in one bin. Defaults to
25.
sorttrace : int, optional
Trace (column) of `data` that acts as sorting index upon binning
for the rest of the data. Defaults to the first trace of the data.
Returns
-------
1D numpy.ndarray of type float
The averaged bin values.
float
The size of one bin.
"""
# Bin data and average bins to prevent arbitrary weighting of bins with
# more datapoints
if bins is None:
bins = self._bins(datapoints_per_bin=datapoints_per_bin)
# get the traces to retrieve data from
if traces is None:
traces = self.traces_apply
# get the data to bin
if data is None:
data = self._get_data_based(traces=traces, decimate=True)
# create the bins based on one trace of the data
minimum = np.min(data[:, sorttrace])
maximum = np.max(data[:, sorttrace])
edges = np.linspace(minimum, maximum, bins + 1)
# Get the indices of the bins to which each value in input array
# belongs.
bin_idx = np.digitize(data[:, sorttrace], edges)
# Find which points are on the rightmost edge.
on_edge = data[:, sorttrace] == edges[-1]
# Shift these points one bin to the left.
bin_idx[on_edge] -= 1
# fill the bins with the means of the data contained in each bin
bin_means = np.array([data[bin_idx == i].mean(axis=0)
for i in range(1, bins + 1)
if np.any(bin_idx == i)])
bin_width = edges[1] - edges[0]
return bin_means, bin_width
def _bins(self, datapoints_per_bin=None):
# On average 25 datapoints per bin
datapoints_per_bin = datapoints_per_bin or 25
if 'datapoints' in self.iattributes:
bins = self.iattributes.datapoints / datapoints_per_bin
else:
bins = self.view_based.datapoints / datapoints_per_bin
bins = max(1, int(np.round(bins)))
return bins
_NAME = {
'position': ['positionX', 'positionY'],
'psd': ['psdX', 'psdY'],
'axis': ['X', 'Y']
}
def _excited(self, traces=None):
traces = traces or ['positionX', 'positionY']
data = self._get_data_based(traces=traces, copy=False)
return sn.get_excited_signal(data)
    def interact(self):
        """Recalculate the parameters and show the interactive controls.

        Displays the interactive attribute widgets and the graphical
        modification interface.
        """
        self.recalculate()
        self.iattributes.display()
        self.graphicalmod.display()
@property
def graphicalmod(self):
# ZODB volatile
if not hasattr(self, '_v_graphicalmod'):
self._v_graphicalmod \
= self.__class__.GRAPHICALMOD(modification=self)
return self._v_graphicalmod
@property
def active(self):
active = False
if 'active' in self.iattributes:
active = self.iattributes.active
return active
@active.setter
def active(self, active=True):
if 'active' in self.iattributes:
self.iattributes.active = active
@property
def automatic(self):
# Does the modification automatically calculate its parameters
automatic = True
if 'automatic' in self.iattributes:
automatic = self.iattributes.automatic
return automatic
@property
def datapoints(self):
if 'datapoints' in self.iattributes:
return self.iattributes.datapoints
else:
return self.view_based.datapoints
@property
def decimate(self):
if 'datapoints' in self.iattributes:
return max(1, int(np.round(self.view_based.datapoints
/ self.datapoints)))
else:
return 1
@property
def view_based(self):
return self.parent
@property
def view_apply(self):
return self.child
@view_based.setter
def view_based(self, view):
self.set_parent(view)
@view_apply.setter
def view_apply(self, view):
self.set_child(view)
def lia(self, trace):
"""
Return the local index of trace in traces_apply
"""
return self.traces_apply.index(trace)
@property
def traces_apply(self):
# return a copy to protect local copy
return self._traces_apply.copy()
@traces_apply.setter
def traces_apply(self, traces):
if traces is None:
traces_apply = []
else:
traces_apply = tc.normalize(traces)
self._traces_apply = traces_apply
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: person.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Default symbol database; the generated descriptors and message classes
# below are registered here so they can be looked up by full name.
_sym_db = _symbol_database.Default()
# File-level descriptor for person.proto (package "tutorial", proto2 syntax).
# serialized_pb is the serialized FileDescriptorProto emitted by protoc.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='person.proto',
  package='tutorial',
  syntax='proto2',
  serialized_pb=_b('\n\x0cperson.proto\x12\x08tutorial\"\xdb\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\n\n\x02id\x18\x02 \x02(\x05\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12,\n\x06phones\x18\x04 \x03(\x0b\x32\x1c.tutorial.Person.PhoneNumber\x1aM\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x02(\t\x12.\n\x04type\x18\x02 \x01(\x0e\x32\x1a.tutorial.Person.PhoneType:\x04HOME\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"/\n\x0b\x41\x64\x64ressBook\x12 \n\x06people\x18\x01 \x03(\x0b\x32\x10.tutorial.Person')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Enum descriptor for tutorial.Person.PhoneType (MOBILE=0, HOME=1, WORK=2).
_PERSON_PHONETYPE = _descriptor.EnumDescriptor(
  name='PhoneType',
  full_name='tutorial.Person.PhoneType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='MOBILE', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HOME', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='WORK', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=203,
  serialized_end=246,
)
_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE)
# Descriptor for the nested message tutorial.Person.PhoneNumber
# (required string number = 1; optional PhoneType type = 2 [default = HOME]).
_PERSON_PHONENUMBER = _descriptor.Descriptor(
  name='PhoneNumber',
  full_name='tutorial.Person.PhoneNumber',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='number', full_name='tutorial.Person.PhoneNumber.number', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='tutorial.Person.PhoneNumber.type', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=124,
  serialized_end=201,
)
# Descriptor for tutorial.Person (required string name = 1; required int32
# id = 2; optional string email = 3; repeated PhoneNumber phones = 4).
_PERSON = _descriptor.Descriptor(
  name='Person',
  full_name='tutorial.Person',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='tutorial.Person.name', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='id', full_name='tutorial.Person.id', index=1,
      number=2, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='email', full_name='tutorial.Person.email', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='phones', full_name='tutorial.Person.phones', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_PERSON_PHONENUMBER, ],
  enum_types=[
    _PERSON_PHONETYPE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=27,
  serialized_end=246,
)
# Descriptor for tutorial.AddressBook (repeated Person people = 1).
_ADDRESSBOOK = _descriptor.Descriptor(
  name='AddressBook',
  full_name='tutorial.AddressBook',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='people', full_name='tutorial.AddressBook.people', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=248,
  serialized_end=295,
)
# Wire up the cross-references between the descriptors (message/enum field
# types and their containing types), then register them on the file.
_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE
_PERSON_PHONENUMBER.containing_type = _PERSON
_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER
_PERSON_PHONETYPE.containing_type = _PERSON
_ADDRESSBOOK.fields_by_name['people'].message_type = _PERSON
DESCRIPTOR.message_types_by_name['Person'] = _PERSON
DESCRIPTOR.message_types_by_name['AddressBook'] = _ADDRESSBOOK
# Build the concrete message classes from the descriptors via the
# reflection metaclass and register them with the symbol database.
Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
  PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict(
    DESCRIPTOR = _PERSON_PHONENUMBER,
    __module__ = 'person_pb2'
    # @@protoc_insertion_point(class_scope:tutorial.Person.PhoneNumber)
    ))
  ,
  DESCRIPTOR = _PERSON,
  __module__ = 'person_pb2'
  # @@protoc_insertion_point(class_scope:tutorial.Person)
  ))
_sym_db.RegisterMessage(Person)
_sym_db.RegisterMessage(Person.PhoneNumber)
AddressBook = _reflection.GeneratedProtocolMessageType('AddressBook', (_message.Message,), dict(
  DESCRIPTOR = _ADDRESSBOOK,
  __module__ = 'person_pb2'
  # @@protoc_insertion_point(class_scope:tutorial.AddressBook)
  ))
_sym_db.RegisterMessage(AddressBook)
# @@protoc_insertion_point(module_scope)
| |
"""Tests for the Google Assistant integration."""
# Expected serialized device payloads for the demo entities, one dict per
# exposed entity (id, name, supported traits, device type, report state).
DEMO_DEVICES = [
    {
        'id': 'light.kitchen_lights',
        'name': {'name': 'Kitchen Lights'},
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Brightness',
            'action.devices.traits.ColorSetting',
        ],
        'type': 'action.devices.types.LIGHT',
        'willReportState': False,
    },
    {
        'id': 'switch.ac',
        'name': {'name': 'AC'},
        'traits': ['action.devices.traits.OnOff'],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'switch.decorative_lights',
        'name': {'name': 'Decorative Lights'},
        'traits': ['action.devices.traits.OnOff'],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'light.ceiling_lights',
        'name': {
            'name': 'Roof Lights',
            'nicknames': ['top lights', 'ceiling lights'],
        },
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Brightness',
            'action.devices.traits.ColorSetting',
        ],
        'type': 'action.devices.types.LIGHT',
        'willReportState': False,
    },
    {
        'id': 'light.bed_light',
        'name': {'name': 'Bed Light'},
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Brightness',
            'action.devices.traits.ColorSetting',
        ],
        'type': 'action.devices.types.LIGHT',
        'willReportState': False,
    },
    {
        'id': 'group.all_lights',
        'name': {'name': 'all lights'},
        'traits': ['action.devices.traits.OnOff'],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'group.all_switches',
        'name': {'name': 'all switches'},
        'traits': ['action.devices.traits.OnOff'],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'cover.living_room_window',
        'name': {'name': 'Living Room Window'},
        'traits': ['action.devices.traits.OpenClose'],
        'type': 'action.devices.types.BLINDS',
        'willReportState': False,
    },
    {
        'id': 'cover.hall_window',
        'name': {'name': 'Hall Window'},
        'traits': ['action.devices.traits.OpenClose'],
        'type': 'action.devices.types.BLINDS',
        'willReportState': False,
    },
    {
        'id': 'cover.garage_door',
        'name': {'name': 'Garage Door'},
        'traits': ['action.devices.traits.OpenClose'],
        'type': 'action.devices.types.GARAGE',
        'willReportState': False,
    },
    {
        'id': 'cover.kitchen_window',
        'name': {'name': 'Kitchen Window'},
        'traits': ['action.devices.traits.OpenClose'],
        'type': 'action.devices.types.BLINDS',
        'willReportState': False,
    },
    {
        'id': 'group.all_covers',
        'name': {'name': 'all covers'},
        'traits': ['action.devices.traits.OnOff'],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'media_player.bedroom',
        'name': {'name': 'Bedroom'},
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Brightness',
            'action.devices.traits.Modes',
        ],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'media_player.living_room',
        'name': {'name': 'Living Room'},
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Brightness',
            'action.devices.traits.Modes',
        ],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'media_player.lounge_room',
        'name': {'name': 'Lounge room'},
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Modes',
        ],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'media_player.walkman',
        'name': {'name': 'Walkman'},
        'traits': [
            'action.devices.traits.OnOff',
            'action.devices.traits.Brightness',
        ],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'fan.living_room_fan',
        'name': {'name': 'Living Room Fan'},
        'traits': [
            'action.devices.traits.FanSpeed',
            'action.devices.traits.OnOff',
        ],
        'type': 'action.devices.types.FAN',
        'willReportState': False,
    },
    {
        'id': 'fan.ceiling_fan',
        'name': {'name': 'Ceiling Fan'},
        'traits': [
            'action.devices.traits.FanSpeed',
            'action.devices.traits.OnOff',
        ],
        'type': 'action.devices.types.FAN',
        'willReportState': False,
    },
    {
        'id': 'group.all_fans',
        'name': {'name': 'all fans'},
        'traits': ['action.devices.traits.OnOff'],
        'type': 'action.devices.types.SWITCH',
        'willReportState': False,
    },
    {
        'id': 'climate.hvac',
        'name': {'name': 'Hvac'},
        'traits': ['action.devices.traits.TemperatureSetting'],
        'type': 'action.devices.types.THERMOSTAT',
        'willReportState': False,
        'attributes': {
            'availableThermostatModes': 'heat,cool,heatcool,off',
            'thermostatTemperatureUnit': 'C',
        },
    },
    {
        'id': 'climate.heatpump',
        'name': {'name': 'HeatPump'},
        'traits': ['action.devices.traits.TemperatureSetting'],
        'type': 'action.devices.types.THERMOSTAT',
        'willReportState': False,
    },
    {
        'id': 'climate.ecobee',
        'name': {'name': 'Ecobee'},
        'traits': ['action.devices.traits.TemperatureSetting'],
        'type': 'action.devices.types.THERMOSTAT',
        'willReportState': False,
    },
    {
        'id': 'lock.front_door',
        'name': {'name': 'Front Door'},
        'traits': ['action.devices.traits.LockUnlock'],
        'type': 'action.devices.types.LOCK',
        'willReportState': False,
    },
    {
        'id': 'lock.kitchen_door',
        'name': {'name': 'Kitchen Door'},
        'traits': ['action.devices.traits.LockUnlock'],
        'type': 'action.devices.types.LOCK',
        'willReportState': False,
    },
    {
        'id': 'lock.openable_lock',
        'name': {'name': 'Openable Lock'},
        'traits': ['action.devices.traits.LockUnlock'],
        'type': 'action.devices.types.LOCK',
        'willReportState': False,
    },
]
| |
"""
Project Name: Twitter Tagcloud
Author: Alexandru Buliga
Email: bugaaa92@gmail.com
"""
import sys
import re
import logging
import json
from threading import currentThread, enumerate, Lock, Thread
from collections import Counter, OrderedDict
from datetime import datetime
import tweepy
import resource
class TweetRetriever:
"""
Retrieves tweets using the Tweeter API provided by tweepy
Performs authentication with OAuth protocol
"""
def __init__(self, creds, stopwords):
"""
Constructor method
@param creds: dictionary containins authentication tokens
@param stopwords: set of words that are not taken into account
"""
self.stopwords = stopwords
self.creds = creds
# Result per page constant defined here
self.RESULTS_PER_PAGE = 100
# OAuth Authentication
self.auth = tweepy.OAuthHandler(
creds['consumer_key'], creds['consumer_secret'])
self.auth.secure = True
self.auth.set_access_token(
creds['access_token'], creds['access_token_secret'])
# Setting the Teepy API
self.api = tweepy.API(self.auth)
# Used to guarantee atomic access to the global counter
self.lock = Lock()
# Setting global word counter
self.globalWordCounter = Counter()
def doWork(self, tweetList):
"""
Function associated with worker thread; gets all the words and its
occurances in the tweetList and updated the global counter
@param tweetList: a list of tweets for the worker thread
"""
# Get the list of words
wordList = []
cleanWordList = []
for tweetText in tweetList:
wordList.extend(re.findall(r"[\w']+", tweetText.lower()))
# Convert the strings to ascii by uncommenting the line after next
for word in wordList:
# word = word.encode('ascii', 'ignore')
if word not in self.stopwords:
cleanWordList.append(word)
# Update the global counter with the local one
with self.lock:
self.globalWordCounter.update(Counter(cleanWordList))
def run(self, durationInterval, wordCount):
"""
Tweets retrieval method
@param durationInterval: the duration of the data fetch process
@param wordCount [optional]: how many results to show
"""
counter = 0
startTime = None
tweetList = []
if durationInterval <= 0:
return
# Get tweepy cursor
cursor = tweepy.Cursor(self.api.search,
q = "a",
count = self.RESULTS_PER_PAGE,
result_type = "recent",
lang = "en").items()
# Iterate all tweets in the past durationInterval seconds using Cursor
while True:
try:
tweet = cursor.next()
except tweepy.TweepError:
print "Error. Exceeded Twitter request limit.", \
"Try again in 15 minutes."
break
# Store info about the tweet
postTime = tweet.created_at
tweetList.append(tweet.text)
if startTime:
# Check if durationInterval has passed and we have to stop
if abs((postTime - startTime).total_seconds()) > durationInterval:
# Start last worker thread
Thread(target = TweetRetriever.doWork,
args = (self, tweetList)).start()
break
else:
# Mark the current time of the first retrieved tweet and count
# durationInterval seconds starting from here
startTime = postTime
counter += 1
if counter == self.RESULTS_PER_PAGE:
# Start worker thread
Thread(target = TweetRetriever.doWork,
args = (self, tweetList)).start()
counter = 0
tweetList = []
# Wait threads to finish their work
main_thread = currentThread()
for thread in enumerate():
if thread is main_thread:
continue
thread.join()
if (wordCount >= 0):
# Count how many other words there are
otherWordCounter = self.globalWordCounter.most_common()[wordCount::]
otherCount = sum(count for _, count in otherWordCounter)
# Update the global counter with the special word, other
self.globalWordCounter = self.globalWordCounter.most_common(wordCount)
self.globalWordCounter.append(('other', otherCount))
else:
self.globalWordCounter = self.globalWordCounter.most_common()
# Write results to a local JSON file
self.writeResult()
def writeResult(self):
"""
Write results to a local JSON file
"""
wcList = []
# Convert list elements to dictionary for pretty printing
for elem in self.globalWordCounter:
wcList.append(OrderedDict([('word', elem[0]), ('count', elem[1])]))
with open('results.json', 'w') as out_file:
json.dump(wcList, out_file, indent = 4, separators = (',', ': '))
def main():
"""
Main function definition
"""
# Disabling some ugly warnings
logging.captureWarnings(True)
# Verifying if the command-line arguments are passed
if len(sys.argv) < 2:
print "Error. Run: python tagcloud.py <duration> [<wordCount>]"
sys.exit()
# Getting the duration of the data fetch process
durationInterval = sys.argv[1]
wordCount = -1
try:
durationInterval = int(durationInterval)
except ValueError:
print "Error. Arguments must be numbers!"
sys.exit()
# If the word count argument is passed, get it
if len(sys.argv) == 3:
try:
wordCount = int(sys.argv[2])
except ValueError:
print "Error. Arguments must be numbers!"
sys.exit()
# Start retrieving tweets
tweetRetriever = TweetRetriever(resource.creds, resource.stopwords)
tweetRetriever.run(durationInterval, wordCount)
"""
Start main
"""
if __name__ == '__main__':
main()
| |
# -*- coding: utf-8 -*-
"""
DU task for BAR documents - see https://read02.uibk.ac.at/wiki/index.php/Document_Understanding_BAR
Here we convert the human annotation into 2 kinds of annotations:
- a semantic one: header, heading, page-number, resolution-marginalia, resolution-number, resolution-paragraph (we ignore Marginalia because only 2 occurrences)
- a segmentation one: 2 complementary labels. We call them Heigh Ho. Could have been Yin Yang as well...
- also, we store the resolution number in @DU_num
These new annotations are stored in @DU_sem , @DU_sgm , @DU_num
Copyright Naver Labs(C) 2017 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os, re
from lxml import etree
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from xml_formats.PageXml import PageXml, MultiPageXml, PageXmlException
from crf.Graph_MultiPageXml import Graph_MultiPageXml
from util.Polygon import Polygon
class DU_BAR_Convert:
    """
    Convert the human annotation into 2 kinds of annotations:
    - a semantic one: header, heading, page-number, resolution-marginalia, resolution-number, resolution-paragraph (we ignore Marginalia because only 2 occurrences)
    - a segmentation one: 2 complementary labels. We call them Heigh Ho. Could have been Yin Yang as well...
    These new annotations are stored in @DU_sem and @DU_sgm (and the
    resolution number in @DU_num).
    """
    sXml_HumanAnnotation_Extension = ".mpxml"
    sXml_MachineAnnotation_Extension = ".du_mpxml"
    sMetadata_Creator = "TranskribusDU/usecases/BAR/DU_ConvertGTAnnotation.py"
    sMetadata_Comments = "Converted human annotation into semantic and segmentation annotation. See attributes @DU_sem and @DU_sgm."
    dNS = {"pc":PageXml.NS_PAGE_XML}
    sxpNode = ".//pc:TextRegion"
    # Name of attributes for semantic / segmentation / resolution number
    sSemAttr = "DU_sem"
    sSgmAttr = "DU_sgm"
    sNumAttr = "DU_num"
    sOther = "other"
    # Mapping from the human annotation to the new semantic annotation
    dAnnotMapping = {"header"       :"header",
                     "heading"      :"heading",
                     "page-number"  :"page-number",
                     "marginalia"   : sOther,
                     "p"            :"resolution-paragraph",
                     "m"            :"resolution-marginalia",
                     ""             :"resolution-number",
                     None           : sOther  #for strange things
                     }
    creResolutionHumanLabel = re.compile("([mp]?)([0-9]+.?)") #e.g. p1 m23 456 456a
    # The two complementary segmentation labels
    sSegmHeigh = "heigh"
    sSegmHo = "ho"
    #=== CONFIGURATION ====================================================================
    def __init__(self):
        pass
    def convertDoc(self, sFilename):
        """
        Convert one human-annotated .mpxml file; write the converted document
        next to it with the .du_mpxml extension and return the new filename.
        """
        assert sFilename.endswith(self.sXml_HumanAnnotation_Extension)
        g = Graph_MultiPageXml()
        # BUGFIX: lxml's etree.parse() has no 'encoding' keyword argument
        # (passing one raised a TypeError); the XML declaration of the file
        # drives the decoding.
        doc = etree.parse(sFilename)
        #the Heigh/Ho annotation runs over consecutive pages, so we keep those values accross pages
        self._initSegmentationLabel()
        self.lSeenResoNum = list()
        for pnum, page, domNdPage in g._iter_Page_DocNode(doc):
            self._convertPageAnnotation(pnum, page, domNdPage)
        MultiPageXml.setMetadata(doc, None, self.sMetadata_Creator, self.sMetadata_Comments)
        sDUFilename = sFilename[:-len(self.sXml_HumanAnnotation_Extension)] + self.sXml_MachineAnnotation_Extension
        doc.write(sDUFilename,
                  xml_declaration=True,
                  encoding="utf-8",
                  pretty_print=True
                  )
        return sDUFilename
    # -----------------------------------------------------------------------------------------------------------
    def _initSegmentationLabel(self):
        # Reset the heigh/ho state: last seen resolution number and label.
        self.prevResolutionNumber, self.prevSgmLbl = None, None
    def _getNextSegmentationLabel(self, sPrevSegmLabel=None):
        """
        Alternate between HEIGH and HO; HEIGH comes first.
        """
        if sPrevSegmLabel == self.sSegmHeigh: return self.sSegmHo
        elif sPrevSegmLabel == self.sSegmHo: return self.sSegmHeigh
        else:
            assert sPrevSegmLabel == None
            return self.sSegmHeigh
    def _iter_TextRegionNodeTop2Bottom(self, domNdPage, page):
        """
        Given the DOM page node and the page object, iterate over the page's
        TextRegion nodes ordered from top to bottom of the page (by the
        vertical position of each region's center of mass), so that
        TextRegions of different resolutions are not interleaved.
        """
        assert self.sxpNode, "CONFIG ERROR: need an xpath expression to enumerate elements corresponding to graph nodes"
        lNdBlock = domNdPage.xpath(self.sxpNode, namespaces=self.dNS)
        #order blocks from top to bottom of page
        lOrderedNdBlock = list()
        for ndBlock in lNdBlock:
            lXY = PageXml.getPointList(ndBlock)  #the polygon
            if lXY == []:
                # BUGFIX: the message used "%x" with a str argument, which
                # itself raised a TypeError instead of this ValueError.
                raise ValueError("Node %s has invalid coordinates" % str(ndBlock))
            plg = Polygon(lXY)
            _, (xg, yg) = plg.getArea_and_CenterOfMass()
            lOrderedNdBlock.append( (yg, ndBlock) )
        lOrderedNdBlock.sort()
        for _, ndBlock in lOrderedNdBlock: yield ndBlock
    def _convertPageAnnotation(self, pnum, page, domNdPage):
        """
        Annotate each TextRegion of the page with @DU_sem and @DU_sgm (and
        @DU_num for resolution parts). The heigh/ho label switches whenever
        a new resolution number is seen.
        """
        #change: on each page we start by Heigh
        bRestartAtEachPageWithHeigh = True
        if bRestartAtEachPageWithHeigh: self._initSegmentationLabel()
        for nd in self._iter_TextRegionNodeTop2Bottom(domNdPage, page):
            try:
                lbl = PageXml.getCustomAttr(nd, "structure", "type")
            except PageXmlException:
                # this node has no annotation whatsoever
                nd.set(self.sSemAttr, self.sOther)
                nd.set(self.sSgmAttr, self.sOther)
                continue
            if lbl in ["heading", "header", "page-number", "marginalia"]:
                semLabel = lbl
                sgmLabel = self.sOther  #those elements are not part of a resolution
                sResoNum = None
            else:
                o = self.creResolutionHumanLabel.match(lbl)
                if not o: raise ValueError("%s is not a valid human annotation" % lbl)
                semLabel = o.group(1)  #"" for the resolution number
                #now decide on the segmentation label
                sResoNum = o.group(2)
                if not sResoNum: raise ValueError("%s is not a valid human annotation - missing resolution number" % lbl)
                #now switch between heigh and ho !! :))
                if self.prevResolutionNumber == sResoNum:
                    sgmLabel = self.prevSgmLbl
                else:
                    sgmLabel = self._getNextSegmentationLabel(self.prevSgmLbl)
                    assert bRestartAtEachPageWithHeigh or sResoNum not in self.lSeenResoNum, "ERROR: the ordering of the block has not preserved resolution number contiguity"
                    self.lSeenResoNum.append(sResoNum)
                self.prevResolutionNumber, self.prevSgmLbl = sResoNum, sgmLabel
            #always have a semantic label
            sNewSemLbl = self.dAnnotMapping[semLabel]
            assert sNewSemLbl
            nd.set(self.sSemAttr, sNewSemLbl)  #DU annotation
            #resolution parts also have a segmentation label and a resolution number
            assert sgmLabel
            nd.set(self.sSgmAttr, sgmLabel)  #DU annotation
            if sResoNum:
                nd.set(self.sNumAttr, sResoNum)
class DU_BAR_Convert_v2(DU_BAR_Convert):
    """
    For segmentation labels, we only use 'Heigh' or 'Ho' whatever the semantic label is, so that the task is purely a segmentation task.
    Heading indicates the start of a resolution, and is part of it.
    Anything else (header, page-number, marginalia) is part of the resolution.
    """
    def _initSegmentationLabel(self):
        # Reset the per-document state: last seen resolution number and the
        # currently active heigh/ho label.
        self.prevResolutionNumber = None
        self._curSgmLbl = None
    def _switchSegmentationLabel(self):
        """
        Switch the current label between HEIGH and HO (HEIGH when unset)
        and return the new current label.
        """
        if self._curSgmLbl == None:
            self._curSgmLbl = self.sSegmHeigh
        else:
            self._curSgmLbl = self.sSegmHeigh if self._curSgmLbl == self.sSegmHo else self.sSegmHo
        return self._curSgmLbl
    def _getCurrentSegmentationLabel(self):
        """
        Return self._curSgmLbl, or Heigh if not yet set!
        """
        if self._curSgmLbl == None: self._curSgmLbl = self.sSegmHeigh
        return self._curSgmLbl
    def _convertPageAnnotation(self, pnum, page, domNdPage):
        """
        Annotate each TextRegion of the page with @DU_sem, @DU_sgm and,
        for resolution parts, @DU_num. The heigh/ho label switches whenever
        a new resolution starts (new heading or new resolution number).
        """
        for nd in self._iter_TextRegionNodeTop2Bottom(domNdPage, page):
            try:
                sResoNum = None
                lbl = PageXml.getCustomAttr(nd, "structure", "type")
                if lbl in ["heading"]:
                    semLabel = self.dAnnotMapping[lbl]
                    #heading may indicate a new resolution!
                    if self.prevResolutionNumber == None:
                        sgmLabel = self._getCurrentSegmentationLabel() #for instance 2 consecutive headings
                    else:
                        sgmLabel = self._switchSegmentationLabel()
                        self.prevResolutionNumber = None #so that next number does not switch Heigh/Ho label
                elif lbl in ["header", "page-number", "marginalia"]:
                    #continuation of a resolution
                    semLabel = self.dAnnotMapping[lbl]
                    sgmLabel = self._getCurrentSegmentationLabel()
                else:
                    o = self.creResolutionHumanLabel.match(lbl)
                    if not o: raise ValueError("%s is not a valid human annotation" % lbl)
                    semLabel = self.dAnnotMapping[o.group(1)] #"" for the resolution number
                    #Here we have a resolution number!
                    sResoNum = o.group(2)
                    if not sResoNum: raise ValueError("%s is not a valid human annotation - missing resolution number" % lbl)
                    #now switch between heigh and ho !! :))
                    if self.prevResolutionNumber != None and self.prevResolutionNumber != sResoNum:
                        #we got a new number, so switching segmentation label!
                        sgmLabel = self._switchSegmentationLabel()
                    else:
                        #either same number or switching already done due to a heading
                        sgmLabel = self._getCurrentSegmentationLabel()
                    self.prevResolutionNumber = sResoNum
            except PageXmlException:
                # node without a structure annotation: keep it in the
                # current segment, semantic label "other"
                semLabel = self.sOther
                sgmLabel = self._getCurrentSegmentationLabel()
            nd.set(self.sSemAttr, semLabel)
            nd.set(self.sSgmAttr, sgmLabel)
            if sResoNum:
                nd.set(self.sNumAttr, sResoNum) #only when the number is part of the humanannotation!
class DU_BAR_Convert_BIES(DU_BAR_Convert):
    """
    For segmentation labels, we only use B I E S whatever the semantic label is, so that the task is purely a segmentation task.
    Heading indicate the start of a resolution, and is part of it.
    Anything else (Header page-number, marginalia) is part of the resolution.
    """
    # BIES segmentation tags: Begin / Inside / End / Singleton
    B = "B"
    I = "I"
    E = "E"
    S = "S"

    def _initSegmentationLabel(self):
        # State of the streaming BIES tagger:
        self._prevNd = None     # previously seen node; it gets its final tag one step late
        self._prevNum = False   # resolution number of the previous node (False = none seen yet)
        self._prevIsB = None    # was the previous node a segment start?

    def _convertPageAnnotation(self, pnum, page, domNdPage):
        """
        Tag each TextRegion with a semantic label and a B/I/E/S segmentation label.

        Because the B/I/E/S tag of a node depends on whether the NEXT node
        starts a new segment, each node is tagged one iteration late via
        self._prevNd; the last pending node is flushed after the loop.
        """
        for nd in self._iter_TextRegionNodeTop2Bottom(domNdPage, page):
            sResoNum = None
            bCurrentIsAStart = None
            try:
                lbl = PageXml.getCustomAttr(nd, "structure", "type")
                if lbl == "heading":
                    semLabel = self.dAnnotMapping[lbl]
                    #heading indicate the start of a new resolution, unless the previous is already a start!
                    if self._prevIsB:
                        bCurrentIsAStart = False
                    else:
                        bCurrentIsAStart = True
                    self._prevNum = False #to prevent starting again when find the resolution number
                elif lbl in ["header", "page-number", "marginalia"]:
                    semLabel = self.dAnnotMapping[lbl]
                    #continuation of a resolution, except at very beginning (first node)
                    if self._prevNd == None:
                        bCurrentIsAStart = True
                    else:
                        bCurrentIsAStart = False
                else:
                    o = self.creResolutionHumanLabel.match(lbl)
                    if not o:
                        if False: # strict
                            raise ValueError("%s is not a valid human annotation" % lbl)
                        else:
                            # relaxed: warn and carry on with the previous resolution number
                            print(" ** WARNING ** strange annotation on node id=%s : '%s'"%(nd.get("id"), lbl))
                            semLabel = self.dAnnotMapping[None]
                            #Here we have a resolution number!
                            sResoNum = self._prevNum
                    else:
                        semLabel = self.dAnnotMapping[o.group(1)] #"" for the resolution number
                        #Here we have a resolution number!
                        sResoNum = o.group(2)
                        if not sResoNum: raise ValueError("%s is not a valid human annotation - missing resolution number" % lbl)
                    if self._prevNum != False and self._prevNum != sResoNum:
                        #we got a new number, so switching segmentation label!
                        bCurrentIsAStart = True
                    else:
                        #either same number or switching already done due to a heading
                        bCurrentIsAStart = False
                    self._prevNum = sResoNum
            except PageXmlException:
                # no "structure/type" custom attribute on this node
                semLabel = self.sOther
                bCurrentIsAStart = False

            #Now tagging!!
            #Semantic (easy)
            nd.set(self.sSemAttr, semLabel)

            # BIES, tough...  (the PREVIOUS node gets its definitive tag now)
            if bCurrentIsAStart:
                if self._prevIsB:
                    #make previous a singleton!
                    if self._prevNd: self._prevNd.set(self.sSgmAttr, self.S)
                else:
                    #make previous a End
                    if self._prevNd: self._prevNd.set(self.sSgmAttr, self.E)
                self._prevIsB = True #for next cycle!
            else:
                if self._prevIsB:
                    #confirm previous a a B
                    if self._prevNd: self._prevNd.set(self.sSgmAttr, self.B)
                else:
                    #confirm previous as a I
                    if self._prevNd: self._prevNd.set(self.sSgmAttr, self.I)
                self._prevIsB = False #for next cycle!

            if sResoNum: nd.set(self.sNumAttr, sResoNum) #only when the number is part of the humanannotation!

            self._prevNd = nd #for next cycle!
        # end for

        # Flush the last pending node: a pending start becomes a Singleton,
        # otherwise it ends the segment.
        if self._prevIsB:
            #make previous a singleton!
            if self._prevNd: self._prevNd.set(self.sSgmAttr, self.S)
        else:
            #make previous a End
            if self._prevNd: self._prevNd.set(self.sSgmAttr, self.E)
        return
#------------------------------------------------------------------------------------------------------
def test_RE():
    """Sanity-check the resolution human-label regexp on a few samples."""
    cre = DU_BAR_Convert.creResolutionHumanLabel
    # (label, expected semantic prefix, expected resolution number)
    lCases = [("m103a", 'm', '103a'),
              ("103a", '', '103a'),
              ("103", '', '103')]
    for sLbl, sSem, sNum in lCases:
        o = cre.match(sLbl)
        assert o.group(1) == sSem
        assert o.group(2) == sNum
    # a leading multi-letter prefix must not match at all
    assert cre.match("az103a") is None
#------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    from optparse import OptionParser

    # Command line: no options for now, just a list of files to convert.
    parser = OptionParser(usage="BAR annotation conversion", version="1.0")
    (options, args) = parser.parse_args()

    # Converter flavour in use: pure BIES segmentation labels.
    doer = DU_BAR_Convert_BIES()
    for sFilename in args:
        print ("- Processing %s" % sFilename)
        sOutFile = doer.convertDoc(sFilename)
        print (" done --> %s" % sOutFile)
    print ("DONE.")
| |
import StringIO
import networkx as nx
# from networkx.readwrite import json_graph
import json
# --- BMG file format markers -------------------------------------------------
OPTLINE = '#'            # prefix of option/comment lines in a BMG dump
CANVAS = '_canvas'       # option tag: canvas description (ignored on load)
ATTRS = '_attributes'    # option tag: node attribute line
DB = '_database'         # option tag: source database
ARCREL = 'relation'
LINECOLOR = 'linecolor'
FILL = 'fill'

# --- colors as R/G/B strings -------------------------------------------------
BLACK = '0/0/0'
RED = '255/0/0'
GREEN = '0/255/0'
BLUE = '0/0/255'
PINK = '255/0/255'
LIGHTBLUE = '0/125/255'
YELLOW = '255/230/45'

# Display name and fill color per Biomine node type (keyed by node-name prefix).
BM_NODE_DATA = {
    'Node': {'name': 'Node', 'color': '#f0f0f0'},
    'Sequence': {'name': 'Sequence', 'color': '#d3f7a3'},
    'Gene': {'name': 'Gene', 'color': '#d3f7a3'},
    'Protein': {'name': 'Protein', 'color': '#c7f0b2'},
    'Enzyme': {'name': 'Enzyme', 'color': '#c1eaae'},
    'ProteinGroup': {'name': 'Protein group', 'color': '#c7f0b2'},
    'AllelicVariant': {'name': 'Allelic variant', 'color': '#c7f0b2'},
    'Article': {'name': 'Article', 'color': '#fae6a0'},
    'HomologGroup': {'name': 'Homolog group', 'color': '#bbedd7'},
    'OrthologGroup': {'name': 'Ortholog group', 'color': '#c2edda'},
    'GO': {'name': 'GO', 'color': '#b9daea'},
    'BiologicalProcess': {'name': 'Biological process', 'color': '#b9daea'},
    'MolecularFunction': {'name': 'Molecular function', 'color': '#bddfef'},
    'CellularComponent': {'name': 'Cellular component', 'color': '#aed6ea'},
    'Ligand': {'name': 'Ligand', 'color': '#d2cbf0'},
    'Substance': {'name': 'Substance', 'color': '#d2cbf0'},
    'Compound': {'name': 'Compound', 'color': '#d2cbf0'},
    'Drug': {'name': 'Drug', 'color': '#cbc4e9'},
    'Glycan': {'name': 'Glycan', 'color': '#c6c8f2'},
    'GenomicContext': {'name': 'Genomic context', 'color': '#ffd7fd'},
    'Locus': {'name': 'Locus', 'color': '#b4c4ef'},
    'Phenotype': {'name': 'Phenotype', 'color': '#c1d1ff'},
    'Locus/Phenotype': {'name': 'Locus/Phenotype', 'color': '#c3c7f2'},
    'Gene/Phenotype': {'name': 'Gene/Phenotype', 'color': '#d3f7a3'},
    'Family': {'name': 'Family', 'color': '#b3e2c0'},
    'Region': {'name': 'Region', 'color': '#c3e5cc'},
    'Domain': {'name': 'Domain', 'color': '#c3e5cc'},
    'Repeat': {'name': 'Repeat', 'color': '#c3e5cc'},
    'Group': {'name': 'Group', 'color': '#f0f0f0'},  # this one is strange, was added manually
    'Site': {'name': 'Site', 'color': '#bee5c9'},
    'ActiveSite': {'name': 'Active site', 'color': '#bee5c9'},
    'BindingSite': {'name': 'Binding site', 'color': '#bee5c9'},
    'PostTranslationalModification': {'name': 'Post-translational modification', 'color': '#bee5c9'},
    'Pathway': {'name': 'Pathway', 'color': '#d0b9e7'},
    'Tissue': {'name': 'Tissue', 'color': '#e5dabd'},
    'Organism': {'name': 'Organism', 'color': '#e5d7b1'},
    'MeSHHeading': {'name': 'MeSH heading', 'color': '#efe7b0'},
    'OMIM': {'name': 'OMIM', 'color': '#efe7b0'}
}

# Edge types treated as undirected when loading/exporting/drawing.
SYMMETRIC_EDGE_TYPES = ['interacts_with', 'is_related_to', 'is_homologous_to', 'overlaps', 'has_synonym', 'functionally_associated_to']
def load_BMG_to_networkx(data):
    """Parse a Biomine graph (BMG) text dump into a networkx MultiDiGraph.

    Each non-comment line is "node1 node2 relation [key=value ...]".
    Comment lines start with OPTLINE ('#'); of those only '_attributes'
    lines are used (they attach key=value pairs to an already-seen node).
    '+' in attribute values decodes to a space.  A relation name starting
    with '-' means the edge direction is reversed.  Symmetric relations
    get a mirror edge as well, since NetworkX has no mixed graphs.

    Args:
        data: the whole BMG file content as a single string.

    Returns:
        nx.MultiDiGraph with the relation name as edge key and a
        'symmetric' flag in every edge attribute dict.
    """
    graph = nx.MultiDiGraph()
    # FIX: iterate the string's lines directly instead of the former
    # StringIO write/flush/seek/readlines round-trip (also py3-safe).
    for line in data.splitlines():
        line = line.strip()
        if not line:
            continue
        elts = line.split()
        if elts[0] == OPTLINE:
            if elts[1] == CANVAS:
                continue
            elif elts[1] == ATTRS:
                # node attribute line: "# _attributes <node> k=v ..."
                node = elts[2]
                attrs = elts[3:]
                if not graph.has_node(node):
                    # attributes of not-yet-known nodes are silently dropped
                    continue
                for atr in attrs:
                    parts = atr.split('=')
                    graph.node[node][parts[0]] = parts[1].replace('+', ' ')
            else:
                # unknown tags
                continue
        else:
            # edge data; skip lines with node announcements (source nodes)
            if len(elts) < 3:
                continue
            n1 = elts[0]
            n2 = elts[1]
            rel = elts[2]
            attrs = elts[3:]
            symmetric = rel in SYMMETRIC_EDGE_TYPES
            if not graph.has_node(n1):
                graph.add_node(n1)
            if not graph.has_node(n2):
                graph.add_node(n2)
            atrDict = {}
            for atr in attrs:
                parts = atr.split('=')
                atrDict[parts[0]] = parts[1].replace('+', ' ')
            # NetworkX does not support mixed graphs, so record the flag
            atrDict['symmetric'] = symmetric
            # handle also the reverse case: '-rel' points n2 -> n1
            if rel.startswith('-'):
                rel = rel[1:]
                graph.add_edge(n2, n1, key=rel, attr_dict=atrDict)
            else:
                graph.add_edge(n1, n2, key=rel, attr_dict=atrDict)
            # simulate mixed graphs with a mirror edge
            if symmetric:
                graph.add_edge(n2, n1, key=rel, attr_dict=atrDict)
    return graph
# end
def export_to_BMG(graph):
    """Serialize a networkx graph back into the Biomine BMG text format.

    Emits one "<from> <to> <relation> [k=v ...]" line per edge, then one
    "# _attributes <node> [k=v ...]" line per node, encoding spaces in
    node attribute values as '+'.  Returns the whole dump as a string.
    """
    assert (isinstance(graph, nx.Graph))
    out = StringIO.StringIO()
    # arcs and their attributes
    for src in graph.edge:
        for dst in graph.edge[src]:
            for relType in graph.edge[src][dst]:
                pieces = ['%s %s %s' % (src, dst, relType)]
                for (atr, val) in graph.edge[src][dst][relType].items():
                    pieces.append(' %s=%s' % (atr, val))
                pieces.append('\n')
                out.write(''.join(pieces))
    # node attributes
    for node in graph.nodes_iter():
        pieces = ['%s %s %s' % (OPTLINE, ATTRS, node)]
        for (atr, val) in graph.node[node].items():
            pieces.append(' %s=%s' % (atr, val.replace(' ', '+')))
        pieces.append('\n')
        out.write(''.join(pieces))
    out.flush()
    return out.getvalue()
# end
# graph 'g' must be a Biomine graph!
def group_nodes(g):
    """Collapse sets of equivalent nodes of a Biomine graph into single
    'Group' nodes, modifying g in place.

    Two nodes are equivalent when they share the same name prefix (text
    before ':') and have exactly the same sorted sets of typed in/out
    edges.  Each multi-member group is replaced by one node named
    'Group_<prefix>_(<db>)_x<size>' that lists its members in
    'PrimaryName' and inherits the representative member's edges.

    NOTE(review): clustering keys on the prefix before ':' (below), while
    the group-node name parses the representative with '_' separators --
    this assumes ids look like '<Type>_<db>:<id>'; verify against actual
    Biomine node naming.
    """
    # useful for finding equivalent nodes (same neighbours, same edges)
    def inout_edges(g, node):
        # Sorted (neighbour, edge-type) pairs for incoming and outgoing edges.
        inedges = []
        for (fr, to, typ) in g.in_edges_iter(node, keys=True):
            inedges.append((fr, typ))
        outedges = []
        for (fr, to, typ) in g.out_edges_iter(node, keys=True):
            outedges.append((to, typ))
        return sorted(inedges), sorted(outedges)
    # end

    assert(isinstance(g, nx.Graph))
    # cluster node types
    ntypes = {}
    for node in g.nodes_iter():
        #if g.in_degree(node) > 0:
        #continue
        #prefix = node[:node.index('_')]
        prefix = node[:node.index(':')]
        if prefix not in ntypes:
            ntypes[prefix] = [node]
        else:
            ntypes[prefix].append(node)

    # not optimal, should be grouped also according to the number of in/out edges and the according to their type...
    groups = []
    for nt in ntypes:
        nodes = ntypes[nt]
        while nodes != []:
            current = nodes.pop()
            group = [current]
            ioc = inout_edges(g, current)
            for cand in nodes:
                # check if in-out edges are the same
                if ioc == inout_edges(g, cand):
                    group.append(cand)
            # drop all just-grouped nodes from the remaining work list
            nodes = list(set(nodes) - set(group))
            if len(group) > 1:
                groups.append(group)

    for group in groups:
        rep = group[0]
        prefix = rep[:rep.index('_')]
        db = rep[rep.index('_')+1:rep.index(':')]
        size = len(group)
        new = '_'.join(['Group', prefix, '(%s)' % db, 'x%d' % size])
        label = '||'.join([name for name in group])
        g.add_node(new)
        g.node[new]['PrimaryName'] = label
        # the group node inherits the representative's edges (all members
        # have identical typed edges by construction)
        for (fr, to, typ, data) in g.out_edges_iter(rep, keys=True, data=True):
            g.add_edge(new, to, key=typ, attr_dict=data)
        for (fr, to, typ, data) in g.in_edges_iter(rep, keys=True, data=True):
            g.add_edge(fr, new, key=typ, attr_dict=data)
        g.remove_nodes_from(group)
# end
def prepare_for_visjs(g):
    """Convert a Biomine networkx graph into the dict structure that the
    vis.js front end expects.

    Args:
        g: networkx multi(di)graph whose edge data carry a 'symmetric' flag.

    Returns:
        dict with keys:
            'nodes': list of vis.js node dicts,
            'arcs': list of vis.js edge dicts (one arc per undirected pair),
            'id2bmname': numeric node id -> Biomine node name,
            'groups': per-type vis.js styling derived from BM_NODE_DATA.
    """
    assert (isinstance(g, nx.Graph))
    # add ids so we can refer to nodes easily when building vis.js data structure
    id2bmname = {}
    for i, node in enumerate(g.nodes_iter()):
        g.node[node]['id'] = i
        id2bmname[i] = node

    # find groups; the last matching BM_NODE_DATA prefix wins, 'Node' is the fallback
    nodegroups = {}
    for node in g.nodes_iter():
        for prefix in BM_NODE_DATA:
            if node.lower().startswith(prefix.lower()):
                nodegroups[node] = BM_NODE_DATA[prefix]['name']
        if nodegroups.get(node) is None:
            nodegroups[node] = 'Node'

    # construct node data
    nodes = []
    for node in g.nodes_iter():
        sname = g.node[node].get('ShortName', '')
        sname = '\n' + sname if sname else sname
        # write the type above the id part of the node name
        i = node.find('_')
        if i != -1:
            # FIX: was a doubled assignment ("name = name = ...")
            name = nodegroups[node] + '\n' + node[i + 1:]
        else:
            name = node
        new = {'id': g.node[node]['id'],
               'label': name + sname,
               'bmname': node,
               'type': nodegroups[node],
               'PrimaryName': g.node[node].get('PrimaryName', ''),
               'ShortName': g.node[node].get('ShortName', ''),
               'goodness': g.node[node].get('goodness', ''),
               'group': nodegroups[node],
               'shape': 'box'
               }
        nodes.append(new)

    # construct edge data
    arcs = []
    for fromnode, tonode, etype in g.edges_iter(keys=True):
        # do not add both arcs of an undirected edge of the mixed graph simulation
        if g.edge[fromnode][tonode][etype]['symmetric']:
            skip = False
            for info in arcs:
                if info['from'] == g.node[tonode]['id'] and info['to'] == g.node[fromnode]['id']:
                    skip = True
                    break
            if skip:
                continue
        estyle = 'line' if etype in SYMMETRIC_EDGE_TYPES else 'arrow'
        new = {'from': g.node[fromnode]['id'],
               'to': g.node[tonode]['id'],
               'label': etype.replace('_', ' '),
               'goodness': g.edge[fromnode][tonode][etype].get('goodness', ''),
               'rarity': g.edge[fromnode][tonode][etype].get('rarity', ''),
               'reliability': g.edge[fromnode][tonode][etype].get('reliability', ''),
               'relevance': g.edge[fromnode][tonode][etype].get('relevance', ''),
               'source_db_name': g.edge[fromnode][tonode][etype].get('source_db_name', ''),
               'source_db_version': g.edge[fromnode][tonode][etype].get('source_db_version', ''),
               'style': estyle,
               'color': {'color': 'black', 'highlight': 'blue', 'hover': 'blue'}
               }
        arcs.append(new)

    # per-type styling for vis.js
    groups = {}
    for prefix in BM_NODE_DATA:
        groups[prefix] = {'color': {'border': BM_NODE_DATA[prefix]['color'],
                                    'background': BM_NODE_DATA[prefix]['color'],
                                    'hover': {'background': '#FF9B8C', 'border': 'red'},
                                    'highlight': '#FF6464'},
                          'shape': 'box'}

    return {'nodes': nodes,
            'arcs': arcs,
            'id2bmname': id2bmname,
            'groups': groups}
# end
| |
#!/usr/bin/env python
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Ignore indention messages, since legacy scripts use 2 spaces instead of 4.
# pylint: disable=bad-indentation,docstring-section-indent
# pylint: disable=docstring-trailing-quotes
"""Program to fetch power logging data from a sweetberry device
or other usb device that exports a USB power logging interface.
"""
# Note: This is a py2/3 compatible file.
from __future__ import print_function
import argparse
import array
from distutils import sysconfig
import json
import logging
import os
import pprint
import struct
import sys
import time
import traceback
import usb
from stats_manager import StatsManager
# Directory where hdctools installs configuration files into.
LIB_DIR = os.path.join(sysconfig.get_python_lib(standard_lib=False), 'servo',
                       'data')

# Potential config file locations: current working directory, the same directory
# as powerlog.py file or LIB_DIR.  Searched in that order by process_filename().
CONFIG_LOCATIONS = [os.getcwd(), os.path.dirname(os.path.realpath(__file__)),
                    LIB_DIR]
def logoutput(msg):
  """Write one log line to stdout and flush so output streams in real time."""
  sys.stdout.write('%s\n' % (msg,))
  sys.stdout.flush()
def process_filename(filename):
  """Resolve a config file name to a full path.

  An absolute path to an existing file is returned unchanged.  Otherwise
  the name is searched relative to each directory in CONFIG_LOCATIONS
  (cwd, this file's directory, then the hdctools install dir).

  Args:
    filename: complete file path or short file name.

  Returns:
    a complete file path.

  Raises:
    IOError if filename does not exist.
  """
  # Absolute path to an existing file: nothing to resolve.
  if os.path.isabs(filename) and os.path.isfile(filename):
    return filename
  # Otherwise try each known config location in order.
  for dirname in CONFIG_LOCATIONS:
    candidate = os.path.join(dirname, filename)
    if os.path.isfile(candidate):
      return candidate
  raise IOError('No such file or directory: \'%s\'' % filename)
class Spower(object):
  """Power class to access devices on the bus.

  Usage:
    bus = Spower()

  Instance Variables:
    _dev: pyUSB device object
    _read_ep: pyUSB read endpoint for this interface
    _write_ep: pyUSB write endpoint for this interface
  """

  # INA interface type.
  INA_POWER = 1
  INA_BUSV = 2
  INA_CURRENT = 3
  INA_SHUNTV = 4
  # INA_SUFFIX is used to differentiate multiple ina types for the same power
  # rail. No suffix for when ina type is 0 (non-existent) and when ina type is 1
  # (power, no suffix for backward compatibility).
  INA_SUFFIX = ['', '', '_busv', '_cur', '_shuntv']

  # usb power commands
  CMD_RESET = 0x0000
  CMD_STOP = 0x0001
  CMD_ADDINA = 0x0002
  CMD_START = 0x0003
  CMD_NEXT = 0x0004
  CMD_SETTIME = 0x0005

  # Map between header channel number (0-47)
  # and INA I2C bus/addr on sweetberry.
  CHMAP = {
      0: (3, 0x40),
      1: (1, 0x40),
      2: (2, 0x40),
      3: (0, 0x40),
      4: (3, 0x41),
      5: (1, 0x41),
      6: (2, 0x41),
      7: (0, 0x41),
      8: (3, 0x42),
      9: (1, 0x42),
      10: (2, 0x42),
      11: (0, 0x42),
      12: (3, 0x43),
      13: (1, 0x43),
      14: (2, 0x43),
      15: (0, 0x43),
      16: (3, 0x44),
      17: (1, 0x44),
      18: (2, 0x44),
      19: (0, 0x44),
      20: (3, 0x45),
      21: (1, 0x45),
      22: (2, 0x45),
      23: (0, 0x45),
      24: (3, 0x46),
      25: (1, 0x46),
      26: (2, 0x46),
      27: (0, 0x46),
      28: (3, 0x47),
      29: (1, 0x47),
      30: (2, 0x47),
      31: (0, 0x47),
      32: (3, 0x48),
      33: (1, 0x48),
      34: (2, 0x48),
      35: (0, 0x48),
      36: (3, 0x49),
      37: (1, 0x49),
      38: (2, 0x49),
      39: (0, 0x49),
      40: (3, 0x4a),
      41: (1, 0x4a),
      42: (2, 0x4a),
      43: (0, 0x4a),
      44: (3, 0x4b),
      45: (1, 0x4b),
      46: (2, 0x4b),
      47: (0, 0x4b),
  }

  def __init__(self, board, vendor=0x18d1,
               product=0x5020, interface=1, serialname=None):
    """Find and open the sweetberry USB power interface.

    Args:
      board: board tag ('A'/'B') recorded in every sample.
      vendor: USB vendor id to search for.
      product: USB product id to search for.
      interface: USB interface number (kept for API compatibility).
      serialname: optional serial number to pick one of several devices.

    Raises:
      Exception if no matching USB device is found.
    """
    self._logger = logging.getLogger(__name__)
    self._board = board

    # Find the stm32.
    dev_g = usb.core.find(idVendor=vendor, idProduct=product, find_all=True)
    dev_list = list(dev_g)
    # FIX: list() never returns None; an empty list means no device found
    # (the old "is None" check let an empty result fall through).
    if not dev_list:
      raise Exception("Power", "USB device not found")

    # Check if we have multiple stm32s and we've specified the serial.
    dev = None
    if serialname:
      for d in dev_list:
        try:
          dev_serial = usb.util.get_string(d, 256, d.iSerialNumber)
        except ValueError:
          # Incompatible pyUsb version.
          dev_serial = usb.util.get_string(d, d.iSerialNumber)
        if dev_serial == serialname:
          dev = d
          break
      if dev is None:
        raise Exception("Power", "USB device(%s) not found" % serialname)
    else:
      # dev_list is a real (non-empty) list here, so plain indexing is safe;
      # the old TypeError/next() fallback was dead code after list().
      dev = dev_list[0]

    self._logger.debug("Found USB device: %04x:%04x", vendor, product)
    self._dev = dev

    # Get an endpoint instance.
    try:
      dev.set_configuration()
    except usb.USBError:
      # Device may already be configured/claimed; continue with active config.
      pass
    cfg = dev.get_active_configuration()

    # The power-logging interface is vendor-specific class 255, subclass 0x54.
    intf = usb.util.find_descriptor(cfg, custom_match=lambda i: \
        i.bInterfaceClass==255 and i.bInterfaceSubClass==0x54)

    self._intf = intf
    self._logger.debug("InterfaceNumber: %s", intf.bInterfaceNumber)

    read_ep = usb.util.find_descriptor(
        intf,
        # match the first IN endpoint
        custom_match = \
        lambda e: \
        usb.util.endpoint_direction(e.bEndpointAddress) == \
        usb.util.ENDPOINT_IN
    )

    self._read_ep = read_ep
    self._logger.debug("Reader endpoint: 0x%x", read_ep.bEndpointAddress)

    write_ep = usb.util.find_descriptor(
        intf,
        # match the first OUT endpoint
        custom_match = \
        lambda e: \
        usb.util.endpoint_direction(e.bEndpointAddress) == \
        usb.util.ENDPOINT_OUT
    )

    self._write_ep = write_ep
    self._logger.debug("Writer endpoint: 0x%x", write_ep.bEndpointAddress)

    self.clear_ina_struct()
    self._logger.debug("Found power logging USB endpoint.")

  def clear_ina_struct(self):
    """Clear INA description struct."""
    self._inas = []

  def append_ina_struct(self, name, rs, port, addr,
                        data=None, ina_type=INA_POWER):
    """Add an INA descriptor into the list of active INAs.

    Args:
      name: Readable name of this channel.
      rs: Sense resistor value in ohms, floating point.
      port: I2C channel this INA is connected to.
      addr: I2C addr of this INA.
      data: Misc data for special handling, board specific.
      ina_type: INA function to use, power, voltage, etc.
    """
    ina = {}
    ina['name'] = name
    ina['rs'] = rs
    ina['port'] = port
    ina['addr'] = addr
    ina['type'] = ina_type
    # Calculate INA231 Calibration register
    # (see INA231 spec p.15)
    # CurrentLSB = uA per div = 80mV / (Rsh * 2^15)
    # CurrentLSB uA = 80000000nV / (Rsh mOhm * 0x8000)
    ina['uAscale'] = 80000000. / (rs * 0x8000)
    ina['uWscale'] = 25. * ina['uAscale']
    ina['mVscale'] = 1.25
    ina['uVscale'] = 2.5
    ina['data'] = data
    self._inas.append(ina)

  def wr_command(self, write_list, read_count=1, wtimeout=100, rtimeout=1000):
    """Write command to logger logic.

    This function writes byte command values list to stm, then reads
    byte status.

    Args:
      write_list: list of command byte values [0~255].
      read_count: number of status byte values to read.
      wtimeout: write timeout in ms.
      rtimeout: read timeout in ms.

    Interface:
      write: [command, data ... ]
      read: [status ]

    Returns:
      bytes read, or None on failure.
    """
    self._logger.debug("Spower.wr_command(write_list=[%s] (%d), read_count=%s)",
                       list(bytearray(write_list)), len(write_list), read_count)

    # Clean up args from python style to correct types.
    write_length = 0
    if write_list:
      write_length = len(write_list)
    if not read_count:
      read_count = 0

    # Send command to stm32.
    if write_list:
      cmd = write_list
      ret = self._write_ep.write(cmd, wtimeout)
      self._logger.debug("RET: %s ", ret)

    # Read back response if necessary.
    if read_count:
      bytesread = self._read_ep.read(512, rtimeout)
      self._logger.debug("BYTES: [%s]", bytesread)
      if len(bytesread) != read_count:
        # Tolerated: callers like read_line() handle multi-record replies.
        pass
      self._logger.debug("STATUS: 0x%02x", int(bytesread[0]))
      if read_count == 1:
        return bytesread[0]
      else:
        return bytesread
    return None

  def clear(self):
    """Clear pending reads on the stm32."""
    try:
      while True:
        ret = self.wr_command(b"", read_count=512, rtimeout=100, wtimeout=50)
        self._logger.debug("Try Clear: read %s",
                           "success" if ret == 0 else "failure")
    # FIX: was a bare "except:" which also swallowed KeyboardInterrupt.
    # A read timeout (USBError) is the expected way out of this loop.
    except Exception:
      pass

  def send_reset(self):
    """Reset the power interface on the stm32."""
    cmd = struct.pack("<H", self.CMD_RESET)
    ret = self.wr_command(cmd, rtimeout=50, wtimeout=50)
    self._logger.debug("Command RESET: %s",
                       "success" if ret == 0 else "failure")

  def reset(self):
    """Try resetting the USB interface until success.

    Use linear back off strategy when encounter the error with 10ms increment.

    Raises:
      Exception on failure.
    """
    max_reset_retry = 100
    for count in range(1, max_reset_retry + 1):
      self.clear()
      try:
        self.send_reset()
        return
      except Exception as e:
        self.clear()
        self.clear()
        self._logger.debug("TRY %d of %d: %s", count, max_reset_retry, e)
        time.sleep(count * 0.01)
    raise Exception("Power", "Failed to reset")

  def stop(self):
    """Stop any active data acquisition."""
    cmd = struct.pack("<H", self.CMD_STOP)
    ret = self.wr_command(cmd)
    self._logger.debug("Command STOP: %s",
                       "success" if ret == 0 else "failure")

  def start(self, integration_us):
    """Start data acquisition.

    Args:
      integration_us: int, how many us between samples, and
      how often the data block must be read.

    Returns:
      actual sampling interval in us (0 on failure).
    """
    cmd = struct.pack("<HI", self.CMD_START, integration_us)
    read = self.wr_command(cmd, read_count=5)
    actual_us = 0
    if len(read) == 5:
      ret, actual_us = struct.unpack("<BI", read)
      self._logger.debug("Command START: %s %dus",
                         "success" if ret == 0 else "failure", actual_us)
    else:
      self._logger.debug("Command START: FAIL")
    return actual_us

  def add_ina_name(self, name_tuple):
    """Add INA from board config.

    Args:
      name_tuple: name and type of power rail in board config.

    Returns:
      True if INA added, False if the INA is not on this board.

    Raises:
      Exception on unexpected failure.
    """
    name, ina_type = name_tuple
    for datum in self._brdcfg:
      if datum["name"] == name:
        rs = int(float(datum["rs"]) * 1000.)
        board = datum["sweetberry"]

        if board == self._board:
          if 'port' in datum and 'addr' in datum:
            port = datum['port']
            addr = datum['addr']
          else:
            channel = int(datum["channel"])
            port, addr = self.CHMAP[channel]
          self.add_ina(port, ina_type, addr, 0, rs, data=datum)
          return True
        else:
          # Rail exists but lives on the other sweetberry.
          return False
    raise Exception("Power", "Failed to find INA %s" % name)

  def set_time(self, timestamp_us):
    """Set sweetberry time to match host time.

    Args:
      timestamp_us: host timestmap in us.
    """
    # 0x0005 , 8 byte timestamp
    cmd = struct.pack("<HQ", self.CMD_SETTIME, timestamp_us)
    ret = self.wr_command(cmd)
    self._logger.debug("Command SETTIME: %s",
                       "success" if ret == 0 else "failure")

  def add_ina(self, bus, ina_type, addr, extra, resistance, data=None):
    """Add an INA to the data acquisition list.

    Args:
      bus: which i2c bus the INA is on. Same ordering as Si2c.
      ina_type: Ina interface: INA_POWER/BUSV/etc.
      addr: 7 bit i2c addr of this INA
      extra: extra data for nonstandard configs.
      resistance: int, shunt resistance in mOhm
      data: board-specific config blob for this rail (optional).
    """
    # 0x0002, 1B: bus, 1B:INA type, 1B: INA addr, 1B: extra, 4B: Rs
    cmd = struct.pack("<HBBBBI", self.CMD_ADDINA,
                      bus, ina_type, addr, extra, resistance)
    ret = self.wr_command(cmd)
    if ret == 0:
      if data:
        name = data['name']
      else:
        name = "ina%d_%02x" % (bus, addr)
      self.append_ina_struct(name, resistance, bus, addr,
                             data=data, ina_type=ina_type)
    self._logger.debug("Command ADD_INA: %s",
                       "success" if ret == 0 else "failure")

  def report_header_size(self):
    """Helper function to calculate power record header size."""
    result = 2
    timestamp = 8
    return result + timestamp

  def report_size(self, ina_count):
    """Helper function to calculate full power record size."""
    record = 2
    datasize = self.report_header_size() + ina_count * record
    # Round to multiple of 4 bytes.
    datasize = int(((datasize + 3) // 4) * 4)
    return datasize

  def read_line(self):
    """Read a line of data from the setup INAs.

    Returns:
      list of dicts of the values read by ina/type tuple, otherwise None.
      [{ts:100, (vbat, power):450}, {ts:200, (vbat, power):440}]
    """
    try:
      expected_bytes = self.report_size(len(self._inas))
      cmd = struct.pack("<H", self.CMD_NEXT)
      bytesread = self.wr_command(cmd, read_count=expected_bytes)
    except usb.core.USBError as e:
      self._logger.error("READ LINE FAILED %s", e)
      return None

    if len(bytesread) == 1:
      if bytesread[0] != 0x6:
        self._logger.debug("READ LINE FAILED bytes: %d ret: %02x",
                           len(bytesread), bytesread[0])
      return None

    if len(bytesread) % expected_bytes != 0:
      self._logger.debug("READ LINE WARNING: expected %d, got %d",
                         expected_bytes, len(bytesread))

    # The reply may carry several whole records; interpret each in turn.
    packet_count = len(bytesread) // expected_bytes
    values = []
    for i in range(0, packet_count):
      start = i * expected_bytes
      end = (i + 1) * expected_bytes
      record = self.interpret_line(bytesread[start:end])
      values.append(record)
    return values

  def interpret_line(self, data):
    """Interpret a power record from INAs.

    Args:
      data: one single record of bytes.

    Output:
      stdout of the record in csv format.

    Returns:
      dict containing name, value of recorded data.
    """
    status, size = struct.unpack("<BB", data[0:2])
    if len(data) != self.report_size(size):
      self._logger.error("READ LINE FAILED st:%d size:%d expected:%d len:%d",
                         status, size, self.report_size(size), len(data))
    else:
      pass

    timestamp = struct.unpack("<Q", data[2:10])[0]
    self._logger.debug("READ LINE: st:%d size:%d time:%dus", status, size,
                       timestamp)
    ftimestamp = float(timestamp) / 1000000.

    record = {"ts": ftimestamp, "status": status, "berry":self._board}

    for i in range(0, size):
      idx = self.report_header_size() + 2*i
      name = self._inas[i]['name']
      name_tuple = (self._inas[i]['name'], self._inas[i]['type'])
      raw_val = struct.unpack("<h", data[idx:idx+2])[0]
      # Scale the raw 16-bit reading according to the INA function in use.
      # NOTE(review): an unrecognized type would leave 'val' unbound; types
      # are constrained to the four INA_* constants by add_ina().
      if self._inas[i]['type'] == Spower.INA_POWER:
        val = raw_val * self._inas[i]['uWscale']
      elif self._inas[i]['type'] == Spower.INA_BUSV:
        val = raw_val * self._inas[i]['mVscale']
      elif self._inas[i]['type'] == Spower.INA_CURRENT:
        val = raw_val * self._inas[i]['uAscale']
      elif self._inas[i]['type'] == Spower.INA_SHUNTV:
        val = raw_val * self._inas[i]['uVscale']
      self._logger.debug("READ %d %s: %fs: 0x%04x %f", i, name, ftimestamp,
                         raw_val, val)
      record[name_tuple] = val

    return record

  def load_board(self, brdfile):
    """Load a board config.

    Args:
      brdfile: Filename of a json file decribing the INA wiring of this board.
    """
    with open(process_filename(brdfile)) as data_file:
      data = json.load(data_file)

    #TODO: validate this.
    self._brdcfg = data
    self._logger.debug(pprint.pformat(data))
class powerlog(object):
"""Power class to log aggregated power.
Usage:
obj = powerlog()
Instance Variables:
_data: a StatsManager object that records sweetberry readings and calculates
statistics.
_pwr[]: Spower objects for individual sweetberries.
"""
  def __init__(self, brdfile, cfgfile, serial_a=None, serial_b=None,
               sync_date=False, use_ms=False, use_mW=False, print_stats=False,
               stats_dir=None, stats_json_dir=None, print_raw_data=True,
               raw_data_dir=None):
    """Init the powerlog class and set the variables.

    Args:
      brdfile: string name of json file containing board layout.
      cfgfile: string name of json containing list of rails to read.
      serial_a: serial number of sweetberry A.
      serial_b: serial number of sweetberry B.
      sync_date: report timestamps synced with host datetime.
      use_ms: report timestamps in ms rather than us.
      use_mW: report power as milliwatts, otherwise default to microwatts.
      print_stats: print statistics for sweetberry readings at the end.
      stats_dir: directory to save sweetberry readings statistics; if None then
        do not save the statistics.
      stats_json_dir: directory to save means of sweetberry readings in json
        format; if None then do not save the statistics.
      print_raw_data: print sweetberry readings raw data in real time, default
        is to print.
      raw_data_dir: directory to save sweetberry readings raw data; if None then
        do not save the raw data.
    """
    self._logger = logging.getLogger(__name__)
    self._data = StatsManager()
    self._pwr = {}
    self._use_ms = use_ms
    self._use_mW = use_mW
    self._print_stats = print_stats
    self._stats_dir = stats_dir
    self._stats_json_dir = stats_json_dir
    self._print_raw_data = print_raw_data
    self._raw_data_dir = raw_data_dir

    # With no serials given, default to a single sweetberry 'A'.
    if not serial_a and not serial_b:
      self._pwr['A'] = Spower('A')
    if serial_a:
      self._pwr['A'] = Spower('A', serialname=serial_a)
    if serial_b:
      self._pwr['B'] = Spower('B', serialname=serial_b)

    with open(process_filename(cfgfile)) as data_file:
      names = json.load(data_file)

    # Normalize the rail list into (name, ina_type) tuples.
    self._names = self.process_scenario(names)

    for key in self._pwr:
      self._pwr[key].load_board(brdfile)
      self._pwr[key].reset()

    # Allocate the rails to the appropriate boards.
    used_boards = []
    for name in self._names:
      success = False
      for key in self._pwr.keys():
        if self._pwr[key].add_ina_name(name):
          success = True
          if key not in used_boards:
            used_boards.append(key)
      if not success:
        raise Exception("Failed to add %s (maybe missing "
                        "sweetberry, or bad board file?)" % name)

    # Evict unused boards.
    for key in list(self._pwr.keys()):
      if key not in used_boards:
        self._pwr.pop(key)

    # Sync device clocks to host time (in us) or zero them.
    for key in self._pwr.keys():
      if sync_date:
        self._pwr[key].set_time(time.time() * 1000000)
      else:
        self._pwr[key].set_time(0)
def process_scenario(self, name_list):
    """Return list of tuples indicating name and type.

    Args:
        name_list: json originated list of names, or [name, type] pairs.

    Returns:
        list of tuples of (name, type) defaulting to type "POWER".

    Raises:
        Exception: invalid INA type.
    """
    # Map of textual type names accepted in scenario files to the
    # Spower INA type constants.
    ina_types = {
        "POWER": Spower.INA_POWER,
        "BUSV": Spower.INA_BUSV,
        "CURRENT": Spower.INA_CURRENT,
        "SHUNTV": Spower.INA_SHUNTV,
    }
    names = []
    for entry in name_list:
        if isinstance(entry, list):
            name = entry[0]
            if entry[1] not in ina_types:
                # BUG FIX: the original error message omitted SHUNTV even
                # though it is an accepted type.
                raise Exception("Invalid INA type",
                                "Type of %s [%s] not recognized, try one of "
                                "POWER, BUSV, CURRENT, SHUNTV" % (entry[0], entry[1]))
            ina_type = ina_types[entry[1]]
        else:
            name = entry
            ina_type = Spower.INA_POWER
        names.append((name, ina_type))
    return names
def start(self, integration_us_request, seconds, sync_speed=.8):
    """Starts sampling.

    Args:
        integration_us_request: requested interval between sample values.
        seconds: time until exit, or None to run until cancel.
        sync_speed: A usb request is sent every [.8] * integration_us.
    """
    # We will get back the actual integration us.
    # It should be the same for all devices.
    integration_us = None
    for key in self._pwr:
        integration_us_new = self._pwr[key].start(integration_us_request)
        if integration_us:
            if integration_us != integration_us_new:
                raise Exception("FAIL",
                                "Integration on A: %dus != integration on B %dus" % (
                                    integration_us, integration_us_new))
        integration_us = integration_us_new

    # CSV header
    title = "ts:%dus" % integration_us
    for name_tuple in self._names:
        name, ina_type = name_tuple
        if ina_type == Spower.INA_POWER:
            unit = "mW" if self._use_mW else "uW"
        elif ina_type == Spower.INA_BUSV:
            unit = "mV"
        elif ina_type == Spower.INA_CURRENT:
            unit = "uA"
        elif ina_type == Spower.INA_SHUNTV:
            unit = "uV"
        title += ", %s %s" % (name, unit)
        name_type = name + Spower.INA_SUFFIX[ina_type]
        self._data.SetUnit(name_type, unit)
    title += ", status"
    if self._print_raw_data:
        logoutput(title)

    forever = False
    if not seconds:
        forever = True
    # BUG FIX: the original unconditionally evaluated
    # `end_time = time.time() + seconds`, which raises TypeError when
    # seconds is None (documented above as "run until cancel").
    end_time = None if forever else time.time() + seconds
    try:
        pending_records = []
        while forever or end_time > time.time():
            if (integration_us > 5000):
                time.sleep((integration_us / 1000000.) * sync_speed)
            for key in self._pwr:
                records = self._pwr[key].read_line()
                if not records:
                    continue
                for record in records:
                    pending_records.append(record)

            # Merge records of all boards into time-aligned aggregates.
            pending_records.sort(key=lambda r: r['ts'])
            aggregate_record = {"boards": set()}
            for record in pending_records:
                if record["berry"] not in aggregate_record["boards"]:
                    for rkey in record.keys():
                        aggregate_record[rkey] = record[rkey]
                    aggregate_record["boards"].add(record["berry"])
                else:
                    # Same board seen twice before the set completed:
                    # wait for the missing board's data.
                    self._logger.info("break %s, %s", record["berry"],
                                      aggregate_record["boards"])
                    break

                if aggregate_record["boards"] == set(self._pwr.keys()):
                    # One record from every active board: emit a CSV row.
                    csv = "%f" % aggregate_record["ts"]
                    for name in self._names:
                        if name in aggregate_record:
                            multiplier = 0.001 if (self._use_mW and
                                                   name[1] == Spower.INA_POWER) else 1
                            value = aggregate_record[name] * multiplier
                            csv += ", %.2f" % value
                            name_type = name[0] + Spower.INA_SUFFIX[name[1]]
                            self._data.AddSample(name_type, value)
                        else:
                            csv += ", "
                    csv += ", %d" % aggregate_record["status"]
                    if self._print_raw_data:
                        logoutput(csv)
                    aggregate_record = {"boards": set()}
                    for r in range(0, len(self._pwr)):
                        pending_records.pop(0)
    except KeyboardInterrupt:
        self._logger.info('\nCTRL+C caught.')
    finally:
        for key in self._pwr:
            self._pwr[key].stop()
        self._data.CalculateStats()
        if self._print_stats:
            print(self._data.SummaryToString())
        save_dir = 'sweetberry%s' % time.time()
        if self._stats_dir:
            stats_dir = os.path.join(self._stats_dir, save_dir)
            self._data.SaveSummary(stats_dir)
        if self._stats_json_dir:
            stats_json_dir = os.path.join(self._stats_json_dir, save_dir)
            self._data.SaveSummaryJSON(stats_json_dir)
        if self._raw_data_dir:
            raw_data_dir = os.path.join(self._raw_data_dir, save_dir)
            self._data.SaveRawData(raw_data_dir)
def main(argv=None):
    """Command-line entry point: parse arguments and run a powerlog session."""
    if argv is None:
        argv = sys.argv[1:]

    # Command line argument description.
    parser = argparse.ArgumentParser(
        description="Gather CSV data from sweetberry")
    parser.add_argument('-b', '--board', type=str,
        help="Board configuration file, eg. my.board", default="")
    parser.add_argument('-c', '--config', type=str,
        help="Rail config to monitor, eg my.scenario", default="")
    parser.add_argument('-A', '--serial', type=str,
        help="Serial number of sweetberry A", default="")
    parser.add_argument('-B', '--serial_b', type=str,
        help="Serial number of sweetberry B", default="")
    parser.add_argument('-t', '--integration_us', type=int,
        help="Target integration time for samples", default=100000)
    parser.add_argument('-s', '--seconds', type=float,
        help="Seconds to run capture", default=0.)
    parser.add_argument('--date', default=False,
        help="Sync logged timestamp to host date", action="store_true")
    parser.add_argument('--ms', default=False,
        help="Print timestamp as milliseconds", action="store_true")
    parser.add_argument('--mW', default=False,
        help="Print power as milliwatts, otherwise default to microwatts",
        action="store_true")
    parser.add_argument('--slow', default=False,
        help="Intentionally overflow", action="store_true")
    parser.add_argument('--print_stats', default=False, action="store_true",
        help="Print statistics for sweetberry readings at the end")
    parser.add_argument('--save_stats', type=str, nargs='?',
        dest='stats_dir', metavar='STATS_DIR',
        const=os.path.dirname(os.path.abspath(__file__)), default=None,
        help="Save statistics for sweetberry readings to %(metavar)s if "
        "%(metavar)s is specified, %(metavar)s will be created if it does "
        "not exist; if %(metavar)s is not specified but the flag is set, "
        "stats will be saved to where %(prog)s is located; if this flag is "
        "not set, then do not save stats")
    parser.add_argument('--save_stats_json', type=str, nargs='?',
        dest='stats_json_dir', metavar='STATS_JSON_DIR',
        const=os.path.dirname(os.path.abspath(__file__)), default=None,
        help="Save means for sweetberry readings in json to %(metavar)s if "
        "%(metavar)s is specified, %(metavar)s will be created if it does "
        "not exist; if %(metavar)s is not specified but the flag is set, "
        "stats will be saved to where %(prog)s is located; if this flag is "
        "not set, then do not save stats")
    parser.add_argument('--no_print_raw_data',
        dest='print_raw_data', default=True, action="store_false",
        help="Not print raw sweetberry readings at real time, default is to "
        "print")
    parser.add_argument('--save_raw_data', type=str, nargs='?',
        dest='raw_data_dir', metavar='RAW_DATA_DIR',
        const=os.path.dirname(os.path.abspath(__file__)), default=None,
        help="Save raw data for sweetberry readings to %(metavar)s if "
        "%(metavar)s is specified, %(metavar)s will be created if it does "
        "not exist; if %(metavar)s is not specified but the flag is set, "
        "raw data will be saved to where %(prog)s is located; if this flag "
        "is not set, then do not save raw data")
    parser.add_argument('-v', '--verbose', default=False,
        help="Very chatty printout", action="store_true")

    args = parser.parse_args(argv)

    root_logger = logging.getLogger(__name__)
    if args.verbose:
        root_logger.setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)

    # if powerlog is used through main, log to sys.stdout
    if __name__ == "__main__":
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
        root_logger.addHandler(stdout_handler)

    integration_us_request = args.integration_us
    # Both a board file and a rail config are mandatory.
    if not args.board:
        raise Exception("Power", "No board file selected, see board.README")
    if not args.config:
        raise Exception("Power", "No config file selected, see board.README")

    brdfile = args.board
    cfgfile = args.config
    seconds = args.seconds
    serial_a = args.serial
    serial_b = args.serial_b
    sync_date = args.date
    use_ms = args.ms
    use_mW = args.mW
    print_stats = args.print_stats
    stats_dir = args.stats_dir
    stats_json_dir = args.stats_json_dir
    print_raw_data = args.print_raw_data
    raw_data_dir = args.raw_data_dir

    # NOTE(review): 'boards' is assigned but never used below — candidate
    # for removal.
    boards = []

    # --slow deliberately samples slower than the integration window
    # (sync request every 1.2 * integration_us instead of 0.8).
    sync_speed = .8
    if args.slow:
        sync_speed = 1.2

    # Set up logging interface.
    powerlogger = powerlog(brdfile, cfgfile, serial_a=serial_a, serial_b=serial_b,
                           sync_date=sync_date, use_ms=use_ms, use_mW=use_mW,
                           print_stats=print_stats, stats_dir=stats_dir,
                           stats_json_dir=stats_json_dir,
                           print_raw_data=print_raw_data, raw_data_dir=raw_data_dir)

    # Start logging.
    powerlogger.start(integration_us_request, seconds, sync_speed=sync_speed)


if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# based on an almost identical script by: jyrki@google.com (Jyrki Alakuijala)
"""Prints out include dependencies in chrome.
Since it ignores defines, it gives just a rough estimation of file size.
Usage:
tools/include_tracer.py chrome/browser/ui/browser.h
"""
import os
import sys
# Created by copying the command line for prerender_browsertest.cc, replacing
# spaces with newlines, and dropping everything except -F and -I switches.
# TODO(port): Add windows, linux directories.
INCLUDE_PATHS = [
'',
'gpu',
'skia/config',
'skia/ext',
'testing/gmock/include',
'testing/gtest/include',
'third_party/WebKit/Source',
'third_party/WebKit/Source/core',
'third_party/WebKit/Source/core/accessibility',
'third_party/WebKit/Source/core/accessibility/chromium',
'third_party/WebKit/Source/core/bindings',
'third_party/WebKit/Source/core/bindings/generic',
'third_party/WebKit/Source/core/bindings/v8',
'third_party/WebKit/Source/core/bindings/v8/custom',
'third_party/WebKit/Source/core/bindings/v8/specialization',
'third_party/WebKit/Source/core/bridge',
'third_party/WebKit/Source/core/bridge/jni',
'third_party/WebKit/Source/core/bridge/jni/v8',
'third_party/WebKit/Source/core/css',
'third_party/WebKit/Source/core/dom',
'third_party/WebKit/Source/core/dom/default',
'third_party/WebKit/Source/core/editing',
'third_party/WebKit/Source/core/fileapi',
'third_party/WebKit/Source/core/history',
'third_party/WebKit/Source/core/html',
'third_party/WebKit/Source/core/html/canvas',
'third_party/WebKit/Source/core/html/parser',
'third_party/WebKit/Source/core/html/shadow',
'third_party/WebKit/Source/core/inspector',
'third_party/WebKit/Source/core/loader',
'third_party/WebKit/Source/core/loader/appcache',
'third_party/WebKit/Source/core/loader/archive',
'third_party/WebKit/Source/core/loader/cache',
'third_party/WebKit/Source/core/loader/icon',
'third_party/WebKit/Source/core/mathml',
'third_party/WebKit/Source/core/notifications',
'third_party/WebKit/Source/core/page',
'third_party/WebKit/Source/core/page/animation',
'third_party/WebKit/Source/core/page/chromium',
'third_party/WebKit/Source/core/platform',
'third_party/WebKit/Source/core/platform/animation',
'third_party/WebKit/Source/core/platform/audio',
'third_party/WebKit/Source/core/platform/audio/chromium',
'third_party/WebKit/Source/core/platform/audio/mac',
'third_party/WebKit/Source/core/platform/chromium',
'third_party/WebKit/Source/core/platform/cocoa',
'third_party/WebKit/Source/core/platform/graphics',
'third_party/WebKit/Source/core/platform/graphics/cg',
'third_party/WebKit/Source/core/platform/graphics/chromium',
'third_party/WebKit/Source/core/platform/graphics/cocoa',
'third_party/WebKit/Source/core/platform/graphics/filters',
'third_party/WebKit/Source/core/platform/graphics/gpu',
'third_party/WebKit/Source/core/platform/graphics/mac',
'third_party/WebKit/Source/core/platform/graphics/opentype',
'third_party/WebKit/Source/core/platform/graphics/skia',
'third_party/WebKit/Source/core/platform/graphics/transforms',
'third_party/WebKit/Source/core/platform/image-decoders',
'third_party/WebKit/Source/core/platform/image-decoders/bmp',
'third_party/WebKit/Source/core/platform/image-decoders/gif',
'third_party/WebKit/Source/core/platform/image-decoders/ico',
'third_party/WebKit/Source/core/platform/image-decoders/jpeg',
'third_party/WebKit/Source/core/platform/image-decoders/png',
'third_party/WebKit/Source/core/platform/image-decoders/skia',
'third_party/WebKit/Source/core/platform/image-decoders/webp',
'third_party/WebKit/Source/core/platform/image-decoders/xbm',
'third_party/WebKit/Source/core/platform/image-encoders/skia',
'third_party/WebKit/Source/core/platform/mac',
'third_party/WebKit/Source/core/platform/mock',
'third_party/WebKit/Source/core/platform/network',
'third_party/WebKit/Source/core/platform/network/chromium',
'third_party/WebKit/Source/core/platform/sql',
'third_party/WebKit/Source/core/platform/text',
'third_party/WebKit/Source/core/platform/text/mac',
'third_party/WebKit/Source/core/platform/text/transcoder',
'third_party/WebKit/Source/core/plugins',
'third_party/WebKit/Source/core/plugins/chromium',
'third_party/WebKit/Source/core/rendering',
'third_party/WebKit/Source/core/rendering/style',
'third_party/WebKit/Source/core/rendering/svg',
'third_party/WebKit/Source/core/storage',
'third_party/WebKit/Source/core/storage/chromium',
'third_party/WebKit/Source/core/svg',
'third_party/WebKit/Source/core/svg/animation',
'third_party/WebKit/Source/core/svg/graphics',
'third_party/WebKit/Source/core/svg/graphics/filters',
'third_party/WebKit/Source/core/svg/properties',
'third_party/WebKit/Source/core/webaudio',
'third_party/WebKit/Source/core/websockets',
'third_party/WebKit/Source/core/workers',
'third_party/WebKit/Source/core/xml',
'third_party/WebKit/Source/public',
'third_party/WebKit/Source/web',
'third_party/WebKit/Source/wtf',
'third_party/google_toolbox_for_mac/src',
'third_party/icu/public/common',
'third_party/icu/public/i18n',
'third_party/npapi',
'third_party/npapi/bindings',
'third_party/protobuf',
'third_party/protobuf/src',
'third_party/skia/gpu/include',
'third_party/skia/include/config',
'third_party/skia/include/core',
'third_party/skia/include/effects',
'third_party/skia/include/gpu',
'third_party/skia/include/pdf',
'third_party/skia/include/ports',
'v8/include',
'xcodebuild/Debug/include',
'xcodebuild/DerivedSources/Debug/chrome',
'xcodebuild/DerivedSources/Debug/policy',
'xcodebuild/DerivedSources/Debug/protoc_out',
'xcodebuild/DerivedSources/Debug/webkit',
'xcodebuild/DerivedSources/Debug/webkit/bindings',
]
def Walk(seen, filename, parent, indent):
  """Returns the size of |filename| plus the size of all files included by
  |filename| and prints the include tree of |filename| to stdout. Every file
  is visited at most once.
  """
  total_bytes = 0

  # .proto(devel) filename translation
  if filename.endswith('.pb.h'):
    basename = filename[:-5]
    if os.path.exists(basename + '.proto'):
      filename = basename + '.proto'
    else:
      print 'could not find ', filename

  # Show and count files only once.
  if filename in seen:
    return total_bytes
  seen.add(filename)

  # Display the paths.
  print ' ' * indent + filename

  # Skip system includes (normalized below to the '<header>' form).
  if filename[0] == '<':
    return total_bytes

  # Find file in all include paths; the including file's own directory is
  # searched last.
  resolved_filename = filename
  for root in INCLUDE_PATHS + [os.path.dirname(parent)]:
    if os.path.exists(os.path.join(root, filename)):
      resolved_filename = os.path.join(root, filename)
      break

  # Recurse.
  if os.path.exists(resolved_filename):
    lines = open(resolved_filename).readlines()
  else:
    print ' ' * (indent + 2) + "-- not found"
    lines = []
  for line in lines:
    line = line.strip()
    if line.startswith('#include "'):
      total_bytes += Walk(
          seen, line.split('"')[1], resolved_filename, indent + 2)
    elif line.startswith('#include '):
      # Angle-bracket include: keep the '<...>' form so the system-include
      # check above fires on recursion.
      include = '<' + line.split('<')[1].split('>')[0] + '>'
      total_bytes += Walk(
          seen, include, resolved_filename, indent + 2)
    elif line.startswith('import '):
      # .proto import statement (quoted path).
      total_bytes += Walk(
          seen, line.split('"')[1], resolved_filename, indent + 2)
  return total_bytes + len("".join(lines))
def main():
  # Walk the include tree rooted at the file named on the command line and
  # report the cumulative source size in megabytes.
  # NOTE(review): 'bytes' shadows the builtin name; harmless in this
  # Python 2 script but worth renaming if ported.
  bytes = Walk(set(), sys.argv[1], '', 0)
  print
  print float(bytes) / (1 << 20), "megabytes of chrome source"


if __name__ == '__main__':
  sys.exit(main())
| |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic NMR shielding tensor
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
from pyscf.scf import ucphf
from pyscf.scf import _response_functions
from pyscf.ao2mo import _ao2mo
from pyscf.prop.nmr import rhf as rhf_nmr
def dia(nmrobj, gauge_orig=None, shielding_nuc=None, dm0=None):
    '''Diamagnetic part of the NMR shielding tensors (UHF wrapper).

    Sums the alpha and beta density matrices and delegates to the RHF
    implementation.
    '''
    if dm0 is None:
        dm0 = nmrobj._scf.make_rdm1()
    is_total_dm = isinstance(dm0, numpy.ndarray) and dm0.ndim == 2
    if not is_total_dm:
        # UHF density arrives as (alpha, beta); the RHF routine needs the
        # spin-summed total density.
        dm0 = dm0[0] + dm0[1]
    return rhf_nmr.dia(nmrobj, gauge_orig, shielding_nuc, dm0)
def para(nmrobj, mo10=None, mo_coeff=None, mo_occ=None,
         shielding_nuc=None):
    '''Paramagnetic part of the UHF NMR shielding tensors.

    Args:
        mo10: first-order MO response ((3, nmo, nocc) per spin) from solve_mo1.

    Returns:
        (msc_para, para_vir, para_occ): total paramagnetic tensor and its
        virtual-occupied / occupied-occupied contributions, one 3x3 tensor
        per nucleus in shielding_nuc.
    '''
    if mo_coeff is None: mo_coeff = nmrobj._scf.mo_coeff
    if mo_occ is None: mo_occ = nmrobj._scf.mo_occ
    if shielding_nuc is None: shielding_nuc = nmrobj.shielding_nuc
    mol = nmrobj.mol
    para_vir = numpy.empty((len(shielding_nuc),3,3))
    para_occ = numpy.empty((len(shielding_nuc),3,3))
    # Occupied/virtual masks for alpha ([0]) and beta ([1]) spins.
    occidxa = mo_occ[0] > 0
    occidxb = mo_occ[1] > 0
    viridxa = mo_occ[0] == 0
    viridxb = mo_occ[1] == 0
    orboa = mo_coeff[0][:,occidxa]
    orbob = mo_coeff[1][:,occidxb]
    orbva = mo_coeff[0][:,viridxa]
    orbvb = mo_coeff[1][:,viridxb]
    nao = mo_coeff[0].shape[0]
    # First-order density matrices in AO basis, split into occ-occ and
    # vir-occ blocks, spin-summed; one matrix per field component.
    dm10_oo = numpy.empty((3,nao,nao))
    dm10_vo = numpy.empty((3,nao,nao))
    for i in range(3):
        dm10_oo[i] = reduce(numpy.dot, (orboa, mo10[0][i][occidxa], orboa.conj().T))
        dm10_oo[i]+= reduce(numpy.dot, (orbob, mo10[1][i][occidxb], orbob.conj().T))
        dm10_vo[i] = reduce(numpy.dot, (orbva, mo10[0][i][viridxa], orboa.conj().T))
        dm10_vo[i]+= reduce(numpy.dot, (orbvb, mo10[1][i][viridxb], orbob.conj().T))
    for n, atm_id in enumerate(shielding_nuc):
        # h01 integrals are evaluated with 1/r centered on this nucleus.
        mol.set_rinv_origin(mol.atom_coord(atm_id))
        h01 = mol.intor_asymmetric('int1e_prinvxp', 3)
        # Factor 2 accounts for the anti-hermitian counterpart of dm10.
        para_occ[n] = numpy.einsum('xji,yij->xy', dm10_oo, h01) * 2
        para_vir[n] = numpy.einsum('xji,yij->xy', dm10_vo, h01) * 2
    msc_para = para_occ + para_vir
    return msc_para, para_vir, para_occ
def make_h10(mol, dm0, gauge_orig=None, verbose=logger.WARN):
    '''First-order core Hamiltonian wrt the external magnetic field.

    With GIAO (gauge_orig is None) the alpha and beta components differ
    through the two-electron terms; with a common gauge origin the same
    one-electron matrix is returned for both spins.
    '''
    log = logger.new_logger(mol, verbose=verbose)
    if gauge_orig is None:
        # A10_i dot p + p dot A10_i consistents with <p^2 g>
        # A10_j dot p + p dot A10_j consistents with <g p^2>
        # A10_j dot p + p dot A10_j => i/2 (rjxp - pxrj) = irjxp
        log.debug('First-order GIAO Fock matrix')
        h1 = -.5 * mol.intor('int1e_giao_irjxp', 3) + make_h10giao(mol, dm0)
    else:
        with mol.with_common_origin(gauge_orig):
            h1 = -.5 * mol.intor('int1e_cg_irxp', 3)
        # Same matrix for alpha and beta spins.
        h1 = (h1, h1)
    return h1
def make_h10giao(mol, dm0):
    '''GIAO contributions to the first-order Fock matrix.

    dm0 is the spin-separated density (dm_alpha, dm_beta); the result keeps
    separate alpha and beta components through the exchange term.
    '''
    vj, vk = rhf_nmr.get_jk(mol, dm0)
    # Coulomb term is spin-summed; exchange remains spin-resolved.
    h1 = vj[0] + vj[1] - vk
    h1 -= mol.intor_asymmetric('int1e_ignuc', 3)
    if mol.has_ecp():
        # Extra term when effective core potentials are present.
        h1 -= mol.intor_asymmetric('ECPscalar_ignuc', 3)
    h1 -= mol.intor('int1e_igkin', 3)
    return h1
def get_fock(nmrobj, dm0=None, gauge_orig=None):
    r'''First order partial derivatives of Fock matrix wrt external magnetic
    field. \frac{\partial F}{\partial B}
    '''
    if dm0 is None:
        dm0 = nmrobj._scf.make_rdm1()
    if gauge_orig is None:
        gauge_orig = nmrobj.gauge_orig
    verbose_log = logger.Logger(nmrobj.stdout, nmrobj.verbose)
    h1 = make_h10(nmrobj.mol, dm0, gauge_orig, verbose_log)
    # Checkpoint the first-order Fock matrices for later reuse.
    if nmrobj.chkfile:
        lib.chkfile.dump(nmrobj.chkfile, 'nmr/h1', h1)
    return h1
def _solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1):
    '''uncoupled first order equation

    Solves for the first-order MO coefficients and orbital-energy response
    without the induced potential (no CPHF iterations). All quantities come
    per spin: index [0] alpha, [1] beta.
    '''
    occidxa = mo_occ[0] > 0
    occidxb = mo_occ[1] > 0
    viridxa = ~occidxa
    viridxb = ~occidxb
    nocca = numpy.count_nonzero(occidxa)
    noccb = numpy.count_nonzero(occidxb)
    nmoa, nmob = mo_occ[0].size, mo_occ[1].size
    # Virtual-occupied orbital energy differences e_a - e_i.
    eai_a = mo_energy[0][viridxa,None] - mo_energy[0][occidxa]
    eai_b = mo_energy[1][viridxb,None] - mo_energy[1][occidxb]
    # Number of perturbation components (leading axis of h1/s1).
    dim0 = len(s1[0])
    s1_a = s1[0].reshape(dim0,nmoa,nocca)
    s1_b = s1[1].reshape(dim0,nmob,noccb)
    # NOTE: hs_* and mo1_* deliberately alias the SAME array. The occupied
    # rows are copied into mo_e1_* below BEFORE mo1_* is modified in place.
    hs_a = mo1_a = h1[0].reshape(dim0,nmoa,nocca) - s1_a * mo_energy[0][occidxa]
    hs_b = mo1_b = h1[1].reshape(dim0,nmob,noccb) - s1_b * mo_energy[1][occidxb]
    mo_e1_a = hs_a[:,occidxa].copy()
    mo_e1_b = hs_b[:,occidxb].copy()
    # Virtual-occupied response: divide by -(e_a - e_i).
    mo1_a[:,viridxa]/= -eai_a
    mo1_b[:,viridxb]/= -eai_b
    # Occupied-occupied block is fixed by the orthonormality condition.
    mo1_a[:,occidxa] = -s1_a[:,occidxa] * .5
    mo1_b[:,occidxb] = -s1_b[:,occidxb] * .5
    mo_e1_a += mo1_a[:,occidxa] * (mo_energy[0][occidxa,None] - mo_energy[0][occidxa])
    mo_e1_b += mo1_b[:,occidxb] * (mo_energy[1][occidxb,None] - mo_energy[1][occidxb])
    return (mo1_a, mo1_b), (mo_e1_a, mo_e1_b)
def solve_mo1(nmrobj, mo_energy=None, mo_coeff=None, mo_occ=None,
              h1=None, s1=None, with_cphf=None):
    '''Solve the first order equation

    Kwargs:
        with_cphf : boolean or function(dm_mo) => v1_mo
            If a boolean value is given, the value determines whether CPHF
            equation will be solved or not. The induced potential will be
            generated by the function gen_vind.
            If a function is given, CPHF equation will be solved, and the
            given function is used to compute induced potential
    '''
    # BUG FIX: time.clock() was removed in Python 3.8; prefer
    # time.process_time() and fall back for very old interpreters.
    _cpu_clock = getattr(time, 'process_time', getattr(time, 'clock', time.time))
    cput1 = (_cpu_clock(), time.time())
    log = logger.Logger(nmrobj.stdout, nmrobj.verbose)
    if mo_energy is None: mo_energy = nmrobj._scf.mo_energy
    if mo_coeff is None: mo_coeff = nmrobj._scf.mo_coeff
    if mo_occ is None: mo_occ = nmrobj._scf.mo_occ
    if with_cphf is None: with_cphf = nmrobj.cphf
    mol = nmrobj.mol
    # Occupied alpha/beta orbitals.
    orboa = mo_coeff[0][:,mo_occ[0]>0]
    orbob = mo_coeff[1][:,mo_occ[1]>0]
    if h1 is None:
        dm0 = nmrobj._scf.make_rdm1(mo_coeff, mo_occ)
        h1 = nmrobj.get_fock(dm0)
        # Transform the AO first-order Fock matrices to the MO (all, occ) block.
        h1 = (lib.einsum('xpq,pi,qj->xij', h1[0], mo_coeff[0].conj(), orboa),
              lib.einsum('xpq,pi,qj->xij', h1[1], mo_coeff[1].conj(), orbob))
        cput1 = log.timer('first order Fock matrix', *cput1)
    if s1 is None:
        # First-order overlap, same MO transformation for both spins.
        s1 = nmrobj.get_ovlp(mol)
        s1 = (lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[0].conj(), orboa),
              lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[1].conj(), orbob))
    if with_cphf:
        if callable(with_cphf):
            vind = with_cphf
        else:
            vind = gen_vind(nmrobj._scf, mo_coeff, mo_occ)
        mo10, mo_e10 = ucphf.solve(vind, mo_energy, mo_occ, h1, s1,
                                   nmrobj.max_cycle_cphf, nmrobj.conv_tol,
                                   verbose=log)
    else:
        mo10, mo_e10 = _solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1)
    logger.timer(nmrobj, 'solving mo1 eqn', *cput1)
    return mo10, mo_e10
def gen_vind(mf, mo_coeff, mo_occ):
    '''Induced potential for the CPHF iterations.

    Returns a function vind(mo1) -> v1_mo. mo1 packs the alpha and beta
    (nmo, nocc) response blocks for the three magnetic field components
    into one flat array; the result uses the same packing.
    '''
    vresp = mf.gen_response(hermi=2)
    occidxa = mo_occ[0] > 0
    occidxb = mo_occ[1] > 0
    orboa = mo_coeff[0][:,occidxa]
    orbob = mo_coeff[1][:,occidxb]
    nocca = orboa.shape[1]
    noccb = orbob.shape[1]
    # CLEANUP: removed unused locals nao and nvira from the original.
    nmo = mo_coeff[0].shape[1]
    def vind(mo1):
        # Unpack the flat CPHF vector into alpha/beta (3, nmo, nocc) blocks.
        mo1a = mo1.reshape(3,-1)[:,:nocca*nmo].reshape(3,nmo,nocca)
        mo1b = mo1.reshape(3,-1)[:,nocca*nmo:].reshape(3,nmo,noccb)
        dm1a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T.conj())) for x in mo1a]
        dm1b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T.conj())) for x in mo1b]
        # Magnetic perturbation: the first-order density is anti-hermitian.
        dm1 = numpy.asarray(([d1-d1.conj().T for d1 in dm1a],
                             [d1-d1.conj().T for d1 in dm1b]))
        v1ao = vresp(dm1)
        # Back-transform to the MO (all, occ) representation and repack.
        v1a = [reduce(numpy.dot, (mo_coeff[0].T.conj(), x, orboa)) for x in v1ao[0]]
        v1b = [reduce(numpy.dot, (mo_coeff[1].T.conj(), x, orbob)) for x in v1ao[1]]
        v1mo = numpy.hstack((numpy.asarray(v1a).reshape(3,-1),
                             numpy.asarray(v1b).reshape(3,-1)))
        return v1mo.ravel()
    return vind
class NMR(rhf_nmr.NMR):
    # UHF-based NMR shielding: reuses the RHF driver with the UHF-specific
    # dia/para/Fock/CPHF hooks assigned at class level below.
    def shielding(self, mo1=None):
        # Warn when spin contamination is significant — the shielding result
        # then carries a larger error and misses g-tensor/HFC contributions.
        if getattr(self._scf, 'spin_square', None):
            s2 = self._scf.spin_square()[0]
            if s2 > 1e-4:
                logger.warn(self, '<S^2> = %s. UHF-NMR shielding may have large error.\n'
                            'paramagnetic NMR should include this result plus '
                            'g-tensor and HFC tensors.', s2)
        return rhf_nmr.NMR.shielding(self, mo1)

    # Override the RHF hooks with the UHF implementations defined above.
    dia = dia
    para = para
    get_fock = get_fock
    solve_mo1 = solve_mo1
from pyscf import scf
# Expose this class on UHF mean-field objects: mf.NMR() returns an NMR(mf).
scf.uhf.UHF.NMR = lib.class_as_method(NMR)
if __name__ == '__main__':
    # Self-test / demo: compare shielding fingerprints against reference
    # values for HF (and a contrived 4-atom system) with and without CPHF
    # and with different gauge origins. Each print should be ~0.
    from pyscf import gto
    from pyscf import scf
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None

    mol.atom.extend([
        [1 , (0. , 0. , .917)],
        ['F' , (0. , 0. , 0.)], ])
    mol.nucmod = {'F': 2}  # gaussian nuclear model
    mol.basis = {'H': '6-31g',
                 'F': '6-31g',}
    mol.build()

    mf = scf.UHF(mol).run()
    nmr = mf.NMR()
    nmr.cphf = True
    #nmr.gauge_orig = (0,0,0)
    msc = nmr.kernel()  # _xx,_yy = 375.232839, _zz = 483.002139
    print(lib.finger(msc) - -132.22895063293751)

    nmr.cphf = True
    nmr.gauge_orig = (1,1,1)
    msc = nmr.shielding()
    print(lib.finger(msc) - -108.48536089934709)

    nmr.cphf = False
    nmr.gauge_orig = None
    msc = nmr.shielding()
    print(lib.finger(msc) - -133.26526049655627)

    mol.atom.extend([
        [1 , (1. , 0.3, .417)],
        [1 , (0.2, 1. , 0.)],])
    mol.build()
    mf = scf.UHF(mol).run()
    nmr = NMR(mf)
    nmr.cphf = False
    nmr.gauge_orig = None
    msc = nmr.shielding()
    print(lib.finger(msc) - -123.98596361883168)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
import testtools
from senlin.common import consts
from senlin.engine import event
class TestEvent(testtools.TestCase):
    """Unit tests for the senlin.engine.event helper functions."""

    def setUp(self):
        super(TestEvent, self).setUp()
        logging.register_options(cfg.CONF)

    @mock.patch('stevedore.named.NamedExtensionManager')
    def test_load_dispatcher(self, mock_mgr):
        # Minimal stand-in implementing the mapping protocol the stevedore
        # extension manager exposes to the loader.
        class FakeDispatcher(object):
            values = {'a': 1, 'b': 2}

            def __iter__(self):
                return iter(self.values)

            def __getitem__(self, key):
                return self.values.get(key, '')

            def __contains__(self, name):
                return name in self.values

            def names(self):
                return self.values.keys()

        mock_mgr.return_value = FakeDispatcher()

        res = event.load_dispatcher()

        self.assertIsNone(res)
        mock_mgr.assert_called_once_with(
            namespace='senlin.dispatchers',
            names=cfg.CONF.event_dispatchers,
            invoke_on_load=True,
            propagate_map_exceptions=True)

    def test_event_data(self):
        entity = mock.Mock(id='ENTITY_ID')
        entity.name = 'FAKE_ENTITY'
        action = mock.Mock(id='ACTION_ID', action='ACTION', entity=entity)

        res = event._event_data(action)

        # NOTE(review): the expected ids are the inputs truncated to 8 chars
        # ('ENTITY_ID' -> 'ENTITY_I'); apparently _event_data shortens ids —
        # confirm against the implementation.
        self.assertEqual({'name': 'FAKE_ENTITY', 'obj_id': 'ENTITY_I',
                          'action': 'ACTION', 'phase': None, 'reason': None,
                          'id': 'ACTION_I'},
                         res)

    def test_event_data_with_phase_reason(self):
        entity = mock.Mock(id='ENTITY_ID')
        entity.name = 'FAKE_ENTITY'
        action = mock.Mock(id='ACTION_ID', action='ACTION', entity=entity)

        res = event._event_data(action, phase='PHASE1', reason='REASON1')

        self.assertEqual({'name': 'FAKE_ENTITY', 'id': 'ACTION_I',
                          'action': 'ACTION', 'phase': 'PHASE1',
                          'obj_id': 'ENTITY_I', 'reason': 'REASON1'},
                         res)

    def test_dump(self):
        cfg.CONF.set_override('debug', True)
        # Swap in a mock dispatcher list; restored in finally.
        saved_dispathers = event.dispatchers
        event.dispatchers = mock.Mock()
        action = mock.Mock(cause=consts.CAUSE_RPC)
        try:
            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')

            event.dispatchers.map_method.assert_called_once_with(
                'dump', logging.INFO, action,
                phase='Phase1', reason='Reason1', timestamp='TS1')
        finally:
            event.dispatchers = saved_dispathers

    def test_dump_without_timestamp(self):
        cfg.CONF.set_override('debug', True)
        saved_dispathers = event.dispatchers
        event.dispatchers = mock.Mock()
        action = mock.Mock(cause=consts.CAUSE_RPC)
        try:
            event._dump(logging.INFO, action, 'Phase1', 'Reason1', None)

            # A timestamp is generated when none is supplied.
            event.dispatchers.map_method.assert_called_once_with(
                'dump', logging.INFO, action,
                phase='Phase1', reason='Reason1', timestamp=mock.ANY)
        finally:
            event.dispatchers = saved_dispathers

    def test_dump_guarded(self):
        # INFO events are filtered out when the dispatcher priority is
        # 'warning' and debug is off.
        cfg.CONF.set_override('debug', False)
        cfg.CONF.set_override('priority', 'warning', group='dispatchers')
        saved_dispathers = event.dispatchers
        event.dispatchers = mock.Mock()
        action = mock.Mock(cause=consts.CAUSE_RPC)
        try:
            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')
            # (temporary)Remove map_method.call_count for coverage test
            # self.assertEqual(0, event.dispatchers.map_method.call_count)
        finally:
            event.dispatchers = saved_dispathers

    def test_dump_exclude_derived_actions_positive(self):
        cfg.CONF.set_override('exclude_derived_actions', True,
                              group='dispatchers')
        saved_dispathers = event.dispatchers
        event.dispatchers = mock.Mock()
        action = mock.Mock(cause=consts.CAUSE_DERIVED)
        try:
            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')

            # Derived actions are skipped entirely.
            self.assertEqual(0, event.dispatchers.map_method.call_count)
        finally:
            event.dispatchers = saved_dispathers

    def test_dump_exclude_derived_actions_negative(self):
        cfg.CONF.set_override('exclude_derived_actions', False,
                              group='dispatchers')
        saved_dispathers = event.dispatchers
        event.dispatchers = mock.Mock()
        action = mock.Mock(cause=consts.CAUSE_DERIVED)
        try:
            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')

            event.dispatchers.map_method.assert_called_once_with(
                'dump', logging.INFO, action,
                phase='Phase1', reason='Reason1', timestamp='TS1')
        finally:
            event.dispatchers = saved_dispathers

    def test_dump_with_exception(self):
        cfg.CONF.set_override('debug', True)
        saved_dispathers = event.dispatchers
        event.dispatchers = mock.Mock()
        # Dispatcher failures must not propagate out of _dump.
        event.dispatchers.map_method.side_effect = Exception('fab')
        action = mock.Mock(cause=consts.CAUSE_RPC)
        try:
            res = event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')

            self.assertIsNone(res)  # exception logged only
            event.dispatchers.map_method.assert_called_once_with(
                'dump', logging.INFO, action,
                phase='Phase1', reason='Reason1', timestamp='TS1')
        finally:
            event.dispatchers = saved_dispathers
@mock.patch.object(event, '_dump')
class TestLogMethods(testtools.TestCase):
    """Verify each severity helper delegates to event._dump with the
    matching logging level."""

    def test_critical(self, mock_dump):
        entity = mock.Mock(id='1234567890')
        entity.name = 'fake_obj'
        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')

        res = event.critical(action, 'P1', 'R1', 'TS1')

        self.assertIsNone(res)
        mock_dump.assert_called_once_with(logging.CRITICAL, action,
                                          'P1', 'R1', 'TS1')

    def test_error(self, mock_dump):
        entity = mock.Mock(id='1234567890')
        entity.name = 'fake_obj'
        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')

        res = event.error(action, 'P1', 'R1', 'TS1')

        self.assertIsNone(res)
        mock_dump.assert_called_once_with(logging.ERROR, action,
                                          'P1', 'R1', 'TS1')

    def test_warning(self, mock_dump):
        entity = mock.Mock(id='1234567890')
        entity.name = 'fake_obj'
        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')

        res = event.warning(action, 'P1', 'R1', 'TS1')

        self.assertIsNone(res)
        mock_dump.assert_called_once_with(logging.WARNING, action,
                                          'P1', 'R1', 'TS1')

    def test_info(self, mock_dump):
        entity = mock.Mock(id='1234567890')
        entity.name = 'fake_obj'
        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')

        res = event.info(action, 'P1', 'R1', 'TS1')

        self.assertIsNone(res)
        mock_dump.assert_called_once_with(logging.INFO, action,
                                          'P1', 'R1', 'TS1')

    def test_debug(self, mock_dump):
        entity = mock.Mock(id='1234567890')
        entity.name = 'fake_obj'
        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')

        res = event.debug(action, 'P1', 'R1', 'TS1')

        self.assertIsNone(res)
        mock_dump.assert_called_once_with(logging.DEBUG, action,
                                          'P1', 'R1', 'TS1')
| |
# -*- coding: utf-8 -*-
'''
Module to provide MongoDB functionality to Salt
:configuration: This module uses PyMongo, and accepts configuration details as
parameters as well as configuration settings::
mongodb.host: 'localhost'
mongodb.port: 27017
mongodb.user: ''
mongodb.password: ''
This data can also be passed into pillar. Options passed into opts will
overwrite options passed into pillar.
'''
from __future__ import absolute_import
# Import python libs
import logging
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
import json
# Import salt libs
from salt.ext.six import string_types
# Import third party libs
try:
import pymongo
HAS_MONGODB = True
except ImportError:
HAS_MONGODB = False
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Expose this module as ``mongodb`` only when the pymongo client
    library could be imported.
    '''
    return 'mongodb' if HAS_MONGODB else False
def _connect(user=None, password=None, host=None, port=None, database='admin'):
    '''
    Build a MongoDB connection, filling any missing credentials or
    connection details from minion config / pillar options
    (``mongodb.user``, ``mongodb.password``, ``mongodb.host``,
    ``mongodb.port``).

    Returns the connected ``pymongo.MongoClient`` on success, or
    ``False`` when connecting or authenticating fails.
    '''
    if not user:
        user = __salt__['config.option']('mongodb.user')
    if not password:
        password = __salt__['config.option']('mongodb.password')
    if not host:
        host = __salt__['config.option']('mongodb.host')
    if not port:
        port = __salt__['config.option']('mongodb.port')
    try:
        conn = pymongo.MongoClient(host=host, port=port)
        mdb = pymongo.database.Database(conn, database)
        if user and password:
            # authenticate against the requested database only when
            # credentials were actually supplied/configured
            mdb.authenticate(user, password)
    except pymongo.errors.PyMongoError:
        log.error('Error connecting to database {0}'.format(database))
        return False
    return conn
def _to_dict(objects):
    """
    Decode *objects* from JSON when it is given as a string; any other
    value is passed through untouched.
    """
    if isinstance(objects, string_types):
        try:
            objects = json.loads(objects)
        except ValueError as err:
            log.error("Could not parse objects: %s", err)
            raise err
    return objects
def db_list(user=None, password=None, host=None, port=None):
    '''
    List all Mongodb databases

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.db_list <user> <password> <host> <port>
    '''
    client = _connect(user, password, host, port)
    if not client:
        return 'Failed to connect to mongo database'
    try:
        log.info('Listing databases')
        return client.database_names()
    except pymongo.errors.PyMongoError as exc:
        log.error(exc)
        return str(exc)
def db_exists(name, user=None, password=None, host=None, port=None):
    '''
    Checks if a database exists in Mongodb

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.db_exists <name> <user> <password> <host> <port>
    '''
    databases = db_list(user, password, host, port)
    # db_list returns an error string on connection failure
    return False if isinstance(databases, string_types) else name in databases
def db_remove(name, user=None, password=None, host=None, port=None):
    '''
    Remove a Mongodb database

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.db_remove <name> <user> <password> <host> <port>
    '''
    client = _connect(user, password, host, port)
    if not client:
        return 'Failed to connect to mongo database'
    try:
        log.info('Removing database {0}'.format(name))
        client.drop_database(name)
    except pymongo.errors.PyMongoError as exc:
        log.error(
            'Removing database {0} failed with error: {1}'.format(
                name, str(exc)
            )
        )
        return str(exc)
    return True
def user_list(user=None, password=None, host=None, port=None, database='admin'):
    '''
    List users of a Mongodb database

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_list <user> <password> <host> <port> <database>
    '''
    client = _connect(user, password, host, port)
    if not client:
        return 'Failed to connect to mongo database'
    try:
        log.info('Listing users')
        mdb = pymongo.database.Database(client, database)
        mongodb_version = mdb.eval('db.version()')
        if LooseVersion(mongodb_version) >= LooseVersion('2.6'):
            # 2.6+ exposes role information through getUsers()
            return [
                [('user', entry['user']), ('roles', entry['roles'])]
                for entry in mdb.eval('db.getUsers()')
            ]
        # older servers keep their user records in system.users
        return [
            [('user', entry['user']),
             ('readOnly', entry.get('readOnly', 'None'))]
            for entry in mdb.system.users.find()
        ]
    except pymongo.errors.PyMongoError as exc:
        log.error(
            'Listing users failed with error: {0}'.format(
                str(exc)
            )
        )
        return str(exc)
def user_exists(name, user=None, password=None, host=None, port=None,
                database='admin'):
    '''
    Checks if a user exists in Mongodb

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database>
    '''
    users = user_list(user, password, host, port, database)
    if isinstance(users, string_types):
        return 'Failed to connect to mongo database'
    # user_list yields lists of (key, value) pairs per user record
    return any(dict(entry).get('user') == name for entry in users)
def user_create(name, passwd, user=None, password=None, host=None, port=None,
                database='admin'):
    '''
    Create a Mongodb user

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_create <name> <passwd> <user> <password> <host> <port> <database>
    '''
    conn = _connect(user, password, host, port)
    if not conn:
        return 'Failed to connect to mongo database'
    try:
        log.info('Creating user {0}'.format(name))
        mdb = pymongo.database.Database(conn, database)
        mdb.add_user(name, passwd)
    except pymongo.errors.PyMongoError as err:
        # Fixed message: this is a user-creation failure, the old text
        # incorrectly said "Creating database".
        log.error(
            'Creating user {0} failed with error: {1}'.format(
                name, str(err)
            )
        )
        return str(err)
    return True
def user_remove(name, user=None, password=None, host=None, port=None,
                database='admin'):
    '''
    Remove a Mongodb user

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
    '''
    conn = _connect(user, password, host, port)
    if not conn:
        return 'Failed to connect to mongo database'
    try:
        log.info('Removing user {0}'.format(name))
        mdb = pymongo.database.Database(conn, database)
        mdb.remove_user(name)
    except pymongo.errors.PyMongoError as err:
        # Fixed copy-pasted message: this is a user-removal failure, the
        # old text incorrectly said "Creating database".
        log.error(
            'Removing user {0} failed with error: {1}'.format(
                name, str(err)
            )
        )
        return str(err)
    return True
def user_roles_exists(name, roles, database, user=None, password=None, host=None,
                      port=None):
    '''
    Checks if a user of a Mongodb database has specified roles

    CLI Examples:

    .. code-block:: bash

        salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017

    .. code-block:: bash

        salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
    '''
    try:
        roles = _to_dict(roles)
    except Exception:
        return 'Roles provided in wrong format'
    users = user_list(user, password, host, port, database)
    if isinstance(users, string_types):
        return 'Failed to connect to mongo database'
    for entry in users:
        record = dict(entry)
        if record.get('user') != name:
            continue
        granted = record.get('roles', [])
        for role in roles:
            # a bare role name is shorthand for a role on *database*
            if not isinstance(role, dict):
                role = {'role': role, 'db': database}
            if role not in granted:
                return False
        return True
    return False
def user_grant_roles(name, roles, database, user=None, password=None, host=None,
                     port=None):
    '''
    Grant one or many roles to a Mongodb user

    CLI Examples:

    .. code-block:: bash

        salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017

    .. code-block:: bash

        salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
    '''
    client = _connect(user, password, host, port)
    if not client:
        return 'Failed to connect to mongo database'
    try:
        roles = _to_dict(roles)
    except Exception:
        return 'Roles provided in wrong format'
    try:
        log.info('Granting roles {0} to user {1}'.format(roles, name))
        mdb = pymongo.database.Database(client, database)
        # NOTE(review): builds server-side JS from interpolated values;
        # name/roles come from the caller, so treat inputs as trusted.
        mdb.eval("db.grantRolesToUser('{0}', {1})".format(name, roles))
    except pymongo.errors.PyMongoError as exc:
        log.error(
            'Granting roles {0} to user {1} failed with error: {2}'.format(
                roles, name, str(exc)
            )
        )
        return str(exc)
    return True
def user_revoke_roles(name, roles, database, user=None, password=None, host=None,
                      port=None):
    '''
    Revoke one or many roles to a Mongodb user

    CLI Examples:

    .. code-block:: bash

        salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017

    .. code-block:: bash

        salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
    '''
    client = _connect(user, password, host, port)
    if not client:
        return 'Failed to connect to mongo database'
    try:
        roles = _to_dict(roles)
    except Exception:
        return 'Roles provided in wrong format'
    try:
        log.info('Revoking roles {0} from user {1}'.format(roles, name))
        mdb = pymongo.database.Database(client, database)
        # NOTE(review): builds server-side JS from interpolated values;
        # name/roles come from the caller, so treat inputs as trusted.
        mdb.eval("db.revokeRolesFromUser('{0}', {1})".format(name, roles))
    except pymongo.errors.PyMongoError as exc:
        log.error(
            'Revoking roles {0} from user {1} failed with error: {2}'.format(
                roles, name, str(exc)
            )
        )
        return str(exc)
    return True
def insert(objects, collection, user=None, password=None,
           host=None, port=None, database='admin'):
    """
    Insert an object or list of objects into a collection

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database>
    """
    client = _connect(user, password, host, port, database)
    if not client:
        return "Failed to connect to mongo database"
    try:
        objects = _to_dict(objects)
    except Exception as exc:
        return exc
    try:
        log.info("Inserting %r into %s.%s", objects, database, collection)
        mdb = pymongo.database.Database(client, database)
        target = getattr(mdb, collection)
        return target.insert(objects)
    except pymongo.errors.PyMongoError as exc:
        log.error("Inserting objects %r failed with error %s", objects, exc)
        return exc
def find(collection, query=None, user=None, password=None,
         host=None, port=None, database='admin'):
    """
    Find objects in a collection matching a query

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.find mycollection '{"foo": "FOO"}' <user> <password> <host> <port> <database>
    """
    conn = _connect(user, password, host, port, database)
    if not conn:
        return 'Failed to connect to mongo database'
    try:
        query = _to_dict(query)
    except Exception as err:
        return err
    try:
        log.info("Searching for %r in %s", query, collection)
        mdb = pymongo.database.Database(conn, database)
        col = getattr(mdb, collection)
        ret = col.find(query)
        return list(ret)
    except pymongo.errors.PyMongoError as err:
        # Fixed copy-pasted message: this used to say "Removing objects
        # failed" even though it is a search.
        log.error("Searching objects failed with error: %s", err)
        return err
def remove(collection, query=None, user=None, password=None,
           host=None, port=None, database='admin', w=1):
    """
    Remove an object or list of objects from a collection

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database>
    """
    conn = _connect(user, password, host, port, database)
    if not conn:
        return 'Failed to connect to mongo database'
    try:
        query = _to_dict(query)
    except Exception as err:
        # str(err) instead of err.message: BaseException.message was
        # deprecated in Python 2.6 and removed in Python 3.
        return str(err)
    try:
        log.info("Removing %r from %s", query, collection)
        mdb = pymongo.database.Database(conn, database)
        col = getattr(mdb, collection)
        ret = col.remove(query, w=w)
        return "{0} objects removed".format(ret['n'])
    except pymongo.errors.PyMongoError as err:
        log.error("Removing objects failed with error: %s", err)
        return str(err)
| |
from .game_data import get_db_handle
from battleground.utils import bg_trueskill
import bson
def get_agents(owner=None,
               game_type=None,
               agent_id=None,
               has_file=False,
               fields=None,
               db_handle=None):
    """
    Fetch agent records matching the given filters.

    owner / game_type: equality filters (ignored when agent_id is given)
    agent_id: look up a single agent by id (coerced to bson.ObjectId)
    has_file: if True, only return records that have code_string
    fields: projection field names; defaults to id/game_type/owner/name
    db_handle: used for testing
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    collection = db_handle.agents
    if agent_id is not None:
        # an explicit id supersedes every other filter
        if not isinstance(agent_id, bson.ObjectId):
            agent_id = bson.ObjectId(str(agent_id))
        query = {'_id': agent_id}
    else:
        query = {}
        if owner is not None:
            query['owner'] = owner
        if game_type is not None:
            query['game_type'] = game_type
        if has_file:
            query['code'] = {'$exists': True, '$ne': 'null'}
    if fields is None:
        projection = {'_id': True, 'game_type': True, 'owner': True, 'name': True}
    else:
        projection = {field_name: True for field_name in fields}
    return list(collection.find(query, projection=projection))
def insert_new_agent(owner, name, game_type, db_handle):
    """Create a fresh agent record and return its newly assigned id."""
    record = {"owner": owner, "name": name, "game_type": game_type}
    return db_handle.agents.insert_one(record).inserted_id
def get_owners(db_handle=None):
    """Return the set of distinct agent owners."""
    if db_handle is None:
        db_handle = get_db_handle("agents")
    return {document["owner"] for document in db_handle.agents.find()}
def get_agent_id(owner, name, game_type, db_handle=None):
    """
    Look up the id of the (owner, name, game_type) agent, creating the
    record when the collection or the agent does not exist yet.
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    if "agents" not in db_handle.list_collection_names():
        # collection missing entirely -> first agent ever stored
        return insert_new_agent(owner, name, game_type, db_handle)
    matches = list(db_handle.agents.find({"owner": owner,
                                          "name": name,
                                          "game_type": game_type}))
    if matches:
        return matches[0]["_id"]
    return insert_new_agent(owner, name, game_type, db_handle)
def save_agent_code(owner, name, game_type, code, db_handle=None):
    """
    Store *code* under the ``code`` key of the (owner, name, game_type)
    agent, creating the agent record first when necessary.
    Returns the agent id.
    """
    resolved_id = get_agent_id(owner, name, game_type, db_handle=db_handle)
    save_agent_data(resolved_id, data=code, key="code", db_handle=db_handle)
    return resolved_id
def load_agent_code(owner, name, game_type, db_handle=None):
    """
    Fetch the stored ``code`` value for the (owner, name, game_type)
    agent; a missing key yields None.
    """
    resolved_id = get_agent_id(owner, name, game_type, db_handle=db_handle)
    return load_agent_data(resolved_id, key="code", db_handle=db_handle)
def save_agent_data(agent_id, data, key, db_handle=None):
    """
    Set ``{key: data}`` on the agent document identified by *agent_id*
    and return pymongo's update result.
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    if not isinstance(agent_id, bson.ObjectId):
        agent_id = bson.ObjectId(str(agent_id))
    return db_handle.agents.update_one({"_id": agent_id},
                                       {"$set": {key: data}})
def load_agent_data(agent_id, key, db_handle=None):
    """
    Return the value stored under *key* on the agent document, or None
    when the agent or the key is missing.
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    if not isinstance(agent_id, bson.ObjectId):
        agent_id = bson.ObjectId(str(agent_id))
    document = db_handle.agents.find_one(agent_id)
    if document is None:
        return None
    return document.get(key)
def save_game_result(agent_ids,
                     game_id,
                     game_type,
                     scores,
                     time,
                     db_handle=None):
    """
    Persist per-agent results for one game and refresh agent ratings.

    Writes one record per agent to the ``results`` collection, then
    recomputes ratings for all participants via update_ratings and saves
    the updated agent documents back to the ``agents`` collection.
    Raises when a referenced agent record cannot be found.
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    agents_collection = db_handle.agents
    results_collection = db_handle.results
    agents = []
    for agent_id, score in zip(agent_ids, scores):
        # a "win" means holding the top score in a non-tied game
        win = max(scores) == score and min(scores) != score
        results_collection.insert_one({
            'agent_id': str(agent_id),
            'game_id': str(game_id),
            'game_type': game_type,
            'score': score,
            'win': win,
            'time': time,
        })
        if not isinstance(agent_id, bson.ObjectId):
            agent_id = bson.ObjectId(str(agent_id))
        matches = list(agents_collection.find({"_id": agent_id}))
        if not matches:
            raise Exception("agent not found: {}".format(agent_id))
        agents.append(matches[0])
    for agent in update_ratings(agents, scores):
        agents_collection.update_one({'_id': agent['_id']}, {'$set': agent})
def update_ratings(agents, scores):
    """
    A ranking system based on TrueSkill(TM)
    :param agents: list of DB agents
    :param scores: list of scores
    :return: updated list of DB agents
    """
    # get ratings or initialize new ones in a free-for-all
    ratings = []
    for agent in agents:
        # BUG FIX: this previously read `"results" in agents` (the list),
        # which is always False, so stored ratings were never re-used and
        # every game restarted each agent from a default Rating.
        if "results" in agent:
            ratings.append((bg_trueskill.Rating(**agent["results"]["rating"]),))
        else:
            ratings.append((bg_trueskill.Rating(),))
    # lower rank is better
    ranks = [(0,) if score else (1,) for score in scores]
    new_ratings = bg_trueskill.rate(ratings, ranks=ranks, scores=scores)
    for index, agent in enumerate(agents):
        score = scores[index]
        # a "win" means holding the top score in a non-tied game
        win = max(scores) == score and min(scores) != score
        rank = bg_trueskill.expose(new_ratings[index][0])
        rating = {"mu": new_ratings[index][0].mu,
                  "sigma": new_ratings[index][0].sigma}
        if "results" in agent:
            # maintain a running average over all games played so far
            num_games = agent["results"]["num_games"]
            avg_score = agent["results"]["avg_score"]
            agent["results"]["num_games"] += 1
            agent["results"]["avg_score"] = (avg_score * num_games + score) / (num_games + 1)
            agent["results"]["rank"] = rank
            agent["results"]["rating"] = rating
            if win:
                agent["results"]["num_wins"] += 1
        else:
            agent["results"] = {
                "num_games": 1,
                "avg_score": score,
                "rank": rank,
                "rating": rating,
                "num_wins": 1 if win else 0,
            }
    return agents
def load_agent_results(agent_id, limit=10, db_handle=None):
    """
    Return up to *limit* result records for an agent, newest first
    (sorted by ``time`` descending).
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    cursor = db_handle.results.find({"agent_id": str(agent_id)})
    return list(cursor.sort("time", -1).limit(limit))
def load_game_results(game_type, db_handle=None):
    """
    Return (agent_id, owner, name, rank) tuples for every rated agent of
    *game_type*, best (highest) rank first. Agents without results are
    skipped.
    """
    if db_handle is None:
        db_handle = get_db_handle("agents")
    agents = db_handle.agents.find({"game_type": game_type})
    stats = [
        (str(agent["_id"]), agent["owner"], agent["name"], agent["results"]["rank"])
        for agent in agents
        if "results" in agent
    ]
    return sorted(stats, key=lambda row: row[-1], reverse=True)
| |
import pytz
import json
from unicodedata import normalize
from distutils.version import StrictVersion
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict, InvalidModelValueError, JSONAPIException
from api.base.serializers import is_anonymized
from api.base.utils import absolute_reverse, get_user_auth, is_truthy
from api.base.versioning import CREATE_REGISTRATION_FIELD_CHANGE_VERSION
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import (
NodeSerializer,
NodeStorageProviderSerializer,
NodeLicenseRelationshipField,
NodeLinksSerializer,
update_institutions,
NodeLicenseSerializer,
NodeContributorsSerializer,
RegistrationProviderRelationshipField,
get_license_details,
)
from api.base.serializers import (
IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
ShowIfVersion, VersionedDateTimeField, ValuesListField,
HideIfWithdrawalOrWikiDisabled,
)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError, NodeStateError
from osf.models import Node, AbstractNode
from osf.utils.registrations import strip_registered_meta_comments
from osf.utils.workflows import ApprovalStates
from framework.sentry import log_exception
class RegistrationSerializer(NodeSerializer):
admin_only_editable_fields = [
'custom_citation',
'is_pending_retraction',
'is_public',
'withdrawal_justification',
]
# Remember to add new RegistrationSerializer fields to this list
# if you don't need them to be anonymized
non_anonymized_fields = NodeSerializer.non_anonymized_fields + [
'archiving',
'article_doi',
'date_registered',
'date_withdrawn',
'embargo_end_date',
'embargoed',
'latest_response',
'original_response',
'pending_embargo_approval',
'pending_embargo_termination_approval',
'pending_registration_approval',
'pending_withdrawal',
'provider',
'provider_specific_metadata',
'registered_by',
'registered_from',
'registered_meta',
'registration_responses',
'registration_schema',
'registration_supplement',
'schema_responses',
'withdrawal_justification',
'withdrawn',
]
# Union filterable fields unique to the RegistrationSerializer with
# filterable fields from the NodeSerializer
filterable_fields = NodeSerializer.filterable_fields ^ frozenset([
'revision_state',
])
ia_url = ser.URLField(read_only=True)
reviews_state = ser.CharField(source='moderation_state', read_only=True)
title = ser.CharField(read_only=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category_choices = NodeSerializer.category_choices
category_choices_string = NodeSerializer.category_choices_string
category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string)
date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True))
node_license = HideIfWithdrawal(NodeLicenseSerializer(required=False, source='license'))
tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False))
article_doi = ser.CharField(required=False, allow_null=True)
public = HideIfWithdrawal(ser.BooleanField(
source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes',
))
current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(
help_text='List of strings representing the permissions '
'for the current user on this node.',
))
pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(
read_only=True, source='is_pending_embargo',
help_text='The associated Embargo is awaiting approval by project admins.',
))
pending_embargo_termination_approval = HideIfWithdrawal(ser.BooleanField(
read_only=True, source='is_pending_embargo_termination',
help_text='The associated Embargo early termination is awaiting approval by project admins',
))
embargoed = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_embargoed'))
pending_registration_approval = HideIfWithdrawal(ser.BooleanField(
source='is_pending_registration', read_only=True,
help_text='The associated RegistrationApproval is awaiting approval by project admins.',
))
archiving = HideIfWithdrawal(ser.BooleanField(read_only=True))
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
source='is_pending_retraction', read_only=True,
help_text='The registration is awaiting withdrawal approval by project admins.',
))
withdrawn = ser.BooleanField(
source='is_retracted', read_only=True,
help_text='The registration has been withdrawn.',
)
has_project = ser.SerializerMethodField()
date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
date_withdrawn = VersionedDateTimeField(read_only=True, help_text='Date time of when this registration was retracted.')
embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
custom_citation = HideIfWithdrawal(ser.CharField(allow_blank=True, required=False))
withdrawal_justification = ser.CharField(read_only=True)
template_from = HideIfWithdrawal(ser.CharField(
read_only=True, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.',
))
registration_supplement = ser.SerializerMethodField()
# Will be deprecated in favor of registration_responses
registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.',
))
registration_responses = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.',
))
registered_by = HideIfWithdrawal(RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<registered_user._id>'},
))
registered_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<registered_from._id>'},
)
children = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-children',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'},
))
comments = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<_id>'},
related_meta={
'unread': 'get_unread_comments_count',
'count': 'get_total_comments_count',
},
filter={'target': '<_id>'},
))
contributors = RelationshipField(
related_view='registrations:registration-contributors',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_contrib_count'},
)
bibliographic_contributors = RelationshipField(
related_view='registrations:registration-bibliographic-contributors',
related_view_kwargs={'node_id': '<_id>'},
)
implicit_contributors = RelationshipField(
related_view='registrations:registration-implicit-contributors',
related_view_kwargs={'node_id': '<_id>'},
help_text='This feature is experimental and being tested. It may be deprecated.',
)
files = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-storage-providers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_files_count'},
))
wikis = HideIfWithdrawalOrWikiDisabled(RelationshipField(
related_view='registrations:registration-wikis',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_wiki_page_count'},
))
forked_from = HideIfWithdrawal(RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'},
))
template_node = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<template_node._id>'},
))
license = HideIfWithdrawal(NodeLicenseRelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<license.node_license._id>'},
read_only=False,
))
logs = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-logs',
related_view_kwargs={'node_id': '<_id>'},
))
forks = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-forks',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_forks_count'},
))
groups = HideIfRegistration(RelationshipField(
related_view='nodes:node-groups',
related_view_kwargs={'node_id': '<_id>'},
))
node_links = ShowIfVersion(
HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-pointers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_pointers_count'},
help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.',
)), min_version='2.0', max_version='2.0',
)
linked_by_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_nodes_count'},
))
linked_by_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_registrations_count'},
))
parent = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node',
)
root = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<root._id>'},
)
region = HideIfWithdrawal(RelationshipField(
related_view='regions:region-detail',
related_view_kwargs={'region_id': '<osfstorage_region._id>'},
read_only=True,
))
affiliated_institutions = RelationshipField(
related_view='registrations:registration-institutions',
related_view_kwargs={'node_id': '<_id>'},
self_view='registrations:registration-relationships-institutions',
self_view_kwargs={'node_id': '<_id>'},
read_only=False,
many=True,
required=False,
)
registration_schema = RelationshipField(
related_view='schemas:registration-schema-detail',
related_view_kwargs={'schema_id': '<registered_schema_id>'},
)
settings = HideIfRegistration(RelationshipField(
related_view='nodes:node-settings',
related_view_kwargs={'node_id': '<_id>'},
))
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<_id>'},
))
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<_id>'},
))
preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
related_view='nodes:node-preprints',
related_view_kwargs={'node_id': '<_id>'},
)))
identifiers = RelationshipField(
related_view='registrations:identifier-list',
related_view_kwargs={'node_id': '<_id>'},
)
linked_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_links_count'},
self_view='registrations:node-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'},
))
linked_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_registration_links_count'},
self_view='registrations:node-registration-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'},
))
view_only_links = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-view-only-links',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_view_only_links_count'},
))
citation = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-citation',
related_view_kwargs={'node_id': '<_id>'},
))
provider = RegistrationProviderRelationshipField(
related_view='providers:registration-providers:registration-provider-detail',
related_view_kwargs={'provider_id': '<provider._id>'},
read_only=True,
)
review_actions = RelationshipField(
related_view='registrations:registration-actions-list',
related_view_kwargs={'node_id': '<_id>'},
)
requests = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-requests-list',
related_view_kwargs={'node_id': '<_id>'},
))
provider_specific_metadata = ser.JSONField(required=False)
schema_responses = HideIfWithdrawal(RelationshipField(
related_view='registrations:schema-responses-list',
related_view_kwargs={'node_id': '<_id>'},
))
original_response = HideIfWithdrawal(RelationshipField(
related_view='schema_responses:schema-responses-detail',
related_view_kwargs={'schema_response_id': 'get_original_response_id'},
))
latest_response = HideIfWithdrawal(RelationshipField(
related_view='schema_responses:schema-responses-detail',
related_view_kwargs={'schema_response_id': 'get_latest_response_id'},
))
revision_state = HideIfWithdrawal(ser.CharField(read_only=True, required=False))
@property
def subjects_related_view(self):
    """Route name for a registration's subjects list.

    Overrides TaxonomizableSerializerMixin.
    """
    return 'registrations:registration-subjects'
@property
def subjects_self_view(self):
    """Route name for a registration's subjects relationship.

    Overrides TaxonomizableSerializerMixin.
    """
    return 'registrations:registration-relationships-subjects'
links = LinksField({'html': 'get_absolute_html_url'})
def get_has_project(self, obj):
    """Expose the registration's ``has_project`` flag."""
    return obj.has_project
def get_absolute_url(self, obj):
    """Delegate to the registration's own absolute-URL builder."""
    return obj.get_absolute_url()
def get_registered_meta(self, obj):
    """
    Return the (possibly anonymized) registered_meta payload.

    Stored values may be JSON-encoded strings; decode them when possible
    and fall back to the raw value otherwise. Returns None when there is
    no registered_meta at all.
    """
    if not obj.registered_meta:
        return None
    meta_values = self.anonymize_registered_meta(obj)
    try:
        return json.loads(meta_values)
    except (TypeError, ValueError):
        return meta_values
def get_registration_responses(self, obj):
    """
    Prefer the root registration's latest approved schema response;
    fall back to the registration's own stored responses, or None.
    """
    approved = obj.root.schema_responses.filter(
        reviews_state=ApprovalStates.APPROVED.db_name,
    ).first()
    if approved is not None:
        return self.anonymize_fields(obj, approved.all_responses)
    if obj.registration_responses:
        return self.anonymize_registration_responses(obj)
    return None
def get_embargo_end_date(self, obj):
    """Return the date the embargo lifts, or None when none is set."""
    end_date = obj.embargo_end_date
    return end_date if end_date else None
def get_registration_supplement(self, obj):
    """Return the name of the first registered schema, if any."""
    if not obj.registered_schema:
        return None
    schema = obj.registered_schema.first()
    return schema.name if schema is not None else None
def get_current_user_permissions(self, obj):
    # Call the NodeSerializer implementation explicitly (rather than via
    # super()/inheritance) so registrations report permissions exactly
    # the way nodes do.
    return NodeSerializer.get_current_user_permissions(self, obj)
def get_view_only_links_count(self, obj):
    """Count the registration's active (non-deleted) view-only links."""
    active_links = obj.private_links.filter(is_deleted=False)
    return active_links.count()
def get_total_comments_count(self, obj):
    """Count non-deleted node-page comments on the registration."""
    node_comments = obj.comment_set.filter(page='node', is_deleted=False)
    return node_comments.count()
def get_files_count(self, obj):
    """Return the files count, treating a missing/falsy value as 0."""
    count = obj.files_count
    return count if count else 0
def get_original_response_id(self, obj):
    """_id of the original (first-created) SchemaResponse on the root, or None."""
    original = obj.root.schema_responses.last()
    return original._id if original else None
def get_latest_response_id(self, obj):
    """_id of the most recent APPROVED SchemaResponse on the root, or None."""
    approved = obj.root.schema_responses.filter(
        reviews_state=ApprovalStates.APPROVED.db_name,
    )
    latest = approved.first()
    return latest._id if latest else None
def anonymize_registered_meta(self, obj):
    """
    Looks at every question on every page of the schema, for any titles
    that have a contributor-input block type. If present, deletes that question's response
    from meta_values.
    """
    # registered_meta is keyed by schema id; a registration has exactly one
    # entry, so take the single value.
    cleaned_registered_meta = strip_registered_meta_comments(list(obj.registered_meta.values())[0])
    return self.anonymize_fields(obj, cleaned_registered_meta)
def anonymize_registration_responses(self, obj):
    """
    For any questions that have a `contributor-input` block type, delete
    that question's response from registration_responses.

    We want to make sure author's names that need to be anonymized
    aren't surfaced when viewed through an anonymous VOL.
    """
    return self.anonymize_fields(obj, obj.registration_responses)
def anonymize_fields(self, obj, data):
    """Strip contributor-identifying answers from ``data`` when the request
    comes through an anonymous view-only link; returns the mapping."""
    if is_anonymized(self.context['request']):
        for key in obj.get_contributor_registration_response_keys():
            # pop() with a default tolerates keys absent from this payload.
            data.pop(key, None)
    return data
def check_perms(self, registration, user, validated_data):
    """
    While admin and write users can both make modifications to registrations,
    most fields are restricted to admin-only edits. You must be an admin
    contributor on the registration; you cannot have gotten your admin
    permissions through group membership.

    Additionally, provider_specific_metadata fields are only editable by
    provider admins/moderators, but those users are not allowed to edit
    any other fields.

    Raises PermissionDenied on any violation.
    Add fields that need admin perms to admin_only_editable_fields.
    """
    is_admin = registration.is_admin_contributor(user)
    can_edit = registration.can_edit(user=user)
    is_moderator = False
    if registration.provider:
        is_moderator = user.has_perm('accept_submissions', registration.provider)

    # Fail if non-moderator tries to edit provider_specific_metadata
    if 'provider_specific_metadata' in validated_data and not is_moderator:
        raise exceptions.PermissionDenied()

    # Fail if non-contributor moderator tries to edit
    # fields other than provider_specific_metadata
    if any(field != 'provider_specific_metadata' for field in validated_data) and not can_edit:
        raise exceptions.PermissionDenied()

    # Fail if non-admin attempts to modify admin_only fields
    admin_only_fields_present = any(
        field in self.admin_only_editable_fields for field in validated_data
    )
    if admin_only_fields_present and not is_admin:
        raise exceptions.PermissionDenied()
def update_registration_tags(self, registration, validated_data, auth):
    """Replace the registration's tags with those supplied in the payload."""
    requested_tags = validated_data.pop('tags', [])
    try:
        registration.update_tags(requested_tags, auth=auth)
    except NodeStateError as exc:
        # Registrations in certain states refuse tag edits; surface as a 409.
        raise Conflict(str(exc))
def retract_registration(self, registration, validated_data, user):
    """Initiate withdrawal of *registration* when requested via the payload.

    Pops ``is_pending_retraction`` and ``withdrawal_justification`` from
    ``validated_data``; raises ValidationError on inconsistent input or when
    the registration cannot be withdrawn in its current state.
    """
    is_pending_retraction = validated_data.pop('is_pending_retraction', None)
    withdrawal_justification = validated_data.pop('withdrawal_justification', None)
    if withdrawal_justification and not is_pending_retraction:
        raise exceptions.ValidationError(
            'You cannot provide a withdrawal_justification without a concurrent withdrawal request.',
        )
    if is_truthy(is_pending_retraction):
        if registration.is_pending_retraction:
            raise exceptions.ValidationError('This registration is already pending withdrawal.')
        try:
            retraction = registration.retract_registration(user, withdrawal_justification, save=True)
        except NodeStateError as err:
            raise exceptions.ValidationError(str(err))
        # Ask every active contributor (recursively, de-duplicated) to
        # approve or disapprove the withdrawal.
        retraction.ask(registration.get_active_contributors_recursive(unique_users=True))
    elif is_pending_retraction is not None:
        # An explicit falsy value was sent: withdrawals cannot be cancelled
        # through this flag.
        raise exceptions.ValidationError('You cannot set is_pending_withdrawal to False.')
def update(self, registration, validated_data):
    """Apply a PATCH to *registration*, enforcing field-level permissions.

    Each writable attribute with special handling is popped from
    ``validated_data`` and dispatched to its dedicated handler; whatever
    remains is passed to ``registration.update``. Returns the registration.
    """
    user = self.context['request'].user
    auth = Auth(user)
    self.check_perms(registration, user, validated_data)
    validated_data.pop('_id', None)
    if 'tags' in validated_data:
        self.update_registration_tags(registration, validated_data, auth)
    if 'custom_citation' in validated_data:
        registration.update_custom_citation(validated_data.pop('custom_citation'), auth)
    if 'license_type' in validated_data or 'license' in validated_data:
        # Translate license inputs into the node_license structure the model
        # updater expects.
        license_details = get_license_details(registration, validated_data)
        validated_data['node_license'] = license_details
        validated_data.pop('license_type', None)
        validated_data.pop('license', None)
    if 'affiliated_institutions' in validated_data:
        institutions_list = validated_data.pop('affiliated_institutions')
        new_institutions = [{'_id': institution} for institution in institutions_list]
        update_institutions(registration, new_institutions, user)
        registration.save()
    if 'subjects' in validated_data:
        subjects = validated_data.pop('subjects', None)
        self.update_subjects(registration, subjects, auth)
    if 'withdrawal_justification' in validated_data or 'is_pending_retraction' in validated_data:
        self.retract_registration(registration, validated_data, user)
    if 'is_public' in validated_data:
        # Registrations may be made public but never re-privatized.
        if validated_data.get('is_public') is False:
            raise exceptions.ValidationError('Registrations can only be turned from private to public.')
    if 'provider_specific_metadata' in validated_data:
        try:
            registration.update_provider_specific_metadata(
                validated_data.pop('provider_specific_metadata'),
            )
        except ValueError as e:
            raise exceptions.ValidationError(str(e))
    try:
        # Remaining attributes go through the model's generic updater.
        registration.update(validated_data, auth=auth)
    except ValidationError as e:
        raise InvalidModelValueError(detail=e.messages[0])
    except NodeUpdateError as err:
        raise exceptions.ValidationError(err.reason)
    except NodeStateError as err:
        raise exceptions.ValidationError(str(err))
    return registration
class Meta:
    # JSON:API resource type for this serializer.
    type_ = 'registrations'
class RegistrationCreateSerializer(RegistrationSerializer):
    """
    Overrides RegistrationSerializer to add draft_registration,
    registration_choice, and lift_embargo fields.
    """

    def expect_cleaner_attributes(self, request):
        # Newer API versions renamed several write-only attributes; branch on
        # the requested API version.
        return StrictVersion(getattr(request, 'version', '2.0')) >= StrictVersion(CREATE_REGISTRATION_FIELD_CHANGE_VERSION)

    def __init__(self, *args, **kwargs):
        super(RegistrationCreateSerializer, self).__init__(*args, **kwargs)
        request = kwargs['context']['request']
        # required fields defined here for the different versions
        if self.expect_cleaner_attributes(request):
            self.fields['draft_registration_id'] = ser.CharField(write_only=True)
        else:
            self.fields['draft_registration'] = ser.CharField(write_only=True)

    # For newer versions
    embargo_end_date = VersionedDateTimeField(write_only=True, allow_null=True, default=None)
    included_node_ids = ser.ListField(write_only=True, required=False)

    # For older versions
    lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
    children = ser.ListField(write_only=True, required=False)
    registration_choice = ser.ChoiceField(write_only=True, required=False, choices=['immediate', 'embargo'])

    users = RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<user._id>'},
        always_embed=True,
        required=False,
    )

    def get_registration_choice_by_version(self, validated_data):
        """
        Old API versions should pass in "immediate" or "embargo" under `registration_choice`.
        New API versions should pass in an "embargo_end_date" if it should be embargoed, else it will be None
        """
        if self.expect_cleaner_attributes(self.context['request']):
            if validated_data.get('registration_choice'):
                raise JSONAPIException(
                    source={'pointer': '/data/attributes/registration_choice'},
                    detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
                )
            return 'embargo' if validated_data.get('embargo_end_date', None) else 'immediate'
        return validated_data.get('registration_choice', 'immediate')

    def get_embargo_end_date_by_version(self, validated_data):
        """
        Old API versions should pass in "lift_embargo".
        New API versions should pass in "embargo_end_date"
        """
        if self.expect_cleaner_attributes(self.context['request']):
            if validated_data.get('lift_embargo'):
                raise JSONAPIException(
                    source={'pointer': '/data/attributes/lift_embargo'},
                    detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
                )
            return validated_data.get('embargo_end_date', None)
        return validated_data.get('lift_embargo')

    def get_children_by_version(self, validated_data):
        """
        Old API versions should pass in 'children'
        New API versions should pass in 'included_node_ids'.
        """
        if self.expect_cleaner_attributes(self.context['request']):
            return validated_data.get('included_node_ids', [])
        return validated_data.get('children', [])

    def create(self, validated_data):
        """Register a draft, optionally embargoed and/or restricted to a set
        of child nodes. Returns the new Registration.
        """
        auth = get_user_auth(self.context['request'])
        draft = validated_data.pop('draft', None)
        registration_choice = self.get_registration_choice_by_version(validated_data)
        embargo_lifted = self.get_embargo_end_date_by_version(validated_data)
        children = self.get_children_by_version(validated_data)

        if children:
            # First check that all children are valid
            child_nodes = Node.objects.filter(guids___id__in=children)
            if child_nodes.count() != len(children):
                raise exceptions.ValidationError('Some child nodes could not be found.')
            # Second check that metadata doesn't have files that are not in the child nodes being registered.
            registering = children + [draft.branched_from._id]
            orphan_files = self._find_orphan_files(registering, draft)
            if orphan_files:
                orphan_files_names = [file_data['selectedFileName'] for file_data in orphan_files]
                raise exceptions.ValidationError('All files attached to this form must be registered to complete the process. '
                                                 'The following file(s) are attached, but are not part of a component being'
                                                 ' registered: {}'.format(', '.join(orphan_files_names)))

        try:
            # Still validating metadata, but whether `registration_responses` or `registration_metadata` were populated
            # on the draft, the other field was built and populated as well. Both should exist.
            draft.validate_metadata(metadata=draft.registration_metadata, required_fields=True)
        except ValidationValueError:
            log_exception()  # Probably indicates a bug on our end, so log to sentry
            # TODO: Raise an error once our JSON schemas are updated

        try:
            registration = draft.register(auth, save=True, child_ids=children)
        except NodeStateError as err:
            # FIX: serialize the message (not the exception object) so the
            # rendered API detail matches the other handlers in this module.
            raise exceptions.ValidationError(str(err))

        if registration_choice == 'embargo':
            if not embargo_lifted:
                raise exceptions.ValidationError('lift_embargo must be specified.')
            embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
            try:
                registration.embargo_registration(auth.user, embargo_end_date)
            except ValidationError as err:
                raise exceptions.ValidationError(err.message)
        else:
            try:
                registration.require_approval(auth.user)
            except NodeStateError as err:
                # FIX: str(err) for a consistent error detail (see above).
                raise exceptions.ValidationError(str(err))

        registration.save()
        return registration

    def _find_orphan_files(self, registering, draft):
        """Collect file metadata entries attached to the draft that do not
        belong to any node being registered."""
        from website.archiver.utils import find_selected_files
        files = find_selected_files(draft.registration_schema, draft.registration_metadata)
        orphan_files = []
        for key, value in files.items():
            if 'extra' in value:
                for file_metadata in value['extra']:
                    if not self._is_attached_file_valid(file_metadata, registering):
                        orphan_files.append(file_metadata)
        return orphan_files

    def _is_attached_file_valid(self, file_metadata, registering):
        """
        Validation of file information on registration_metadata. Theoretically, the file information
        on registration_responses does not have to be valid, so we enforce their accuracy here,
        to ensure file links load properly.

        Verifying that nodeId in the file_metadata is one of the files we're registering. Verify
        that selectedFileName is the name of a file on the node. Verify that the sha256 matches
        a version on that file.

        :param file_metadata - under "registration_metadata"
        :param registering - node ids you are registering
        :return boolean
        """
        node_id = file_metadata.get('nodeId')
        if node_id not in registering:
            return False
        node = AbstractNode.load(node_id)
        if not node:
            # node in registration_metadata doesn't exist
            return False
        specified_sha = file_metadata.get('sha256', '')
        # File names may have been normalized differently at upload time, so
        # try both NFD and NFC forms before giving up.
        file = node.files.filter(name=normalize('NFD', file_metadata.get('selectedFileName', ''))).first() or \
            node.files.filter(name=normalize('NFC', file_metadata.get('selectedFileName', ''))).first()
        if not file:
            # file with this name does not exist on the node
            return False
        # The specified sha256 must match at least one version of the file;
        # any() short-circuits instead of scanning every version.
        return any(
            specified_sha == version.metadata.get('sha256')
            for version in file.versions.all()
        )
class RegistrationDetailSerializer(RegistrationSerializer):
    """
    Overrides RegistrationSerializer to make _id required and other fields writeable
    """

    id = IDField(source='_id', required=True)
    # Writable on detail: setting this to True requests withdrawal.
    pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
        source='is_pending_retraction', required=False,
        help_text='The registration is awaiting withdrawal approval by project admins.',
    ))
    withdrawal_justification = ser.CharField(required=False)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
    """Node-link serializer whose canonical URL lives under /registrations/."""

    def get_absolute_url(self, obj):
        request_kwargs = self.context['request'].parser_context['kwargs']
        return absolute_reverse(
            'registrations:registration-pointer-detail',
            kwargs={
                'node_link_id': obj._id,
                'node_id': request_kwargs['node_id'],
                'version': request_kwargs['version'],
            },
        )
class RegistrationContributorsSerializer(NodeContributorsSerializer):
    """Contributor serializer whose canonical URL lives under /registrations/."""

    def get_absolute_url(self, obj):
        request_kwargs = self.context['request'].parser_context['kwargs']
        return absolute_reverse(
            'registrations:registration-contributor-detail',
            kwargs={
                'user_id': obj.user._id,
                'node_id': request_kwargs['node_id'],
                'version': request_kwargs['version'],
            },
        )
class RegistrationFileSerializer(OsfStorageFileSerializer):
    """File serializer whose related links point at registration endpoints."""

    # Folder listing under the registration's file tree.
    files = NodeFileHyperLinkField(
        related_view='registrations:registration-files',
        related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder',
    )

    # Comments targeting this file, filtered by its guid.
    comments = FileRelationshipField(
        related_view='registrations:registration-comments',
        related_view_kwargs={'node_id': '<target._id>'},
        related_meta={'unread': 'get_unread_comments_count'},
        filter={'target': 'get_file_guid'},
    )

    node = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<target._id>'},
        help_text='The registration that this file belongs to',
    )
class RegistrationStorageProviderSerializer(NodeStorageProviderSerializer):
    """
    Overrides NodeStorageProviderSerializer to lead to correct registration file links
    """

    files = NodeFileHyperLinkField(
        related_view='registrations:registration-files',
        related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder',
        never_embed=True,
    )
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import logging
import os
import django
from django.core.urlresolvers import reverse
from django import http
from django.utils import timezone
from django.utils import unittest
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon_lib import exceptions
from horizon_lib.workflows import views
from openstack_horizon import api
from openstack_horizon.dashboards.identity.projects import workflows
from openstack_horizon.test import helpers as test
from openstack_horizon import usage
from openstack_horizon.usage import quotas
# Selenium-backed helpers are imported only when WITH_SELENIUM is set, so the
# suite stays importable on machines without a browser driver.
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
    from selenium.webdriver import ActionChains  # noqa
    from selenium.webdriver.common import keys
    from socket import timeout as socket_timeout  # noqa

INDEX_URL = reverse('horizon:identity:projects:index')
# BUG FIX: the user-role prefix must be built from the *user* member step slug
# and the group-role prefix from the *group* member step slug — they were
# swapped, so the role-assignment loops in the tests below checked the wrong
# form-field names.
USER_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
class TenantsViewTests(test.BaseAdminViewTests):
    """Admin-facing project index view tests (mox-stubbed keystone)."""

    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index(self):
        # Stub: keystone returns the full tenant fixture list, no next page.
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=None,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())

    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index_with_domain_context(self):
        # With a domain context in the session, only that domain's tenants
        # should be listed and the domain name shown in the page.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)

        domain_tenants = [tenant for tenant in self.tenants.list()
                          if tenant.domain_id == domain.id]

        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=domain.id,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([domain_tenants, False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, domain_tenants)
        self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
    """Project index as a regular (non-admin) user."""

    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index(self):
        # Non-admin listing is scoped to the current user and admin=False.
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 user=self.user.id,
                                 paginate=True,
                                 marker=None,
                                 admin=False) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
    """Flatten nova/cinder/neutron quota fixtures into one form-data dict."""
    cinder_quota = self.cinder_quotas.first()
    neutron_quota = self.neutron_quotas.first()
    quota_data = {}
    sources = [
        (quota, quotas.NOVA_QUOTA_FIELDS),
        (cinder_quota, quotas.CINDER_QUOTA_FIELDS),
        (neutron_quota, quotas.NEUTRON_QUOTA_FIELDS),
    ]
    for source, fields in sources:
        for field in fields:
            quota_data[field] = int(source.get(field).limit)
    return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
    """Effective domain: session override if present, else fixture default."""
    fallback = self.domain
    session = self.request.session
    info = {
        "id": session.get('domain_context', fallback.id),
        "name": session.get('domain_context_name', fallback.name),
    }
    return api.base.APIDictWrapper(info)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('get_default_domain',
                                   'get_default_role',
                                   'user_list',
                                   'group_list',
                                   'role_list'),
                    api.base: ('is_service_enabled',),
                    api.neutron: ('is_extension_supported',),
                    quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
    """GET of the create-project workflow renders all four steps and seeds
    the quota step's initial data from the default quotas."""
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()

    # init — recorded in the exact order the view makes these calls
    api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
        .MultipleTimes().AndReturn(True)
    api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
        .MultipleTimes().AndReturn(True)
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'security-group').AndReturn(True)
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)

    self.mox.ReplayAll()

    url = reverse('horizon:identity:projects:create')
    res = self.client.get(url)

    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # Neutron quota editing is not enabled here, so "subnet" renders hidden.
    self.assertContains(res, '<input type="hidden" name="subnet" '
                             'id="id_subnet" />', html=True)

    workflow = res.context['workflow']
    self.assertEqual(res.context['workflow'].name,
                     workflows.CreateProject.name)

    step = workflow.get_step("createprojectinfoaction")
    self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
    self.assertEqual(step.action.initial['injected_files'],
                     quota.get('injected_files').limit)
    self.assertQuerysetEqual(
        workflow.steps,
        ['<CreateProjectInfo: createprojectinfoaction>',
         '<UpdateProjectMembers: update_members>',
         '<UpdateProjectGroups: update_group_members>',
         '<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
    """Same as test_add_project_get, but with a domain context active."""
    ctx_domain = self.domains.get(id="1")
    self.setSessionValues(
        domain_context=ctx_domain.id,
        domain_context_name=ctx_domain.name,
    )
    self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
                                   'user_list',
                                   'group_list',
                                   'role_list',
                                   'domain_get'),
                    api.neutron: ('is_extension_supported',
                                  'tenant_quota_get'),
                    quotas: ('get_default_quota_data',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
    """With neutron quotas enabled, the subnet field renders editable and is
    seeded from the tenant's neutron quota."""
    quota = self.quotas.first()
    neutron_quotas = self.neutron_quotas.first()

    quotas.get_default_quota_data(IsA(http.HttpRequest)) \
        .AndReturn(quota)
    api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
        .MultipleTimes().AndReturn(True)
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'security-group').AndReturn(True)
    api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(neutron_quotas)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(self.roles.first())
    api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
        .AndReturn(self.users.list())
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .AndReturn(self.roles.list())
    api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
        .AndReturn(self.groups.list())
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .AndReturn(self.roles.list())
    self.mox.ReplayAll()

    res = self.client.get(reverse('horizon:identity:projects:create'))

    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # Django 1.6 renders number inputs for IntegerFields; older versions
    # render text inputs, hence the version-dependent expected markup.
    if django.VERSION >= (1, 6):
        self.assertContains(res, '''
            <input class="form-control"
                   id="id_subnet" min="-1"
                   name="subnet" type="number" value="10" />
        ''', html=True)
    else:
        self.assertContains(res, '''
            <input class="form-control"
                   name="subnet" id="id_subnet"
                   value="10" type="text" />
        ''', html=True)

    workflow = res.context['workflow']
    self.assertEqual(res.context['workflow'].name,
                     workflows.CreateProject.name)

    step = workflow.get_step("createprojectinfoaction")
    self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
    self.assertEqual(step.action.initial['subnet'],
                     neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
                                   'add_tenant_user_role',
                                   'tenant_create',
                                   'user_list',
                                   'group_list',
                                   'role_list',
                                   'domain_get'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages',),
                    api.cinder: ('tenant_quota_update',),
                    api.nova: ('tenant_quota_update',)})
def test_add_project_post(self, neutron=False):
    """POSTing the workflow creates the tenant and pushes nova/cinder quota
    updates; reused with neutron=True by the neutron variant below."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()

    # init
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    if neutron:
        # The neutron variant triggers a second disabled-quotas lookup.
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)

    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    # handle
    project_details = self._get_project_info(project)
    quota_data = self._get_quota_info(quota)

    api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
        .AndReturn(project)

    workflow_data = {}
    # workflow_data is empty at this point, so neither loop below records
    # any role-assignment expectations — no roles are posted in this test.
    for role in roles:
        if USER_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[USER_ROLE_PREFIX + role.id]
            for user_id in ulist:
                api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                  project=self.tenant.id,
                                                  user=user_id,
                                                  role=role.id)
    for role in roles:
        if GROUP_ROLE_PREFIX + role.id in workflow_data:
            ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
            for group_id in ulist:
                api.keystone.add_group_role(IsA(http.HttpRequest),
                                            role=role.id,
                                            group=group_id,
                                            project=self.tenant.id)

    nova_updated_quota = dict([(key, quota_data[key]) for key in
                               quotas.NOVA_QUOTA_FIELDS])
    api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                 project.id,
                                 **nova_updated_quota)
    cinder_updated_quota = dict([(key, quota_data[key]) for key in
                                 quotas.CINDER_QUOTA_FIELDS])
    api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                  project.id,
                                  **cinder_updated_quota)

    self.mox.ReplayAll()

    workflow_data.update(self._get_workflow_data(project, quota))

    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
    """Re-run the creation POST test with a domain context in the session."""
    ctx_domain = self.domains.get(id="1")
    self.setSessionValues(
        domain_context=ctx_domain.id,
        domain_context_name=ctx_domain.name,
    )
    self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
                                  'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
    """Creation POST also pushes neutron quota updates when enabled; the
    shared expectations come from test_add_project_post(neutron=True)."""
    quota_data = self.neutron_quotas.first()
    neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                  for key in quotas.NEUTRON_QUOTA_FIELDS])

    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'security-group').AndReturn(True)
    api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
        .MultipleTimes().AndReturn(True)
    api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                    self.tenant.id,
                                    **neutron_updated_quota)
    # Delegates ReplayAll + POST + assertions to the base scenario.
    self.test_add_project_post(neutron=True)
@test.create_stubs({api.keystone: ('user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas')})
def test_add_project_quota_defaults_error(self):
    """A nova failure while fetching default quotas surfaces as a warning on
    the create form rather than an exception."""
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()

    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    # Simulate the quota lookup blowing up with the nova fixture exception.
    quotas.get_default_quota_data(IsA(http.HttpRequest)) \
        .AndRaise(self.exceptions.nova)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    self.mox.ReplayAll()

    url = reverse('horizon:identity:projects:create')
    res = self.client.get(url)

    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
    """Quota-defaults failure behaves the same under a domain context."""
    ctx_domain = self.domains.get(id="1")
    self.setSessionValues(
        domain_context=ctx_domain.id,
        domain_context_name=ctx_domain.name,
    )
    self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
                                   'user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages')})
def test_add_project_tenant_create_error(self):
    """A keystone failure on tenant_create is handled gracefully: no form
    errors, and the user is redirected back to the index."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()

    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)

    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    # handle: tenant_create raises the keystone fixture exception.
    project_details = self._get_project_info(project)

    api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
        .AndRaise(self.exceptions.keystone)

    self.mox.ReplayAll()

    workflow_data = self._get_workflow_data(project, quota)

    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
    """tenant_create failure behaves the same under a domain context."""
    ctx_domain = self.domains.get(id="1")
    self.setSessionValues(
        domain_context=ctx_domain.id,
        domain_context_name=ctx_domain.name,
    )
    self.test_add_project_tenant_create_error()
    @test.create_stubs({api.keystone: ('tenant_create',
                                       'user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role',
                                       'add_tenant_user_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages'),
                        api.nova: ('tenant_quota_update',)})
    def test_add_project_quota_update_error(self):
        """Create-project workflow when the nova quota update fails.

        Tenant creation succeeds but ``nova.tenant_quota_update`` raises;
        the workflow must still finish with no form errors and redirect
        to the index.
        """
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # handle
        project_details = self._get_project_info(project)
        quota_data = self._get_quota_info(quota)
        api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
            .AndReturn(project)
        workflow_data = {}
        # NOTE(review): workflow_data is empty at this point, so neither
        # membership-check loop below is ever entered; no role-assignment
        # expectations are recorded here. Preserved as-is.
        for role in roles:
            if USER_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[USER_ROLE_PREFIX + role.id]
                for user_id in ulist:
                    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                      project=self.tenant.id,
                                                      user=user_id,
                                                      role=role.id)
        for role in roles:
            if GROUP_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
                for group_id in ulist:
                    api.keystone.add_group_role(IsA(http.HttpRequest),
                                                role=role.id,
                                                group=group_id,
                                                project=self.tenant.id)
        nova_updated_quota = dict([(key, quota_data[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        # The nova quota update is the failing step in this scenario.
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        workflow_data.update(self._get_workflow_data(project, quota))
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
    @test.create_stubs({api.keystone: ('tenant_create',
                                       'user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role',
                                       'add_tenant_user_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages'),
                        api.cinder: ('tenant_quota_update',),
                        api.nova: ('tenant_quota_update',)})
    def test_add_project_user_update_error(self):
        """Create-project workflow when a user-role assignment fails.

        Tenant creation and both quota updates succeed, but
        ``add_tenant_user_role`` raises; the POST must still complete
        with no form errors and redirect to the index.
        """
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # handle
        project_details = self._get_project_info(project)
        quota_data = self._get_quota_info(quota)
        api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
            .AndReturn(project)
        workflow_data = {}
        # NOTE(review): workflow_data is empty here, so the membership check
        # below never matches and the loop body (including both breaks) is
        # dead code; no add_tenant_user_role expectation is recorded.
        for role in roles:
            if USER_ROLE_PREFIX + role.id in workflow_data:
                ulist = workflow_data[USER_ROLE_PREFIX + role.id]
                for user_id in ulist:
                    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                                      project=self.tenant.id,
                                                      user=user_id,
                                                      role=role.id) \
                        .AndRaise(self.exceptions.keystone)
                    break
            break
        nova_updated_quota = dict([(key, quota_data[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota)
        cinder_updated_quota = dict([(key, quota_data[key]) for key in
                                     quotas.CINDER_QUOTA_FIELDS])
        api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                       project.id,
                                       **cinder_updated_quota)
        self.mox.ReplayAll()
        workflow_data.update(self._get_workflow_data(project, quota))
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_user_update_error()
    @test.create_stubs({api.keystone: ('user_list',
                                       'role_list',
                                       'group_list',
                                       'get_default_domain',
                                       'get_default_role'),
                        quotas: ('get_default_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_add_project_missing_field_error(self):
        """Create-project workflow rejects a blank project name.

        Posting the workflow with ``name`` emptied must re-render the form
        with a "field is required" validation error; no tenant_create call
        is expected (and none is stubbed).
        """
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        default_domain = self._get_default_domain()
        domain_id = default_domain.id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        # init
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(default_domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        self.mox.ReplayAll()
        workflow_data = self._get_workflow_data(project, quota)
        # Blank out the required field to trigger form validation.
        workflow_data["name"] = ""
        url = reverse('horizon:identity:projects:create')
        res = self.client.post(url, workflow_data)
        self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
    """Tests for the identity "update project" workflow.

    Each test records an ordered set of mox expectations for the keystone,
    nova, cinder and quota calls the workflow makes, replays them, then
    drives the view through the Django test client. Recording order is
    significant and must match the order the workflow issues the calls.
    """
    def _get_quota_info(self, quota):
        """Flatten nova/cinder/neutron quota objects into a form-data dict."""
        cinder_quota = self.cinder_quotas.first()
        neutron_quota = self.neutron_quotas.first()
        quota_data = {}
        for field in quotas.NOVA_QUOTA_FIELDS:
            quota_data[field] = int(quota.get(field).limit)
        for field in quotas.CINDER_QUOTA_FIELDS:
            quota_data[field] = int(cinder_quota.get(field).limit)
        for field in quotas.NEUTRON_QUOTA_FIELDS:
            quota_data[field] = int(neutron_quota.get(field).limit)
        return quota_data
    def _get_all_users(self, domain_id):
        """Return fixture users, filtered by domain when one is given."""
        if not domain_id:
            users = self.users.list()
        else:
            users = [user for user in self.users.list()
                     if user.domain_id == domain_id]
        return users
    def _get_all_groups(self, domain_id):
        """Return fixture groups, filtered by domain when one is given."""
        if not domain_id:
            groups = self.groups.list()
        else:
            groups = [group for group in self.groups.list()
                      if group.domain_id == domain_id]
        return groups
    def _get_proj_users(self, project_id):
        """Return fixture users belonging to the given project."""
        return [user for user in self.users.list()
                if user.project_id == project_id]
    def _get_proj_groups(self, project_id):
        """Return fixture groups belonging to the given project."""
        return [group for group in self.groups.list()
                if group.project_id == project_id]
    def _get_proj_role_assignment(self, project_id):
        """Return fixture role assignments scoped to the given project."""
        project_scope = {'project': {'id': project_id}}
        return self.role_assignments.filter(scope=project_scope)
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'roles_for_user',
                                       'tenant_get',
                                       'domain_get',
                                       'user_list',
                                       'roles_for_group',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas')})
    def test_update_project_get(self):
        """GET of the update-project workflow renders all steps.

        Verifies the workflow template, the initial form values populated
        from the project and its quota, and the expected step ordering.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        proj_users = self._get_proj_users(project.id)
        role_assignments = self._get_proj_role_assignment(project.id)
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                self.tenant.id, admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        # Keystone v3 lists assignments directly; v2 walks per-user roles.
        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        workflow = res.context['workflow']
        self.assertEqual(res.context['workflow'].name,
                         workflows.UpdateProject.name)
        step = workflow.get_step("update_info")
        self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
        self.assertEqual(step.action.initial['injected_files'],
                         quota.get('injected_files').limit)
        self.assertEqual(step.action.initial['name'], project.name)
        self.assertEqual(step.action.initial['description'],
                         project.description)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<UpdateProjectInfo: update_info>',
             '<UpdateProjectMembers: update_members>',
             '<UpdateProjectGroups: update_group_members>',
             '<UpdateProjectQuota: update_quotas>'])
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        api.nova: ('tenant_quota_update',),
                        api.cinder: ('tenant_quota_update',),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_update_project_save(self, neutron=False):
        """Happy-path save of the update-project workflow.

        Exercises project rename, user/group role add/remove, and nova +
        cinder quota updates. ``neutron`` is set by the neutron variant of
        this test, which records its own extra expectations first.
        Expects exactly one warning message (from the attempt to strip the
        admin's own roles) and a redirect to the index.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        proj_groups = self._get_proj_groups(project.id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()
        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                self.tenant.id, admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        # The neutron variant triggers a second disabled-quotas lookup.
        if neutron:
            quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
                .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        workflow_data[USER_ROLE_PREFIX + "1"] = ['3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['2']  # member role
        # Group assignment form data
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2']  # member role
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444
        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)
        # handle
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndReturn(project)
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id).AndReturn(proj_users)
        # admin user - try to remove all roles on current project, warning
        api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                    self.tenant.id) \
            .AndReturn(roles)
        # member user 1 - has role 1, will remove it
        api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                    self.tenant.id) \
            .AndReturn((roles[0],))
        # remove role 1
        api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
                                             project=self.tenant.id,
                                             user='2',
                                             role='1')
        # add role 2
        api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                          project=self.tenant.id,
                                          user='2',
                                          role='2')
        # member user 3 - has role 2
        api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                    self.tenant.id) \
            .AndReturn((roles[1],))
        # remove role 2
        api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
                                             project=self.tenant.id,
                                             user='3',
                                             role='2')
        # add role 1
        api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                          project=self.tenant.id,
                                          user='3',
                                          role='1')
        # Group assignments
        api.keystone.group_list(IsA(http.HttpRequest),
                                domain=domain_id,
                                project=self.tenant.id).AndReturn(proj_groups)
        # admin group - try to remove all roles on current project
        api.keystone.roles_for_group(IsA(http.HttpRequest),
                                     group='1',
                                     project=self.tenant.id) \
            .AndReturn(roles)
        for role in roles:
            api.keystone.remove_group_role(IsA(http.HttpRequest),
                                           role=role.id,
                                           group='1',
                                           project=self.tenant.id)
        # member group 1 - has role 1, will remove it
        api.keystone.roles_for_group(IsA(http.HttpRequest),
                                     group='2',
                                     project=self.tenant.id) \
            .AndReturn((roles[0],))
        # remove role 1
        api.keystone.remove_group_role(IsA(http.HttpRequest),
                                       role='1',
                                       group='2',
                                       project=self.tenant.id)
        # add role 2
        api.keystone.add_group_role(IsA(http.HttpRequest),
                                    role='2',
                                    group='2',
                                    project=self.tenant.id)
        # member group 3 - has role 2
        api.keystone.roles_for_group(IsA(http.HttpRequest),
                                     group='3',
                                     project=self.tenant.id) \
            .AndReturn((roles[1],))
        # remove role 2
        api.keystone.remove_group_role(IsA(http.HttpRequest),
                                       role='2',
                                       group='3',
                                       project=self.tenant.id)
        # add role 1
        api.keystone.add_group_role(IsA(http.HttpRequest),
                                    role='1',
                                    group='3',
                                    project=self.tenant.id)
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        nova_updated_quota = dict([(key, updated_quota[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota)
        cinder_updated_quota = dict([(key, updated_quota[key]) for key in
                                     quotas.CINDER_QUOTA_FIELDS])
        api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                       project.id,
                                       **cinder_updated_quota)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=0, warning=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('is_extension_supported',
                                      'tenant_quota_get',
                                      'tenant_quota_update')})
    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_update_project_save_with_neutron(self):
        """Same as test_update_project_save, plus neutron quota updates.

        Records the neutron-specific expectations first, then delegates to
        test_update_project_save(neutron=True) for the rest of the flow.
        """
        quota_data = self.neutron_quotas.first()
        neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                      for key in quotas.NEUTRON_QUOTA_FIELDS])
        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
            .MultipleTimes().AndReturn(True)
        api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota_data)
        api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                        self.tenant.id,
                                        **neutron_updated_quota)
        self.test_update_project_save(neutron=True)
    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_update_project_get_error(self):
        """GET redirects to the index when tenant_get fails."""
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages',),
                        api.nova: ('tenant_quota_update',)})
    def test_update_project_quota_update_error(self):
        """Workflow save when the nova quota update fails.

        Role assignments are applied first; the nova quota update then
        raises. Expects two error messages and a redirect to the index.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        proj_groups = self._get_proj_groups(project.id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()
        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # Group role assignment data
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota[0].limit = 444
        quota[1].limit = -1
        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)
        # handle
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndReturn(project)
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id).AndReturn(proj_users)
        # admin user - try to remove all roles on current project, warning
        api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                    self.tenant.id) \
            .AndReturn(roles)
        # member user 1 - has role 1, will remove it
        api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                    self.tenant.id) \
            .AndReturn((roles[1],))
        # member user 3 - has role 2
        api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                    self.tenant.id) \
            .AndReturn((roles[0],))
        # add role 2
        api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                          project=self.tenant.id,
                                          user='3',
                                          role='2')
        # Group assignment
        api.keystone.group_list(IsA(http.HttpRequest),
                                domain=domain_id,
                                project=self.tenant.id).AndReturn(proj_groups)
        # admin group 1- try to remove all roles on current project
        api.keystone.roles_for_group(IsA(http.HttpRequest),
                                     group='1',
                                     project=self.tenant.id) \
            .AndReturn(roles)
        # member group 1 - has no change
        api.keystone.roles_for_group(IsA(http.HttpRequest),
                                     group='2',
                                     project=self.tenant.id) \
            .AndReturn((roles[1],))
        # member group 3 - has role 1
        api.keystone.roles_for_group(IsA(http.HttpRequest),
                                     group='3',
                                     project=self.tenant.id) \
            .AndReturn((roles[0],))
        # add role 2
        api.keystone.add_group_role(IsA(http.HttpRequest),
                                    role='2',
                                    group='3',
                                    project=self.tenant.id)
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        nova_updated_quota = dict([(key, updated_quota[key]) for key in
                                   quotas.NOVA_QUOTA_FIELDS])
        # The nova quota update is the failing step in this scenario.
        api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                     project.id,
                                     **nova_updated_quota) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2, warning=0)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('tenant_get',
                                       'domain_get',
                                       'tenant_update',
                                       'get_default_role',
                                       'roles_for_user',
                                       'remove_tenant_user_role',
                                       'add_tenant_user_role',
                                       'user_list',
                                       'roles_for_group',
                                       'remove_group_role',
                                       'add_group_role',
                                       'group_list',
                                       'role_list',
                                       'role_assignments_list'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas',
                                 'tenant_quota_usages')})
    def test_update_project_member_update_error(self):
        """Workflow save when a user-role assignment fails.

        tenant_update succeeds, but add_tenant_user_role raises mid-way
        through the member updates. Expects two error messages and a
        redirect to the index.
        """
        keystone_api_version = api.keystone.VERSIONS.active
        project = self.tenants.first()
        quota = self.quotas.first()
        default_role = self.roles.first()
        domain_id = project.domain_id
        users = self._get_all_users(domain_id)
        proj_users = self._get_proj_users(project.id)
        groups = self._get_all_groups(domain_id)
        roles = self.roles.list()
        role_assignments = self._get_proj_role_assignment(project.id)
        quota_usages = self.quota_usages.first()
        # get/init
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(default_role)
        api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(roles)
        api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
            .AndReturn(groups)
        workflow_data = {}
        if keystone_api_version >= 3:
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        else:
            api.keystone.user_list(IsA(http.HttpRequest),
                                   project=self.tenant.id) \
                .AndReturn(proj_users)
            for user in proj_users:
                api.keystone.roles_for_user(IsA(http.HttpRequest),
                                            user.id,
                                            self.tenant.id).AndReturn(roles)
            api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                               project=self.tenant.id) \
                .AndReturn(role_assignments)
        workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
        workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
        # update some fields
        project._info["domain_id"] = domain_id
        project._info["name"] = "updated name"
        project._info["description"] = "updated description"
        quota.metadata_items = 444
        quota.volumes = 444
        updated_project = {"name": project._info["name"],
                           "description": project._info["description"],
                           "enabled": project.enabled}
        updated_quota = self._get_quota_info(quota)
        # handle
        quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
            .AndReturn(quota_usages)
        api.keystone.tenant_update(IsA(http.HttpRequest),
                                   project.id,
                                   **updated_project) \
            .AndReturn(project)
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id).AndReturn(proj_users)
        # admin user - try to remove all roles on current project, warning
        api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                    self.tenant.id).AndReturn(roles)
        # member user 1 - has role 1, will remove it
        api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                    self.tenant.id).AndReturn((roles[1],))
        # member user 3 - has role 2
        api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                    self.tenant.id).AndReturn((roles[0],))
        # add role 2
        api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                          project=self.tenant.id,
                                          user='3',
                                          role='2')\
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()
        # submit form data
        project_data = {"domain_id": project._info["domain_id"],
                        "name": project._info["name"],
                        "id": project.id,
                        "description": project._info["description"],
                        "enabled": project.enabled}
        workflow_data.update(project_data)
        workflow_data.update(updated_quota)
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2, warning=0)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.keystone: ('get_default_role',
                                       'tenant_get',
                                       'domain_get'),
                        quotas: ('get_tenant_quota_data',
                                 'get_disabled_quotas')})
    def test_update_project_when_default_role_does_not_exist(self):
        """GET raises NotFound when no default keystone role exists."""
        project = self.tenants.first()
        domain_id = project.domain_id
        quota = self.quotas.first()
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(None)  # Default role doesn't exist
        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                                admin=True) \
            .AndReturn(project)
        api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
            .AndReturn(self.domain)
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
        quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                     tenant_id=self.tenant.id) \
            .AndReturn(quota)
        self.mox.ReplayAll()
        url = reverse('horizon:identity:projects:update',
                      args=[self.tenant.id])
        try:
            # Avoid the log message in the test output when the workflow's
            # step action cannot be instantiated
            logging.disable(logging.ERROR)
            with self.assertRaises(exceptions.NotFound):
                self.client.get(url)
        finally:
            logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
    """Tests for the per-project usage view, exercised via its CSV export."""

    def _stub_nova_api_calls(self, nova_stu_enabled=True):
        # Stub out the nova/cinder calls shared by the usage tests.  The
        # SimpleTenantUsage extension check is recorded here; the remaining
        # expectations are recorded by the individual tests.
        self.mox.StubOutWithMock(api.nova, 'usage_get')
        self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
        self.mox.StubOutWithMock(api.nova, 'extension_supported')
        self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)

    def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
        # Stub the network-related calls made while rendering the usage page.
        self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
        self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
        self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
        if neutron_sg_enabled:
            self.mox.StubOutWithMock(api.network, 'security_group_list')
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())

    def test_usage_csv(self):
        self._test_usage_csv(nova_stu_enabled=True)

    def test_usage_csv_disabled(self):
        self._test_usage_csv(nova_stu_enabled=False)

    def _test_usage_csv(self, nova_stu_enabled=True):
        """Request the usage view as CSV and verify template and header row.

        When *nova_stu_enabled* is False, no nova usage_get call is expected.
        """
        now = timezone.now()
        usage_obj = api.nova.NovaUsage(self.usages.first())
        self._stub_nova_api_calls(nova_stu_enabled)
        # The view checks the extension a second time when rendering.
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        # Usage period: start of the current month up to the end of today.
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        if nova_stu_enabled:
            api.nova.usage_get(IsA(http.HttpRequest),
                               self.tenant.id,
                               start, end).AndReturn(usage_obj)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.cinder_limits['absolute'])
        self._stub_neutron_api_calls()
        self.mox.ReplayAll()
        project_id = self.tenants.first().id
        csv_url = reverse('horizon:identity:projects:usage',
                          args=[project_id]) + "?format=csv"
        res = self.client.get(csv_url)
        self.assertTemplateUsed(res, 'project/overview/usage.csv')
        self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
        hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
               'Uptime (Seconds),State')
        # CSV rows are CRLF-terminated; check the header line is present.
        self.assertContains(res, '%s\r\n' % hdr)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
    """Browser-driven tests for the identity projects table.

    Covers inline (AJAX) editing of the tenant-name cell and the membership
    widget on the project-create form.  Only run when WITH_SELENIUM is set.
    """

    @test.create_stubs(
        {api.keystone: ('tenant_list', 'tenant_get', 'tenant_update')})
    def test_inline_editing_update(self):
        """Edit a tenant name in place and verify the refreshed cell."""
        # Tenant List
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=None,
                                 marker=None,
                                 paginate=True) \
            .AndReturn([self.tenants.list(), False])
        # Edit mod
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        # Update - requires get and update
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        api.keystone.tenant_update(
            IgnoreArg(),
            u'1',
            description='a test tenant.',
            enabled=True,
            name=u'Changed test_tenant')
        # Refreshing cell with changed name
        changed_tenant = copy.copy(self.tenants.list()[0])
        changed_tenant.name = u'Changed test_tenant'
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(changed_tenant)
        self.mox.ReplayAll()
        self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Check the presence of the important elements
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        cell_wrapper = td_element.find_element_by_class_name(
            'table_cell_wrapper')
        edit_button_wrapper = td_element.find_element_by_class_name(
            'table_cell_action')
        edit_button = edit_button_wrapper.find_element_by_tag_name('button')
        # Hovering over td and clicking on edit button
        action_chains = ActionChains(self.selenium)
        action_chains.move_to_element(cell_wrapper).click(edit_button)
        action_chains.perform()
        # Waiting for the AJAX response for switching to editing mod
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
        # Changing project name in cell form
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        name_input = td_element.find_element_by_tag_name('input')
        name_input.send_keys(keys.Keys.HOME)
        name_input.send_keys("Changed ")
        # Saving new project name by AJAX
        td_element.find_element_by_class_name('inline-edit-submit').click()
        # Waiting for the AJAX response of cell refresh
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']"))
        # Checking new project name after cell refresh
        data_wrapper = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']")
        self.assertTrue(data_wrapper.text == u'Changed test_tenant',
                        "Error: saved tenant name is expected to be "
                        "'Changed test_tenant'")

    @test.create_stubs(
        {api.keystone: ('tenant_list', 'tenant_get')})
    def test_inline_editing_cancel(self):
        """Enter inline-edit mode, cancel, and verify the name is unchanged."""
        # Tenant List
        api.keystone.tenant_list(IgnoreArg(),
                                 domain=None,
                                 marker=None,
                                 paginate=True) \
            .AndReturn([self.tenants.list(), False])
        # Edit mod
        api.keystone.tenant_get(IgnoreArg(),
                                u'1',
                                admin=True) \
            .AndReturn(self.tenants.list()[0])
        # Cancel edit mod is without the request
        self.mox.ReplayAll()
        self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
        # Check the presence of the important elements
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        cell_wrapper = td_element.find_element_by_class_name(
            'table_cell_wrapper')
        edit_button_wrapper = td_element.find_element_by_class_name(
            'table_cell_action')
        edit_button = edit_button_wrapper.find_element_by_tag_name('button')
        # Hovering over td and clicking on edit
        action_chains = ActionChains(self.selenium)
        action_chains.move_to_element(cell_wrapper).click(edit_button)
        action_chains.perform()
        # Waiting for the AJAX response for switching to editing mod
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
        # Click on cancel button
        td_element = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']")
        td_element.find_element_by_class_name('inline-edit-cancel').click()
        # Cancel is via javascript, so it should be immediate
        # Checking that tenant name is not changed
        data_wrapper = self.selenium.find_element_by_xpath(
            "//td[@data-update-url='/identity/?action=cell_update"
            "&table=tenants&cell_name=name&obj_id=1']"
            "/div[@class='table_cell_wrapper']"
            "/div[@class='table_cell_data_wrapper']")
        self.assertTrue(data_wrapper.text == u'test_tenant',
                        "Error: saved tenant name is expected to be "
                        "'test_tenant'")

    @test.create_stubs({api.keystone: ('get_default_domain',
                                       'get_default_role',
                                       'user_list',
                                       'group_list',
                                       'role_list'),
                        api.base: ('is_service_enabled',),
                        quotas: ('get_default_quota_data',)})
    def test_membership_list_loads_correctly(self):
        """The create-project membership widget lists every Keystone user."""
        member_css_class = ".available_members"
        users = self.users.list()
        # Network and volume quota lookups are skipped in this scenario.
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .MultipleTimes().AndReturn(False)
        api.keystone.get_default_domain(IsA(http.HttpRequest)) \
            .AndReturn(self.domain)
        quotas.get_default_quota_data(IsA(http.HttpRequest)) \
            .AndReturn(self.quotas.first())
        api.keystone.get_default_role(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(self.roles.first())
        api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
            .AndReturn(users)
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
            .AndReturn(self.groups.list())
        api.keystone.role_list(IsA(http.HttpRequest)) \
            .AndReturn(self.roles.list())
        self.mox.ReplayAll()
        self.selenium.get("%s%s" %
                          (self.live_server_url,
                           reverse('horizon:identity:projects:create')))
        members = self.selenium.find_element_by_css_selector(member_css_class)
        for user in users:
            self.assertIn(user.name, members.text)
| |
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Exceptions for the :mod:`dxpy` package.
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import sys, json, traceback, errno, socket
import requests
from requests.exceptions import HTTPError
import dxpy
# Exit status used by err_exit() for "expected" failures (user error, network
# trouble, Ctrl-C) as opposed to unexpected internal errors (exit status 1).
EXPECTED_ERR_EXIT_STATUS = 3
class DXError(Exception):
    '''Base class for all exceptions raised by this package.'''


class DXAPIError(DXError):
    '''
    Raised when the API server responds with a status code other than 200 (OK). See
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Protocols#Errors for complete documentation of API errors,
    including those reflected by subclasses of this class.
    '''
    def __init__(self, content, code, timestamp="", req_id=""):
        # *content* is the decoded JSON error body: {"error": {"type": ...,
        # "message": ..., "details": <optional>}}.
        error = content["error"]
        self.name = error["type"]
        self.msg = error["message"]
        self.details = error.get("details")
        self.code = code
        self.timestamp = timestamp
        self.req_id = req_id

    def error_message(self):
        '''Return a one-line description of the error.'''
        summary = "{}, code {}. Request Time={}, Request ID={}".format(
            self.msg, self.code, self.timestamp, self.req_id)
        # Prefix the server-reported type unless it matches this class name.
        if self.name != self.__class__.__name__:
            summary = "{}: {}".format(self.name, summary)
        return summary

    def __str__(self):
        text = self.error_message()
        if self.details:
            text += "\nDetails: " + json.dumps(self.details, indent=4)
        return text
# ---------------------------------------------------------------------------
# Concrete API error types.  Each subclass mirrors an error "type" string
# reported by the API server (see DXAPIError).
# ---------------------------------------------------------------------------

class MalformedJSON(DXAPIError):
    ''' Raised when the input could not be parsed as JSON. '''

class InvalidAuthentication(DXAPIError):
    ''' Raised when the provided OAuth2 token is invalid. '''

class PermissionDenied(DXAPIError):
    ''' Raised when the supplied credentials have insufficient permissions to perform this action. '''

class SpendingLimitExceeded(DXAPIError):
    ''' Raised when the spending limit has been reached for the account that would be billed for this action. '''

class ResourceNotFound(DXAPIError):
    ''' Raised when a specified entity or resource could not be found. '''

class InvalidInput(DXAPIError):
    ''' Raised when the input is syntactically correct (JSON), but semantically incorrect (for example, a JSON array
    is provided where a hash was required; or a required parameter was missing, etc.). '''

class InvalidState(DXAPIError):
    ''' Raised when the operation is not allowed at this object state. '''

class InvalidType(DXAPIError):
    ''' Raised when an object specified in the request is of invalid type. '''

class RateLimitConditional(DXAPIError):
    ''' Raised when the rate of invalid requests is too high. '''

class InternalError(DXAPIError):
    ''' Raised when the server encountered an internal error. '''

class ServiceUnavailable(DXAPIError):
    ''' Raised when an API service was temporarily unavailable. '''

# ---------------------------------------------------------------------------
# Client-side (non-API) error types.
# ---------------------------------------------------------------------------

class DXFileError(DXError):
    '''Exception for :class:`dxpy.bindings.dxfile.DXFile`.'''

class DXIncompleteReadsError(DXError):
    '''Exception for :class:`dxpy.bindings.dxfile.DXFile` when returned read data is shorter than requested.'''

class DXPartLengthMismatchError(DXFileError):
    '''Exception raised by :class:`dxpy.bindings.dxfile.DXFile` on part length mismatch.'''

class DXChecksumMismatchError(DXFileError):
    '''Exception raised by :class:`dxpy.bindings.dxfile.DXFile` on checksum mismatch.'''

class DXGTableError(DXError):
    '''Exception for :class:`dxpy.bindings.dxgtable.DXGTable`.'''

class DXSearchError(DXError):
    '''Exception for :mod:`dxpy.bindings.search` methods.'''

class DXAppletError(DXError):
    '''Exception for :class:`dxpy.bindings.dxapplet.DXApplet`.'''

class DXJobFailureError(DXError):
    '''Exception produced by :class:`dxpy.bindings.dxjob.DXJob` when a job fails.'''

class ProgramError(DXError):
    '''Deprecated. Use :class:`AppError` instead.'''

class AppError(ProgramError):
    '''
    Base class for fatal exceptions to be raised while using :mod:`dxpy` inside
    DNAnexus execution containers.

    This exception is thrown for user errors, and the error message is
    presented to the user. Throwing this exception will cause the Python
    execution template to write exception information into the file
    *job_error.json* in the current working directory, allowing reporting of
    the error state through the DNAnexus API.
    '''

class AppInternalError(DXError):
    '''
    Base class for fatal exceptions to be raised while using :mod:`dxpy` inside
    DNAnexus execution containers.

    This exception is intended for internal App errors, whose message goes to
    the App developer. Throwing this exception will cause the Python execution
    template to write exception information into the file ``job_error.json`` in
    the current working directory, allowing reporting of the error state
    through the DNAnexus API.
    '''

class DXCLIError(DXError):
    '''
    Exception class for generic errors in the command-line client.
    '''

class ContentLengthError(HTTPError):
    '''
    Raised when the actual content length received from the server does not
    match the "Content-Length" header.
    '''

class BadJSONInReply(ValueError):
    '''
    Raised when the server returned invalid JSON in the response body. Possible reasons
    for this are the network connection breaking, or overload on the server.
    '''

class UrllibInternalError(AttributeError):
    '''
    Exception class for AttributeError raised from urllib3.
    '''
def format_exception(e):
    """Return the exception's type and message as a line-wrapped string."""
    from .utils.printing import fill
    summary_lines = traceback.format_exception_only(type(e), e)
    return '\n'.join(map(fill, summary_lines))
def exit_with_exc_info(code=1, message='', print_tb=False, exception=None):
    '''Exits the program, printing information about the last exception (if
    any) and an optional error message. Uses *exception* instead if provided.

    :param code: Exit code.
    :type code: integer (valid exit code, 0-255)
    :param message: Message to be printed after the exception information.
    :type message: string
    :param print_tb: If set to True, prints the exception traceback; otherwise, suppresses it.
    :type print_tb: boolean
    :type exception: an exception to use in place of the last exception raised
    '''
    if exception is not None:
        exc_type, exc_value = exception.__class__, exception
    else:
        exc_type, exc_value = sys.exc_info()[:2]

    if exc_type is not None:
        if print_tb:
            # Note: prints the *current* in-flight exception's traceback.
            traceback.print_exc()
        elif isinstance(exc_value, KeyboardInterrupt):
            sys.stderr.write('^C\n')
        else:
            sys.stderr.writelines(
                traceback.format_exception_only(exc_type, exc_value))

    sys.stderr.write(message)
    if message and not message.endswith('\n'):
        sys.stderr.write('\n')
    sys.exit(code)
# Low-level transport failures treated as "expected" network trouble rather
# than bugs.
# NOTE(review): requests.packages is the legacy alias for requests' vendored
# urllib3 -- confirm these attribute paths still resolve before upgrading the
# requests dependency.
network_exceptions = (requests.packages.urllib3.exceptions.ProtocolError,
                      requests.packages.urllib3.exceptions.DecodeError,
                      requests.packages.urllib3.exceptions.ConnectTimeoutError,
                      requests.packages.urllib3.exceptions.ReadTimeoutError,
                      requests.packages.urllib3.connectionpool.HTTPException,
                      HTTPError,
                      socket.error)

# Default set of exception types err_exit() treats as expected error
# conditions (exit status 3, traceback suppressed unless debugging).
default_expected_exceptions = network_exceptions + (DXAPIError,
                                                    DXCLIError,
                                                    KeyboardInterrupt)
def err_exit(message='', code=None, expected_exceptions=default_expected_exceptions, arg_parser=None,
             ignore_sigpipe=True, exception=None):
    '''Exits the program, printing information about the last exception (if
    any) and an optional error message. Uses *exception* instead if provided.

    Uses **expected_exceptions** to set the error code and decide whether to
    suppress the error traceback.

    :param message: Message to be printed after the exception information.
    :type message: string
    :param code: Exit code.
    :type code: integer (valid exit code, 0-255)
    :param expected_exceptions: Exceptions for which to exit with error code 3 (expected error condition) and suppress the stack trace (unless the _DX_DEBUG environment variable is set).
    :type expected_exceptions: iterable
    :param arg_parser: argparse.ArgumentParser object used in the program (optional)
    :param ignore_sigpipe: Whether to exit silently with code 3 when IOError with code EPIPE is raised. Default true.
    :type ignore_sigpipe: boolean
    :param exception: an exception to use in place of the last exception raised
    '''
    if arg_parser is not None:
        message = arg_parser.prog + ": " + message

    exc = exception if exception is not None else sys.exc_info()[1]
    if isinstance(exc, SystemExit):
        # Fix: re-raise the exception object itself.  A bare `raise` only
        # works inside an active `except` block and raised a RuntimeError
        # when *exception* was passed in explicitly.
        raise exc
    elif isinstance(exc, expected_exceptions):
        # Expected failure: exit 3, hide the traceback unless debugging.
        exit_with_exc_info(EXPECTED_ERR_EXIT_STATUS, message, print_tb=dxpy._DEBUG > 0, exception=exception)
    elif ignore_sigpipe and isinstance(exc, IOError) and getattr(exc, 'errno', None) == errno.EPIPE:
        # Broken pipe (e.g. output piped into `head`): exit quietly.
        if dxpy._DEBUG > 0:
            print("Broken pipe", file=sys.stderr)
        sys.exit(3)
    else:
        # Unexpected failure: always show the traceback.
        if code is None:
            code = 1
        exit_with_exc_info(code, message, print_tb=True, exception=exception)
| |
import asyncio
from base import Event
class TerminalEmulator(object):
    """Minimal in-memory ANSI/VT100-style terminal emulator.

    Maintains a ``height`` x ``width`` grid of single-character strings
    (``lines``) plus a cursor position.  Bytes fed to :meth:`write` are parsed
    by a plain generator (:meth:`processInput`) that is driven synchronously,
    one byte at a time, via ``send``.

    Changes from the previous revision:

    * The ``@asyncio.coroutine`` decorators were removed.  That API was
      deleted in Python 3.11, and these generators were never awaited -- they
      are driven directly with ``next``/``send``, so the decorators were
      unnecessary.
    * ``ESC [ 2 K`` (erase whole line) no longer raises ``TypeError``: the
      handler passed ``self`` explicitly to the bound method
      ``clearLineRange``.
    * :meth:`clearCharacterSpan` now honours its ``line`` parameter instead
      of always clearing the cursor row (all existing callers pass the
      cursor row, so behaviour is unchanged).
    """

    def __init__(self):
        super(TerminalEmulator, self).__init__()
        self._logger = None
        self.revision = 0
        self._width = 0
        self._height = 0
        self.lines = []        # rows; each row is a list of 1-char strings
        self.cursorX = 0
        self.cursorY = 0
        self.coroutine = None  # lazily-created parser generator
        self.peekBuffer = None # one-byte lookahead used by the parser
        self.resize(80, 24)

    # TerminalEmulator

    @property
    def logger(self):
        return self._logger

    @logger.setter
    def logger(self, logger):
        self._logger = logger

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @width.setter
    def width(self, width):
        self.resize(width, self.height)

    @height.setter
    def height(self, height):
        self.resize(self.width, height)

    def clear(self):
        """Blank every cell; the cursor is left where it is."""
        for y in range(0, self.height):
            self.lines[y] = [" "] * self.width

    def reset(self):
        """Blank the screen and home the cursor."""
        self.clear()
        self.cursorX = 0
        self.cursorY = 0

    def resize(self, w, h):
        """Resize the grid to *w* columns by *h* rows.

        Existing content is truncated or padded with spaces; the cursor is
        clamped back inside the new bounds.
        """
        if self._width != w:
            self.lines = [line[0:w] for line in self.lines]
            for y in range(0, self._height):
                line = self.lines[y]
                while len(line) < w:
                    line.append(" ")
            self._width = w
        if self._height != h:
            self.lines = self.lines[0:h]
            while h > len(self.lines):
                self.lines.append([" "] * self._width)
            self._height = h
        self.clampCursor()

    @property
    def buffer(self):
        """The whole screen as one newline-joined string (cursor marked)."""
        return "\n".join([self.bufferLine(y) for y in range(0, self.height)])

    def bufferLineRange(self, startLine, endLine):
        """Rows [startLine, endLine) joined with newlines."""
        return "\n".join([self.bufferLine(y) for y in range(startLine, endLine)])

    def bufferLine(self, line):
        """Render one row.

        Non-cursor rows are right-stripped; on the cursor's row a
        zero-width-space + combining-underline marker is inserted at the
        cursor column so callers can see where the cursor is.
        """
        if line != self.cursorY:
            return "".join(self.lines[line]).rstrip()
        return "".join(self.lines[line][0:self.cursorX]) + "\u200B\u0332" + "".join(self.lines[line][(self.cursorX):])

    def write(self, bytes):
        """Feed raw bytes (any iterable of ints) into the emulator."""
        if self.coroutine is None:
            self.coroutine = self.processInput()
            next(self.coroutine)  # prime the generator up to its first yield
        for b in bytes:
            self.coroutine.send(b)

    # Internal

    def clampCursor(self):
        # Keep the cursor inside the grid.
        self.cursorX = max(0, min(self.cursorX, self.width - 1))
        self.cursorY = max(0, min(self.cursorY, self.height - 1))

    def setCursorPos(self, x, y):
        self.cursorX = x
        self.cursorY = y
        self.clampCursor()

    def processInput(self):
        """Generator-based byte parser: control bytes and CSI escape
        sequences update the grid/cursor; everything else is printed."""
        while True:
            c = yield from self.readCharacter()
            if c == "\x08":
                # Backspace: wrap to the end of the previous line at column 0.
                if self.cursorX == 0:
                    if self.cursorY > 0:
                        self.setCursorPos(self.width - 1, self.cursorY - 1)
                    else:
                        pass
                else:
                    self.setCursorPos(self.cursorX - 1, self.cursorY)
            elif c == "\r":
                self.processCarriageReturn()
            elif c == "\n":
                self.processLineFeed()
            elif c == "\x1b":
                c = yield from self.peekCharacter()
                if c == "[":
                    # CSI sequence: ESC [ [?] [n [; m]] <final byte>
                    self.advance()
                    c = yield from self.peekCharacter()
                    if c == "?": self.advance()  # private-mode prefix ignored
                    c = yield from self.peekCharacter()
                    if c == "A":
                        self.advance()
                        self.setCursorPos(self.cursorX, self.cursorY - 1)
                    elif c == "B":
                        self.advance()
                        self.setCursorPos(self.cursorX, self.cursorY + 1)
                    elif c == "C":
                        self.advance()
                        self.setCursorPos(self.cursorX + 1, self.cursorY)
                    elif c == "D":
                        self.advance()
                        self.setCursorPos(self.cursorX - 1, self.cursorY)
                    elif c == "H":
                        self.advance()
                        self.setCursorPos(0, 0)
                    elif c == "J":
                        # Erase from cursor to end of screen.
                        self.advance()
                        self.clearCharacterSpan(self.cursorY, self.cursorX, self.width)
                        self.clearLineRange(self.cursorY + 1, self.height)
                    elif c == "K":
                        # Erase from cursor to end of line.
                        self.advance()
                        self.clearCharacterSpan(self.cursorY, self.cursorX, self.width)
                    else:
                        # Parameterized form: n [; m] <final byte>.
                        nString = yield from self.readNumber()
                        mString = ""
                        semicolon = False
                        if (yield from self.acceptCharacter(";")):
                            semicolon = True
                            mString = yield from self.readNumber()
                        n = int(nString) if len(nString) > 0 else 1
                        m = int(mString) if len(mString) > 0 else 1
                        c = yield from self.peekCharacter()
                        if c == "A":
                            self.advance()
                            self.setCursorPos(self.cursorX, self.cursorY - n)
                        elif c == "B":
                            self.advance()
                            self.setCursorPos(self.cursorX, self.cursorY + n)
                        elif c == "C":
                            self.advance()
                            self.setCursorPos(self.cursorX + n, self.cursorY)
                        elif c == "D":
                            self.advance()
                            self.setCursorPos(self.cursorX - n, self.cursorY)
                        elif c == "G":
                            self.advance()
                            self.setCursorPos(n - 1, self.cursorY)
                        elif c == "H":
                            self.advance()
                            self.setCursorPos(m - 1, n - 1)
                        elif c == "J":
                            self.advance()
                            if n == 0 or n == 1:
                                # Partial-screen erase is unsupported; echo
                                # the rejected sequence into the buffer.
                                rejectedString = "\\x1b[" + nString
                                if semicolon:
                                    rejectedString += ";" + mString
                                rejectedString += "J"
                                self.writeString(rejectedString)
                                if self.logger is not None:
                                    self.logger.log("TerminalEmulator: Rejecting " + rejectedString)
                            elif n == 2:
                                self.reset()
                        elif c == "K":
                            self.advance()
                            if n == 0:
                                self.clearCharacterSpan(self.cursorY, self.cursorX, self.width)
                            elif n == 1:
                                self.clearCharacterSpan(self.cursorY, 0, self.cursorX)
                            elif n == 2:
                                # BUG FIX: previously called
                                # self.clearLineRange(self, ...) -- passing
                                # self twice raised TypeError on ESC[2K.
                                self.clearLineRange(self.cursorY, self.cursorY + 1)
                            else:
                                raise RuntimeError()
                        elif c == "P":
                            # Delete n characters, shifting the rest left.
                            self.advance()
                            for x in range(0, n):
                                del self.lines[self.cursorY][self.cursorX]
                                self.lines[self.cursorY].append(" ")
                        elif c == "d":
                            self.advance()
                            self.setCursorPos(self.cursorX, n - 1)
                        elif c == "h" or c == "l" or c == "m" or c == "r":
                            # Mode set/reset, SGR and scroll-region: ignored.
                            self.advance()
                        else:
                            rejectedString = "\\x1b[" + nString
                            if semicolon:
                                rejectedString += ";" + mString
                            self.writeString(rejectedString)
                            if self.logger is not None:
                                self.logger.log("TerminalEmulator: Rejecting " + rejectedString + ("\\x%02x" % ord(c)) + " / " + c)
                elif c == ">": self.advance()
                elif c == "(": self.advance() # Set default font
                elif c == ")": self.advance() # Set alternate font
                elif c == "D": self.advance() # Scroll down one line
                elif c == "M": self.advance() # Scroll up one line
                else:
                    rejectedString = "\\x1b" + c
                    self.writeString(rejectedString)
                    if self.logger is not None:
                        self.logger.log("TerminalEmulator: Rejecting \\x1b" + ("\\x%02x" % ord(c)) + " / " + c)
            else:
                self.writeCharacter(c)

    # Input

    def advance(self):
        """Consume the currently-peeked byte."""
        if self.peekBuffer is None:
            raise RuntimeError()
        self.peekBuffer = None

    def acceptByte(self, uint8):
        """Consume the next byte iff it equals *uint8*; return whether it did."""
        if uint8 == (yield from self.peekByte()):
            self.advance()
            return True
        return False

    def acceptCharacter(self, c):
        """Consume the next character iff it equals *c*; return whether it did."""
        if c == (yield from self.peekCharacter()):
            self.advance()
            return True
        return False

    def peekByte(self):
        """Yield for the next byte if none is buffered; return it unconsumed."""
        if self.peekBuffer is None:
            self.peekBuffer = yield
        return self.peekBuffer

    def peekCharacter(self):
        return chr((yield from self.peekByte()))

    def readByte(self):
        uint8 = yield from self.peekByte()
        self.advance()
        return uint8

    def readCharacter(self):
        return chr((yield from self.readByte()))

    def readNumber(self):
        """Consume a (possibly empty) run of digits; return them as a string."""
        number = ""
        while True:
            c = yield from self.peekCharacter()
            if not c.isdigit(): break
            self.peekBuffer = None
            number += c
        return number

    # Output

    def clearCharacterSpan(self, line, startColumn, endColumn):
        """Blank columns [startColumn, endColumn) on *line*.

        (Previously this ignored ``line`` and always cleared the cursor row;
        every caller passes the cursor row, so behaviour is unchanged.)
        """
        for x in range(startColumn, endColumn):
            self.lines[line][x] = " "

    def clearLineRange(self, startLine, endLine):
        """Blank rows [startLine, endLine)."""
        for y in range(startLine, endLine):
            self.lines[y] = [" "] * self.width

    def processCarriageReturn(self):
        self.cursorX = 0

    def processLineFeed(self):
        # Move to column 0 of the next row, scrolling the grid up when the
        # cursor falls off the bottom.
        self.cursorX = 0
        self.cursorY += 1
        if self.cursorY >= self.height:
            del self.lines[0]
            self.lines.append([" "] * self.width)
            self.cursorY = self.cursorY - 1

    def writeCharacter(self, c):
        self.lines[self.cursorY][self.cursorX] = c
        self.cursorX += 1
        if self.cursorX >= self.width:
            self.processLineFeed()

    def writeString(self, s):
        for c in s:
            self.writeCharacter(c)
| |
import hashlib
import re
import time
import socket
import exceptions
from twisted.internet import task, defer
from twisted.python import log
class Event(object):
    """Tensor Event object

    All sources pass these to the queue, which form a proxy object
    to create protobuf Event objects

    :param state: Some sort of string < 255 chars describing the state
    :param service: The service name for this event
    :param description: A description for the event, ie. "My house is on fire!"
    :param metric: int or float metric for this event
    :param ttl: TTL (time-to-live) for this event
    :param tags: List of tag strings
    :param hostname: Hostname for the event (defaults to system fqdn)
    :param aggregation: Aggregation function
    :param attributes: A dictionary of key/value attributes for this event
    :param evtime: Event timestamp override
    """

    def __init__(self, state, service, description, metric, ttl, tags=None,
                 hostname=None, aggregation=None, evtime=None, attributes=None,
                 type='riemann'):
        self.state = state
        self.service = service
        self.description = description
        self.metric = metric
        self.ttl = ttl
        self.tags = [] if tags is None else tags
        self.attributes = attributes
        self.aggregation = aggregation
        self._type = type
        # Use the supplied timestamp when given, otherwise stamp with "now".
        self.time = evtime if evtime else time.time()
        # Fall back to the machine's reverse-resolved hostname.
        self.hostname = hostname or socket.gethostbyaddr(socket.gethostname())[0]

    def id(self):
        """Identifier for this event: '<hostname>.<service>'."""
        return '%s.%s' % (self.hostname, self.service)

    def __repr__(self):
        fields = (
            ('hostname', self.hostname),
            ('state', self.state),
            ('service', self.service),
            ('metric', self.metric),
            ('ttl', self.ttl),
            ('tags', self.tags),
            ('aggregation', self.aggregation),
        )
        return "<Event %s>" % ','.join(
            '%s=%s' % (name, repr(value)) for name, value in fields)

    def copyWithMetric(self, m):
        """Return a new Event like this one but with metric *m* and a fresh
        timestamp.

        NOTE: attributes, evtime and type are not carried over (matches the
        original behaviour).
        """
        return Event(
            self.state, self.service, self.description, m, self.ttl, self.tags,
            self.hostname, self.aggregation
        )
class Output(object):
    """Output parent class

    Base class for queue outputs; subclasses override the lifecycle hooks
    below, all of which default to doing nothing.

    :param config: Dictionary config for this queue (usually read from the
        yaml configuration)
    :param tensor: A TensorService object for interacting with the queue manager
    """

    def __init__(self, config, tensor):
        self.config = config
        self.tensor = tensor

    def createClient(self):
        """Deferred which sets up the output."""

    def eventsReceived(self):
        """Receives a list of events and processes them.

        Arguments:
        events -- list of `tensor.objects.Event`
        """

    def stop(self):
        """Called when the service shuts down."""
class Source(object):
    """Source parent class

    Sources can inherit this object which provides a number of
    utility methods.

    :param config: Dictionary config for this queue (usually read from the
        yaml configuration)
    :param queueBack: A callback method to recieve a list of Event objects
    :param tensor: A TensorService object for interacting with the queue manager
    """

    # When True, tick() skips a run if the previous one is still in flight.
    sync = False

    def __init__(self, config, queueBack, tensor):
        self.config = config
        self.t = task.LoopingCall(self.tick)
        self.td = None  # deferred returned by the running LoopingCall
        self.attributes = None
        self.service = config['service']
        self.inter = float(config['interval'])
        self.ttl = float(config['ttl'])

        if 'tags' in config:
            self.tags = [tag.strip() for tag in config['tags'].split(',')]
        else:
            self.tags = []

        attributes = config.get("attributes")
        if isinstance(attributes, dict):
            self.attributes = attributes

        self.hostname = config.get('hostname')
        if self.hostname is None:
            self.hostname = socket.gethostbyaddr(socket.gethostname())[0]

        self.tensor = tensor
        self.queueBack = self._queueBack(queueBack)
        self.running = False

    def _queueBack(self, caller):
        # Bind this source as the first argument of the queue callback.
        return lambda events: caller(self, events)

    def startTimer(self):
        """Starts the timer for this source"""
        self.td = self.t.start(self.inter)

    def stopTimer(self):
        """Stops the timer for this source"""
        self.td = None
        self.t.stop()

    @defer.inlineCallbacks
    def _get(self):
        # Wrap self.get so plain values and deferreds are handled uniformly.
        event = yield defer.maybeDeferred(self.get)
        if self.config.get('debug', False):
            log.msg("[%s] Tick: %s" % (self.config['service'], event))
        defer.returnValue(event)

    @defer.inlineCallbacks
    def tick(self):
        """Called for every timer tick. Calls self.get which can be a deferred
        and passes that result back to the queueBack method

        Returns a deferred"""
        if self.sync:
            if self.running:
                defer.returnValue(None)
            self.running = True

        try:
            event = yield self._get()
            if event:
                self.queueBack(event)
        # Fix: py2.6+/py3-compatible `as` form -- the old `except
        # Exception, e` is a SyntaxError on Python 3.
        except Exception as e:
            log.msg("[%s] Unhandled error: %s" % (self.service, e))

        self.running = False

    def createEvent(self, state, description, metric, prefix=None,
                    hostname=None, aggregation=None, evtime=None):
        """Creates an Event object from the Source configuration"""
        if prefix:
            service_name = self.service + "." + prefix
        else:
            service_name = self.service

        return Event(state, service_name, description, metric, self.ttl,
            hostname=hostname or self.hostname, aggregation=aggregation,
            evtime=evtime, tags=self.tags, attributes=self.attributes
        )

    def createLog(self, type, data, evtime=None, hostname=None):
        """Creates a log-type Event object from the Source configuration"""
        return Event(None, type, data, 0, self.ttl,
            hostname=hostname or self.hostname, evtime=evtime, tags=self.tags, type='log'
        )

    def get(self):
        # Fix: raise the builtin directly -- the py2-only `exceptions` module
        # does not exist on Python 3 (on py2 exceptions.NotImplementedError
        # is the same builtin object, so behaviour is unchanged).
        raise NotImplementedError()
| |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import mock
import string
import time
from brick import exception
from brick.i18n import _
from brick.initiator import connector
from brick.initiator import host_driver
from brick.openstack.common import log as logging
from brick.openstack.common import loopingcall
from brick.openstack.common import processutils as putils
from brick import test
# Module-level logger (via brick's openstack.common log wrapper).
LOG = logging.getLogger(__name__)
class ConnectorTestCase(test.TestCase):
    """Tests for brick.initiator.connector.InitiatorConnector."""

    def setUp(self):
        super(ConnectorTestCase, self).setUp()
        self.cmds = []  # every command "executed" by fake_execute

    def fake_execute(self, *cmd, **kwargs):
        # Record the command instead of running it.  Fix: ' '.join replaces
        # the py2-only string.join (removed in Python 3); the default
        # separator of string.join was a single space.
        self.cmds.append(' '.join(cmd))
        return "", None

    def test_connect_volume(self):
        # The base connector is abstract: connect must raise.
        self.connector = connector.InitiatorConnector(None)
        self.assertRaises(NotImplementedError,
                          self.connector.connect_volume, None)

    def test_disconnect_volume(self):
        self.connector = connector.InitiatorConnector(None)
        self.assertRaises(NotImplementedError,
                          self.connector.disconnect_volume, None, None)

    def test_factory(self):
        """factory() maps each transport name to its connector class."""
        obj = connector.InitiatorConnector.factory('iscsi', None)
        self.assertEqual(obj.__class__.__name__, "ISCSIConnector")

        obj = connector.InitiatorConnector.factory('fibre_channel', None)
        self.assertEqual(obj.__class__.__name__, "FibreChannelConnector")

        obj = connector.InitiatorConnector.factory('aoe', None)
        self.assertEqual(obj.__class__.__name__, "AoEConnector")

        obj = connector.InitiatorConnector.factory(
            'nfs', None, nfs_mount_point_base='/mnt/test')
        self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")

        obj = connector.InitiatorConnector.factory(
            'glusterfs', None, glusterfs_mount_point_base='/mnt/test')
        self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")

        obj = connector.InitiatorConnector.factory('local', None)
        self.assertEqual(obj.__class__.__name__, "LocalConnector")

        # Unknown transport names are rejected.
        self.assertRaises(ValueError,
                          connector.InitiatorConnector.factory,
                          "bogus", None)

    def test_check_valid_device_with_wrong_path(self):
        self.connector = connector.InitiatorConnector(None)
        self.connector._execute = \
            lambda *args, **kwargs: ("", None)
        self.assertFalse(self.connector.check_valid_device('/d0v'))

    def test_check_valid_device(self):
        self.connector = connector.InitiatorConnector(None)
        self.connector._execute = \
            lambda *args, **kwargs: ("", "")
        self.assertTrue(self.connector.check_valid_device('/dev'))

    def test_check_valid_device_with_cmd_error(self):
        # A failing dd/read command means the device is not valid.
        def raise_except(*args, **kwargs):
            raise putils.ProcessExecutionError
        self.connector = connector.InitiatorConnector(None)
        self.connector._execute = mock.Mock()
        self.connector._execute.side_effect = raise_except
        self.assertFalse(self.connector.check_valid_device('/dev'))
class HostDriverTestCase(test.TestCase):
    """Tests for host_driver.HostDriver.get_all_block_devices()."""

    def setUp(self):
        super(HostDriverTestCase, self).setUp()
        # Use mock.patch with addCleanup instead of assigning to
        # os.path.isdir / os.listdir directly: the original assignments
        # never restored the real functions, leaking the fakes into every
        # test that ran after this class.
        isdir_patcher = mock.patch('os.path.isdir', return_value=True)
        isdir_patcher.start()
        self.addCleanup(isdir_patcher.stop)
        self.devlist = ['device1', 'device2']
        listdir_patcher = mock.patch('os.listdir',
                                     return_value=self.devlist)
        listdir_patcher.start()
        self.addCleanup(listdir_patcher.stop)

    def test_host_driver(self):
        # Every entry listed under /dev/disk/by-path must be returned
        # with its directory prefix attached.
        expected = ['/dev/disk/by-path/' + dev for dev in self.devlist]
        driver = host_driver.HostDriver()
        actual = driver.get_all_block_devices()
        self.assertEqual(expected, actual)
class ISCSIConnectorTestCase(ConnectorTestCase):
    """Tests for the iSCSI initiator connector (connector.ISCSIConnector)."""

    def setUp(self):
        super(ISCSIConnectorTestCase, self).setUp()
        # Connector under test runs every command through fake_execute,
        # so self.cmds (from ConnectorTestCase) records the full sequence.
        self.connector = connector.ISCSIConnector(
            None, execute=self.fake_execute, use_multipath=False)
        # Any by-path lookup resolves to the same fake device node.
        get_name_mock = mock.Mock()
        get_name_mock.return_value = "/dev/sdb"
        self.connector._linuxscsi.get_name_from_path = get_name_mock

    def iscsi_connection(self, volume, location, iqn):
        # Minimal connection_info dict as an iSCSI volume driver would
        # hand to the connector.
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'volume_id': volume['id'],
                'target_portal': location,
                'target_iqn': iqn,
                'target_lun': 1,
            }
        }

    def test_get_initiator(self):
        def initiator_no_file(*args, **kwargs):
            # Simulate the initiator name file being absent.
            raise putils.ProcessExecutionError('No file')

        def initiator_get_text(*arg, **kwargs):
            # Typical /etc/iscsi/initiatorname.iscsi contents.
            text = ('## DO NOT EDIT OR REMOVE THIS FILE!\n'
                    '## If you remove this file, the iSCSI daemon '
                    'will not start.\n'
                    '## If you change the InitiatorName, existing '
                    'access control lists\n'
                    '## may reject this initiator. The InitiatorName must '
                    'be unique\n'
                    '## for each iSCSI initiator. Do NOT duplicate iSCSI '
                    'InitiatorNames.\n'
                    'InitiatorName=iqn.1234-56.foo.bar:01:23456789abc')
            return text, None

        # Missing file -> None; present file -> parsed InitiatorName value.
        self.connector._execute = initiator_no_file
        initiator = self.connector.get_initiator()
        self.assertIsNone(initiator)
        self.connector._execute = initiator_get_text
        initiator = self.connector.get_initiator()
        self.assertEqual(initiator, 'iqn.1234-56.foo.bar:01:23456789abc')

    @test.testtools.skipUnless(os.path.exists('/dev/disk/by-path'),
                               'Test requires /dev/disk/by-path')
    def test_connect_volume(self):
        self.stubs.Set(os.path, 'exists', lambda x: True)
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_info = self.iscsi_connection(vol, location, iqn)
        device = self.connector.connect_volume(connection_info['data'])
        dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
        self.assertEqual(device['type'], 'block')
        self.assertEqual(device['path'], dev_str)
        self.connector.disconnect_volume(connection_info['data'], device)
        # Exact login / rescan / flush / logout command sequence that the
        # connect+disconnect pair is expected to issue, in order.
        expected_commands = [('iscsiadm -m node -T %s -p %s' %
                              (iqn, location)),
                             ('iscsiadm -m session'),
                             ('iscsiadm -m node -T %s -p %s --login' %
                              (iqn, location)),
                             ('iscsiadm -m node -T %s -p %s --op update'
                              ' -n node.startup -v automatic'
                              % (iqn, location)),
                             ('iscsiadm -m node --rescan'),
                             ('iscsiadm -m session --rescan'),
                             ('blockdev --flushbufs /dev/sdb'),
                             ('tee -a /sys/block/sdb/device/delete'),
                             ('iscsiadm -m node -T %s -p %s --op update'
                              ' -n node.startup -v manual' % (iqn, location)),
                             ('iscsiadm -m node -T %s -p %s --logout' %
                              (iqn, location)),
                             ('iscsiadm -m node -T %s -p %s --op delete' %
                              (iqn, location)), ]
        LOG.debug("self.cmds = %s" % self.cmds)
        LOG.debug("expected = %s" % expected_commands)
        self.assertEqual(expected_commands, self.cmds)

    def test_connect_volume_with_multipath(self):
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_properties = self.iscsi_connection(vol, location, iqn)
        # Separate connector instance with multipath enabled; every
        # multipath-touching internal is stubbed out below.
        self.connector_with_multipath = \
            connector.ISCSIConnector(None, use_multipath=True)
        self.connector_with_multipath._run_iscsiadm_bare = \
            lambda *args, **kwargs: "%s %s" % (location, iqn)
        portals_mock = mock.Mock()
        portals_mock.return_value = [[location, iqn]]
        self.connector_with_multipath.\
            _get_target_portals_from_iscsiadm_output = portals_mock
        connect_to_mock = mock.Mock()
        connect_to_mock.return_value = None
        self.connector_with_multipath._connect_to_iscsi_portal = \
            connect_to_mock
        rescan_iscsi_mock = mock.Mock()
        rescan_iscsi_mock.return_value = None
        self.connector_with_multipath._rescan_iscsi = rescan_iscsi_mock
        rescan_multipath_mock = mock.Mock()
        rescan_multipath_mock.return_value = None
        self.connector_with_multipath._rescan_multipath = \
            rescan_multipath_mock
        get_device_mock = mock.Mock()
        get_device_mock.return_value = 'iqn.2010-10.org.openstack:%s' % name
        self.connector_with_multipath._get_multipath_device_name = \
            get_device_mock
        # NOTE(review): os.path.exists is replaced globally and never
        # restored -- this leaks into later tests; confirm intent.
        exists_mock = mock.Mock()
        exists_mock.return_value = True
        os.path.exists = exists_mock
        result = self.connector_with_multipath.connect_volume(
            connection_properties['data'])
        # The returned path is whatever _get_multipath_device_name gave.
        expected_result = {'path': 'iqn.2010-10.org.openstack:volume-00000001',
                           'type': 'block'}
        self.assertEqual(result, expected_result)

    def test_connect_volume_with_not_found_device(self):
        # Device node never appears (exists -> False) and sleeping is a
        # no-op, so the connector's wait loop exhausts immediately.
        exists_mock = mock.Mock()
        exists_mock.return_value = False
        os.path.exists = exists_mock
        sleep_mock = mock.Mock()
        sleep_mock.return_value = None
        time.sleep = sleep_mock
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_info = self.iscsi_connection(vol, location, iqn)
        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector.connect_volume,
                          connection_info['data'])

    def test_get_target_portals_from_iscsiadm_output(self):
        # Local name shadows the module-level `connector` import on purpose.
        connector = self.connector
        test_output = '''10.15.84.19:3260 iqn.1992-08.com.netapp:sn.33615311
                         10.15.85.19:3260 iqn.1992-08.com.netapp:sn.33615311'''
        res = connector._get_target_portals_from_iscsiadm_output(test_output)
        ip_iqn1 = ['10.15.84.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
        ip_iqn2 = ['10.15.85.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
        expected = [ip_iqn1, ip_iqn2]
        self.assertEqual(expected, res)

    def test_get_multipath_device_name(self):
        realpath = mock.Mock()
        realpath.return_value = None
        os.path.realpath = realpath
        # One line of `multipath -l` style output.
        multipath_return_string = [('mpath2 (20017380006c00036)'
                                    'dm-7 IBM,2810XIV')]
        self.connector._run_multipath = \
            lambda *args, **kwargs: multipath_return_string
        expected = '/dev/mapper/mpath2'
        self.assertEqual(expected,
                         self.connector.
                         _get_multipath_device_name('/dev/md-1'))

    def test_get_iscsi_devices(self):
        paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
                  'com.netapp:node.netapp02-lun-0')]
        # Fake os.walk yielding one directory entry containing `paths`.
        walk_mock = lambda x: [(['.'], ['by-path'], paths)]
        os.walk = walk_mock
        self.assertEqual(self.connector._get_iscsi_devices(), paths)

    def test_get_iscsi_devices_with_empty_dir(self):
        walk_mock = mock.Mock()
        walk_mock.return_value = []
        os.walk = walk_mock
        self.assertEqual(self.connector._get_iscsi_devices(), [])

    def test_get_multipath_iqn(self):
        paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
                  'com.netapp:node.netapp02-lun-0')]
        realpath = lambda x: '/dev/disk/by-path/%s' % paths[0]
        os.path.realpath = realpath
        get_iscsi_mock = mock.Mock()
        get_iscsi_mock.return_value = paths
        self.connector._get_iscsi_devices = get_iscsi_mock
        get_multipath_device_mock = mock.Mock()
        get_multipath_device_mock.return_value = paths[0]
        self.connector._get_multipath_device_name = get_multipath_device_mock
        # The IQN is parsed back out of the by-path device name.
        self.assertEqual(self.connector._get_multipath_iqn(paths[0]),
                         'iqn.2013-01.ro.com.netapp:node.netapp02')

    def test_disconnect_volume_multipath_iscsi(self):
        result = []

        def fake_disconnect_from_iscsi_portal(properties):
            result.append(properties)

        iqn1 = 'iqn.2013-01.ro.com.netapp:node.netapp01'
        iqn2 = 'iqn.2013-01.ro.com.netapp:node.netapp02'
        iqns = [iqn1, iqn2]
        portal = '10.0.0.1:3260'
        dev = ('ip-%s-iscsi-%s-lun-0' % (portal, iqn1))
        # NOTE(review): the five mocks below are created but never attached
        # to self.connector, so the real methods still run -- presumably a
        # stubs->mock conversion left these dangling; confirm intent.
        get_portals_mock = mock.Mock()
        get_portals_mock.return_value = [[portal, iqn1]]
        rescan_iscsi_mock = mock.Mock()
        rescan_iscsi_mock.return_value = None
        rescan_multipath = mock.Mock()
        rescan_multipath.return_value = None
        get_block_devices_mock = mock.Mock()
        get_block_devices_mock.return_value = [dev, '/dev/mapper/md-1']
        get_multipath_name_mock = mock.Mock()
        get_multipath_name_mock.return_value = '/dev/mapper/md-3'
        # Successive calls return iqn2 then iqn1 (list is popped from end).
        self.connector._get_multipath_iqn = lambda x: iqns.pop()
        disconnect_mock = fake_disconnect_from_iscsi_portal
        self.connector._disconnect_from_iscsi_portal = disconnect_mock
        fake_property = {'target_portal': portal,
                         'target_iqn': iqn1}
        self.connector._disconnect_volume_multipath_iscsi(fake_property,
                                                          'fake/multipath')
        # Target in use by other mp devices, don't disconnect
        self.assertEqual([], result)

    def test_disconnect_volume_multipath_iscsi_without_other_mp_devices(self):
        result = []

        def fake_disconnect_from_iscsi_portal(properties):
            result.append(properties)

        portal = '10.0.2.15:3260'
        name = 'volume-00000001'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        get_portals_mock = mock.Mock()
        get_portals_mock.return_value = [[portal, iqn]]
        self.connector._get_target_portals_from_iscsiadm_output = \
            get_portals_mock
        rescan_iscsi_mock = mock.Mock()
        rescan_iscsi_mock.return_value = None
        self.connector._rescan_iscsi = rescan_iscsi_mock
        rescan_multipath_mock = mock.Mock()
        rescan_multipath_mock.return_value = None
        self.connector._rescan_multipath = rescan_multipath_mock
        # No other block devices on the host -> target is safe to drop.
        get_all_devices_mock = mock.Mock()
        get_all_devices_mock.return_value = []
        self.connector.driver.get_all_block_devices = get_all_devices_mock
        self.connector._disconnect_from_iscsi_portal = \
            fake_disconnect_from_iscsi_portal
        fake_property = {'target_portal': portal,
                         'target_iqn': iqn}
        self.connector._disconnect_volume_multipath_iscsi(fake_property,
                                                          'fake/multipath')
        # Target not in use by other mp devices, disconnect
        self.assertEqual([fake_property], result)
class FibreChannelConnectorTestCase(ConnectorTestCase):
    """Tests for connector.FibreChannelConnector."""

    def setUp(self):
        super(FibreChannelConnectorTestCase, self).setUp()
        self.connector = connector.FibreChannelConnector(
            None, execute=self.fake_execute, use_multipath=False)
        self.assertIsNotNone(self.connector)
        self.assertIsNotNone(self.connector._linuxfc)
        self.assertIsNotNone(self.connector._linuxscsi)

    def fake_get_fc_hbas(self):
        # Single fake HBA, shaped like the sysfs fc_host attribute dump.
        return [{'ClassDevice': 'host1',
                 'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
                                    '/0000:05:00.2/host1/fc_host/host1',
                 'dev_loss_tmo': '30',
                 'fabric_name': '0x1000000533f55566',
                 'issue_lip': '<store method only>',
                 'max_npiv_vports': '255',
                 'maxframe_size': '2048 bytes',
                 'node_name': '0x200010604b019419',
                 'npiv_vports_inuse': '0',
                 'port_id': '0x680409',
                 'port_name': '0x100010604b019419',
                 'port_state': 'Online',
                 'port_type': 'NPort (fabric via point-to-point)',
                 'speed': '10 Gbit',
                 'supported_classes': 'Class 3',
                 'supported_speeds': '10 Gbit',
                 'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
                 'tgtid_bind_type': 'wwpn (World Wide Port Name)',
                 'uevent': None,
                 'vport_create': '<store method only>',
                 'vport_delete': '<store method only>'}]

    def fake_get_fc_hbas_info(self):
        # Condensed view of the HBA above, with the '0x' prefixes stripped
        # from the WWN fields.
        hbas = self.fake_get_fc_hbas()
        info = [{'port_name': hbas[0]['port_name'].replace('0x', ''),
                 'node_name': hbas[0]['node_name'].replace('0x', ''),
                 'host_device': hbas[0]['ClassDevice'],
                 'device_path': hbas[0]['ClassDevicePath']}]
        return info

    def fibrechan_connection(self, volume, location, wwn):
        # Minimal connection_info dict as an FC volume driver would produce.
        return {'driver_volume_type': 'fibrechan',
                'data': {
                    'volume_id': volume['id'],
                    'target_portal': location,
                    'target_wwn': wwn,
                    'target_lun': 1,
                }}

    def test_connect_volume(self):
        self.connector._linuxfc.get_fc_hbas = self.fake_get_fc_hbas
        self.connector._linuxfc.get_fc_hbas_info = \
            self.fake_get_fc_hbas_info
        # NOTE(review): os.path.exists/realpath are replaced globally and
        # never restored after this test.
        exists_mock = mock.Mock()
        exists_mock.return_value = True
        os.path.exists = exists_mock
        realpath_mock = mock.Mock()
        realpath_mock.return_value = '/dev/sdb'
        os.path.realpath = realpath_mock
        multipath_devname = '/dev/md-1'
        devices = {"device": multipath_devname,
                   "id": "1234567890",
                   "devices": [{'device': '/dev/sdb',
                                'address': '1:0:0:1',
                                'host': 1, 'channel': 0,
                                'id': 0, 'lun': 1}]}
        find_device_mock = mock.Mock()
        find_device_mock.return_value = devices
        self.connector._linuxscsi.find_multipath_device = find_device_mock
        remove_device_mock = mock.Mock()
        remove_device_mock.return_value = None
        self.connector._linuxscsi.remove_scsi_device = remove_device_mock
        get_device_info_mock = mock.Mock()
        get_device_info_mock.return_value = devices['devices'][0]
        self.connector._linuxscsi.get_device_info = get_device_info_mock
        location = '10.0.2.15:3260'
        name = 'volume-00000001'
        vol = {'id': 1, 'name': name}
        # Should work for string, unicode, and list
        # (unicode() is Python-2 only; this module targets Python 2.)
        wwns = ['1234567890123456', unicode('1234567890123456'),
                ['1234567890123456', '1234567890123457']]
        for wwn in wwns:
            connection_info = self.fibrechan_connection(vol, location, wwn)
            dev_info = self.connector.connect_volume(connection_info['data'])
            # For a list of WWNs the first entry determines the by-path name.
            exp_wwn = wwn[0] if isinstance(wwn, list) else wwn
            dev_str = ('/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' %
                       exp_wwn)
            self.assertEqual(dev_info['type'], 'block')
            self.assertEqual(dev_info['path'], dev_str)
            self.connector.disconnect_volume(connection_info['data'], dev_info)
            # All command execution was mocked away, so nothing is recorded.
            expected_commands = []
            self.assertEqual(expected_commands, self.cmds)
        # Should not work for anything other than string, unicode, and list
        connection_info = self.fibrechan_connection(vol, location, 123)
        self.assertRaises(exception.NoFibreChannelHostsFound,
                          self.connector.connect_volume,
                          connection_info['data'])
        # With no HBAs present at all the same error must be raised.
        get_fc_hbas_mock = mock.Mock()
        get_fc_hbas_mock.return_value = []
        self.connector._linuxfc.get_fc_hbas = get_fc_hbas_mock
        get_fc_hbas_info_mock = mock.Mock()
        get_fc_hbas_info_mock.return_value = []
        self.connector._linuxfc.get_fc_hbas_info = get_fc_hbas_info_mock
        self.assertRaises(exception.NoFibreChannelHostsFound,
                          self.connector.connect_volume,
                          connection_info['data'])
class FakeFixedIntervalLoopingCall(object):
    """Synchronous stand-in for loopingcall.FixedIntervalLoopingCall.

    Invokes the wrapped callable in a plain loop -- no timer, no sleep --
    until stop() is called or the callable signals completion by raising
    loopingcall.LoopingCallDone.
    """

    def __init__(self, f=None, *args, **kw):
        self.f = f
        self.args = args
        self.kw = kw
        self._stop = False

    def stop(self):
        # Arrange for the next loop check in start() to terminate.
        self._stop = True

    def wait(self):
        # Nothing runs asynchronously, so there is nothing to wait on.
        return self

    def start(self, interval, initial_delay=None):
        # interval/initial_delay are accepted for interface compatibility
        # only; iterations run back to back.
        while True:
            if self._stop:
                break
            try:
                self.f(*self.args, **self.kw)
            except loopingcall.LoopingCallDone:
                return self
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                raise
class AoEConnectorTestCase(ConnectorTestCase):
    """Test cases for AoE initiator class."""

    def setUp(self):
        super(AoEConnectorTestCase, self).setUp()
        self.connector = connector.AoEConnector('sudo')
        self.connection_properties = {'target_shelf': 'fake_shelf',
                                      'target_lun': 'fake_lun'}
        # Substitute the synchronous fake defined above so the connector's
        # retry loop runs inline instead of on a timer.
        loopingcall.FixedIntervalLoopingCall = FakeFixedIntervalLoopingCall

    def _mock_path_exists(self, aoe_path, mock_values=None):
        # NOTE(review): mock_values (a list) is assigned as a constant
        # return_value, so os.path.exists always returns the same truthy
        # list -- successive per-call values would require side_effect;
        # confirm intent. Also replaces os.path.exists globally without
        # restoring it.
        exists_mock = mock.Mock()
        exists_mock.return_value = mock_values
        os.path.exists = exists_mock

    def test_connect_volume(self):
        """Ensure connect succeeds when the AoE path already exists."""
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)
        self._mock_path_exists(aoe_path, [True, True])
        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock
        self.connector.connect_volume(self.connection_properties)

    def test_connect_volume_without_path(self):
        """Ensure discovery is performed and the device info is returned."""
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)
        expected_info = {
            'type': 'block',
            'device': aoe_device,
            'path': aoe_path,
        }
        self._mock_path_exists(aoe_path, [False, True])
        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock
        volume_info = self.connector.connect_volume(
            self.connection_properties)
        self.assertDictMatch(volume_info, expected_info)

    def test_connect_volume_could_not_discover_path(self):
        # os.path.exists always False -> the device never appears and the
        # connector must give up with VolumeDeviceNotFound.
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)
        exists_mock = mock.Mock()
        exists_mock.return_value = False
        os.path.exists = exists_mock
        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock
        self.assertRaises(exception.VolumeDeviceNotFound,
                          self.connector.connect_volume,
                          self.connection_properties)

    def test_disconnect_volume(self):
        """Ensure disconnect runs cleanly when the AoE path exists."""
        aoe_device, aoe_path = self.connector._get_aoe_info(
            self.connection_properties)
        self._mock_path_exists(aoe_path, [True])
        exec_mock = mock.Mock()
        exec_mock.return_value = ["", ""]
        self.connector._execute = exec_mock
        self.connector.disconnect_volume(self.connection_properties, {})
class RemoteFsConnectorTestCase(ConnectorTestCase):
    """Test cases for Remote FS initiator class."""

    # Fake NFS export and its expected local mount point.
    TEST_DEV = '172.18.194.100:/var/nfs'
    TEST_PATH = '/mnt/test/df0808229363aad55c27da50c38d6328'

    def setUp(self):
        super(RemoteFsConnectorTestCase, self).setUp()
        self.connection_properties = {
            'export': self.TEST_DEV,
            'name': '9c592d52-ce47-4263-8c21-4ecf3c029cdb'}
        self.connector = connector.RemoteFsConnector(
            'nfs', root_helper='sudo', nfs_mount_point_base='/mnt/test',
            nfs_mount_options='vers=3')

    def test_connect_volume(self):
        """Test the basic connect volume case."""
        # Stub the underlying remotefs client so no real mount is attempted.
        client = self.connector._remotefsclient
        client.mount = mock.Mock()
        client.get_mount_point = mock.Mock()
        client.get_mount_point.return_value = "ass"
        self.connector.connect_volume(self.connection_properties)

    def test_disconnect_volume(self):
        """Nothing should happen here -- make sure it doesn't blow up."""
        self.connector.disconnect_volume(self.connection_properties, {})
class LocalConnectorTestCase(test.TestCase):
    """Tests for connector.LocalConnector (locally attached devices)."""

    def setUp(self):
        super(LocalConnectorTestCase, self).setUp()
        self.connection_properties = {'name': 'foo',
                                      'device_path': '/tmp/bar'}

    def test_connect_volume(self):
        self.connector = connector.LocalConnector(None)
        props = self.connection_properties
        info = self.connector.connect_volume(props)
        # A local volume is reported verbatim: type 'local', path as given.
        self.assertEqual(info['type'], 'local')
        self.assertEqual(info['path'], props['device_path'])

    def test_connect_volume_with_invalid_connection_data(self):
        self.connector = connector.LocalConnector(None)
        # Connection data without a 'device_path' must be rejected.
        self.assertRaises(ValueError,
                          self.connector.connect_volume, {})
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GuestConfigurationAssignmentsOperations:
    """GuestConfigurationAssignmentsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.guestconfig.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated code -- regeneration overwrites manual edits.

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def create_or_update(
        self,
        guest_configuration_assignment_name: str,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.GuestConfigurationAssignment",
        **kwargs: Any
    ) -> "_models.GuestConfigurationAssignment":
        """Creates an association between a VM and guest configuration.

        :param guest_configuration_assignment_name: Name of the guest configuration assignment.
        :type guest_configuration_assignment_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the create or update guest configuration assignment.
        :type parameters: ~azure.mgmt.guestconfig.models.GuestConfigurationAssignment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GuestConfigurationAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.guestconfig.models.GuestConfigurationAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestConfigurationAssignment"]
        # Map auth/not-found/conflict statuses to typed azure-core errors;
        # callers may extend the mapping via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-25"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'guestConfigurationAssignmentName': self._serialize.url("guest_configuration_assignment_name", guest_configuration_assignment_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'GuestConfigurationAssignment')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both return the assignment body.
        if response.status_code == 200:
            deserialized = self._deserialize('GuestConfigurationAssignment', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('GuestConfigurationAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{guestConfigurationAssignmentName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        guest_configuration_assignment_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> "_models.GuestConfigurationAssignment":
        """Get information about a guest configuration assignment.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param guest_configuration_assignment_name: The guest configuration assignment name.
        :type guest_configuration_assignment_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GuestConfigurationAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.guestconfig.models.GuestConfigurationAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestConfigurationAssignment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-25"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'guestConfigurationAssignmentName': self._serialize.url("guest_configuration_assignment_name", guest_configuration_assignment_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GuestConfigurationAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{guestConfigurationAssignmentName}'}  # type: ignore

    async def delete(
        self,
        resource_group_name: str,
        guest_configuration_assignment_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Delete a guest configuration assignment.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param guest_configuration_assignment_name: Name of the guest configuration assignment.
        :type guest_configuration_assignment_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-25"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'guestConfigurationAssignmentName': self._serialize.url("guest_configuration_assignment_name", guest_configuration_assignment_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{guestConfigurationAssignmentName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.GuestConfigurationAssignmentList"]:
        """List all guest configuration assignments for a virtual machine.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GuestConfigurationAssignmentList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.guestconfig.models.GuestConfigurationAssignmentList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestConfigurationAssignmentList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-25"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'vmName': self._serialize.url("vm_name", vm_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation: the service-provided next_link already
                # carries the full query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('GuestConfigurationAssignmentList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # First element would be the next-page link; this API does not
            # page, so None is returned.
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments'}  # type: ignore
| |
import json
import uuid
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.template import RequestContext
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from coinbase4py.coinbasev1 import CoinbaseV1
from webapp.models import CoinbaseUser,CoinbaseButton
from webapp.forms import ButtonForm
# Create your views here.
def index(request,
          template_name="index.html"):
    """Render the public landing page with an empty template context."""
    empty_context = {}
    return render_to_response(template_name, empty_context,
                              context_instance=RequestContext(request))
@login_required(login_url='/login.html')
def home(request,
         template_name="home.html"):
    """Dashboard view: list the user's Coinbase payment buttons and, on a
    valid POST of ButtonForm, create a new payment button via the
    Coinbase API and record it locally.
    """
    # Look up the Coinbase profile linked to the logged-in Django user.
    cbuser = CoinbaseUser.objects.get(user=request.user)
    user_buttons = CoinbaseButton.objects.filter(owner=cbuser)
    context = {"user_buttons": user_buttons}
    # Create the Coinbase API client instance.
    client_coinbase = CoinbaseV1()
    # Fetch the Coinbase user object using the stored OAuth token pair.
    cbuser_response = client_coinbase.get_oauth_users(
        cbuser.coinbase_access_token,
        cbuser.coinbase_refresh_token,
        settings.COINBASE_OAUTH_CLIENT_ID,
        settings.COINBASE_OAUTH_CLIENT_SECRET)
    oauth_user = cbuser_response['users'][0]['user']
    # Pretty-printed JSON rendered on the page for inspection.
    context['coinbase_user_json'] = \
        json.dumps(oauth_user, sort_keys=True, indent=4, separators=(',', ': '))
    if request.method == 'GET':
        button_form = ButtonForm()
        context['button_form'] = button_form
    elif request.method == 'POST':
        button_form = ButtonForm(request.POST)
        context['button_form'] = button_form
        if button_form.is_valid():
            # Make a button id that will persist for the payment callback.
            button_guuid = str(uuid.uuid1())
            # When the button is paid a callback to the application
            # will be made with the transaction details.
            callback_url = '{0}/{1}/?secret={2}'.format(
                settings.COINBASE_ORDER_CALLBACK,
                cbuser.user.username,
                cbuser.coinbase_callback_secret)
            button_request = {
                'button': {
                    'name': '{0} {1}'.format(str(button_form.cleaned_data['payment_type']), button_guuid),
                    'custom': button_guuid,
                    'description': str(button_form.cleaned_data['description']),
                    'price_string': button_form.cleaned_data['amount'],
                    'price_currency_iso': 'BTC',
                    'button_type': str(button_form.cleaned_data['payment_type']),
                    'callback_url': callback_url
                }
            }
            button_response = client_coinbase.make_button(
                button_request,
                cbuser.coinbase_access_token,
                cbuser.coinbase_refresh_token,
                settings.COINBASE_OAUTH_CLIENT_ID,
                settings.COINBASE_OAUTH_CLIENT_SECRET)
            # The API call rotates the OAuth tokens; persist the fresh pair
            # so the next request does not fail with a stale token.
            cbuser.coinbase_refresh_token = button_response['refresh_token']
            cbuser.coinbase_access_token = button_response['access_token']
            cbuser.save()
            # NOTE(review): assumes 'error_code' is always present in the
            # response dict -- confirm against the coinbase4py client.
            if button_response['error_code']:
                return render_to_response('error.html',
                                          {'error': '{0}\n{1}'.format(
                                              json.dumps(button_request),
                                              json.dumps(button_response),
                                          )},
                                          context_instance=RequestContext(request))
            else:
                # Record the created button locally so it shows up in
                # user_buttons on the next render.
                # NOTE(review): ``button_created`` is never used afterwards.
                button_created = CoinbaseButton.objects.create(
                    code=button_response['button']['code'],
                    external_id=button_guuid,
                    button_response=json.dumps(button_response),
                    button_guid=button_guuid,
                    callback_url=callback_url,
                    type=str(button_form.cleaned_data['payment_type']),
                    owner=cbuser,
                    enabled=True)
    return render_to_response(template_name,
                              context,
                              context_instance=RequestContext(request))
def logout_user(request):
    """Terminate the session and bounce the browser to the landing page."""
    logout(request)
    landing = '{0}/index.html'.format(settings.COINBASE4PY_APP_URL)
    return HttpResponseRedirect(landing)
def login_user(request):
    """Start the Coinbase OAuth flow by redirecting to the provider.

    Coinbase authenticates the user and, once the application is
    authorized, sends the browser back to
    settings.COINBASE_OAUTH_CLIENT_CALLBACK.
    """
    client = CoinbaseV1()
    authorize_url = client.get_oauth_redirect(
        settings.COINBASE_OAUTH_CLIENT_ID,
        settings.COINBASE_OAUTH_CLIENT_CALLBACK)
    return HttpResponseRedirect(authorize_url)
def cb_auth_redirect(request, template_name="home.html"):
    """Handle the Coinbase OAuth callback.

    Exchanges the ``code`` query parameter for an access/refresh token
    pair, fetches the Coinbase user profile, then finds or creates the
    matching local ``User``/``CoinbaseUser`` pair and logs the user in.

    Fixes vs. the previous version:
    - the raw OAuth token responses were printed to stdout; tokens are
      credentials and must never be logged, so the prints are replaced
      with token-free debug log lines;
    - the authentication-failure message referred to a setting
      (GITPATRON_PW_SECRET_KEY) that does not exist in this project.
    """
    import logging
    logger = logging.getLogger(__name__)
    context = {}
    if request.method == 'GET':
        coinbase_client = CoinbaseV1()
        # Exchange the one-time authorization code for OAuth tokens.
        response_obj = coinbase_client.post_oauth_response(
            request.GET['code'],
            settings.COINBASE_OAUTH_CLIENT_CALLBACK,
            settings.COINBASE_OAUTH_CLIENT_ID,
            settings.COINBASE_OAUTH_CLIENT_SECRET)
        logger.debug('Coinbase OAuth code exchange completed')
        # Fetch the Coinbase user record with the fresh access token.
        cbuser_response = coinbase_client.get_oauth_users(
            response_obj['access_token'],
            response_obj['refresh_token'],
            settings.COINBASE_OAUTH_CLIENT_ID,
            settings.COINBASE_OAUTH_CLIENT_SECRET)
        logger.debug('Fetched Coinbase user profile')
        oauth_user = cbuser_response['users'][0]['user']
        # Find the local account by email; create it on first login.
        cbuser = None
        try:
            cbuser = CoinbaseUser.objects.get(user__email=oauth_user['email'])
            # The users call rotates the tokens; persist the fresh pair.
            cbuser.coinbase_refresh_token = cbuser_response['refresh_token']
            cbuser.coinbase_access_token = cbuser_response['access_token']
            cbuser.save()
        except ObjectDoesNotExist:
            # Every user gets the same password because authentication is
            # delegated to OAuth; build a real password workflow if an
            # extra level of security is ever needed.
            user = User.objects.create_user(
                oauth_user['email'],
                oauth_user['email'],
                settings.COINBASE4PY_PW_SECRET_KEY)
            cbuser = CoinbaseUser.objects.create(
                user=user,
                name=oauth_user['name'],
                coinbase_access_token=response_obj['access_token'],
                coinbase_refresh_token=response_obj['refresh_token'],
                coinbase_callback_secret=settings.COINBASE4PY_PW_SECRET_KEY
            )
        if cbuser:
            auth_user = authenticate(username=oauth_user['email'],
                                     password=settings.COINBASE4PY_PW_SECRET_KEY)
            if auth_user is not None:
                if auth_user.is_active:
                    login(request, auth_user)
                    # Redirect to the dashboard; needs the full app URL.
                    return HttpResponseRedirect(
                        '{0}/home.html'.format(settings.COINBASE4PY_APP_URL))
                else:
                    return render_to_response(
                        'error.html',
                        {'message': 'auth user is not empty but is inactive'},
                        context_instance=RequestContext(request))
            else:
                return render_to_response(
                    'error.html',
                    {'message': 'auth user is empty or (most likely) the COINBASE4PY_PW_SECRET_KEY is incorrect '},
                    context_instance=RequestContext(request))
        # Use the new user to make the home page.
        context['cbuser'] = cbuser
    return render_to_response(template_name,
                              context,
                              context_instance=RequestContext(request))
@require_http_methods(["POST"])
@csrf_exempt
def cbcallback(request, button_owner_username):
    """Coinbase order-callback endpoint.

    CSRF is exempted because the request originates from Coinbase, not
    from a browser session. Currently acknowledges with an empty JSON
    object without processing the payload.
    """
    payload = {}
    return HttpResponse(json.dumps(payload), mimetype='application/json')
| |
from collections import defaultdict
import threading
from .. import BaseResponder
from ..lib import MemoryCache, get_url, parse_command, catch_other
from ...message import Message
from ...signals import on_exception, message_out, config_changed
class APIError(Exception):
    """Raised when a Github API request fails or yields no usable data."""
    pass
class EventParser(object):
    """Converts events downloaded from the API into readable texts.

    Each ``parse_<EventType>`` method receives a list of raw event dicts
    of that type and returns a list of human readable strings; ``parse``
    dispatches a whole ``{event_type: [events]}`` dict to those methods.
    """

    def parse_CreateEvent(self, events):
        """Summarize created repositories, branches and tags."""
        repos = []
        branches = []
        tags = []
        for event in events:
            ref_type = event['payload']['ref_type']
            if ref_type == 'repository':
                repos.append(event['repo']['name'])
            if ref_type == 'branch':
                branches.append(event['payload']['ref'])
            if ref_type == 'tag':
                # Bug fix: tag refs were previously appended to ``branches``,
                # so the 'created tags' line below was unreachable.
                tags.append(event['payload']['ref'])
        text = []
        if repos:
            text.append('created repositories: %s' % ', '.join(repos))
        if branches:
            text.append('created branches: %s' % ', '.join(branches))
        if tags:
            text.append('created tags: %s' % ', '.join(tags))
        return text

    def parse_ForkEvent(self, events):
        """List the URLs of new forks."""
        forks = [e['payload']['forkee']['html_url'] for e in events]
        text = 'forked to: %s' % ', '.join(forks)
        return [text]

    def parse_IssueCommentEvent(self, events):
        """List the URLs of new issue comments."""
        comments = [e['payload']['comment']['html_url'] for e in events]
        text = 'issue comments created: %s' % ', '.join(comments)
        return [text]

    def parse_IssuesEvent(self, events):
        """Report issue state changes (opened, closed, ...)."""
        actions = []
        for e in events:
            actions.append('%s was %s' % (e['payload']['issue']['html_url'], e['payload']['action']))
        text = 'issues: %s' % ', '.join(actions)
        return [text]

    def parse_PullRequestEvent(self, events):
        """Report pull request state changes."""
        actions = []
        for e in events:
            actions.append('%s was %s' % (e['payload']['pull_request']['html_url'], e['payload']['action']))
        text = 'pull requests: %s' % ', '.join(actions)
        return [text]

    def parse_PushEvent(self, events):
        """Report the number of commits pushed to each ref."""
        texts = []
        for e in events:
            text = '%s commits to %s' % (e['payload']['size'], e['payload']['ref'])
            texts.append(text)
        return texts

    def parse_ReleaseEvent(self, events):
        """Report release actions (published, ...)."""
        actions = []
        for e in events:
            actions.append('%s was %s' % (e['payload']['release']['html_url'], e['payload']['action']))
        text = 'releases: %s' % ', '.join(actions)
        return [text]

    def parse_WatchEvent(self, events):
        """Report who starred the repository."""
        starred_by = [e['actor']['login'] for e in events]
        text = 'starred by: %s' % ', '.join(starred_by)
        return [text]

    def parse(self, event_dict):
        """Call this to convert `event_dict` into a list of human readable
        strings.

        Event dict should contain events of the same type grouped under one
        key:

            {
                '<event_type>': [ {<event_data>}, ... ]
            }

        Event types without a matching ``parse_`` method are silently
        skipped.
        """
        texts = []
        for event_type, events in event_dict.items():
            f = getattr(self, 'parse_' + event_type, None)
            if f is not None:
                texts.extend(f(events))
        return texts
class GithubAPI(object):
    """Thin wrapper around the public Github REST API with caching and
    incremental event polling."""

    url_root = 'https://api.github.com'

    def __init__(self):
        self._repo_cache = MemoryCache(default_timeout=600)
        self._user_cache = MemoryCache(default_timeout=600)
        # Maps '<owner>/<repo>' to the id of the last processed event.
        self._last_events = {}
        self._ep = EventParser()

    def _get(self, url, **params):
        """Perform an API GET request.

        params: GET request parameters.
        """
        full_url = self.url_root + url
        try:
            response = get_url(full_url, params=params)
            response.raise_for_status()
            return response.json()
        except Exception:
            raise APIError('API error')

    def _cached_search(self, cache, endpoint, q):
        """Return a cached search result, querying the API on a miss."""
        hit = cache.get(q)
        if hit is None:
            hit = self._get(endpoint, q=q)
            cache.set(q, hit)
        return hit

    def search_repositories(self, q):
        """Search repositories matching ``q`` (cached)."""
        return self._cached_search(self._repo_cache, '/search/repositories', q)

    def search_users(self, q):
        """Search users matching ``q`` (cached)."""
        return self._cached_search(self._user_cache, '/search/users', q)

    def get_raw_repo_events(self, owner, repo):
        """Gets the fresh event data directly from the API."""
        return self._get('/repos/%s/%s/events' % (owner, repo))

    def get_new_repo_events(self, owner, repo):
        """Fetch fresh events and return only those newer than the last
        call, grouped by event type."""
        key = '%s/%s' % (owner, repo)
        last_id = self._last_events.get(key, -1)
        highest_id = -1
        events = defaultdict(list)
        for event in self.get_raw_repo_events(owner, repo):
            event['id'] = int(event['id'])
            if event['id'] > highest_id:
                highest_id = event['id']
            # On the very first poll (last_id < 0) nothing is reported, to
            # avoid replaying the repository's entire recent history.
            if last_id >= 0 and event['id'] > last_id:
                events[event['type']].append(event)
        self._last_events[key] = highest_id
        return events

    def get_event_texts(self, owner, repo):
        """Returns a new array with human readable strings about events in
        the repository which occured since the last call to this function
        with the same parameters.
        """
        return self._ep.parse(self.get_new_repo_events(owner, repo))
class Github(BaseResponder):
    """Implements Github search and tracks Github repository events.

    Events from tracked repositories are polled every ``deltatime``
    seconds in a background thread and announced on the configured IRC
    channels.

    Example module config:

        "botnet": {
            "github": {
                "track": [
                    {
                        "owner": "boreq",
                        "repo": "botnet",
                        "channels": ["#botnet-dev"]
                    }
                ]
            }
        }
    """

    config_namespace = 'botnet'
    config_name = 'github'
    # Factory for the API wrapper, kept as an attribute so tests or
    # subclasses can substitute it.
    api_class = GithubAPI
    # Seconds between two polls of the Github event API.
    deltatime = 300

    def __init__(self, config):
        super(Github, self).__init__(config)
        self.api = self.api_class()

    def start(self):
        super(Github, self).start()
        # Run the code checking the events in a separate thread.
        self.stop_event = threading.Event()
        self.t = threading.Thread(target=self.run)
        self.t.start()

    def stop(self):
        super(Github, self).stop()
        # Wake the polling loop so it exits promptly.
        self.stop_event.set()

    def run(self):
        """Runs in a separate thread to query the event API periodically."""
        while not self.stop_event.is_set():
            try:
                self.update()
                # wait() doubles as an interruptible sleep between polls.
                self.stop_event.wait(self.deltatime)
            except Exception as e:
                on_exception.send(self, e=e)

    def update(self):
        """Queries the event API and announces new events on the
        subscribed channels."""
        self.logger.debug('Performing event update')
        for data in self.config_get('track', []):
            try:
                # Prepare the text describing the new events.
                texts = self.api.get_event_texts(data['owner'], data['repo'])
                info = 'https://github.com/{owner}/{repo} new events: '.format(
                    owner=data['owner'],
                    repo=data['repo']
                )
                text = info + ' | '.join(texts)
                # Send the text only when there is something to report.
                if texts:
                    for channel in data['channels']:
                        msg = Message(command='PRIVMSG', params=[channel, text])
                        message_out.send(self, msg=msg)
            except Exception as e:
                # One failing repository must not break the other updates.
                on_exception.send(self, e=e)

    @catch_other(APIError, 'API error')
    def get_repo(self, phrase):
        """Return the html_url of the best repository match for ``phrase``."""
        r = self.api.search_repositories(phrase)
        return self.get_first(r)

    @catch_other(APIError, 'API error')
    def get_user(self, phrase):
        """Return the html_url of the best user match for ``phrase``."""
        r = self.api.search_users(phrase)
        return self.get_first(r)

    def get_first(self, r):
        """Extract the first search hit's URL, raising APIError when the
        result set is empty."""
        d = r['items']
        if not d:
            raise APIError('No results')
        return d[0]['html_url']

    def in_background(self, f):
        """Launches a function in a separate thread."""
        t = threading.Thread(target=f)
        t.daemon = True
        # Bug fix: this previously called t.run(), which executes the
        # target synchronously in the *calling* thread. start() is what
        # actually spawns the background thread.
        t.start()

    def config_get_tracking_data(self, owner, repo):
        """Return the tracking entry for owner/repo, or None if the repo
        is not tracked."""
        tracked = self.config_get('track', [])
        for data in tracked:
            if data['owner'] == owner and data['repo'] == repo:
                return data
        return None

    def get_subscription_info_text(self, owner, repo):
        """Human readable summary of which channels follow owner/repo."""
        d = self.config_get_tracking_data(owner, repo)
        if d is not None:
            text = 'Channels subscribed to %s/%s: %s' % (owner, repo, ', '.join(d['channels']))
        else:
            text = '%s/%s is not being tracked' % (owner, repo)
        return text

    @parse_command([('owner', 1), ('repo', 1), ('channels', '+')], launch_invalid=False)
    def admin_command_github_track(self, msg, args):
        """Starts tracking a repo. Events from a tracked repository (such as new
        created issues or pushed commits) are sent to the specified channels.
        If the repo is already tracked subscribes additional channels to the
        updates.

        Syntax: github_track OWNER REPO CHANNEL ...
        """
        owner = args.owner[0]
        repo = args.repo[0]
        d = self.config_get_tracking_data(owner, repo)
        if d is not None:
            # Already tracked: merge in any channels not yet subscribed.
            for channel in args.channels:
                if channel not in d['channels']:
                    d['channels'].append(channel)
            config_changed.send(self)
        else:
            data = {
                'owner': owner,
                'repo': repo,
                'channels': args.channels
            }
            self.config_append('track', data)
        text = self.get_subscription_info_text(owner, repo)
        self.respond(msg, text)

    @parse_command([('owner', 1), ('repo', 1), ('channels', '*')], launch_invalid=False)
    def admin_command_github_untrack(self, msg, args):
        """Unsubscribes a channel from receiving updates about events occuring
        in a repository. If no CHANNELs are passed as an argument all channels
        are unsubscribed from the updates and the repository is in effect no
        longer tracked.

        Syntax: github_untrack OWNER REPO [CHANNEL ...]
        """
        owner = args.owner[0]
        repo = args.repo[0]
        d = self.config_get_tracking_data(owner, repo)
        if d is not None:
            # Remove channels (all of them when none were specified).
            if not args.channels:
                d['channels'] = []
            else:
                d['channels'] = [c for c in d['channels'] if c not in args.channels]
            # Remove the entire entry if no channels are left.
            if not d['channels']:
                self.config_get('track').remove(d)
            config_changed.send(self)
            # Info text.
            text = 'Channels removed. ' + self.get_subscription_info_text(owner, repo)
            self.respond(msg, text)
        else:
            self.respond(msg, 'This repository is not being tracked')

    def admin_command_github_tracked(self, msg):
        """Lists tracked repositories.

        Syntax: github_tracked
        """
        texts = []
        for data in self.config_get('track', []):
            texts.append('{owner}/{repo}: {channels}'.format(
                owner=data['owner'],
                repo=data['repo'],
                channels=', '.join(data['channels']))
            )
        if texts:
            text = ' | '.join(texts)
        else:
            text = 'No tracked repositories'
        self.respond(msg, text)

    @parse_command([('phrase', '+')], launch_invalid=False)
    def command_github(self, msg, args):
        """Search Github repositories.

        Syntax: github PHRASE
        """
        phrase = ' '.join(args.phrase)
        def f():
            try:
                r = self.get_repo(phrase)
                self.respond(msg, r)
            except Exception as e:
                self.respond(msg, str(e))
        self.in_background(f)

    @parse_command([('phrase', '+')], launch_invalid=False)
    def command_github_user(self, msg, args):
        """Search Github users.

        Syntax: github_user PHRASE
        """
        phrase = ' '.join(args.phrase)
        def f():
            try:
                r = self.get_user(phrase)
                self.respond(msg, r)
            except Exception as e:
                self.respond(msg, str(e))
        self.in_background(f)
mod = Github
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-16 22:59:56
import sys
import six
import time
import logging
import traceback
logger = logging.getLogger("processor")
from six.moves import queue as Queue
from pyspider.libs import utils
from pyspider.libs.log import LogFormatter
from pyspider.libs.utils import pretty_unicode, hide_me
from pyspider.libs.response import rebuild_response
from .project_module import ProjectManager, ProjectFinder
class ProcessorResult(object):
    """The result and logs produced by a project callback.

    Attributes mirror the constructor arguments: ``result`` is the
    callback's return value, ``follows`` the follow-up tasks,
    ``messages`` the inter-project messages, ``logs`` the captured log
    records, ``exception`` the error (if any), ``extinfo`` extra flags
    such as ``not_send_status``, and ``save`` the data persisted for the
    next crawl of the same task.
    """

    def __init__(self, result=None, follows=(), messages=(),
                 logs=(), exception=None, extinfo=None, save=None):
        self.result = result
        self.follows = follows
        self.messages = messages
        self.logs = logs
        self.exception = exception
        # Bug fix: ``extinfo`` previously defaulted to a shared mutable
        # ``{}`` -- a single dict object reused by every instance, so a
        # mutation on one result leaked into all later ones. ``None`` is
        # translated to a fresh dict per instance instead.
        self.extinfo = {} if extinfo is None else extinfo
        self.save = save

    def rethrow(self):
        """rethrow the exception"""
        if self.exception:
            raise self.exception

    def logstr(self):
        """handler the log records to formatted string"""
        result = []
        formater = LogFormatter(color=False)
        for record in self.logs:
            if isinstance(record, six.string_types):
                result.append(pretty_unicode(record))
            else:
                if record.exc_info:
                    a, b, tb = record.exc_info
                    # Hide this module's own frames from the traceback.
                    tb = hide_me(tb, globals())
                    record.exc_info = a, b, tb
                result.append(pretty_unicode(formater.format(record)))
                result.append(u'\n')
        return u''.join(result)
class Processor(object):
    """Executes project callbacks for fetched tasks.

    Pulls ``(task, response)`` pairs from ``inqueue``, runs the project's
    script through ``ProjectManager``, then pushes a status report to
    ``status_queue``, follow-up tasks to ``newtask_queue`` and delivers
    inter-project messages by recursively calling ``on_task``.
    """

    # NOTE(review): PROCESS_TIME_LIMIT is declared here but not enforced
    # in this class -- presumably consumed elsewhere; confirm before use.
    PROCESS_TIME_LIMIT = 30
    # Consecutive unexpected exceptions tolerated by run() before exiting.
    EXCEPTION_LIMIT = 3

    # Truncation limits applied to the status report sent to the scheduler.
    RESULT_LOGS_LIMIT = 1000
    RESULT_RESULT_LIMIT = 10

    def __init__(self, projectdb, inqueue, status_queue, newtask_queue, result_queue,
                 enable_stdout_capture=True,
                 enable_projects_import=True):
        self.inqueue = inqueue
        self.status_queue = status_queue
        self.newtask_queue = newtask_queue
        self.result_queue = result_queue
        self.projectdb = projectdb
        self.enable_stdout_capture = enable_stdout_capture

        self._quit = False
        # NOTE(review): starts at 10, already above EXCEPTION_LIMIT, so any
        # failure before the first successful task stops the loop quickly;
        # on_task success resets it to 0.
        self._exceptions = 10
        self.project_manager = ProjectManager(projectdb, dict(
            result_queue=self.result_queue,
            enable_stdout_capture=self.enable_stdout_capture,
        ))

        if enable_projects_import:
            self.enable_projects_import()

    def enable_projects_import(self):
        '''
        Enable import other project as module

        `from project import project_name`
        '''
        # Only needed on Python 2; the import hook is registered globally.
        if six.PY2:
            sys.meta_path.append(ProjectFinder(self.projectdb))

    def __del__(self):
        pass

    def on_task(self, task, response):
        '''Deal one task'''
        start_time = time.time()
        # Re-hydrate the serialized fetcher output into a Response object.
        response = rebuild_response(response)

        try:
            assert 'taskid' in task, 'need taskid in task'
            project = task['project']
            updatetime = task.get('project_updatetime', None)
            md5sum = task.get('project_md5sum', None)
            # Load (and possibly reload) the project's script sandbox.
            project_data = self.project_manager.get(project, updatetime, md5sum)
            assert project_data, "no such project!"
            if project_data.get('exception'):
                # The project script itself failed to build; report that.
                ret = ProcessorResult(logs=(project_data.get('exception_log'), ),
                                      exception=project_data['exception'])
            else:
                ret = project_data['instance'].run_task(
                    project_data['module'], task, response)
        except Exception as e:
            # Any failure in dispatch becomes a result carrying the traceback.
            logstr = traceback.format_exc()
            ret = ProcessorResult(logs=(logstr, ), exception=e)
        process_time = time.time() - start_time

        if not ret.extinfo.get('not_send_status', False):
            if ret.exception:
                # Keep the full headers to help debug the failure.
                track_headers = dict(response.headers)
            else:
                # On success only cache-validation headers are kept.
                track_headers = {}
                for name in ('etag', 'last-modified'):
                    if name not in response.headers:
                        continue
                    track_headers[name] = response.headers[name]

            # Status report consumed by the scheduler.
            status_pack = {
                'taskid': task['taskid'],
                'project': task['project'],
                'url': task.get('url'),
                'track': {
                    'fetch': {
                        'ok': response.isok(),
                        'redirect_url': response.url if response.url != response.orig_url else None,
                        'time': response.time,
                        'error': response.error,
                        'status_code': response.status_code,
                        'encoding': response.encoding,
                        'headers': track_headers,
                        # Body excerpt is only kept when processing failed.
                        'content': response.text[:500] if ret.exception else None,
                        'mark': task.get('fetch', {}).get('mark'),
                    },
                    'process': {
                        'ok': not ret.exception,
                        'time': process_time,
                        'follows': len(ret.follows),
                        'result': (
                            None if ret.result is None
                            else utils.text(ret.result)[:self.RESULT_RESULT_LIMIT]
                        ),
                        'logs': ret.logstr()[-self.RESULT_LOGS_LIMIT:],
                        'exception': ret.exception,
                    },
                    'save': ret.save,
                },
            }
            if 'schedule' in task:
                status_pack['schedule'] = task['schedule']

            # FIXME: unicode_obj should used in scheduler before store to database
            # it's used here for performance.
            self.status_queue.put(utils.unicode_obj(status_pack))

        # FIXME: unicode_obj should used in scheduler before store to database
        # it's used here for performance.
        if ret.follows:
            # Batch follow-up tasks in chunks of 1000 to bound message size.
            for each in (ret.follows[x:x + 1000] for x in range(0, len(ret.follows), 1000)):
                self.newtask_queue.put([utils.unicode_obj(newtask) for newtask in each])

        # Deliver inter-project messages as synthetic '_on_message' tasks.
        for project, msg, url in ret.messages:
            try:
                self.on_task({
                    'taskid': utils.md5string(url),
                    'project': project,
                    'url': url,
                    'process': {
                        'callback': '_on_message',
                    }
                }, {
                    'status_code': 200,
                    'url': url,
                    'save': (task['project'], msg),
                })
            except Exception as e:
                logger.exception('Sending message error.')
                continue

        if ret.exception:
            logger_func = logger.error
        else:
            logger_func = logger.info
        logger_func('process %s:%s %s [%s] -> [%d] len:%d -> result:%.10r fol:%d msg:%d err:%r' % (
            task['project'], task['taskid'], task.get('url'),
            task.get('fetch', {}).get('mark'), response.status_code, len(response.content),
            ret.result, len(ret.follows), len(ret.messages), ret.exception))
        return True

    def quit(self):
        '''Set quit signal'''
        self._quit = True

    def run(self):
        '''Run loop'''
        logger.info("processor starting...")

        while not self._quit:
            try:
                task, response = self.inqueue.get(timeout=1)
                self.on_task(task, response)
                # A successful task resets the consecutive-failure counter.
                self._exceptions = 0
            except Queue.Empty as e:
                continue
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.exception(e)
                self._exceptions += 1
                if self._exceptions > self.EXCEPTION_LIMIT:
                    break
                continue

        logger.info("processor exiting...")
| |
import math
import unittest
import mock
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
    {'shape': (8, 7), 'normalize': True},
    {'shape': (8, 7), 'normalize': False},
    {'shape': (8, 7), 'normalize': True, 'ignore_all': True},
    # too large shape causes int32 -> float64 issue
    {'shape': (65536, 1), 'normalize': False},
)
class TestSigmoidCrossEntropy(unittest.TestCase):
    """Checks functions.sigmoid_cross_entropy against a naive per-element
    reference, on CPU/GPU with and without cudnn, for both the reduced
    (scalar mean) and unreduced (reduce='no') modes."""

    def setUp(self):
        # x: logits; t: targets in {-1, 0, 1} where -1 means "ignore".
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        if getattr(self, 'ignore_all', False):
            # All targets are -1, so the entire batch is ignored.
            self.t = -numpy.ones(self.shape).astype(numpy.int32)
        else:
            self.t = numpy.random.randint(-1, 2,
                                          self.shape).astype(numpy.int32)
        self.gy = numpy.random.random(self.shape).astype(numpy.float32)

    def check_forward(self, x_data, t_data, use_cudnn='always'):
        x_val = chainer.Variable(x_data)
        t_val = chainer.Variable(t_data)
        with chainer.using_config('use_cudnn', use_cudnn):
            loss = functions.sigmoid_cross_entropy(x_val, t_val,
                                                   self.normalize)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value with the numerically stable scalar form
        # -x*(t - [x>=0]) + log(1 + exp(-|x|)), skipping ignored targets.
        loss_expect = 0
        non_ignore_count = 0
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                xd, td = self.x[i, j], self.t[i, j]
                if td == -1:
                    continue
                loss_expect -= xd * (td - (xd >= 0)) \
                    - math.log(1 + math.exp(-numpy.abs(xd)))
                non_ignore_count += 1
        # normalize=True divides by the element count, otherwise by the
        # batch size; an all-ignored batch yields exactly zero.
        if non_ignore_count == 0:
            loss_expect = 0
        elif self.normalize:
            loss_expect /= non_ignore_count
        else:
            loss_expect /= self.t.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=5)

    def check_forward_no_reduction(self, x_data, t_data):
        x_val = chainer.Variable(x_data)
        t_val = chainer.Variable(t_data)
        loss = functions.sigmoid_cross_entropy(
            x_val, t_val, self.normalize, reduce='no')
        # Unreduced loss keeps the input's shape.
        self.assertEqual(loss.data.shape, self.x.shape)
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = cuda.to_cpu(loss.data)

        # Compute expected value per element; ignored targets produce 0.
        if not getattr(self, 'ignore_all', False):
            for i in six.moves.range(self.x.shape[0]):
                for j in six.moves.range(self.x.shape[1]):
                    xd, td = self.x[i, j], self.t[i, j]
                    if td == -1:
                        loss_expect = 0
                    else:
                        loss_expect = -(
                            xd * (td - (xd >= 0)) -
                            math.log(1 + math.exp(-numpy.abs(xd))))
                    self.assertAlmostEqual(
                        loss_expect, loss_value[i, j], places=5)

    @condition.retry(3)
    def test_forward_cpu(self):
        with chainer.using_config('use_cudnn', 'always'):
            self.check_forward(self.x, self.t)

    @condition.retry(3)
    def test_forward_no_reduction_cpu(self):
        with chainer.using_config('use_cudnn', 'always'):
            self.check_forward_no_reduction(self.x, self.t)

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        with chainer.using_config('use_cudnn', 'always'):
            self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_forward_no_reduction_gpu(self):
        with chainer.using_config('use_cudnn', 'always'):
            self.check_forward_no_reduction(
                cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
        with chainer.using_config('use_cudnn', 'never'):
            self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_forward_no_reduction_gpu_no_cudnn(self):
        with chainer.using_config('use_cudnn', 'never'):
            self.check_forward_no_reduction(
                cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    def check_backward(self, x_data, t_data, y_grad):
        # Skip too large case. That requires a long time.
        if self.shape[0] == 65536:
            return

        # y_grad is unused here: the reduced loss is a scalar, so
        # gradient_check supplies the default upstream gradient (None).
        gradient_check.check_backward(
            functions.SigmoidCrossEntropy(),
            (x_data, t_data), None, eps=1e-2)

    def check_backward_no_reduction(
            self, x_data, t_data, y_grad):
        # Skip too large case. That requires a long time.
        if self.shape[0] == 65536:
            return

        gradient_check.check_backward(
            functions.SigmoidCrossEntropy(reduce='no'),
            (x_data, t_data), y_grad, eps=1e-2)

    @condition.retry(3)
    def test_backward_cpu(self):
        with chainer.using_config('use_cudnn', 'never'):
            self.check_backward(self.x, self.t, self.gy)

    @condition.retry(3)
    def test_backward_no_reduction_cpu(self):
        with chainer.using_config('use_cudnn', 'never'):
            self.check_backward_no_reduction(self.x, self.t, self.gy)

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        with chainer.using_config('use_cudnn', 'always'):
            self.check_backward(
                cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_backward_no_reduction_gpu(self):
        with chainer.using_config('use_cudnn', 'always'):
            self.check_backward_no_reduction(
                cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_no_cudnn(self):
        with chainer.using_config('use_cudnn', 'never'):
            self.check_backward(
                cuda.to_gpu(self.x), cuda.to_gpu(self.t),
                cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_backward_no_reduction_gpu_no_cudnn(self):
        with chainer.using_config('use_cudnn', 'never'):
            self.check_backward_no_reduction(
                cuda.to_gpu(self.x), cuda.to_gpu(self.t),
                cuda.to_gpu(self.gy))
@testing.parameterize(
    {'use_cudnn': 'always'},
    {'use_cudnn': 'auto'},
    {'use_cudnn': 'never'},
)
@attr.cudnn
class TestSigmoidCrossEntropyCudnnCall(unittest.TestCase):
    """Verifies that cudnn's activation kernel is invoked on backward
    exactly when the use_cudnn configuration says it should be."""

    def setUp(self):
        self.x = cuda.cupy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        # NOTE(review): targets are drawn from {0, 1, 2}; sigmoid CE
        # targets are normally {-1, 0, 1} -- confirm 2 is intended here.
        self.t = cuda.cupy.random.randint(0, 3, (4, 3)).astype(numpy.int32)
        # Record whether cudnn is expected to be used under this config.
        with chainer.using_config('use_cudnn', self.use_cudnn):
            self.expect = chainer.should_use_cudnn('==always')

    def forward(self):
        x = chainer.Variable(self.x)
        t = chainer.Variable(self.t)
        return functions.sigmoid_cross_entropy(x, t)

    def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            y = self.forward()
            # Patch the cudnn entry point matching the installed version
            # and check whether backward() routed through it.
            if cuda.cudnn.cudnn.getVersion() >= 4000:
                patch = 'cupy.cudnn.cudnn.activationForward_v4'
            else:
                patch = 'cupy.cudnn.cudnn.activationForward_v3'
            with mock.patch(patch) as func:
                y.backward()
                self.assertEqual(func.called, self.expect)
# Note that SoftmaxCrossEntropy does not use cudnn on backward

# Discover and run every test in this module under chainer's test runner.
testing.run_module(__name__, __file__)
| |
"""The commutator: [A,B] = A*B - B*A."""
from __future__ import print_function, division
from sympy import S, Expr, Mul, Add
from sympy.core.compatibility import u
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.operator import Operator
__all__ = [
'Commutator'
]
#-----------------------------------------------------------------------------
# Commutator
#-----------------------------------------------------------------------------
class Commutator(Expr):
"""The standard commutator, in an unevaluated state.
Evaluating a commutator is defined [1]_ as: ``[A, B] = A*B - B*A``. This
class returns the commutator in an unevaluated form. To evaluate the
commutator, use the ``.doit()`` method.
Cannonical ordering of a commutator is ``[A, B]`` for ``A < B``. The
arguments of the commutator are put into canonical order using ``__cmp__``.
If ``B < A``, then ``[B, A]`` is returned as ``-[A, B]``.
Parameters
==========
A : Expr
The first argument of the commutator [A,B].
B : Expr
The second argument of the commutator [A,B].
Examples
========
>>> from sympy.physics.quantum import Commutator, Dagger, Operator
>>> from sympy.abc import x, y
>>> A = Operator('A')
>>> B = Operator('B')
>>> C = Operator('C')
Create a commutator and use ``.doit()`` to evaluate it:
>>> comm = Commutator(A, B)
>>> comm
[A,B]
>>> comm.doit()
A*B - B*A
The commutator orders it arguments in canonical order:
>>> comm = Commutator(B, A); comm
-[A,B]
Commutative constants are factored out:
>>> Commutator(3*x*A, x*y*B)
3*x**2*y*[A,B]
Using ``.expand(commutator=True)``, the standard commutator expansion rules
can be applied:
>>> Commutator(A+B, C).expand(commutator=True)
[A,C] + [B,C]
>>> Commutator(A, B+C).expand(commutator=True)
[A,B] + [A,C]
>>> Commutator(A*B, C).expand(commutator=True)
[A,C]*B + A*[B,C]
>>> Commutator(A, B*C).expand(commutator=True)
[A,B]*C + B*[A,C]
Adjoint operations applied to the commutator are properly applied to the
arguments:
>>> Dagger(Commutator(A, B))
-[Dagger(A),Dagger(B)]
References
==========
.. [1] http://en.wikipedia.org/wiki/Commutator
"""
is_commutative = False
    def __new__(cls, A, B):
        """Construct [A, B], returning the simplified form when ``eval``
        finds one (zero, factored constants, or a canonically reordered
        negative), otherwise an unevaluated Commutator node."""
        r = cls.eval(A, B)
        if r is not None:
            return r
        obj = Expr.__new__(cls, A, B)
        return obj
    @classmethod
    def eval(cls, a, b):
        """Try to simplify [a, b].

        Returns a simplified expression, or None when no simplification
        applies (signalling ``__new__`` to build an unevaluated node).
        """
        # [A, B] = 0 when either argument is falsy (e.g. zero) or equal.
        if not (a and b):
            return S.Zero
        if a == b:
            return S.Zero
        # Anything commutative commutes with everything.
        if a.is_commutative or b.is_commutative:
            return S.Zero

        # [xA,yB] -> xy*[A,B]
        # Split each argument into commutative and non-commutative factors
        # and pull all commutative factors out in front.
        ca, nca = a.args_cnc()
        cb, ncb = b.args_cnc()
        c_part = ca + cb
        if c_part:
            return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))

        # Canonical ordering of arguments
        # The Commutator [A, B] is in canonical form if A < B.
        if a.compare(b) == 1:
            return S.NegativeOne*cls(b, a)
def _eval_expand_commutator(self, **hints):
    """Recursively apply the standard commutator expansion rules.

    Handles, in priority order:
    - ``[A + B, C] -> [A, C] + [B, C]``
    - ``[A, B + C] -> [A, B] + [A, C]``
    - ``[A*B, C] -> A*[B, C] + [A, C]*B``
    - ``[A, B*C] -> [A, B]*C + B*[A, C]``
    Returns ``self`` unchanged when no rule applies.
    """
    A = self.args[0]
    B = self.args[1]
    if isinstance(A, Add):
        # [A + B, C] -> [A, C] + [B, C]
        sargs = []
        for term in A.args:
            comm = Commutator(term, B)
            # Only recurse while the result is still an unevaluated
            # Commutator (the constructor may have simplified it away).
            if isinstance(comm, Commutator):
                comm = comm._eval_expand_commutator()
            sargs.append(comm)
        return Add(*sargs)
    elif isinstance(B, Add):
        # [A, B + C] -> [A, B] + [A, C]
        sargs = []
        for term in B.args:
            comm = Commutator(A, term)
            if isinstance(comm, Commutator):
                comm = comm._eval_expand_commutator()
            sargs.append(comm)
        return Add(*sargs)
    elif isinstance(A, Mul):
        # [A*B, C] -> A*[B, C] + [A, C]*B
        # Split A into its leading factor and the rest.
        a = A.args[0]
        b = Mul(*A.args[1:])
        c = B
        comm1 = Commutator(b, c)
        comm2 = Commutator(a, c)
        if isinstance(comm1, Commutator):
            comm1 = comm1._eval_expand_commutator()
        if isinstance(comm2, Commutator):
            comm2 = comm2._eval_expand_commutator()
        first = Mul(a, comm1)
        second = Mul(comm2, b)
        return Add(first, second)
    elif isinstance(B, Mul):
        # [A, B*C] -> [A, B]*C + B*[A, C]
        a = A
        b = B.args[0]
        c = Mul(*B.args[1:])
        comm1 = Commutator(a, b)
        comm2 = Commutator(a, c)
        if isinstance(comm1, Commutator):
            comm1 = comm1._eval_expand_commutator()
        if isinstance(comm2, Commutator):
            comm2 = comm2._eval_expand_commutator()
        first = Mul(comm1, c)
        second = Mul(b, comm2)
        return Add(first, second)
    # No changes, so return self
    return self
def doit(self, **hints):
    """ Evaluate commutator """
    A = self.args[0]
    B = self.args[1]
    if isinstance(A, Operator) and isinstance(B, Operator):
        try:
            # Prefer a class-specific commutator rule defined on A.
            comm = A._eval_commutator(B, **hints)
        except NotImplementedError:
            try:
                # Fall back on B's rule, using [A, B] = -[B, A].
                comm = -1*B._eval_commutator(A, **hints)
            except NotImplementedError:
                comm = None
        if comm is not None:
            return comm.doit(**hints)
    # Generic fallback: expand the definition [A, B] = A*B - B*A.
    return (A*B - B*A).doit(**hints)
def _eval_adjoint(self):
    # [A, B]^dagger = [Dagger(B), Dagger(A)]; the Commutator constructor
    # then re-canonicalizes the ordering (possibly adding a minus sign),
    # which produces the -[Dagger(A),Dagger(B)] shown in the class docs.
    return Commutator(Dagger(self.args[1]), Dagger(self.args[0]))
def _sympyrepr(self, printer, *args):
    """Exact, re-evaluatable representation, e.g. ``Commutator(A,B)``."""
    lhs, rhs = self.args
    return "%s(%s,%s)" % (
        self.__class__.__name__, printer._print(lhs), printer._print(rhs))
def _sympystr(self, printer, *args):
    """Plain string form, e.g. ``[A,B]``."""
    return "[%s,%s]" % tuple(self.args)
def _pretty(self, printer, *args):
    """Build the pretty-printed (2-D text) form ``[A,B]``."""
    # Print the first argument, append a comma, append the second
    # argument, then wrap the whole form in square brackets.
    pform = printer._print(self.args[0], *args)
    pform = prettyForm(*pform.right((prettyForm(u(',')))))
    pform = prettyForm(*pform.right((printer._print(self.args[1], *args))))
    pform = prettyForm(*pform.parens(left='[', right=']'))
    return pform
def _latex(self, printer, *args):
    """LaTeX form, e.g. ``\\left[A,B\\right]``."""
    lhs, rhs = (printer._print(arg, *args) for arg in self.args)
    return "\\left[%s,%s\\right]" % (lhs, rhs)
| |
"""
A component which allows you to update your custom cards and components.
For more details about this component, please refer to the documentation at
https://github.com/custom-components/custom_updater
"""
import logging
import os.path
from datetime import timedelta
import voluptuous as vol
from aiohttp import web
from homeassistant.const import EVENT_HOMEASSISTANT_START
import homeassistant.helpers.config_validation as cv
from homeassistant.components.http import HomeAssistantView
from homeassistant.helpers.event import async_track_time_interval
VERSION = '5.1.1'

_LOGGER = logging.getLogger(__name__)

REQUIREMENTS = ['pyupdate==1.4.0']

# Keys accepted in the component's configuration.yaml section.
CONF_TRACK = 'track'
CONF_HIDE_SENSOR = 'hide_sensor'
CONF_SHOW_INSTALLABLE = 'show_installable'
CONF_CARD_CONFIG_URLS = 'card_urls'
CONF_COMPONENT_CONFIG_URLS = 'component_urls'
CONF_PYTHON_SCRIPT_CONFIG_URLS = 'python_script_urls'

DOMAIN = 'custom_updater'
# How often each tracker refreshes automatically.
INTERVAL = timedelta(days=1)

ATTR_CARD = 'card'
ATTR_COMPONENT = 'component'
# Service-call data key naming the element to install.
ATTR_ELEMENT = 'element'

DEFAULT_TRACK = ['components', 'cards']

# Validates the custom_updater: section; extra top-level keys are allowed
# since the full HA config is passed through this schema.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_TRACK, default=DEFAULT_TRACK):
            vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_HIDE_SENSOR, default=False): cv.boolean,
        vol.Optional(CONF_SHOW_INSTALLABLE, default=False): cv.boolean,
        vol.Optional(CONF_CARD_CONFIG_URLS, default=[]):
            vol.All(cv.ensure_list, [cv.url]),
        vol.Optional(CONF_COMPONENT_CONFIG_URLS, default=[]):
            vol.All(cv.ensure_list, [cv.url]),
        vol.Optional(CONF_PYTHON_SCRIPT_CONFIG_URLS, default=[]):
            vol.All(cv.ensure_list, [cv.url]),
    })
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up this component: controllers, timers, views and services."""
    # Lovelace mode decides how cards are located; default is 'storage'.
    conf_mode = config.get('lovelace', {}).get('mode', 'storage')
    conf_track = config[DOMAIN][CONF_TRACK]
    conf_hide_sensor = config[DOMAIN][CONF_HIDE_SENSOR]
    conf_card_urls = config[DOMAIN][CONF_CARD_CONFIG_URLS]
    conf_component_urls = config[DOMAIN][CONF_COMPONENT_CONFIG_URLS]
    conf_py_script_urls = config[DOMAIN][CONF_PYTHON_SCRIPT_CONFIG_URLS]
    _LOGGER.info('if you have ANY issues with this, please report them here:'
                 ' https://github.com/custom-components/custom_updater')
    _LOGGER.debug('Version %s', VERSION)
    _LOGGER.debug('Mode %s', conf_mode)
    # Serve files under <config>/www/ at /customcards/.
    hass.http.register_view(CustomCardsView(str(hass.config.path())))
    if conf_mode == 'yaml':
        # Fall back to storage mode when the yaml dashboard file is absent.
        if not os.path.exists("{}/ui-lovelace.yaml".format(str(hass.config.path()))):
            _LOGGER.warning(
                "Configured to run with yaml mode but ui-lovelace.yaml does not exist, assuming storage is used")
            conf_mode = 'storage'
    if 'cards' in conf_track:
        card_controller = CustomCards(
            hass, conf_hide_sensor, conf_card_urls, conf_mode)
        # NOTE(review): extra_init() is *called* here, so a coroutine object
        # (not the callable) is registered as the listener -- confirm the
        # targeted Home Assistant version accepts coroutine listeners.
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, card_controller.extra_init())
        async_track_time_interval(
            hass, card_controller.force_reload, INTERVAL)
    if 'components' in conf_track:
        components_controller = CustomComponents(
            hass, conf_hide_sensor, conf_component_urls)
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, components_controller.extra_init())
        async_track_time_interval(
            hass, components_controller.cache_versions, INTERVAL)
    if 'python_scripts' in conf_track:
        python_scripts_controller = CustomPythonScripts(
            hass, conf_hide_sensor, conf_py_script_urls)
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, python_scripts_controller.extra_init())
        async_track_time_interval(
            hass, python_scripts_controller.cache_versions, INTERVAL)

    # The controller locals above exist only when the matching tracker is
    # enabled, so each service re-checks conf_track before touching them.
    async def check_all_service(call):
        """Set up service for manual trigger."""
        if 'cards' in conf_track:
            await card_controller.force_reload()
        if 'components' in conf_track:
            await components_controller.cache_versions()
        if 'python_scripts' in conf_track:
            await python_scripts_controller.cache_versions()

    async def update_all_service(call):
        """Set up service for manual trigger."""
        if 'cards' in conf_track:
            await card_controller.update_all()
        if 'components' in conf_track:
            await components_controller.update_all()
        if 'python_scripts' in conf_track:
            await python_scripts_controller.update_all()

    async def install_service(call):
        """Install single component/card."""
        element = call.data.get(ATTR_ELEMENT)
        _LOGGER.debug('Installing %s', element)
        # The element is offered to every enabled controller; presumably
        # pyupdate skips elements it does not manage -- verify upstream.
        if 'cards' in conf_track:
            await card_controller.install(element)
        if 'components' in conf_track:
            await components_controller.install(element)
        if 'python_scripts' in conf_track:
            await python_scripts_controller.install(element)

    hass.services.async_register(DOMAIN, 'check_all', check_all_service)
    hass.services.async_register(DOMAIN, 'update_all', update_all_service)
    hass.services.async_register(DOMAIN, 'install', install_service)
    return True
class CustomCards():
    """Custom cards controller.

    Wraps pyupdate's card handling and publishes tracker state to the
    ``sensor.custom_card_tracker`` entity.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, hass, conf_hide_sensor, conf_card_urls,
                 conf_mode):
        """Initialize."""
        _LOGGER.debug('CustomCards - __init__')
        from pyupdate.ha_custom.custom_cards import CustomCards as Cards
        self.hass = hass
        self.ha_conf_dir = str(hass.config.path())
        self.hidden = conf_hide_sensor
        self.pyupdate = Cards(self.ha_conf_dir, conf_mode, '', conf_card_urls)

    async def extra_init(self):
        """Additional init: load local data, then publish initial state."""
        _LOGGER.debug('CustomCards - extra_init')
        await self.pyupdate.init_local_data()
        await self.cache_versions()

    async def force_reload(self, now=None):
        """Force data refresh."""
        _LOGGER.debug('CustomCards - force_reload')
        await self.pyupdate.force_reload()
        await self.cache_versions()

    async def cache_versions(self, now=None):
        """Refresh cached version info and publish the tracker sensor."""
        _LOGGER.debug('CustomCards - cache_versions')
        await self._async_publish_sensor()

    async def update_all(self):
        """Update all cards, then publish the new state."""
        _LOGGER.debug('CustomCards - update_all')
        await self.pyupdate.update_all()
        await self._async_publish_sensor()

    async def install(self, element):
        """Install single card."""
        # Bug fix: this previously logged 'CustomCards - update_all',
        # which made debug traces of installs misleading.
        _LOGGER.debug('CustomCards - install')
        await self.pyupdate.install(element)

    async def _async_publish_sensor(self):
        """Push current pyupdate data to sensor.custom_card_tracker."""
        information = await self.pyupdate.get_sensor_data()
        state = int(information[1])
        attributes = information[0]
        attributes['hidden'] = self.hidden
        self.hass.states.async_set(
            'sensor.custom_card_tracker', state, attributes)
class CustomComponents():
    """Custom components controller."""
    # pylint: disable=too-many-instance-attributes

    def __init__(self, hass, conf_hide_sensor, conf_component_urls):
        """Set up the pyupdate component handler."""
        _LOGGER.debug('CustomComponents - __init__')
        from pyupdate.ha_custom.custom_components import (
            CustomComponents as Components)
        self.hass = hass
        self.ha_conf_dir = str(hass.config.path())
        self.hidden = conf_hide_sensor
        self.pyupdate = Components(self.ha_conf_dir, conf_component_urls)

    async def extra_init(self):
        """Additional init."""
        _LOGGER.debug('CustomComponents - extra_init')
        await self.cache_versions()

    async def cache_versions(self, now=None):
        """Refresh cached version info and publish the tracker sensor."""
        _LOGGER.debug('CustomComponents - cache_versions')
        data = await self.pyupdate.get_sensor_data(True)
        attrs, value = data[0], int(data[1])
        attrs['hidden'] = self.hidden
        self.hass.states.async_set(
            'sensor.custom_component_tracker', value, attrs)

    async def update_all(self):
        """Update all components and publish the resulting state."""
        _LOGGER.debug('CustomComponents - update_all')
        await self.pyupdate.update_all()
        data = await self.pyupdate.get_sensor_data()
        attrs, value = data[0], int(data[1])
        attrs['hidden'] = self.hidden
        self.hass.states.async_set(
            'sensor.custom_component_tracker', value, attrs)

    async def install(self, element):
        """Install single component."""
        _LOGGER.debug('CustomComponents - install')
        await self.pyupdate.install(element)
class CustomPythonScripts():
    """Custom python_scripts controller."""
    # pylint: disable=too-many-instance-attributes

    def __init__(self, hass, conf_hide_sensor, conf_python_script_urls):
        """Set up the pyupdate python_script handler."""
        _LOGGER.debug('CustomPythonScripts - __init__')
        from pyupdate.ha_custom.python_scripts import PythonScripts
        self.hass = hass
        self.ha_conf_dir = str(hass.config.path())
        self.hidden = conf_hide_sensor
        self.pyupdate = PythonScripts(
            self.ha_conf_dir, conf_python_script_urls)

    async def extra_init(self):
        """Additional init."""
        _LOGGER.debug('CustomPythonScripts - extra_init')
        await self.cache_versions()

    async def cache_versions(self, now=None):
        """Refresh cached version info and publish the tracker sensor."""
        _LOGGER.debug('CustomPythonScripts - cache_versions')
        data = await self.pyupdate.get_sensor_data(True)
        attrs, value = data[0], int(data[1])
        attrs['hidden'] = self.hidden
        self.hass.states.async_set(
            'sensor.custom_python_script_tracker', value, attrs)

    async def update_all(self):
        """Update all python_scripts and publish the resulting state."""
        _LOGGER.debug('CustomPythonScripts - update_all')
        await self.pyupdate.update_all()
        data = await self.pyupdate.get_sensor_data()
        attrs, value = data[0], int(data[1])
        attrs['hidden'] = self.hidden
        self.hass.states.async_set(
            'sensor.custom_python_script_tracker', value, attrs)

    async def install(self, element):
        """Install single python_script."""
        _LOGGER.debug('CustomPythonScripts - install')
        await self.pyupdate.install(element)
class CustomCardsView(HomeAssistantView):
    """View to return a custom_card file from <config>/www/."""
    requires_auth = False
    url = r"/customcards/{path:.+}"
    name = "customcards:path"

    def __init__(self, hadir):
        """Initialize custom_card view with the HA config directory."""
        self.hadir = hadir

    async def get(self, request, path):
        """Retrieve custom_card, or 404 when the file is missing."""
        # Strip any query string captured by the greedy path match.
        if '?' in path:
            path = path.split('?')[0]
        file = "{}/www/{}".format(self.hadir, path)
        if os.path.exists(file):
            msg = "Serving /customcards/{path} from /www/{path}".format(
                path=path)
            _LOGGER.debug(msg)
            resp = web.FileResponse(file)
            # Force revalidation so card updates are picked up immediately.
            resp.headers["Cache-Control"] = "max-age=0, must-revalidate"
            return resp
        _LOGGER.error("Tried to serve up '%s' but it does not exist", file)
        # Bug fix: returning None from an aiohttp handler produces a
        # 500 server error; answer with an explicit 404 instead.
        return web.Response(status=404)
| |
"""
Zoof, let's code!
Zoof, let us code!
Zoof, coding to the people!
Zoof, coding for everyone.
"""
import os
import time
from collections import OrderedDict
from PyQt4 import QtCore, QtGui
from zoof.lib.zson import Dict
Qt = QtCore.Qt
# Default values for create_logo(); sliders in Win mirror the numeric ones.
PARAMS = Dict()
PARAMS.size = 32          # pixmap edge length in pixels (square)
PARAMS.gap = 0.05         # spacing between blocks, relative units
PARAMS.inset = 0.20       # slant of the cut edges -- per create_logo usage
PARAMS.height = 0.4       # relative block height -- per create_logo usage
PARAMS.width = 0.6        # relative width of the left block
#
PARAMS.bgcolor = '#268bd2'  # '#cb4b16'
PARAMS.bgcolor2 = '#dc322f'
PARAMS.bgcolor3 = '#859900'
PARAMS.edgecolor = '#073642'
PARAMS.mono = False       # when truthy, all blocks use a single color
PARAMS.fullheight = 0.8   # vertical fraction of the image the logo spans
def create_logo(**kwargs):
    """ Create Zoof logo and return as a QPixmap.

    Keyword arguments override the defaults in PARAMS (size, gap, inset,
    height, width, colors, mono, fullheight).
    """
    # Get shortnames for params
    params = PARAMS.copy()
    params.update(kwargs)
    L = [params[k] for k in 'size gap inset height width'.split()]
    size, gap, inset, height, width = L
    # Create vertices
    inset1 = inset * (1 - height)
    inset2 = inset * (1 - height - gap)
    betweenblocks = width+(1-width)*0.5
    #
    # Four polygons in unit coordinates: one large 8-vertex shape plus
    # three quads to its right.
    verts1 = [ (0, 0), (width, 0), (width-inset1, 1-height),
               (width, 1-height), (width, 1), (0, 1), (inset1, height),
               (0, height), ]
    verts2 = [ (width+gap, 0), (1, 0), (1-inset2, 1-height-gap),
               (width+gap-inset2, 1-height-gap), ]
    verts3 = [ (width+gap, 1-height), (betweenblocks, 1-height),
               (betweenblocks, 1), (width+gap, 1), ]
    verts4 = [ (betweenblocks+gap, 1-height), (1, 1-height),
               (1, 1), (betweenblocks+gap, 1), ]
    # Correct for edge
    edgewidth = (size/16)**0.5 / size
    for verts in (verts1, verts2, verts3, verts4):
        for i in range(len(verts)):
            # Shrink inward so the stroked outline stays inside the
            # pixmap, then squeeze vertically to 'fullheight'.
            verts[i] = [u*(1-edgewidth) + 0.5*edgewidth for u in verts[i]]
            u = verts[i][1]
            verts[i][1] = u*params.fullheight + 0.5*(1-params.fullheight)
    # Prepare to paint
    pixmap = QtGui.QPixmap(size, size)
    pixmap.fill(QtGui.QColor(0, 0, 0, 0))  # transparent background
    painter = QtGui.QPainter()
    painter.begin(pixmap)
    hint = (painter.Antialiasing | painter.TextAntialiasing |
            painter.SmoothPixmapTransform | painter.HighQualityAntialiasing)
    painter.setRenderHints(hint)
    # Paint outlines
    clr = QtGui.QColor(params.edgecolor)
    #clr.setAlpha(50)
    pen = QtGui.QPen(clr)
    pen.setWidthF(edgewidth*size)
    painter.setPen(pen)
    painter.setBrush(Qt.NoBrush)
    for verts in (verts1, verts2, verts3, verts4):
        lines = [QtCore.QPointF(p[0]*size, p[1]*size) for p in verts]
        painter.drawPolygon(QtGui.QPolygonF(lines))
    # Paint shape
    colors = (params.bgcolor, params.bgcolor2, params.bgcolor3,
              params.bgcolor3, params.edgecolor, '#000')
    # In mono mode one color is picked from the list by the mono value.
    clr = colors[max(0, params.mono-1)]
    painter.setPen(Qt.NoPen)
    painter.setBrush(QtGui.QBrush(QtGui.QColor(clr)))
    for verts, clr in zip((verts1, verts2, verts3, verts4), colors):
        if not params.mono:
            painter.setBrush(QtGui.QBrush(QtGui.QColor(clr)))
        lines = [QtCore.QPointF(p[0]*size, p[1]*size) for p in verts]
        painter.drawPolygon(QtGui.QPolygonF(lines))
    painter.end()
    return pixmap
class Win(QtGui.QWidget):
    """Interactive logo tweaker: previews the logo on several backgrounds
    with one slider per numeric parameter, plus a Save button."""

    def __init__(self):
        QtGui.QWidget.__init__(self, None)
        self.resize(900, 600)
        # Create labels to hold the logo pixmap
        self._labels = labels = [QtGui.QLabel(self) for i in range(4)]
        for label in labels:
            label.setAlignment(Qt.AlignCenter)
            label.setAutoFillBackground(True)
        # labels[0]: fixed-size scaled preview; labels[1:]: native-size
        # previews on black / brand-colored / white backgrounds.
        labels[0].setStyleSheet("QLabel {background-color:#0f0; padding: 0px;}")
        labels[1].setStyleSheet("QLabel {background-color:#000; padding: 10px;}")
        labels[2].setStyleSheet("QLabel {background-color:%s; padding: 10px;}" % PARAMS.bgcolor)
        labels[3].setStyleSheet("QLabel {background-color:#fff; padding: 10px;}")
        labels[0].setMinimumSize(200, 200)
        labels[0].setMaximumSize(200, 200)
        # Lay out the labels
        layout = QtGui.QVBoxLayout(self)
        self.setLayout(layout)
        hbox1 = QtGui.QHBoxLayout()
        hbox2 = QtGui.QHBoxLayout()
        layout.addLayout(hbox1, 1)
        layout.addLayout(hbox2, 1)
        #
        controlLayout = QtGui.QVBoxLayout()
        hbox1.addLayout(controlLayout, 2)
        hbox1.addWidget(QtGui.QWidget(self), 1)
        #
        hbox1.addWidget(labels[0], 0)
        hbox2.setSpacing(0)
        for label in labels[1:]:
            hbox2.addWidget(label, 1)
        # Create sliders to modify logo params
        self._sliders = []
        self._sliderLabels = []
        for name, min, max, in [('size', 16, 256),
                                ('gap', 0.0, 0.1),
                                ('inset', 0.0, 0.5),
                                ('height', 0.0, 0.5),
                                ('width', 0.1, 0.8),
                                ('fullheight', 0.5, 1.0),
                                ('mono', 0, 6), ]:
            val = PARAMS[name]
            if not isinstance(val, (float, int)):
                continue
            slider = QtGui.QSlider(Qt.Horizontal, self)
            slider._name = name
            slider._isfloat = False
            if isinstance(val, float):
                # QSlider is integer-only: scale floats by 1000 here and
                # divide back in _get_params().
                slider._isfloat = True
                val, min, max = val*1000, min*1000, max*1000
            slider.setRange(min, max)
            slider.setValue(val)
            slider.valueChanged.connect(self.updateLogo)
            label = QtGui.QLabel(name)
            #
            self._sliders.append(slider)
            self._sliderLabels.append(label)
            #
            hbox = QtGui.QHBoxLayout()
            hbox.addWidget(label, 1)
            hbox.addWidget(slider, 4)
            controlLayout.addLayout(hbox, 1)
        # Save button
        self._savebut = QtGui.QPushButton('Save', self)
        self._savebut.clicked.connect(self.on_save)
        controlLayout.addWidget(self._savebut)
        # Start
        self.updateLogo()

    def _get_params(self):
        """Collect slider values (undoing the x1000 float scaling), refresh
        the slider captions, and return a name->value dict."""
        # Obtain values from sliders and update slider labels
        names = [slider._name for slider in self._sliders]
        values = [(slider.value()/1000. if slider._isfloat else slider.value())
                  for slider in self._sliders]
        for name, val, label in zip(names, values, self._sliderLabels):
            fmt = '%s: %0.2f' if isinstance(val, float) else '%s: %i'
            label.setText(fmt % (name, val))
        return dict(zip(names, values))

    def updateLogo(self, bla=None):
        """Regenerate the logo pixmap and push it to all preview labels."""
        params = self._get_params()
        # Generate pixmap
        t0 = time.time()
        pixmap = create_logo(**params)
        #print('Logo took %0.0f ms to generate' % ((time.time() - t0)*1000))
        # Apply it to all display labels
        pixmapL = pixmap.scaled(200, 200, transformMode=Qt.FastTransformation)
        self._labels[0].setPixmap(pixmapL)
        for label in self._labels[1:]:
            label.setPixmap(pixmap)

    def on_save(self):
        """Render at the current settings and save to ~/zooflogo<size>.png."""
        params = self._get_params()
        pixmap = create_logo(**params)
        filename = os.path.expanduser('~/zooflogo%i.png' % params['size'])
        pixmap.save(filename, None, 0)
if __name__ == '__main__':
    # Bug fix: no QApplication was created and no event loop started, so
    # when run as a script the window appeared and the process exited
    # immediately. Reuse an existing application instance (e.g. when run
    # from an interactive Qt shell) or create one, and enter its loop.
    app = QtGui.QApplication.instance() or QtGui.QApplication([])
    w = Win()
    w.show()
    w.raise_()
    app.exec_()
| |
import collections
import glob
import hashlib
import json
import logging
import os
import re
import shutil
import stat
import StringIO
import tempfile
import zipfile
from cStringIO import StringIO as cStringIO
from datetime import datetime
from itertools import groupby
from xml.dom import minidom
from zipfile import BadZipfile, ZipFile
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import trans_real as translation
from django.core.files.storage import default_storage as storage
import rdflib
from tower import ugettext as _
import amo
from amo.utils import rm_local_tmp_dir, strip_bom, to_language
from applications.models import AppVersion
from versions.compare import version_int as vint
log = logging.getLogger('files.utils')
class ParseError(forms.ValidationError):
    """Raised when an uploaded add-on package cannot be parsed."""
    pass
# Version strings: at most 32 word characters or -, +, *, . characters.
# Raw strings: '\w' / '\-' in plain literals are invalid string escapes
# (DeprecationWarning on modern Pythons); the compiled patterns are
# byte-identical to before.
VERSION_RE = re.compile(r'^[-+*.\w]{,32}$')
# Matches META-INF/<name>.rsa or .sf entries used for add-on signing.
SIGNED_RE = re.compile(r'^META\-INF/(\w+)\.(rsa|sf)$')

# The default update URL.
default = ('https://versioncheck.addons.mozilla.org/update/VersionCheck.php?'
    'reqVersion=%REQ_VERSION%&id=%ITEM_ID%&version=%ITEM_VERSION%&'
    'maxAppVersion=%ITEM_MAXAPPVERSION%&status=%ITEM_STATUS%&appID=%APP_ID%&'
    'appVersion=%APP_VERSION%&appOS=%APP_OS%&appABI=%APP_ABI%&'
    'locale=%APP_LOCALE%&currentAppVersion=%CURRENT_APP_VERSION%&'
    'updateType=%UPDATE_TYPE%')
def get_filepath(fileorpath):
    """Return the backing filesystem path for *fileorpath*.

    FileUpload objects expose a ``path`` attribute; anything else (a
    plain path string) is returned unchanged.
    """
    is_upload = hasattr(fileorpath, 'path')
    return fileorpath.path if is_upload else fileorpath
def get_file(fileorpath):
    """Return a readable file object for *fileorpath*.

    FileUpload objects are opened through the storage backend; anything
    that already looks like an open file (has a ``name``) is returned
    as-is; otherwise the argument is treated as a storage path.
    """
    if hasattr(fileorpath, 'path'):  # FileUpload
        return storage.open(fileorpath.path)
    if hasattr(fileorpath, 'name'):  # already a file-like object
        return fileorpath
    return storage.open(fileorpath)
def make_xpi(files):
    """Build an in-memory XPI (zip) from a {archive path: data} mapping.

    Returns a file-like object rewound to the start.
    """
    buf = cStringIO()
    archive = ZipFile(buf, 'w')
    for zpath, contents in files.items():
        archive.writestr(zpath, contents)
    archive.close()
    buf.seek(0)
    return buf
class Extractor(object):
    """Extract add-on info from an install.rdf."""
    # Maps <em:type> values to AMO add-on type constants.
    TYPES = {'2': amo.ADDON_EXTENSION, '4': amo.ADDON_THEME,
             '8': amo.ADDON_LPAPP, '64': amo.ADDON_DICT}
    App = collections.namedtuple('App', 'appdata id min max')
    manifest = u'urn:mozilla:install-manifest'

    def __init__(self, path):
        """Parse <path>/install.rdf and collect the add-on metadata."""
        self.path = path
        self.rdf = rdflib.Graph().parse(open(os.path.join(path,
                                                          'install.rdf')))
        self.find_root()
        self.data = {
            'guid': self.find('id'),
            'type': self.find_type(),
            'name': self.find('name'),
            'version': self.find('version'),
            'homepage': self.find('homepageURL'),
            'summary': self.find('description'),
            # Bootstrapped add-ons and dictionaries ('64') need no restart.
            'no_restart': self.find('bootstrap') == 'true' or
                          self.find('type') == '64',
            'strict_compatibility': self.find('strictCompatibility') == 'true',
            'apps': self.apps(),
        }

    @classmethod
    def parse(cls, install_rdf):
        """Convenience: parse a directory and return the data dict."""
        return cls(install_rdf).data

    def find_type(self):
        """Determine the AMO add-on type from the manifest/heuristics."""
        # If the extension declares a type that we know about, use
        # that.
        # FIXME: Fail if it declares a type we don't know about.
        declared_type = self.find('type')
        if declared_type and declared_type in self.TYPES:
            return self.TYPES[declared_type]
        # Look for Complete Themes.
        if self.path.endswith('.jar') or self.find('internalName'):
            return amo.ADDON_THEME
        # Look for dictionaries.
        dic = os.path.join(self.path, 'dictionaries')
        if os.path.exists(dic) and glob.glob('%s/*.dic' % dic):
            return amo.ADDON_DICT
        # Consult <em:type>.
        return self.TYPES.get(declared_type, amo.ADDON_EXTENSION)

    def uri(self, name):
        """Build the em-rdf URIRef for <em:{name}>."""
        namespace = 'http://www.mozilla.org/2004/em-rdf'
        return rdflib.term.URIRef('%s#%s' % (namespace, name))

    def find_root(self):
        # If the install-manifest root is well-defined, it'll show up when we
        # search for triples with it. If not, we have to find the context that
        # defines the manifest and use that as our root.
        # http://www.w3.org/TR/rdf-concepts/#section-triples
        manifest = rdflib.term.URIRef(self.manifest)
        if list(self.rdf.triples((manifest, None, None))):
            self.root = manifest
        else:
            # Python 2 iterator protocol (.next()).
            self.root = self.rdf.subjects(None, self.manifest).next()

    def find(self, name, ctx=None):
        """Like $() for install.rdf, where name is the selector."""
        if ctx is None:
            ctx = self.root
        # predicate it maps to <em:{name}>.
        match = list(self.rdf.objects(ctx, predicate=self.uri(name)))
        # These come back as rdflib.Literal, which subclasses unicode.
        if match:
            return unicode(match[0])

    def apps(self):
        """Collect (app, min, max) compatibility entries we know about."""
        rv = []
        for ctx in self.rdf.objects(None, self.uri('targetApplication')):
            app = amo.APP_GUIDS.get(self.find('id', ctx))
            if not app:
                # Unknown application GUID: skip silently.
                continue
            try:
                qs = AppVersion.objects.filter(application=app.id)
                min = qs.get(version=self.find('minVersion', ctx))
                max = qs.get(version=self.find('maxVersion', ctx))
            except AppVersion.DoesNotExist:
                # Versions we don't track: skip this target application.
                continue
            rv.append(self.App(appdata=app, id=app.id, min=min, max=max))
        return rv
def extract_search(content):
    """Parse an OpenSearch XML document (file-like *content*) and return
    a dict with its 'name' (ShortName) and 'description' (Description)."""
    dom = minidom.parse(content)

    def first_text(tag):
        # Text of the first element with the given tag name.
        return dom.getElementsByTagName(tag)[0].childNodes[0].wholeText

    return {'name': first_text('ShortName'),
            'description': first_text('Description')}
def parse_search(fileorpath, addon=None):
    """Parse an OpenSearch XML upload into standard add-on data."""
    try:
        data = extract_search(get_file(fileorpath))
    except forms.ValidationError:
        # Already user-facing; let it propagate untouched.
        raise
    except Exception:
        log.error('OpenSearch parse error', exc_info=True)
        raise forms.ValidationError(_('Could not parse uploaded file.'))
    return {'guid': None,
            'type': amo.ADDON_SEARCH,
            'name': data['name'],
            'summary': data['description'],
            'version': datetime.now().strftime('%Y%m%d')}
class WebAppParser(object):
    """Parse a web app (hosted manifest or packaged .zip) into add-on data."""

    def extract_locale(self, locales, key, default=None):
        """Return ``{locale: value-of-key}`` across all locale dicts.

        ``default`` fills in locales whose dict lacks ``key``. E.g. with
        locales = {'en': {'foo': 1}, 'it': {}}:
        extract_locale(locales, 'foo', 0) -> {'en': 1, 'it': 0}.
        """
        ex = {}
        # Python 2 dict iteration.
        for loc, data in locales.iteritems():
            ex[loc] = data.get(key, default)
        return ex

    def get_json_data(self, fileorpath):
        """Read the manifest (from a zip's manifest.webapp or a plain
        file) and return it decoded as a Python dict."""
        path = get_filepath(fileorpath)
        if zipfile.is_zipfile(path):
            zf = SafeUnzip(path)
            zf.is_valid()  # Raises forms.ValidationError if problems.
            try:
                data = zf.extract_path('manifest.webapp')
            except KeyError:
                raise forms.ValidationError(
                    _('The file "manifest.webapp" was not found at the root '
                      'of the packaged app archive.'))
        else:
            file_ = get_file(fileorpath)
            data = file_.read()
            file_.close()
        return WebAppParser.decode_manifest(data)

    @classmethod
    def decode_manifest(cls, manifest):
        """
        Returns manifest, stripped of BOMs and UTF-8 decoded, as Python dict.
        """
        try:
            data = strip_bom(manifest)
            # Marketplace only supports UTF-8 encoded manifests.
            decoded_data = data.decode('utf-8')
        except (ValueError, UnicodeDecodeError) as exc:
            msg = 'Error parsing manifest (encoding: utf-8): %s: %s'
            log.error(msg % (exc.__class__.__name__, exc))
            raise forms.ValidationError(
                _('Could not decode the webapp manifest file.'))
        try:
            return json.loads(decoded_data)
        except Exception:
            raise forms.ValidationError(
                _('The webapp manifest is not valid JSON.'))

    def parse(self, fileorpath):
        """Return normalized add-on data extracted from the manifest."""
        data = self.get_json_data(fileorpath)
        loc = data.get('default_locale', translation.get_language())
        default_locale = self.trans_locale(loc)
        locales = data.get('locales', {})
        if type(locales) == list:
            raise forms.ValidationError(
                _('Your specified app locales are not in the correct format.'))
        localized_descr = self.extract_locale(locales, 'description',
                                              default='')
        if 'description' in data:
            # The top-level description wins for the default locale.
            localized_descr.update({default_locale: data['description']})
        localized_name = self.extract_locale(locales, 'name',
                                             default=data['name'])
        localized_name.update({default_locale: data['name']})
        developer_info = data.get('developer', {})
        developer_name = developer_info.get('name')
        if not developer_name:
            # Missing developer name shouldn't happen if validation took place,
            # but let's be explicit about this just in case.
            raise forms.ValidationError(
                _("Developer name is required in the manifest in order to "
                  "display it on the app's listing."))
        return {'guid': None,
                'type': amo.ADDON_WEBAPP,
                'name': self.trans_all_locales(localized_name),
                'developer_name': developer_name,
                'description': self.trans_all_locales(localized_descr),
                'version': data.get('version', '1.0'),
                'default_locale': default_locale,
                'origin': data.get('origin')}

    def trans_locale(self, locale):
        """Expand shorthand locales (e.g. 'pt') to full language codes."""
        return to_language(settings.SHORTER_LANGUAGES.get(locale, locale))

    def trans_all_locales(self, locale_dict):
        """Apply trans_locale() to every key of locale_dict."""
        trans = {}
        for key, item in locale_dict.iteritems():
            key = self.trans_locale(key)
            trans[key] = item
        return trans
class SafeUnzip(object):
    """ZipFile wrapper that validates entries before extraction.

    Call is_valid() before any extraction helper: it populates
    ``self.info`` and ``self.zip``.
    """

    def __init__(self, source, mode='r'):
        self.source = source
        self.info = None  # populated by is_valid()
        self.mode = mode

    def is_valid(self, fatal=True):
        """
        Runs some overall archive checks.
        fatal: if the archive is not valid and fatal is True, it will raise
        an error, otherwise it will return False.
        """
        try:
            zip = zipfile.ZipFile(self.source, self.mode)
        except (BadZipfile, IOError):
            if fatal:
                log.info('Error extracting', exc_info=True)
                raise
            return False
        _info = zip.infolist()
        for info in _info:
            # Reject path traversal ('..') and absolute paths.
            if '..' in info.filename or info.filename.startswith('/'):
                log.error('Extraction error, invalid file name (%s) in '
                          'archive: %s' % (info.filename, self.source))
                # L10n: {0} is the name of the invalid file.
                raise forms.ValidationError(
                    _('Invalid file name in archive: {0}').format(
                        info.filename))
            # Reject over-large members (zip-bomb guard).
            if info.file_size > settings.FILE_UNZIP_SIZE_LIMIT:
                log.error('Extraction error, file too big (%s) for file (%s): '
                          '%s' % (self.source, info.filename, info.file_size))
                # L10n: {0} is the name of the invalid file.
                raise forms.ValidationError(
                    _('File exceeding size limit in archive: {0}').format(
                        info.filename))
        self.info = _info
        self.zip = zip
        return True

    def is_signed(self):
        """Tells us if an addon is signed.

        Requires is_valid() to have run (uses self.info). Returns True
        when META-INF holds a matching <name>.rsa / <name>.sf pair.
        """
        finds = []
        for info in self.info:
            match = SIGNED_RE.match(info.filename)
            if match:
                name, ext = match.groups()
                # If it's rsa or sf, just look for the opposite.
                if (name, {'rsa': 'sf', 'sf': 'rsa'}[ext]) in finds:
                    return True
                finds.append((name, ext))

    def extract_from_manifest(self, manifest):
        """
        Extracts a file given a manifest such as:
            jar:chrome/de.jar!/locale/de/browser/
        or
            locale/de/browser
        """
        type, path = manifest.split(':')
        jar = self
        if type == 'jar':
            # Descend through nested jars; every part before a '!' names
            # an inner jar to open and validate.
            parts = path.split('!')
            for part in parts[:-1]:
                jar = self.__class__(StringIO.StringIO(jar.zip.read(part)))
                jar.is_valid(fatal=True)
            path = parts[-1]
        return jar.extract_path(path[1:] if path.startswith('/') else path)

    def extract_path(self, path):
        """Given a path, extracts the content at path."""
        return self.zip.read(path)

    def extract_info_to_dest(self, info, dest):
        """Extracts the given info to a directory and checks the file size."""
        self.zip.extract(info, dest)
        dest = os.path.join(dest, info.filename)
        if not os.path.isdir(dest):
            # Directories consistently report their size incorrectly.
            size = os.stat(dest)[stat.ST_SIZE]
            if size != info.file_size:
                log.error('Extraction error, uncompressed size: %s, %s not %s'
                          % (self.source, size, info.file_size))
                raise forms.ValidationError(_('Invalid archive.'))

    def extract_to_dest(self, dest):
        """Extracts the zip file to a directory."""
        for info in self.info:
            self.extract_info_to_dest(info, dest)

    def close(self):
        self.zip.close()
def extract_zip(source, remove=False, fatal=True):
    """Extract *source* into a fresh temp directory and return its path.

    remove: delete the source archive after a successful extraction.
    fatal: forwarded to SafeUnzip.is_valid(); when False an invalid
    archive is skipped instead of raising.
    """
    tempdir = tempfile.mkdtemp()
    unzipper = SafeUnzip(source)
    try:
        if unzipper.is_valid(fatal):
            unzipper.extract_to_dest(tempdir)
    except:
        # Clean up the scratch dir before propagating any error.
        rm_local_tmp_dir(tempdir)
        raise
    if remove:
        os.remove(source)
    return tempdir
def copy_over(source, dest):
    """Move the *source* tree to *dest*.

    An existing *dest* directory is removed first; *source* is deleted
    after the copy.
    """
    # os.path.isdir() is False for nonexistent paths, so one check covers
    # the original exists-and-isdir test.
    if os.path.isdir(dest):
        shutil.rmtree(dest)
    shutil.copytree(source, dest)
    shutil.rmtree(source)
def extract_xpi(xpi, path, expand=False):
    """
    If expand is given, will look inside the expanded file
    and find anything in the whitelist and try and expand it as well.
    It will do up to 10 iterations, after that you are on your own.
    It will replace the expanded file with a directory and the expanded
    contents. If you have 'foo.jar', that contains 'some-image.jpg', then
    it will create a folder, foo.jar, with an image inside.
    """
    expand_whitelist = ['.jar', '.xpi']
    tempdir = extract_zip(xpi)
    if expand:
        # Up to 10 passes (Python 2 xrange); each pass expands any nested
        # .jar/.xpi found, stopping early once a pass expands nothing.
        for x in xrange(0, 10):
            flag = False
            for root, dirs, files in os.walk(tempdir):
                for name in files:
                    if os.path.splitext(name)[1] in expand_whitelist:
                        src = os.path.join(root, name)
                        if not os.path.isdir(src):
                            dest = extract_zip(src, remove=True, fatal=False)
                            if dest:
                                # Replace the archive file with a folder of
                                # the same name holding its contents.
                                copy_over(dest, src)
                                flag = True
            if not flag:
                break
    copy_over(tempdir, path)
def parse_xpi(xpi, addon=None):
    """Extract and parse an XPI.

    Extracts to a temp dir, parses install.rdf via Extractor, cleans up,
    and validates the result with check_rdf(). All parse failures are
    converted to forms.ValidationError.
    """
    # Extract to /tmp
    path = tempfile.mkdtemp()
    try:
        xpi = get_file(xpi)
        extract_xpi(xpi, path)
        rdf = Extractor.parse(path)
    except forms.ValidationError:
        raise
    except IOError as e:
        # Python 2 IOError unpacking: a bare message vs (errno, strerror).
        if len(e.args) < 2:
            errno, strerror = None, e[0]
        else:
            errno, strerror = e
        log.error('I/O error({0}): {1}'.format(errno, strerror))
        raise forms.ValidationError(_('Could not parse install.rdf.'))
    except Exception:
        log.error('XPI parse error', exc_info=True)
        raise forms.ValidationError(_('Could not parse install.rdf.'))
    finally:
        # Always remove the scratch directory, even on failure.
        rm_local_tmp_dir(path)
    return check_rdf(rdf, addon)
def check_rdf(rdf, addon=None):
    """Validate parsed install.rdf data (guid and version checks).

    Raises forms.ValidationError on any failure; returns ``rdf`` intact
    otherwise. When ``addon`` is given, the guid must match it.
    """
    from addons.models import Addon, BlacklistedGuid
    if not rdf['guid']:
        raise forms.ValidationError(_("Could not find a UUID."))
    if addon and addon.guid != rdf['guid']:
        raise forms.ValidationError(_("UUID doesn't match add-on."))
    # Operator precedence: (not addon AND guid-exists) OR blacklisted --
    # so the blacklist check applies even when updating an existing add-on.
    if (not addon
        and Addon.objects.filter(guid=rdf['guid']).exists()
        or BlacklistedGuid.objects.filter(guid=rdf['guid']).exists()):
        raise forms.ValidationError(_('Duplicate UUID found.'))
    if len(rdf['version']) > 32:
        raise forms.ValidationError(
            _('Version numbers should have fewer than 32 characters.'))
    if not VERSION_RE.match(rdf['version']):
        raise forms.ValidationError(
            _('Version numbers should only contain letters, numbers, '
              'and these punctuation characters: +*.-_.'))
    return rdf
def parse_addon(pkg, addon=None):
    """
    pkg is a filepath or a django.core.files.UploadedFile
    or files.models.FileUpload.
    """
    name = getattr(pkg, 'name', pkg)
    is_webapp = getattr(pkg, 'is_webapp', False)
    # Dispatch on the upload flavor: web app manifest/package, OpenSearch
    # XML, or a regular XPI.
    if is_webapp or name.endswith(('.webapp', '.json', '.zip')):
        parsed = WebAppParser().parse(pkg)
    elif name.endswith('.xml'):
        parsed = parse_search(pkg, addon)
    else:
        parsed = parse_xpi(pkg, addon)
    if addon and addon.type != parsed['type']:
        raise forms.ValidationError(_("<em:type> doesn't match add-on"))
    return parsed
def _get_hash(filename, block_size=2 ** 20, hash=hashlib.md5):
    """Return the hex digest of *filename* using the given hash factory.

    Args:
        filename: Path of the file to hash.
        block_size: Bytes read per iteration (default 1 MiB), so large
            files are hashed without loading them fully into memory.
        hash: Hash constructor (default hashlib.md5).

    Returns:
        The hexadecimal digest string.
    """
    hash_ = hash()
    # Use a context manager so the handle is always closed; the previous
    # implementation opened the file and never closed it.
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            hash_.update(data)
    return hash_.hexdigest()
def get_md5(filename, **kw):
    """Return the MD5 hex digest of *filename* (delegates to _get_hash)."""
    return _get_hash(filename, **kw)
def get_sha256(filename, **kw):
    """Return the SHA-256 hex digest of *filename* (delegates to _get_hash)."""
    return _get_hash(filename, hash=hashlib.sha256, **kw)
def find_jetpacks(minver, maxver, from_builder_only=False):
    """
    Find all jetpack files that aren't disabled.
    Files that should be upgraded will have needs_upgrade=True.
    Keyword Args
    from_builder_only=False
        If True, the jetpacks returned are only those that were created
        and packaged by the builder.
    """
    from .models import File
    statuses = amo.VALID_STATUSES
    files = (File.objects.filter(jetpack_version__isnull=False,
                                 version__addon__auto_repackage=True,
                                 version__addon__status__in=statuses,
                                 version__addon__disabled_by_user=False)
             .exclude(status=amo.STATUS_DISABLED).no_cache()
             .select_related('version'))
    if from_builder_only:
        files = files.exclude(builder_version=None)
    # Sort by (addon_id, version_id): groupby below relies on files for
    # the same add-on being adjacent, and on version order within each
    # group ("last" == newest).
    files = sorted(files, key=lambda f: (f.version.addon_id, f.version.id))
    # Figure out which files need to be upgraded.
    for file_ in files:
        file_.needs_upgrade = False
    # If any files for this add-on are reviewed, take the last reviewed file
    # plus all newer files. Otherwise, only upgrade the latest file.
    for _, fs in groupby(files, key=lambda f: f.version.addon_id):
        fs = list(fs)
        if any(f.status in amo.REVIEWED_STATUSES for f in fs):
            # Walk newest-to-oldest, stopping at the newest reviewed file
            # (inclusive) so it and everything after it get upgraded.
            for file_ in reversed(fs):
                file_.needs_upgrade = True
                if file_.status in amo.REVIEWED_STATUSES:
                    break
        else:
            fs[-1].needs_upgrade = True
    # Make sure only old files are marked.
    for file_ in [f for f in files if f.needs_upgrade]:
        if not (vint(minver) <= vint(file_.jetpack_version) < vint(maxver)):
            file_.needs_upgrade = False
    return files
class JetpackUpgrader(object):
    """A little manager for jetpack upgrade data in memcache.

    Keys under ``prefix`` track the target jetpack version range
    (jetpack_key), the upgrade version in progress (version_key), and a
    per-file-id map of upgrade state (file_key).
    """
    prefix = 'admin:jetpack:upgrade:'

    def __init__(self):
        self.version_key = self.prefix + 'version'
        self.file_key = self.prefix + 'files'
        self.jetpack_key = self.prefix + 'jetpack'

    def jetpack_versions(self, min_=None, max_=None):
        """Set the (min, max) range when both are given, else return it."""
        if None not in (min_, max_):
            d = {'min': min_, 'max': max_}
            return cache.set(self.jetpack_key, d)
        d = cache.get(self.jetpack_key, {})
        return d.get('min'), d.get('max')

    def version(self, val=None):
        """Set the upgrade version (cache.add: only if unset) or get it."""
        if val is not None:
            return cache.add(self.version_key, val)
        return cache.get(self.version_key)

    def files(self, val=None):
        """Merge ``val`` into the stored file map, or return the map."""
        if val is not None:
            current = cache.get(self.file_key, {})
            current.update(val)
            # BUG FIX: this previously stored ``val``, silently discarding
            # the merge with existing entries computed just above.
            return cache.set(self.file_key, current)
        return cache.get(self.file_key, {})

    def file(self, file_id, val=None):
        """Set or get the upgrade state for a single file id."""
        file_id = int(file_id)
        if val is not None:
            current = cache.get(self.file_key, {})
            current[file_id] = val
            cache.set(self.file_key, current)
            return val
        return cache.get(self.file_key, {}).get(file_id, {})

    def cancel(self):
        """Cancel a bulk upgrade: drop the version and bulk-owned files."""
        cache.delete(self.version_key)
        newfiles = dict([(k, v) for (k, v) in self.files().items()
                         if v.get('owner') != 'bulk'])
        cache.set(self.file_key, newfiles)

    def finish(self, file_id):
        """Drop one file from the map; clear the version when none remain."""
        file_id = int(file_id)
        newfiles = dict([(k, v) for (k, v) in self.files().items()
                         if k != file_id])
        cache.set(self.file_key, newfiles)
        if not newfiles:
            cache.delete(self.version_key)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import uuid
import eventlet
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import legacy
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import service
from neutron.openstack.common import uuidutils
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
# Network namespaces created by this agent are named NS_PREFIX + network id.
NS_PREFIX = 'qdhcp-'
# Link-local address (with /16 prefix) configured inside DHCP namespaces so
# isolated networks can reach the metadata proxy at the well-known address.
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254/%d' % METADATA_DEFAULT_PREFIX
METADATA_PORT = 80
class DhcpAgent(manager.Manager):
    """DHCP agent: runs one DHCP driver instance per active network.

    Notification handlers keep the local drivers and the NetworkCache in
    sync with the Neutron server. Any failed operation sets
    ``self.needs_resync`` so the periodic resync loop can recover by
    re-reading state from the server.
    """
    OPTS = [
        cfg.IntOpt('resync_interval', default=5,
                   help=_("Interval to resync.")),
        cfg.StrOpt('dhcp_driver',
                   default='neutron.agent.linux.dhcp.Dnsmasq',
                   help=_("The driver used to manage the DHCP server.")),
        cfg.BoolOpt('use_namespaces', default=True,
                    help=_("Allow overlapping IP.")),
        cfg.BoolOpt('enable_isolated_metadata', default=False,
                    help=_("Support Metadata requests on isolated networks.")),
        cfg.BoolOpt('enable_metadata_network', default=False,
                    help=_("Allows for serving metadata requests from a "
                           "dedicated network. Requires "
                           "enable_isolated_metadata = True")),
        cfg.IntOpt('num_sync_threads', default=4,
                   help=_('Number of threads to use during sync process.')),
    ]
    def __init__(self, host=None):
        """Build collaborators: cache, driver class, RPC proxy, relay."""
        super(DhcpAgent, self).__init__(host=host)
        self.needs_resync = False
        self.conf = cfg.CONF
        self.cache = NetworkCache()
        self.root_helper = config.get_root_helper(self.conf)
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        ctx = context.get_admin_context_without_session()
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx)
        self.device_manager = DeviceManager(self.conf, self.plugin_rpc)
        # The relay forwards dnsmasq lease updates to update_lease().
        self.lease_relay = DhcpLeaseRelay(self.update_lease)
        self.dhcp_version = self.dhcp_driver_cls.check_version()
        self._populate_networks_cache()
    def _populate_networks_cache(self):
        """Populate the networks cache when the DHCP-agent starts."""
        try:
            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
                self.conf,
                self.root_helper
            )
            for net_id in existing_networks:
                # Subnets/ports are unknown at this point; a later sync or
                # notification fills them in via cache.put().
                net = DictModel({"id": net_id, "subnets": [], "ports": []})
                self.cache.put(net)
        except NotImplementedError:
            # just go ahead with an empty networks cache
            LOG.debug(
                _("The '%s' DHCP-driver does not support retrieving of a "
                  "list of existing networks"),
                self.conf.dhcp_driver
            )
    def after_start(self):
        self.run()
        LOG.info(_("DHCP agent started"))
    def run(self):
        """Activate the DHCP agent."""
        self.sync_state()
        self.periodic_resync()
        self.lease_relay.start()
    def _ns_name(self, network):
        # Returns None (implicitly) when namespaces are disabled.
        if self.conf.use_namespaces:
            return NS_PREFIX + network.id
    def call_driver(self, action, network):
        """Invoke an action on a DHCP driver instance.

        Returns True on success; on failure logs, flags a resync and
        returns None.
        """
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf,
                                          network,
                                          self.root_helper,
                                          self.device_manager,
                                          self._ns_name(network),
                                          self.dhcp_version)
            getattr(driver, action)()
            return True
        except Exception:
            self.needs_resync = True
            LOG.exception(_('Unable to %s dhcp.'), action)
    def update_lease(self, network_id, ip_address, time_remaining):
        """Forward a lease update from the relay to the plugin via RPC."""
        try:
            self.plugin_rpc.update_lease_expiration(network_id, ip_address,
                                                    time_remaining)
        except Exception:
            self.needs_resync = True
            LOG.exception(_('Unable to update lease'))
    def sync_state(self):
        """Sync the local DHCP state with Neutron."""
        LOG.info(_('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())
        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            # Tear down networks we know about that are no longer active.
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception:
                    self.needs_resync = True
                    LOG.exception(_('Unable to sync network state on deleted '
                                    'network %s') % deleted_id)
            # Configure active networks concurrently on the green pool.
            for network in active_networks:
                pool.spawn_n(self.configure_dhcp_for_network, network)
        except Exception:
            self.needs_resync = True
            LOG.exception(_('Unable to sync network state.'))
    def _periodic_resync_helper(self):
        """Resync the dhcp state at the configured interval."""
        while True:
            eventlet.sleep(self.conf.resync_interval)
            if self.needs_resync:
                # Clear the flag before syncing; a failure during sync
                # will set it again.
                self.needs_resync = False
                self.sync_state()
    def periodic_resync(self):
        """Spawn a thread to periodically resync the dhcp state."""
        eventlet.spawn(self._periodic_resync_helper)
    def enable_dhcp_helper(self, network_id):
        """Enable DHCP for a network that meets enabling criteria."""
        try:
            network = self.plugin_rpc.get_network_info(network_id)
        except Exception:
            self.needs_resync = True
            LOG.exception(_('Network %s RPC info call failed.'), network_id)
            return
        self.configure_dhcp_for_network(network)
    def configure_dhcp_for_network(self, network):
        """Start DHCP for a network if it is up and has a DHCP subnet."""
        if not network.admin_state_up:
            return
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                if self.call_driver('enable', network):
                    if (self.conf.use_namespaces and
                        self.conf.enable_isolated_metadata):
                        self.enable_isolated_metadata_proxy(network)
                    self.cache.put(network)
                # One dhcp-enabled subnet is enough to decide; stop here.
                break
    def disable_dhcp_helper(self, network_id):
        """Disable DHCP for a network known to the agent."""
        network = self.cache.get_network_by_id(network_id)
        if network:
            if (self.conf.use_namespaces and
                self.conf.enable_isolated_metadata):
                self.disable_isolated_metadata_proxy(network)
            if self.call_driver('disable', network):
                self.cache.remove(network)
    def refresh_dhcp_helper(self, network_id):
        """Refresh or disable DHCP for a network depending on the current state
        of the network.
        """
        old_network = self.cache.get_network_by_id(network_id)
        if not old_network:
            # DHCP current not running for network.
            return self.enable_dhcp_helper(network_id)
        try:
            network = self.plugin_rpc.get_network_info(network_id)
        except Exception:
            self.needs_resync = True
            LOG.exception(_('Network %s RPC info call failed.'), network_id)
            return
        old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
        new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
        if new_cidrs and old_cidrs == new_cidrs:
            # Same subnets: just reload allocations in the driver.
            self.call_driver('reload_allocations', network)
            self.cache.put(network)
        elif new_cidrs:
            # Subnets changed: full driver restart.
            if self.call_driver('restart', network):
                self.cache.put(network)
        else:
            # No dhcp-enabled subnets remain.
            self.disable_dhcp_helper(network.id)
        if new_cidrs:
            self.device_manager.update(network)
    @utils.synchronized('dhcp-agent')
    def network_create_end(self, context, payload):
        """Handle the network.create.end notification event."""
        network_id = payload['network']['id']
        self.enable_dhcp_helper(network_id)
    @utils.synchronized('dhcp-agent')
    def network_update_end(self, context, payload):
        """Handle the network.update.end notification event."""
        network_id = payload['network']['id']
        if payload['network']['admin_state_up']:
            self.enable_dhcp_helper(network_id)
        else:
            self.disable_dhcp_helper(network_id)
    @utils.synchronized('dhcp-agent')
    def network_delete_end(self, context, payload):
        """Handle the network.delete.end notification event."""
        self.disable_dhcp_helper(payload['network_id'])
    @utils.synchronized('dhcp-agent')
    def subnet_update_end(self, context, payload):
        """Handle the subnet.update.end notification event."""
        network_id = payload['subnet']['network_id']
        self.refresh_dhcp_helper(network_id)
    # Use the update handler for the subnet create event.
    subnet_create_end = subnet_update_end
    @utils.synchronized('dhcp-agent')
    def subnet_delete_end(self, context, payload):
        """Handle the subnet.delete.end notification event."""
        subnet_id = payload['subnet_id']
        network = self.cache.get_network_by_subnet_id(subnet_id)
        if network:
            self.refresh_dhcp_helper(network.id)
    @utils.synchronized('dhcp-agent')
    def port_update_end(self, context, payload):
        """Handle the port.update.end notification event."""
        port = DictModel(payload['port'])
        network = self.cache.get_network_by_id(port.network_id)
        if network:
            self.cache.put_port(port)
            self.call_driver('reload_allocations', network)
    # Use the update handler for the port create event.
    port_create_end = port_update_end
    @utils.synchronized('dhcp-agent')
    def port_delete_end(self, context, payload):
        """Handle the port.delete.end notification event."""
        port = self.cache.get_port_by_id(payload['port_id'])
        if port:
            network = self.cache.get_network_by_id(port.network_id)
            self.cache.remove_port(port)
            self.call_driver('reload_allocations', network)
    def enable_isolated_metadata_proxy(self, network):
        """Start a neutron-ns-metadata-proxy process for this network."""
        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        neutron_lookup_param = '--network_id=%s' % network.id
        meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
        has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr
                                  for s in network.subnets)
        if (self.conf.enable_metadata_network and has_metadata_subnet):
            router_ports = [port for port in network.ports
                            if (port.device_owner ==
                                constants.DEVICE_OWNER_ROUTER_INTF)]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(_("%(port_num)d router ports found on the "
                                  "metadata access network. Only the port "
                                  "%(port_id)s, for router %(router_id)s "
                                  "will be considered"),
                                {'port_num': len(router_ports),
                                 'port_id': router_ports[0].id,
                                 'router_id': router_ports[0].device_id})
                neutron_lookup_param = ('--router_id=%s' %
                                        router_ports[0].device_id)
        def callback(pid_file):
            # Build the proxy command line once the ProcessManager knows
            # the pid file location.
            proxy_cmd = ['neutron-ns-metadata-proxy',
                         '--pid_file=%s' % pid_file,
                         neutron_lookup_param,
                         '--state_path=%s' % self.conf.state_path,
                         '--metadata_port=%d' % METADATA_PORT]
            proxy_cmd.extend(config.get_log_args(
                cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id))
            return proxy_cmd
        pm = external_process.ProcessManager(
            self.conf,
            network.id,
            self.root_helper,
            self._ns_name(network))
        pm.enable(callback)
    def disable_isolated_metadata_proxy(self, network):
        """Stop the neutron-ns-metadata-proxy process for this network."""
        pm = external_process.ProcessManager(
            self.conf,
            network.id,
            self.root_helper,
            self._ns_name(network))
        pm.disable()
class DhcpPluginApi(proxy.RpcProxy):
    """Agent side of the dhcp rpc API.
    API version history:
        1.0 - Initial version.
        1.1 - Added get_active_networks_info, create_dhcp_port,
              and update_dhcp_port methods.
    """
    BASE_RPC_API_VERSION = '1.1'
    def __init__(self, topic, context):
        # The admin context and this agent's host are attached to every
        # RPC message below.
        super(DhcpPluginApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.context = context
        self.host = cfg.CONF.host
    def get_active_networks_info(self):
        """Make a remote process call to retrieve all network info."""
        networks = self.call(self.context,
                             self.make_msg('get_active_networks_info',
                                           host=self.host),
                             topic=self.topic)
        # Wrap plain dicts so callers get attribute-style access.
        return [DictModel(n) for n in networks]
    def get_network_info(self, network_id):
        """Make a remote process call to retrieve network info."""
        return DictModel(self.call(self.context,
                                   self.make_msg('get_network_info',
                                                 network_id=network_id,
                                                 host=self.host),
                                   topic=self.topic))
    def get_dhcp_port(self, network_id, device_id):
        """Make a remote process call to get the dhcp port."""
        return DictModel(self.call(self.context,
                                   self.make_msg('get_dhcp_port',
                                                 network_id=network_id,
                                                 device_id=device_id,
                                                 host=self.host),
                                   topic=self.topic))
    def create_dhcp_port(self, port):
        """Make a remote process call to create the dhcp port."""
        return DictModel(self.call(self.context,
                                   self.make_msg('create_dhcp_port',
                                                 port=port,
                                                 host=self.host),
                                   topic=self.topic))
    def update_dhcp_port(self, port_id, port):
        """Make a remote process call to update the dhcp port."""
        return DictModel(self.call(self.context,
                                   self.make_msg('update_dhcp_port',
                                                 port_id=port_id,
                                                 port=port,
                                                 host=self.host),
                                   topic=self.topic))
    def release_dhcp_port(self, network_id, device_id):
        """Make a remote process call to release the dhcp port."""
        return self.call(self.context,
                         self.make_msg('release_dhcp_port',
                                       network_id=network_id,
                                       device_id=device_id,
                                       host=self.host),
                         topic=self.topic)
    def release_port_fixed_ip(self, network_id, device_id, subnet_id):
        """Make a remote process call to release a fixed_ip on the port."""
        return self.call(self.context,
                         self.make_msg('release_port_fixed_ip',
                                       network_id=network_id,
                                       subnet_id=subnet_id,
                                       device_id=device_id,
                                       host=self.host),
                         topic=self.topic)
    def update_lease_expiration(self, network_id, ip_address, lease_remaining):
        """Make a remote process call to update the ip lease expiration."""
        # cast (fire-and-forget), not call: lease updates need no reply.
        self.cast(self.context,
                  self.make_msg('update_lease_expiration',
                                network_id=network_id,
                                ip_address=ip_address,
                                lease_remaining=lease_remaining,
                                host=self.host),
                  topic=self.topic)
class NetworkCache(object):
    """Agent cache of the current network state.

    Networks are stored by id; two reverse maps resolve a subnet id or a
    port id back to the id of the owning network.
    """

    def __init__(self):
        self.cache = {}
        self.subnet_lookup = {}
        self.port_lookup = {}

    def get_network_ids(self):
        return self.cache.keys()

    def get_network_by_id(self, network_id):
        return self.cache.get(network_id)

    def get_network_by_subnet_id(self, subnet_id):
        return self.cache.get(self.subnet_lookup.get(subnet_id))

    def get_network_by_port_id(self, port_id):
        return self.cache.get(self.port_lookup.get(port_id))

    def put(self, network):
        """Insert or replace a network, rebuilding its reverse lookups."""
        stale = self.cache.get(network.id)
        if stale is not None:
            self.remove(stale)
        self.cache[network.id] = network
        self.subnet_lookup.update(
            (subnet.id, network.id) for subnet in network.subnets)
        self.port_lookup.update(
            (port.id, network.id) for port in network.ports)

    def remove(self, network):
        """Drop a network and all of its reverse-lookup entries."""
        del self.cache[network.id]
        for subnet in network.subnets:
            self.subnet_lookup.pop(subnet.id)
        for port in network.ports:
            self.port_lookup.pop(port.id)

    def put_port(self, port):
        """Insert or replace a port on its (already cached) network."""
        network = self.get_network_by_id(port.network_id)
        for index, existing in enumerate(network.ports):
            if existing.id == port.id:
                network.ports[index] = port
                break
        else:
            network.ports.append(port)
        self.port_lookup[port.id] = network.id

    def remove_port(self, port):
        """Remove a port object from its network and the lookup map."""
        network = self.get_network_by_port_id(port.id)
        for index, existing in enumerate(network.ports):
            if existing == port:
                del network.ports[index]
                del self.port_lookup[port.id]
                break

    def get_port_by_id(self, port_id):
        network = self.get_network_by_port_id(port_id)
        if not network:
            return None
        for candidate in network.ports:
            if candidate.id == port_id:
                return candidate

    def get_state(self):
        """Return counts of cached networks, subnets and ports."""
        networks = [self.get_network_by_id(net_id)
                    for net_id in self.get_network_ids()]
        return {'networks': len(networks),
                'subnets': sum(len(net.subnets) for net in networks),
                'ports': sum(len(net.ports) for net in networks)}
class DeviceManager(object):
    """Creates, configures and destroys the host's DHCP port/interface."""
    OPTS = [
        cfg.StrOpt('interface_driver',
                   help=_("The driver used to manage the virtual interface."))
    ]
    def __init__(self, conf, plugin):
        """Load the configured interface driver; exit on misconfiguration."""
        self.conf = conf
        self.root_helper = config.get_root_helper(conf)
        self.plugin = plugin
        if not conf.interface_driver:
            raise SystemExit(_('You must specify an interface driver'))
        try:
            self.driver = importutils.import_object(
                conf.interface_driver, conf
            )
        except Exception:
            msg = _("Error importing interface driver "
                    "'%s'") % conf.interface_driver
            raise SystemExit(msg)
    def get_interface_name(self, network, port):
        """Return interface(device) name for use by the DHCP process."""
        return self.driver.get_device_name(port)
    def get_device_id(self, network):
        """Return a unique DHCP device ID for this host on the network."""
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())
        return 'dhcp%s-%s' % (host_uuid, network.id)
    def _get_device(self, network):
        """Return DHCP ip_lib device for this host on the network."""
        device_id = self.get_device_id(network)
        port = self.plugin.get_dhcp_port(network.id, device_id)
        interface_name = self.get_interface_name(network, port)
        namespace = NS_PREFIX + network.id
        return ip_lib.IPDevice(interface_name,
                               self.root_helper,
                               namespace)
    def _set_default_route(self, network):
        """Sets the default gateway for this dhcp namespace.
        This method is idempotent and will only adjust the route if adjusting
        it would change it from what it already is. This makes it safe to call
        and avoids unnecessary perturbation of the system.
        """
        device = self._get_device(network)
        gateway = device.route.get_gateway()
        if gateway:
            gateway = gateway['gateway']
        for subnet in network.subnets:
            # Only IPv4, dhcp-enabled subnets with a gateway are candidates.
            skip_subnet = (
                subnet.ip_version != 4
                or not subnet.enable_dhcp
                or subnet.gateway_ip is None)
            if skip_subnet:
                continue
            if gateway != subnet.gateway_ip:
                m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
                LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})
                device.route.add_gateway(subnet.gateway_ip)
            # First qualifying subnet wins.
            return
        # No subnets on the network have a valid gateway. Clean it up to avoid
        # confusion from seeing an invalid gateway here.
        if gateway is not None:
            msg = _('Removing gateway for dhcp netns on net %s')
            LOG.debug(msg, network.id)
            device.route.delete_gateway(gateway)
    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port."""
        device_id = self.get_device_id(network)
        subnets = {}
        dhcp_enabled_subnet_ids = []
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                dhcp_enabled_subnet_ids.append(subnet.id)
                subnets[subnet.id] = subnet
        dhcp_port = None
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == device_id:
                port_fixed_ips = []
                for fixed_ip in port.fixed_ips:
                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
                                           'ip_address': fixed_ip.ip_address})
                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
                # If there are dhcp_enabled_subnet_ids here that means that
                # we need to add those to the port and call update.
                if dhcp_enabled_subnet_ids:
                    port_fixed_ips.extend(
                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'fixed_ips': port_fixed_ips}})
                else:
                    dhcp_port = port
                # break since we found port that matches device_id
                break
        # DHCP port has not yet been created.
        if dhcp_port is None:
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
                        ' does not yet exist.'), {'device_id': device_id,
                                                  'network_id': network.id})
            port_dict = dict(
                name='',
                admin_state_up=True,
                device_id=device_id,
                network_id=network.id,
                tenant_id=network.tenant_id,
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]
        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips
        return dhcp_port
    def setup(self, network, reuse_existing=False):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        interface_name = self.get_interface_name(network, port)
        if self.conf.use_namespaces:
            namespace = NS_PREFIX + network.id
        else:
            namespace = None
        if ip_lib.device_exists(interface_name,
                                self.root_helper,
                                namespace):
            if not reuse_existing:
                raise exceptions.PreexistingDeviceFailure(
                    dev_name=interface_name)
            LOG.debug(_('Reusing existing device: %s.'), interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             namespace=namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)
        if (self.conf.enable_isolated_metadata and
            self.conf.use_namespaces):
            # Also configure the metadata address on the DHCP interface.
            ip_cidrs.append(METADATA_DEFAULT_IP)
        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=namespace)
        # ensure that the dhcp interface is first in the list
        if namespace is None:
            device = ip_lib.IPDevice(interface_name,
                                     self.root_helper)
            device.route.pullup_route(interface_name)
        if self.conf.use_namespaces:
            self._set_default_route(network)
        return interface_name
    def update(self, network):
        """Update device settings for the network's DHCP on this host."""
        if self.conf.use_namespaces:
            self._set_default_route(network)
    def destroy(self, network, device_name):
        """Destroy the device used for the network's DHCP on this host."""
        if self.conf.use_namespaces:
            namespace = NS_PREFIX + network.id
        else:
            namespace = None
        self.driver.unplug(device_name, namespace=namespace)
        self.plugin.release_dhcp_port(network.id,
                                      self.get_device_id(network))
class DictModel(object):
    """Convert dict into an object that provides attribute access to values."""
    def __init__(self, d):
        # NOTE: iteritems() is Python 2 syntax, consistent with the rest
        # of this module.
        for key, value in d.iteritems():
            if isinstance(value, list):
                # Recursively wrap any dicts found inside lists too.
                value = [DictModel(item) if isinstance(item, dict) else item
                         for item in value]
            elif isinstance(value, dict):
                value = DictModel(value)
            setattr(self, key, value)
class DhcpLeaseRelay(object):
    """UNIX domain socket server for processing lease updates.
    Network namespace isolation prevents the DHCP process from notifying
    Neutron directly. This class works around the limitation by using the
    domain socket to pass the information. This class handles message.
    receiving and then calls the callback method.
    """
    OPTS = [
        cfg.StrOpt('dhcp_lease_relay_socket',
                   default='$state_path/dhcp/lease_relay',
                   help=_('Location to DHCP lease relay UNIX domain socket'))
    ]
    def __init__(self, lease_update_callback):
        """Remove any stale socket file (or create its parent directory)."""
        self.callback = lease_update_callback
        dirname = os.path.dirname(cfg.CONF.dhcp_lease_relay_socket)
        if os.path.isdir(dirname):
            try:
                os.unlink(cfg.CONF.dhcp_lease_relay_socket)
            except OSError:
                # Ignore "did not exist"; re-raise if the file is still there.
                if os.path.exists(cfg.CONF.dhcp_lease_relay_socket):
                    raise
        else:
            os.makedirs(dirname, 0o755)
    def _handler(self, client_sock, client_addr):
        """Handle incoming lease relay stream connection.
        This method will only read the first 1024 bytes and then close the
        connection. The limit exists to limit the impact of misbehaving
        clients.
        """
        try:
            msg = client_sock.recv(1024)
            data = jsonutils.loads(msg)
            client_sock.close()
            network_id = data['network_id']
            if not uuidutils.is_uuid_like(network_id):
                raise ValueError(_("Network ID %s is not a valid UUID") %
                                 network_id)
            # Normalize/validate the payload before invoking the callback.
            ip_address = str(netaddr.IPAddress(data['ip_address']))
            lease_remaining = int(data['lease_remaining'])
            self.callback(network_id, ip_address, lease_remaining)
        except ValueError as e:
            LOG.warn(_('Unable to parse lease relay msg to dict.'))
            LOG.warn(_('Exception value: %s'), e)
            LOG.warn(_('Message representation: %s'), repr(msg))
        except Exception as e:
            LOG.exception(_('Unable update lease. Exception'))
    def start(self):
        """Spawn a green thread to run the lease relay unix socket server."""
        listener = eventlet.listen(cfg.CONF.dhcp_lease_relay_socket,
                                   family=socket.AF_UNIX)
        eventlet.spawn(eventlet.serve, listener, self._handler)
class DhcpAgentWithStateReport(DhcpAgent):
    """DhcpAgent that periodically reports its state to the Neutron server."""
    def __init__(self, host=None):
        super(DhcpAgentWithStateReport, self).__init__(host=host)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-dhcp-agent',
            'host': host,
            'topic': topics.DHCP_AGENT,
            'configurations': {
                'dhcp_driver': cfg.CONF.dhcp_driver,
                'use_namespaces': cfg.CONF.use_namespaces,
                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_DHCP}
        report_interval = cfg.CONF.AGENT.report_interval
        # The first report uses call (synchronous); later ones use cast.
        self.use_call = True
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)
    def _report_state(self):
        """Send agent state (with current cache counts) to the server."""
        try:
            self.agent_state.get('configurations').update(
                self.cache.get_state())
            ctx = context.get_admin_context_without_session()
            self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
            self.use_call = False
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_("Neutron server does not support state report."
                       " State report for this agent will be disabled."))
            self.heartbeat.stop()
            self.run()
            return
        except Exception:
            LOG.exception(_("Failed reporting state!"))
            return
        # Start the agent only after the first successful report.
        if self.agent_state.pop('start_flag', None):
            self.run()
    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.needs_resync = True
        LOG.info(_("agent_updated by server side %s!"), payload)
    def after_start(self):
        # Unlike the base class, run() is deferred until the first
        # successful state report (see _report_state).
        LOG.info(_("DHCP agent started"))
def register_options():
    """Register all config options used by the DHCP agent with oslo.config."""
    cfg.CONF.register_opts(DhcpAgent.OPTS)
    config.register_agent_state_opts_helper(cfg.CONF)
    config.register_root_helper(cfg.CONF)
    cfg.CONF.register_opts(DeviceManager.OPTS)
    cfg.CONF.register_opts(DhcpLeaseRelay.OPTS)
    cfg.CONF.register_opts(dhcp.OPTS)
    cfg.CONF.register_opts(interface.OPTS)
def main():
    """Entry point: parse config, set up logging, launch the agent service."""
    eventlet.monkey_patch()
    register_options()
    cfg.CONF(project='neutron')
    config.setup_logging(cfg.CONF)
    legacy.modernize_quantum_config(cfg.CONF)
    server = neutron_service.Service.create(
        binary='neutron-dhcp-agent',
        topic=topics.DHCP_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport')
    service.launch(server).wait()
| |
"""TcEx Framework"""
# standard library
import inspect
import logging
import os
import platform
import signal
import threading
from typing import TYPE_CHECKING, Dict, Optional, Union
# third-party
from requests import Session
# first-party
from tcex.api.tc.utils.threat_intel_utils import ThreatIntelUtils
from tcex.api.tc.v2.v2 import V2
from tcex.api.tc.v3.v3 import V3
from tcex.app_config.install_json import InstallJson
from tcex.app_feature import AdvancedRequest
from tcex.backports import cached_property
from tcex.exit.exit import ExitCode, ExitService
from tcex.input.input import Input
from tcex.key_value_store import KeyValueApi, KeyValueRedis, RedisClient
from tcex.logger.logger import Logger # pylint: disable=no-name-in-module
from tcex.logger.trace_logger import TraceLogger # pylint: disable=no-name-in-module
from tcex.playbook import Playbook
from tcex.pleb.proxies import proxies
from tcex.pleb.registry import registry
from tcex.pleb.scoped_property import scoped_property
from tcex.services.api_service import ApiService
from tcex.services.common_service_trigger import CommonServiceTrigger
from tcex.services.webhook_trigger_service import WebhookTriggerService
from tcex.sessions.auth.tc_auth import TcAuth
from tcex.sessions.external_session import ExternalSession
from tcex.sessions.tc_session import TcSession
from tcex.tokens import Tokens
from tcex.utils import Utils
from tcex.utils.file_operations import FileOperations
if TYPE_CHECKING:
# first-party
from tcex.sessions.auth.hmac_auth import HmacAuth
from tcex.sessions.auth.token_auth import TokenAuth
class TcEx:
"""Provides basic functionality for all types of TxEx Apps.
Args:
config (dict, kwargs): A dictionary containing configuration items typically used by
external Apps.
config_file (str, kwargs): A filename containing JSON configuration items typically used
by external Apps.
"""
    def __init__(self, **kwargs):
        """Initialize Class Properties."""
        # catch interrupt signals specifically based on thread name
        signal.signal(signal.SIGINT, self._signal_handler)
        # SIGHUP does not exist on Windows.
        if platform.system() != 'Windows':
            signal.signal(signal.SIGHUP, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)
        # Property defaults
        self._config: dict = kwargs.get('config') or {}
        self._log = None
        self._jobs = None
        self._redis_client = None
        self._service = None
        self.ij = InstallJson()
        self.main_os_pid = os.getpid()
        # init inputs
        self.inputs = Input(self._config, kwargs.get('config_file'))
        # add methods to registry
        registry.add_method(self.inputs.resolve_variable)
        # register this instance and the inputs service with the registry
        registry.register(self)
        registry.add_service(Input, self.inputs)
        # log standard App info early so it shows at the top of the logfile
        self.logger.log_info(self.inputs.model_unresolved)
def _signal_handler(self, signal_interrupt: int, _) -> None:
"""Handle signal interrupt."""
call_file: str = os.path.basename(inspect.stack()[1][0].f_code.co_filename)
call_module: str = inspect.stack()[1][0].f_globals['__name__'].lstrip('Functions.')
call_line: int = inspect.stack()[1][0].f_lineno
self.log.error(
f'App interrupted - file: {call_file}, method: {call_module}, line: {call_line}.'
)
exit_code = ExitCode.SUCCESS
if threading.current_thread().name == 'MainThread' and signal_interrupt in (2, 15):
exit_code = ExitCode.FAILURE
self.exit(exit_code, 'The App received an interrupt signal and will now exit.')
@property
def _user_agent(self):
"""Return a User-Agent string."""
return {
'User-Agent': (
f'TcEx/{__import__(__name__).__version__}, '
f'{self.ij.model.display_name}/{self.ij.model.program_version}'
)
}
def advanced_request(
self,
session: Session,
output_prefix: str,
timeout: Optional[int] = 600,
) -> AdvancedRequest:
"""Return instance of AdvancedRequest.
Args:
session: An instance of requests.Session.
output_prefix: A value to prepend to outputs.
timeout: The number of second before timing out the request.
"""
return AdvancedRequest(self.inputs, self.playbook, session, output_prefix, timeout)
def exit(self, code: Optional[ExitCode] = None, msg: Optional[str] = None) -> None:
"""Application exit method with proper exit code
The method will run the Python standard sys.exit() with the exit code
previously defined via :py:meth:`~tcex.tcex.TcEx.exit_code` or provided
during the call of this method.
Args:
code: The exit code value for the app.
msg: A message to log and add to message tc output.
"""
# get correct code
self.exit_service.exit(code, msg) # pylint: disable=no-member
@property
def exit_code(self) -> ExitCode:
"""Return the current exit code."""
return self.exit_service.exit_code # pylint: disable=no-member
@exit_code.setter
def exit_code(self, code: ExitCode) -> None:
"""Set the App exit code.
For TC Exchange Apps there are 3 supported exit codes.
* 0 indicates a normal exit
* 1 indicates a failure during execution
* 3 indicates a partial failure
Args:
code (int): The exit code value for the app.
"""
self.exit_service.exit_code = code
@registry.factory(ExitService)
@scoped_property
def exit_service(self) -> ExitService:
"""Return an ExitService object."""
# TODO: [high] @cblades - inputs being required for exit prevents AOT from exiting
return self.get_exit_service(self.inputs)
@cached_property
def file_operations(self) -> 'FileOperations': # pylint: disable=no-self-use
"""Include the Utils module."""
return FileOperations(temp_path=self.inputs.model_unresolved.tc_temp_path)
@staticmethod
def get_exit_service(inputs) -> ExitService:
"""Create an ExitService object."""
return ExitService(inputs)
def get_playbook(
self, context: Optional[str] = None, output_variables: Optional[list] = None
) -> Playbook:
"""Return a new instance of playbook module.
Args:
context: The KV Store context/session_id. For PB Apps the context is provided on
startup, but for service Apps each request gets a different context.
output_variables: The requested output variables. For PB Apps outputs are provided on
startup, but for service Apps each request gets different outputs.
"""
return Playbook(self.key_value_store, context, output_variables)
@staticmethod
def get_redis_client(
host: str, port: int, db: int = 0, blocking_pool: bool = False, **kwargs
) -> RedisClient:
"""Return a *new* instance of Redis client.
For a full list of kwargs see https://redis-py.readthedocs.io/en/latest/#redis.Connection.
Args:
host: The REDIS host. Defaults to localhost.
port: The REDIS port. Defaults to 6379.
db: The REDIS db. Defaults to 0.
blocking_pool: Use BlockingConnectionPool instead of ConnectionPool.
errors (str, kwargs): The REDIS errors policy (e.g. strict).
max_connections (int, kwargs): The maximum number of connections to REDIS.
password (str, kwargs): The REDIS password.
socket_timeout (int, kwargs): The REDIS socket timeout.
timeout (int, kwargs): The REDIS Blocking Connection Pool timeout value.
Returns:
Redis.client: An instance of redis client.
"""
return RedisClient(
host=host, port=port, db=db, blocking_pool=blocking_pool, **kwargs
).client
def get_session_tc(
self,
auth: Optional[Union['HmacAuth', 'TokenAuth', 'TcAuth']] = None,
base_url: Optional[str] = None,
log_curl: Optional[bool] = False,
proxies: Optional[Dict[str, str]] = None, # pylint: disable=redefined-outer-name
proxies_enabled: Optional[bool] = False,
verify: Optional[Union[bool, str]] = True,
) -> TcSession:
"""Return an instance of Requests Session configured for the ThreatConnect API.
No args are required to get a working instance of TC Session instance.
This method allows for getting a new instance of TC Session instance. This can be
very useful when connecting between multiple TC instances (e.g., migrating data).
"""
auth = auth or TcAuth(
tc_api_access_id=self.inputs.model_unresolved.tc_api_access_id,
tc_api_secret_key=self.inputs.model_unresolved.tc_api_secret_key,
tc_token=self.token,
)
return TcSession(
auth=auth,
base_url=base_url or self.inputs.model_unresolved.tc_api_path,
log_curl=log_curl or self.inputs.model_unresolved.tc_log_curl,
proxies=proxies or self.proxies,
proxies_enabled=proxies_enabled or self.inputs.model_unresolved.tc_proxy_tc,
user_agent=self._user_agent,
verify=verify or self.inputs.model_unresolved.tc_verify,
)
def get_session_external(self) -> ExternalSession:
"""Return an instance of Requests Session configured for the ThreatConnect API."""
_session_external = ExternalSession(logger=self.log)
# add User-Agent to headers
_session_external.headers.update(self._user_agent)
# add proxy support if requested
if self.inputs.model_unresolved.tc_proxy_external:
_session_external.proxies = self.proxies
self.log.info(
f'Using proxy host {self.inputs.model_unresolved.tc_proxy_host}:'
f'{self.inputs.model_unresolved.tc_proxy_port} for external session.'
)
if self.inputs.model_unresolved.tc_log_curl:
_session_external.log_curl = True
return _session_external
# def get_ti(self) -> ThreatIntelligence:
# """Include the Threat Intel Module."""
# return ThreatIntelligence(session=self.get_session_tc())
@registry.factory('KeyValueStore')
@scoped_property
def key_value_store(self) -> Union[KeyValueApi, KeyValueRedis]:
"""Return the correct KV store for this execution.
The TCKeyValueAPI KV store is limited to two operations (create and read),
while the Redis kvstore wraps a few other Redis methods.
"""
if self.inputs.model_unresolved.tc_kvstore_type == 'Redis':
return KeyValueRedis(self.redis_client)
if self.inputs.model_unresolved.tc_kvstore_type == 'TCKeyValueAPI':
return KeyValueApi(self.session_tc)
raise RuntimeError(
f'Invalid KV Store Type: ({self.inputs.model_unresolved.tc_kvstore_type})'
)
@property
def log(self) -> TraceLogger:
"""Return a valid logger."""
if self._log is None:
self._log = self.logger.log
return self._log
@log.setter
def log(self, log: object) -> None:
"""Return a valid logger."""
if isinstance(log, logging.Logger):
self._log = log
@cached_property
def logger(self) -> Logger:
"""Return logger."""
_logger = Logger(logger_name='tcex')
# set logger to prevent recursion issue on get_session_tc
self._log = _logger.log
# add api handler
if (
self.inputs.contents.get('tc_token') is not None
and self.inputs.contents.get('tc_log_to_api') is True
):
_logger.add_api_handler(
session_tc=self.get_session_tc(), level=self.inputs.model_unresolved.tc_log_level
)
# add rotating log handler
_logger.add_rotating_file_handler(
name='rfh',
filename=self.inputs.model_unresolved.tc_log_file,
path=self.inputs.model_unresolved.tc_log_path,
backup_count=self.inputs.model_unresolved.tc_log_backup_count,
max_bytes=self.inputs.model_unresolved.tc_log_max_bytes,
level=self.inputs.model_unresolved.tc_log_level,
)
# set logging level
_logger.update_handler_level(level=self.inputs.model_unresolved.tc_log_level)
_logger.log.setLevel(_logger.log_level(self.inputs.model_unresolved.tc_log_level))
# replay cached log events
_logger.replay_cached_events(handler_name='cache')
return _logger
@registry.factory(Playbook)
@scoped_property
def playbook(self) -> 'Playbook':
"""Return an instance of Playbooks module.
This property defaults context and outputvariables to arg values.
Returns:
tcex.playbook.Playbooks: An instance of Playbooks
"""
return self.get_playbook(
context=self.inputs.model_unresolved.tc_playbook_kvstore_context,
output_variables=self.inputs.model_unresolved.tc_playbook_out_variables,
)
@cached_property
def proxies(self) -> dict:
"""Format the proxy configuration for Python Requests module.
Generates a dictionary for use with the Python Requests module format
when proxy is required for remote connections.
**Example Response**
::
{"http": "http://user:pass@10.10.1.10:3128/"}
Returns:
(dict): Dictionary of proxy settings
"""
return proxies(
proxy_host=self.inputs.model_unresolved.tc_proxy_host,
proxy_port=self.inputs.model_unresolved.tc_proxy_port,
proxy_user=self.inputs.model_unresolved.tc_proxy_username,
proxy_pass=self.inputs.model_unresolved.tc_proxy_password,
)
@registry.factory(RedisClient)
@scoped_property
def redis_client(self) -> 'RedisClient':
"""Return redis client instance configure for Playbook/Service Apps."""
return self.get_redis_client(
host=self.inputs.contents.get('tc_kvstore_host'),
port=self.inputs.contents.get('tc_kvstore_port'),
db=0,
)
def results_tc(self, key: str, value: str) -> None:
"""Write data to results_tc file in TcEX specified directory.
The TcEx platform support persistent values between executions of the App. This
method will store the values for TC to read and put into the Database.
Args:
key: The data key to be stored.
value: The data value to be stored.
"""
if os.access(self.inputs.model_unresolved.tc_out_path, os.W_OK):
results_file = f'{self.inputs.model_unresolved.tc_out_path}/results.tc'
else:
results_file = 'results.tc'
new = True
# ensure file exists
open(results_file, 'a').close() # pylint: disable=consider-using-with
with open(results_file, 'r+') as fh:
results = ''
for line in fh.read().strip().split('\n'):
if not line:
continue
try:
k, v = line.split(' = ')
except ValueError:
# handle null/empty value (e.g., "name =")
k, v = line.split(' =')
if k == key:
v = value
new = False
if v is not None:
results += f'{k} = {v}\n'
if new and value is not None: # indicates the key/value pair didn't already exist
results += f'{key} = {value}\n'
fh.seek(0)
fh.write(results)
fh.truncate()
@cached_property
def service(self) -> Union[ApiService, CommonServiceTrigger, WebhookTriggerService]:
"""Include the Service Module."""
if self.ij.model.runtime_level.lower() == 'apiservice':
from .services import ApiService as Service
elif self.ij.model.runtime_level.lower() == 'triggerservice':
from .services import CommonServiceTrigger as Service
elif self.ij.model.runtime_level.lower() == 'webhooktriggerservice':
from .services import WebhookTriggerService as Service
else:
self.exit(1, 'Could not determine the service type.')
return Service(self)
@registry.factory(TcSession)
@scoped_property
def session_tc(self) -> 'TcSession':
"""Return an instance of Requests Session configured for the ThreatConnect API."""
return self.get_session_tc()
@scoped_property
def session_external(self) -> 'ExternalSession':
"""Return an instance of Requests Session configured for the ThreatConnect API."""
return self.get_session_external()
@registry.factory(Tokens, singleton=True)
@cached_property
def token(self) -> 'Tokens':
"""Return token object."""
_proxies = None
if self.inputs.model_unresolved.tc_proxy_tc is True:
_proxies = self.proxies
_tokens = Tokens(
self.inputs.model_unresolved.tc_api_path,
self.inputs.model_unresolved.tc_verify,
_proxies,
)
# register token for Apps that pass token on start
if all(
[self.inputs.model_unresolved.tc_token, self.inputs.model_unresolved.tc_token_expires]
):
_tokens.register_token(
key=threading.current_thread().name,
token=self.inputs.model_unresolved.tc_token,
expires=self.inputs.model_unresolved.tc_token_expires,
)
return _tokens
def set_exit_code(self, exit_code: int):
"""Set the exit code (registry)"""
self.exit_code = exit_code
@property
def ti_utils(self) -> 'ThreatIntelUtils':
"""Return instance of Threat Intel Utils."""
return ThreatIntelUtils(self.session_tc)
@cached_property
def utils(self) -> 'Utils': # pylint: disable=no-self-use
"""Include the Utils module."""
return Utils()
@property
def v2(self) -> 'V2':
"""Return a case management instance."""
return V2(self.inputs, self.session_tc)
@property
def v3(self) -> 'V3':
"""Return a case management instance."""
return V3(self.session_tc)
| |
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs checks for mesos style."""
import os
import platform
import re
import string
import subprocess
import sys
from pathlib import PurePath
class LinterBase():
    """
    This is an abstract class that provides the base functionality for
    linting files in the mesos project. Its 'main()' function
    walks through the set of files passed to it and runs some
    standard linting over them. This includes checking for license headers
    and checking for non-supported characters. From there it calls a
    'run_lint()' function that can be overridden to provide
    customizable style checks for a specific class of files (e.g. C++,
    Python, etc.).

    Any class that extends from 'LinterBase' should override the
    following class variables / functions:

    linter_type
    source_dirs
    exclude_files
    source_files
    comment_prefix

    run_lint()

    Please see the comments below for details on how to override each
    variable.
    """
    # The name of the linter to help with printing which linter files
    # are currently being processed by.
    linter_type = ''
    # Root source paths (will be traversed recursively).
    source_dirs = []
    # Add file paths and patterns which should not be checked
    # This should include 3rdparty libraries, includes and machine generated
    # source.
    exclude_files = ''
    # A regex of possible matches for your source files.
    source_files = ''
    # A prefix at the beginning of the line to demark comments (e.g. '//')
    comment_prefix = ''
    def check_encoding(self, source_paths):
        """
        Checks for encoding errors in the given files. Source
        code files must contain only printable ascii characters.
        This excludes the extended ascii characters 128-255.
        http://www.asciitable.com/

        Returns the number of lines containing non-printable characters.
        """
        error_count = 0
        for path in source_paths:
            with open(path) as source_file:
                for line_number, line in enumerate(source_file):
                    # If we find an error, add 1 to both the character and
                    # the line offset to give them 1-based indexing
                    # instead of 0 (as is common in most editors).
                    char_errors = [offset for offset, char in enumerate(line)
                                   if char not in string.printable]
                    if char_errors:
                        sys.stderr.write(
                            "{path}:{line_number}: Non-printable characters"
                            " found at [{offsets}]: \"{line}\"\n".format(
                                path=path,
                                line_number=line_number + 1,
                                offsets=', '.join([str(offset + 1) for offset
                                                   in char_errors]),
                                line=line.rstrip()))
                        error_count += 1
        return error_count
    def check_license_header(self, source_paths):
        """Checks the license headers of the given files.

        Returns the number of files whose first three lines do not
        contain a recognized license header.
        """
        error_count = 0
        for path in source_paths:
            with open(path) as source_file:
                # We read the three first lines of the file as the
                # first line could be a shebang and the second line empty.
                # The '' default keeps next() from raising StopIteration
                # on files shorter than three lines.
                head = "".join([next(source_file, '') for _ in range(3)])
                # TODO(bbannier) We allow `Copyright` for
                # currently deviating files. This should be
                # removed one we have a uniform license format.
                # NOTE: this must be a non-capturing group; the previous
                # character class '[Licensed|Copyright]' matched any single
                # character from that set (e.g. '# n...').
                regex = r'^{comment_prefix} (?:Licensed|Copyright)'.format(
                    comment_prefix=self.comment_prefix)
                # pylint: disable=no-member
                regex = re.compile(regex, re.MULTILINE)
                if not regex.search(head):
                    sys.stderr.write(
                        "{path}:1: A license header should appear on one of"
                        " the first lines of the file starting with"
                        " '{comment_prefix} Licensed'.: {head}".format(
                            path=path,
                            head=head,
                            comment_prefix=self.comment_prefix))
                    error_count += 1
        return error_count
    def find_candidates(self, root_dir):
        """
        Search through the all files rooted at 'root_dir' and compare
        them against 'self.exclude_files' and 'self.source_files' to
        come up with a set of candidate files to lint.

        Yields matching file paths one at a time.
        """
        exclude_file_regex = re.compile(self.exclude_files)
        source_criteria_regex = re.compile(self.source_files)
        for root, _, files in os.walk(root_dir):
            for name in files:
                path = os.path.join(root, name)
                if exclude_file_regex.search(path) is not None:
                    continue
                if source_criteria_regex.search(name) is not None:
                    yield path
    def run_command_in_virtualenv(self, command):
        """
        Activate the virtual environment, run the
        given command and return its output.

        Returns the started subprocess.Popen object (stdout piped).
        """
        virtualenv = os.path.join('support', '.virtualenv')
        if platform.system() == 'Windows':
            command = r'{virtualenv_path}\Scripts\activate.bat & {cmd}'.format(
                virtualenv_path=virtualenv, cmd=command)
        else:
            command = '. {virtualenv_path}/bin/activate; {cmd}'.format(
                virtualenv_path=virtualenv, cmd=command)
        return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    # pylint: disable=unused-argument
    def run_lint(self, source_paths):
        """
        A custom function to provide linting for 'linter_type'.
        It takes a list of source files to lint and returns the number
        of errors found during the linting process.

        It should print any errors as it encounters them to provide
        feedback to the caller.
        """
        return 0
    def main(self, modified_files):
        """
        This function takes a list of files and lints them for the
        class of files defined by 'linter_type'.

        Returns the total number of errors found (0 when no candidate
        files matched).
        """
        # Verify that source roots are accessible from current
        # working directory. A common error could be to call
        # the style checker from other (possibly nested) paths.
        for source_dir in self.source_dirs:
            if not os.path.exists(source_dir):
                print("Could not find '{dir}'".format(dir=source_dir))
                print('Please run from the root of the mesos source directory')
                # sys.exit() rather than the interactive-only exit() built-in
                sys.exit(1)
        # Add all source file candidates to candidates list.
        candidates = []
        for source_dir in self.source_dirs:
            for candidate in self.find_candidates(source_dir):
                candidates.append(candidate)
        # Normalize paths of any files given.
        modified_files = [os.fspath(PurePath(f)) for f in modified_files]
        # If file paths are specified, check all file paths that are
        # candidates; else check all candidates.
        file_paths = modified_files if modified_files else candidates
        # Compute the set intersect of the input file paths and candidates.
        # This represents the reduced set of candidates to run lint on.
        candidates_set = set(candidates)
        clean_file_paths_set = set(path.rstrip() for path in file_paths)
        filtered_candidates_set = clean_file_paths_set.intersection(
            candidates_set)
        if filtered_candidates_set:
            plural = '' if len(filtered_candidates_set) == 1 else 's'
            print('Checking {num_files} {linter} file{plural}'.format(
                num_files=len(filtered_candidates_set),
                linter=self.linter_type,
                plural=plural))
            license_errors = self.check_license_header(filtered_candidates_set)
            encoding_errors = self.check_encoding(list(filtered_candidates_set))
            lint_errors = self.run_lint(list(filtered_candidates_set))
            total_errors = license_errors + encoding_errors + lint_errors
            if total_errors > 0:
                sys.stderr.write('Total errors found: {num_errors}\n'.format(
                    num_errors=total_errors))
            return total_errors
        return 0
class CppLinter(LinterBase):
    """The linter for C++ files, uses cpplint."""
    linter_type = 'C++'
    source_dirs = ['src',
                   'include',
                   os.path.join('3rdparty', 'libprocess'),
                   os.path.join('3rdparty', 'stout')]
    exclude_files = '(' \
                    r'elfio\-3\.2|' \
                    r'protobuf\-2\.4\.1|' \
                    r'googletest\-release\-1\.8\.0|' \
                    r'glog\-0\.3\.3|' \
                    r'boost\-1\.53\.0|' \
                    r'libev\-4\.15|' \
                    r'\.pb\.cc|\.pb\.h|\.md|\.virtualenv' \
                    ')'
    source_files = r'\.(cpp|hpp|cc|h)$'
    comment_prefix = r'\/\/'
    def run_lint(self, source_paths):
        """
        Runs cpplint over given files.

        http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
        """
        # See cpplint.py for full list of rules.
        active_rules = [
            'build/class',
            'build/deprecated',
            'build/endif_comment',
            'readability/todo',
            'readability/namespace',
            'runtime/vlog',
            'whitespace/blank_line',
            'whitespace/comma',
            'whitespace/end_of_line',
            'whitespace/ending_newline',
            'whitespace/forcolon',
            'whitespace/indent',
            'whitespace/line_length',
            'whitespace/operators',
            'whitespace/semicolon',
            'whitespace/tab',
            'whitespace/comments',
            'whitespace/todo']
        rules_filter = '--filter=-,+' + ',+'.join(active_rules)
        # We do not use a version of cpplint available through pip as
        # we use a custom version (see cpplint.path) to lint C++ files.
        proc = subprocess.Popen(
            [sys.executable, 'support/cpplint.py', '--extensions=hpp,cpp',
             rules_filter] + source_paths,
            stderr=subprocess.PIPE,
            close_fds=True)
        # Forward only genuine findings; cpplint's per-file progress lines
        # ('Done processing ...') and its summary would otherwise dominate.
        progress_or_summary = re.compile('^(Done processing |Total errors found: )')
        for raw_line in proc.stderr:
            text = raw_line.decode(sys.stdout.encoding)
            if progress_or_summary.match(text) is None:
                sys.stderr.write(text)
        proc.wait()
        return proc.returncode
class JsLinter(LinterBase):
    """The linter for JavaScript files, uses eslint."""
    linter_type = 'JavaScript'
    source_dirs = [os.path.join('src', 'webui')]
    exclude_files = '(' \
                    r'angular\-1\.2\.32|' \
                    r'angular\-route\-1\.2\.32|' \
                    r'bootstrap\-table\-1\.11\.1|' \
                    r'clipboard\-1\.5\.16|' \
                    r'jquery\-3\.2\.1|' \
                    r'relative\-date|' \
                    r'ui\-bootstrap\-tpls\-0\.9\.0|' \
                    r'angular\-route\-1\.2\.32|' \
                    r'underscore\-1\.4\.3' \
                    ')'
    source_files = r'\.(js)$'
    comment_prefix = '//'
    def run_lint(self, source_paths):
        """
        Runs eslint over given files.

        https://eslint.org/docs/user-guide/configuring
        """
        error_total = 0
        files_arg = ' '.join(source_paths)
        eslint_config = os.path.join('support', '.eslintrc.js')
        proc = self.run_command_in_virtualenv(
            'eslint {files} -c {config} -f compact'.format(
                files=files_arg,
                config=eslint_config
            )
        )
        # Surface both errors and warnings, but count only errors.
        for raw_line in proc.stdout:
            text = raw_line.decode(sys.stdout.encoding)
            is_error = "Error -" in text
            if is_error or "Warning -" in text:
                sys.stderr.write(text)
            if is_error:
                error_total += 1
        return error_total
class PyLinter(LinterBase):
    """The linter for Python files, uses pylint."""
    linter_type = 'Python'
    cli_dir = os.path.join('src', 'python', 'cli_new')
    lib_dir = os.path.join('src', 'python', 'lib')
    support_dir = os.path.join('support')
    source_dirs_to_lint_with_venv = [support_dir]
    source_dirs_to_lint_with_tox = [cli_dir, lib_dir]
    source_dirs = source_dirs_to_lint_with_tox + source_dirs_to_lint_with_venv
    exclude_files = '(' \
                    r'protobuf\-2\.4\.1|' \
                    r'googletest\-release\-1\.8\.0|' \
                    r'glog\-0\.3\.3|' \
                    r'boost\-1\.53\.0|' \
                    r'libev\-4\.15|' \
                    r'\.virtualenv|' \
                    r'\.tox' \
                    ')'
    source_files = r'\.(py)$'
    comment_prefix = '#'
    pylint_config = os.path.abspath(os.path.join('support', 'pylint.config'))
    def run_tox(self, configfile, args, tox_env=None, recreate=False):
        """
        Runs tox with given configfile and args. Optionally set tox env
        and/or recreate the tox-managed virtualenv.

        Returns the started subprocess.Popen object (stdout piped).
        """
        support_dir = os.path.dirname(__file__)
        # Windows virtualenvs keep executables in 'Scripts' (matching the
        # layout used by build_virtualenv()); POSIX virtualenvs use 'bin'.
        bin_dir = 'Scripts' if platform.system() == 'Windows' else 'bin'
        cmd = [os.path.join(support_dir, '.virtualenv', bin_dir, 'tox')]
        cmd += ['-qq']
        cmd += ['-c', configfile]
        if tox_env is not None:
            cmd += ['-e', tox_env]
        if recreate:
            cmd += ['--recreate']
        cmd += ['--']
        cmd += args
        # We do not use `run_command_in_virtualenv()` here, as we
        # directly call `tox` from inside the virtual environment bin
        # directory without activating the virtualenv.
        return subprocess.Popen(cmd, stdout=subprocess.PIPE)
    def filter_source_files(self, source_dir, source_files):
        """
        Returns only the files from 'source_files' whose path starts
        with 'source_dir'.
        """
        return [f for f in source_files if f.startswith(source_dir)]
    def lint_source_files_under_source_dir(self, source_dir, source_files):
        """
        Runs pylint directly or indirectly through tox on source_files which
        are under source_dir. If tox is to be used, it must be configured
        in source_dir, i.e. a tox.ini must be present.

        Returns the number of pylint messages emitted for those files.
        """
        filtered_source_files = self.filter_source_files(
            source_dir, source_files)
        if not filtered_source_files:
            return 0
        if source_dir in self.source_dirs_to_lint_with_tox:
            process = self.run_tox(
                configfile=os.path.join(source_dir, 'tox.ini'),
                args=['--rcfile='+self.pylint_config] + filtered_source_files,
                tox_env='py3-lint')
        else:
            process = self.run_command_in_virtualenv(
                'pylint --score=n --rcfile={rcfile} {files}'.format(
                    rcfile=self.pylint_config,
                    files=' '.join(filtered_source_files)))
        num_errors = 0
        # Count pylint message lines (e.g. 'C0103:', 'E1101:', ...).
        for line in process.stdout:
            line = line.decode(sys.stdout.encoding)
            if re.search(r'[RCWEF][0-9]{4}:', line):
                num_errors += 1
            sys.stderr.write(line)
        return num_errors
    def run_lint(self, source_paths):
        """
        Runs pylint over given files.

        https://google.github.io/styleguide/pyguide.html
        """
        num_errors = 0
        for source_dir in self.source_dirs:
            num_errors += self.lint_source_files_under_source_dir(
                source_dir, source_paths)
        return num_errors
def should_build_virtualenv(modified_files):
    """
    Check if we should build the virtual environment required.
    This is the case if the requirements of the environment
    have changed or if the support script is run with no
    arguments (meaning that the entire codebase should be linted).

    Returns True when the virtualenv must be (re)built.
    """
    # NOTE: If the file list is empty, we are linting the entire test
    # codebase. We should always rebuild the virtualenv in this case.
    if not modified_files:
        return True
    support_dir = os.path.dirname(__file__)
    # Windows virtualenvs keep executables in 'Scripts' (matching the
    # layout used by build_virtualenv()); POSIX virtualenvs use 'bin'.
    bin_dir = 'Scripts' if platform.system() == 'Windows' else 'bin'
    interpreter = os.path.basename(sys.executable)
    interpreter = os.path.join(support_dir, '.virtualenv', bin_dir, interpreter)
    # A missing interpreter means the virtualenv was never built, or was
    # built for a different Python, so it must be rebuilt.
    if not os.path.isfile(interpreter):
        return True
    basenames = [os.path.basename(path) for path in modified_files]
    if 'pip-requirements.txt' in basenames:
        print('The "pip-requirements.txt" file has changed.')
        return True
    if 'build-virtualenv' in basenames:
        print('The "build-virtualenv" file has changed.')
        return True
    # The JS and Python linters require a virtual environment.
    # If all the files modified are not JS or Python files,
    # we do not need to build the virtual environment.
    # TODO(ArmandGrillet): There should be no duplicated logic to know
    # which linters to instantiate depending on the files to analyze.
    if not os.path.isdir(os.path.join('support', '.virtualenv')):
        js_and_python_files = [JsLinter().source_files, PyLinter().source_files]
        js_and_python_files_regex = re.compile('|'.join(js_and_python_files))
        for basename in basenames:
            if js_and_python_files_regex.search(basename) is not None:
                print('Virtualenv not detected and required... building')
                return True
    return False
def build_virtualenv():
    """
    Rebuild the virtualenv by running a bootstrap script.
    This will exit the program if there is a failure.
    """
    print('Rebuilding virtualenv...')
    python3_env = os.environ.copy()
    # force the bootstrap script to use the same interpreter running this script
    python3_env["PYTHON"] = sys.executable
    build_virtualenv_file = [os.path.join('support', 'build-virtualenv')]
    if platform.system() == 'Windows':
        # TODO(andschwa): Port more of the `build-virtualenv` Bash script.
        python_dir = os.path.dirname(sys.executable)
        virtualenv = os.path.join(python_dir, 'Scripts', 'virtualenv.exe')
        build_virtualenv_file = [virtualenv,
                                 '--no-site-packages',
                                 'support/.virtualenv']
    process = subprocess.Popen(
        build_virtualenv_file,
        env=python3_env,
        stdout=subprocess.PIPE)
    # buffer all output and surface it only on failure
    output = ''
    for line in process.stdout:
        output += line.decode(sys.stdout.encoding)
    process.wait()
    if process.returncode != 0:
        sys.stderr.write(output)
        sys.exit(1)
    # TODO(andschwa): Move this into a script like above.
    # On Windows the bootstrap script cannot install the pip packages, so
    # do it here by activating the freshly-built virtualenv.
    if platform.system() == 'Windows':
        def run_command_in_virtualenv(command):
            """
            Stolen from `PyLinter`, runs command in virtualenv.
            """
            virtualenv = os.path.join('support',
                                      '.virtualenv',
                                      'Scripts',
                                      'activate.bat')
            command = '{virtualenv_path} & {cmd}'.format(
                virtualenv_path=virtualenv, cmd=command)
            return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        pip_install_pip = 'python.exe -m pip install --upgrade pip'
        process = run_command_in_virtualenv(pip_install_pip)
        for line in process.stdout:
            output += line.decode(sys.stdout.encoding)
        process.wait()
        if process.returncode != 0:
            sys.stderr.write(output)
            sys.exit(1)
        pip_reqs = 'python.exe -m pip install -r support/pip-requirements.txt'
        process = run_command_in_virtualenv(pip_reqs)
        for line in process.stdout:
            output += line.decode(sys.stdout.encoding)
        process.wait()
        if process.returncode != 0:
            sys.stderr.write(output)
            sys.exit(1)
if __name__ == '__main__':
    # Rebuild the shared virtualenv first when the changed files require it.
    if should_build_virtualenv(sys.argv[1:]):
        build_virtualenv()
    # TODO(ArmandGrillet): We should only instantiate the linters
    # required to lint the files to analyze. See MESOS-8351.
    TOTAL_ERRORS = 0
    for LINTER_CLASS in (CppLinter, JsLinter, PyLinter):
        TOTAL_ERRORS += LINTER_CLASS().main(sys.argv[1:])
    sys.exit(TOTAL_ERRORS)
| |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Column Family."""
from google.cloud import _helpers
from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_v2_pb2,
)
class GarbageCollectionRule(object):
    """Garbage collection rule for column families within a table.

    Cells in the column family (within a table) fitting the rule will be
    deleted during garbage collection.

    .. note::

        This class is a do-nothing base class for all GC rules.

    .. note::

        A string ``gc_expression`` can also be used with API requests, but
        that value would be superseded by a ``gc_rule``. As a result, we
        don't support that feature and instead support via native classes.
    """
class MaxVersionsGCRule(GarbageCollectionRule):
    """Garbage collection rule capping the number of versions kept per cell.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_create_family_gc_max_versions]
        :end-before: [END bigtable_create_family_gc_max_versions]

    :type max_num_versions: int
    :param max_num_versions: The maximum number of versions
    """
    def __init__(self, max_num_versions):
        self.max_num_versions = max_num_versions
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.max_num_versions == other.max_num_versions
        return NotImplemented
    def __ne__(self, other):
        return not (self == other)
    def to_pb(self):
        """Convert this rule to its protobuf representation.

        :rtype: :class:`.table_v2_pb2.GcRule`
        :returns: The converted current object.
        """
        return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions)
class MaxAgeGCRule(GarbageCollectionRule):
    """Garbage collection rule capping the age of a cell.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_create_family_gc_max_age]
        :end-before: [END bigtable_create_family_gc_max_age]

    :type max_age: :class:`datetime.timedelta`
    :param max_age: The maximum age allowed for a cell in the table.
    """
    def __init__(self, max_age):
        self.max_age = max_age
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.max_age == other.max_age
        return NotImplemented
    def __ne__(self, other):
        return not (self == other)
    def to_pb(self):
        """Convert this rule to its protobuf representation.

        :rtype: :class:`.table_v2_pb2.GcRule`
        :returns: The converted current object.
        """
        # timedelta is not directly serializable; convert to a Duration pb.
        duration_pb = _helpers._timedelta_to_duration_pb(self.max_age)
        return table_v2_pb2.GcRule(max_age=duration_pb)
class GCRuleUnion(GarbageCollectionRule):
    """Union of garbage collection rules.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_create_family_gc_union]
        :end-before: [END bigtable_create_family_gc_union]

    :type rules: list
    :param rules: List of :class:`GarbageCollectionRule`.
    """

    def __init__(self, rules):
        self.rules = rules

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.rules == other.rules
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def to_pb(self):
        """Collapse the union into a single GC rule protobuf.

        :rtype: :class:`.table_v2_pb2.GcRule`
        :returns: The converted current object.
        """
        converted = [member.to_pb() for member in self.rules]
        return table_v2_pb2.GcRule(
            union=table_v2_pb2.GcRule.Union(rules=converted)
        )
class GCRuleIntersection(GarbageCollectionRule):
    """Intersection of garbage collection rules.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_create_family_gc_intersection]
        :end-before: [END bigtable_create_family_gc_intersection]

    :type rules: list
    :param rules: List of :class:`GarbageCollectionRule`.
    """

    def __init__(self, rules):
        self.rules = rules

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.rules == other.rules
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def to_pb(self):
        """Collapse the intersection into a single GC rule protobuf.

        :rtype: :class:`.table_v2_pb2.GcRule`
        :returns: The converted current object.
        """
        converted = [member.to_pb() for member in self.rules]
        return table_v2_pb2.GcRule(
            intersection=table_v2_pb2.GcRule.Intersection(rules=converted)
        )
class ColumnFamily(object):
    """Representation of a Google Cloud Bigtable Column Family.

    We can use a :class:`ColumnFamily` to:

    * :meth:`create` itself
    * :meth:`update` itself
    * :meth:`delete` itself

    :type column_family_id: str
    :param column_family_id: The ID of the column family. Must be of the
                             form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type table: :class:`Table <google.cloud.bigtable.table.Table>`
    :param table: The table that owns the column family.

    :type gc_rule: :class:`GarbageCollectionRule`
    :param gc_rule: (Optional) The garbage collection settings for this
                    column family.
    """

    def __init__(self, column_family_id, table, gc_rule=None):
        self.column_family_id = column_family_id
        self._table = table
        self.gc_rule = gc_rule

    @property
    def name(self):
        """Column family name used in requests.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_column_family_name]
            :end-before: [END bigtable_column_family_name]

        .. note::

            This property will not change if ``column_family_id`` does not,
            but the return value is not cached.

        The Column family name is of the form
        ``"projects/../zones/../clusters/../tables/../columnFamilies/.."``

        :rtype: str
        :returns: The column family name.
        """
        return "{}/columnFamilies/{}".format(
            self._table.name, self.column_family_id
        )

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.column_family_id == other.column_family_id
            and self._table == other._table
            and self.gc_rule == other.gc_rule
        )

    def __ne__(self, other):
        return not self == other

    def to_pb(self):
        """Converts the column family to a protobuf.

        :rtype: :class:`.table_v2_pb2.ColumnFamily`
        :returns: The converted current object.
        """
        if self.gc_rule is None:
            return table_v2_pb2.ColumnFamily()
        return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb())

    def _modify(self, modification):
        # All three mutations (create/update/delete) go through the same
        # ModifyColumnFamilies RPC; only the modification payload differs.
        client = self._table._instance._client
        client.table_admin_client.modify_column_families(
            self._table.name, [modification]
        )

    def create(self):
        """Create this column family.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_create_column_family]
            :end-before: [END bigtable_create_column_family]
        """
        # The request body is fully derived from state already held on
        # this instance (the GC rule and the column family ID).
        self._modify(
            table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
                id=self.column_family_id, create=self.to_pb()
            )
        )

    def update(self):
        """Update this column family.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_update_column_family]
            :end-before: [END bigtable_update_column_family]

        .. note::

            Only the GC rule can be updated. By changing the column family
            ID, you will simply be referring to a different column family.
        """
        self._modify(
            table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
                id=self.column_family_id, update=self.to_pb()
            )
        )

    def delete(self):
        """Delete this column family.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_delete_column_family]
            :end-before: [END bigtable_delete_column_family]
        """
        self._modify(
            table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
                id=self.column_family_id, drop=True
            )
        )
def _gc_rule_from_pb(gc_rule_pb):
    """Convert a protobuf GC rule to a native object.

    :type gc_rule_pb: :class:`.table_v2_pb2.GcRule`
    :param gc_rule_pb: The GC rule to convert.

    :rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
    :returns: An instance of one of the native rules defined
              in :module:`column_family` or :data:`None` if no values were
              set on the protobuf passed in.

    :raises: :class:`ValueError <exceptions.ValueError>` if the rule name
             is unexpected.
    """
    rule_name = gc_rule_pb.WhichOneof("rule")
    if rule_name is None:
        return None
    if rule_name == "max_num_versions":
        return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
    if rule_name == "max_age":
        return MaxAgeGCRule(_helpers._duration_pb_to_timedelta(gc_rule_pb.max_age))
    if rule_name == "union":
        # Recurse into each member rule of the composite.
        return GCRuleUnion(
            [_gc_rule_from_pb(sub_rule) for sub_rule in gc_rule_pb.union.rules]
        )
    if rule_name == "intersection":
        return GCRuleIntersection(
            [_gc_rule_from_pb(sub_rule)
             for sub_rule in gc_rule_pb.intersection.rules]
        )
    raise ValueError("Unexpected rule name", rule_name)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.objects import cell_mapping
from nova.objects import instance
from nova.objects import instance_mapping
from nova import test
from nova.tests import fixtures
# Template payload for InstanceMapping._create_in_db(); the empty
# 'instance_uuid' is filled in per call by create_mapping() below.
sample_mapping = {'instance_uuid': '',
                  'cell_id': 3,
                  'project_id': 'fake-project'}
# Template payload for CellMapping._create_in_db(); the empty 'uuid' is
# filled in per call by create_cell_mapping() below.  'id'/'cell_id' of 3
# tie the two samples together.
sample_cell_mapping = {'id': 3,
                       'uuid': '',
                       'name': 'fake-cell',
                       'transport_url': 'rabbit:///',
                       'database_connection': 'mysql:///'}
def create_cell_mapping(**kwargs):
    """Insert a cell mapping row built from sample_cell_mapping plus overrides.

    A fresh uuid is generated unless one is supplied in kwargs.
    Returns the created DB row.
    """
    values = dict(sample_cell_mapping)
    values['uuid'] = kwargs.pop('uuid', uuidutils.generate_uuid())
    values.update(kwargs)
    ctxt = context.RequestContext('fake-user', 'fake-project')
    return cell_mapping.CellMapping._create_in_db(ctxt, values)
def create_mapping(**kwargs):
    """Insert an instance mapping row built from sample_mapping plus overrides.

    A fresh instance_uuid is generated unless one is supplied in kwargs.
    Returns the created DB row.
    """
    values = dict(sample_mapping)
    values['instance_uuid'] = kwargs.pop('instance_uuid',
                                         uuidutils.generate_uuid())
    values.update(kwargs)
    ctxt = context.RequestContext('fake-user', 'fake-project')
    return instance_mapping.InstanceMapping._create_in_db(ctxt, values)
class InstanceMappingTestCase(test.NoDBTestCase):
    """DB-backed tests for InstanceMapping's low-level _*_in_db helpers."""

    # The test manages its own databases via fixtures rather than the base
    # class's default DB handling.
    USES_DB_SELF = True

    def setUp(self):
        super(InstanceMappingTestCase, self).setUp()
        # These helpers hit the API database, so stand one up per test.
        self.useFixture(fixtures.Database(database='api'))
        self.context = context.RequestContext('fake-user', 'fake-project')
        self.mapping_obj = instance_mapping.InstanceMapping()

    def test_get_by_instance_uuid(self):
        # NOTE(review): this local deliberately shadows the module-level
        # `cell_mapping` import for the remainder of the test.
        cell_mapping = create_cell_mapping()
        mapping = create_mapping()
        db_mapping = self.mapping_obj._get_by_instance_uuid_from_db(
            self.context, mapping['instance_uuid'])
        # 'cell_mapping' is a joined object rather than a plain column, so
        # it is compared separately below by id.
        for key in [key for key in self.mapping_obj.fields.keys()
                    if key != 'cell_mapping']:
            self.assertEqual(db_mapping[key], mapping[key])
        self.assertEqual(db_mapping['cell_mapping']['id'], cell_mapping['id'])

    def test_get_by_instance_uuid_not_found(self):
        # Looking up a uuid with no mapping raises rather than returning None.
        self.assertRaises(exception.InstanceMappingNotFound,
            self.mapping_obj._get_by_instance_uuid_from_db, self.context,
            uuidutils.generate_uuid())

    def test_save_in_db(self):
        mapping = create_mapping()
        cell_mapping = create_cell_mapping()
        self.mapping_obj._save_in_db(self.context, mapping['instance_uuid'],
                {'cell_id': cell_mapping['id']})
        db_mapping = self.mapping_obj._get_by_instance_uuid_from_db(
            self.context, mapping['instance_uuid'])
        # Everything except the updated column (and the join/updated_at,
        # which change as a side effect of the save) must be untouched.
        for key in [key for key in self.mapping_obj.fields.keys()
                    if key not in ['cell_id', 'cell_mapping', 'updated_at']]:
            self.assertEqual(db_mapping[key], mapping[key])
        self.assertEqual(db_mapping['cell_id'], cell_mapping['id'])

    def test_destroy_in_db(self):
        mapping = create_mapping()
        # Sanity check: the row exists before we destroy it.
        self.mapping_obj._get_by_instance_uuid_from_db(self.context,
                mapping['instance_uuid'])
        self.mapping_obj._destroy_in_db(self.context, mapping['instance_uuid'])
        self.assertRaises(exception.InstanceMappingNotFound,
            self.mapping_obj._get_by_instance_uuid_from_db, self.context,
            mapping['instance_uuid'])

    def test_cell_id_nullable(self):
        # Just ensure this doesn't raise
        create_mapping(cell_id=None)

    def test_modify_cell_mapping(self):
        # Start with a mapping that has no cell, then point it at a newly
        # created cell via save() and verify the join round-trips.
        inst_mapping = instance_mapping.InstanceMapping(context=self.context)
        inst_mapping.instance_uuid = uuidutils.generate_uuid()
        inst_mapping.project_id = self.context.project_id
        inst_mapping.cell_mapping = None
        inst_mapping.create()
        c_mapping = cell_mapping.CellMapping(
                self.context,
                uuid=uuidutils.generate_uuid(),
                name="cell0",
                transport_url="none:///",
                database_connection="fake:///")
        c_mapping.create()
        inst_mapping.cell_mapping = c_mapping
        inst_mapping.save()
        result_mapping = instance_mapping.InstanceMapping.get_by_instance_uuid(
            self.context, inst_mapping.instance_uuid)
        self.assertEqual(result_mapping.cell_mapping.id,
                         c_mapping.id)

    def test_populate_queued_for_delete(self):
        """Exercise the populate_queued_for_delete online data migration."""
        cells = []
        celldbs = fixtures.CellDatabases()
        # Create two cell databases and map them
        for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
            cm = cell_mapping.CellMapping(context=self.context, uuid=uuid,
                                          database_connection=uuid,
                                          transport_url='fake://')
            cm.create()
            cells.append(cm)
            celldbs.add_cell_database(uuid)
        self.useFixture(celldbs)
        # Create 5 instances per cell, two deleted, one with matching
        # queued_for_delete in the instance mapping
        for cell in cells:
            for i in range(0, 5):
                # Instance 4 should be SOFT_DELETED
                vm_state = (vm_states.SOFT_DELETED if i == 4
                            else vm_states.ACTIVE)
                # Instance 2 should already be marked as queued_for_delete
                qfd = True if i == 2 else None
                with context.target_cell(self.context, cell) as cctxt:
                    inst = instance.Instance(
                        cctxt,
                        vm_state=vm_state,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id)
                    inst.create()
                    if i in (2, 3):
                        # Instances 2 and 3 are hard-deleted
                        inst.destroy()
                instance_mapping.InstanceMapping._create_in_db(
                    self.context,
                    {'project_id': self.context.project_id,
                     'cell_id': cell.id,
                     'queued_for_delete': qfd,
                     'instance_uuid': inst.uuid})
        done, total = instance_mapping.populate_queued_for_delete(self.context,
                                                                  2)
        # First two needed fixing, and honored the limit
        self.assertEqual(2, done)
        self.assertEqual(2, total)
        done, total = instance_mapping.populate_queued_for_delete(self.context,
                                                                  1000)
        # Last six included two that were already done, and spanned to the
        # next cell
        self.assertEqual(6, done)
        self.assertEqual(6, total)
        mappings = instance_mapping.InstanceMappingList.get_by_project_id(
            self.context, self.context.project_id)
        # Check that we have only the expected number of records with
        # True/False (which implies no NULL records).
        # Six deleted instances
        self.assertEqual(6, len(
            [im for im in mappings if im.queued_for_delete is True]))
        # Four non-deleted instances
        self.assertEqual(4, len(
            [im for im in mappings if im.queued_for_delete is False]))
class InstanceMappingListTestCase(test.NoDBTestCase):
    """Tests for InstanceMappingList query methods against the API DB."""

    # The test manages its own databases via fixtures rather than the base
    # class's default DB handling.
    USES_DB_SELF = True

    def setUp(self):
        super(InstanceMappingListTestCase, self).setUp()
        self.useFixture(fixtures.Database(database='api'))
        self.context = context.RequestContext('fake-user', 'fake-project')
        self.list_obj = instance_mapping.InstanceMappingList()

    def test_get_by_project_id_from_db(self):
        project_id = 'fake-project'
        # Keep the created rows keyed by uuid so the DB results can be
        # matched back regardless of ordering.
        mappings = {}
        mapping = create_mapping(project_id=project_id)
        mappings[mapping['instance_uuid']] = mapping
        mapping = create_mapping(project_id=project_id)
        mappings[mapping['instance_uuid']] = mapping
        db_mappings = self.list_obj._get_by_project_id_from_db(
            self.context, project_id)
        for db_mapping in db_mappings:
            mapping = mappings[db_mapping.instance_uuid]
            for key in instance_mapping.InstanceMapping.fields.keys():
                self.assertEqual(db_mapping[key], mapping[key])

    def test_instance_mapping_list_get_by_cell_id(self):
        """Tests getting all of the InstanceMappings for a given CellMapping id
        """
        # we shouldn't have any instance mappings yet
        inst_mapping_list = (
            instance_mapping.InstanceMappingList.get_by_cell_id(
                self.context, sample_cell_mapping['id'])
        )
        self.assertEqual(0, len(inst_mapping_list))
        # now create an instance mapping in a cell
        db_inst_mapping1 = create_mapping()
        # let's also create an instance mapping that's not in a cell to make
        # sure our filtering is working
        db_inst_mapping2 = create_mapping(cell_id=None)
        self.assertIsNone(db_inst_mapping2['cell_id'])
        # now we should list out one instance mapping for the cell
        inst_mapping_list = (
            instance_mapping.InstanceMappingList.get_by_cell_id(
                self.context, db_inst_mapping1['cell_id'])
        )
        self.assertEqual(1, len(inst_mapping_list))
        self.assertEqual(db_inst_mapping1['id'], inst_mapping_list[0].id)

    def test_instance_mapping_get_by_instance_uuids(self):
        db_inst_mapping1 = create_mapping()
        db_inst_mapping2 = create_mapping(cell_id=None)
        # Create a third that we won't include
        create_mapping()
        uuids = [db_inst_mapping1.instance_uuid,
                 db_inst_mapping2.instance_uuid]
        # A uuid with no mapping at all is silently ignored by the query.
        mappings = instance_mapping.InstanceMappingList.get_by_instance_uuids(
            self.context, uuids + [uuidsentinel.deleted_instance])
        self.assertEqual(sorted(uuids),
                         sorted([m.instance_uuid for m in mappings]))

    def test_get_not_deleted_by_cell_and_project(self):
        cells = []
        # Create two cells
        for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
            cm = cell_mapping.CellMapping(context=self.context, uuid=uuid,
                                          database_connection="fake:///",
                                          transport_url='fake://')
            cm.create()
            cells.append(cm)
        uuids = {cells[0]: [uuidsentinel.c1i1, uuidsentinel.c1i2],
                 cells[1]: [uuidsentinel.c2i1, uuidsentinel.c2i2]}
        project_ids = ['fake-project-1', 'fake-project-2']
        # Create five instance_mappings such that:
        for cell, uuid in uuids.items():
            # Both the cells contain a mapping belonging to fake-project-1
            im1 = instance_mapping.InstanceMapping(context=self.context,
                project_id=project_ids[0], cell_mapping=cell,
                instance_uuid=uuid[0], queued_for_delete=False)
            im1.create()
            # Both the cells contain a mapping belonging to fake-project-2
            im2 = instance_mapping.InstanceMapping(context=self.context,
                project_id=project_ids[1], cell_mapping=cell,
                instance_uuid=uuid[1], queued_for_delete=False)
            im2.create()
            # The second cell has a third mapping that is queued for deletion
            # which belongs to fake-project-1.
            if cell.uuid == uuidsentinel.cell2:
                im3 = instance_mapping.InstanceMapping(context=self.context,
                    project_id=project_ids[0], cell_mapping=cell,
                    instance_uuid=uuidsentinel.qfd, queued_for_delete=True)
                im3.create()
        # Get not queued for deletion mappings from cell1 belonging to
        # fake-project-2.
        ims = (instance_mapping.InstanceMappingList.
               get_not_deleted_by_cell_and_project(
                   self.context, cells[0].uuid, 'fake-project-2'))
        # This will give us one mapping from cell1
        self.assertEqual([uuidsentinel.c1i2],
                         sorted([m.instance_uuid for m in ims]))
        self.assertIn('cell_mapping', ims[0])
        # Get not queued for deletion mappings from cell2 belonging to
        # fake-project-1.
        ims = (instance_mapping.InstanceMappingList.
               get_not_deleted_by_cell_and_project(
                   self.context, cells[1].uuid, 'fake-project-1'))
        # This will give us one mapping from cell2. Note that even if
        # there are two mappings belonging to fake-project-1 inside cell2,
        # only the one not queued for deletion is returned.
        self.assertEqual([uuidsentinel.c2i1],
                         sorted([m.instance_uuid for m in ims]))
        # Try getting a mapping belonging to a non-existing project_id.
        ims = (instance_mapping.InstanceMappingList.
               get_not_deleted_by_cell_and_project(
                   self.context, cells[0].uuid, 'fake-project-3'))
        # Since no mappings belong to fake-project-3, nothing is returned.
        self.assertEqual([], sorted([m.instance_uuid for m in ims]))

    def test_get_not_deleted_by_cell_and_project_limit(self):
        cm = cell_mapping.CellMapping(context=self.context,
                                      uuid=uuidsentinel.cell,
                                      database_connection='fake:///',
                                      transport_url='fake://')
        cm.create()
        pid = self.context.project_id
        for uuid in (uuidsentinel.uuid2, uuidsentinel.inst2):
            im = instance_mapping.InstanceMapping(context=self.context,
                                                  project_id=pid,
                                                  cell_mapping=cm,
                                                  instance_uuid=uuid,
                                                  queued_for_delete=False)
            im.create()
        # No limit: both rows come back.
        ims = (instance_mapping.InstanceMappingList.
               get_not_deleted_by_cell_and_project(self.context,
                                                   cm.uuid,
                                                   pid))
        self.assertEqual(2, len(ims))
        # A limit larger than the row count is a no-op.
        ims = (instance_mapping.InstanceMappingList.
               get_not_deleted_by_cell_and_project(self.context,
                                                   cm.uuid,
                                                   pid,
                                                   limit=10))
        self.assertEqual(2, len(ims))
        # A smaller limit truncates the result.
        ims = (instance_mapping.InstanceMappingList.
               get_not_deleted_by_cell_and_project(self.context,
                                                   cm.uuid,
                                                   pid,
                                                   limit=1))
        self.assertEqual(1, len(ims))
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
    """First workflow step: basic attributes of the network itself."""
    net_name = forms.CharField(max_length=255,
                               label=_("Network Name"),
                               required=False)
    # The profile chooser only exists when the Neutron backend supports
    # port profiles; the field and the __init__/helper methods below are
    # added conditionally at class-definition time.
    if api.neutron.is_port_profiles_supported():
        net_profile_id = forms.ChoiceField(label=_("Network Profile"))
    admin_state = forms.BooleanField(label=_("Admin State"),
                                     initial=True, required=False)
    if api.neutron.is_port_profiles_supported():
        def __init__(self, request, *args, **kwargs):
            super(CreateNetworkInfoAction, self).__init__(request,
                                                          *args, **kwargs)
            # Choices must be computed per-request, not at class creation.
            self.fields['net_profile_id'].choices = (
                self.get_network_profile_choices(request))

        def get_network_profile_choices(self, request):
            # Prepend a placeholder so the user must pick explicitly.
            profile_choices = [('', _("Select a profile"))]
            for profile in self._get_profiles(request, 'network'):
                profile_choices.append((profile.id, profile.name))
            return profile_choices

        def _get_profiles(self, request, type_p):
            # Degrade gracefully: surface an error message but keep the
            # form usable with an empty profile list.
            try:
                profiles = api.neutron.profile_list(request, type_p)
            except Exception:
                profiles = []
                msg = _('Network Profiles could not be retrieved.')
                exceptions.handle(request, msg)
            return profiles
    # TODO(absubram): Add ability to view network profile information
    # in the network detail if a profile is used.

    class Meta:
        name = _("Network")
        help_text = _("From here you can create a new network.\n"
                      "In addition a subnet associated with the network "
                      "can be created in the next panel.")
class CreateNetworkInfo(workflows.Step):
    """Workflow step wrapping :class:`CreateNetworkInfoAction`."""
    action_class = CreateNetworkInfoAction
    # Only contribute the profile field when the backend supports port
    # profiles (mirrors the conditional field on the action class).
    contributes = (
        ("net_name", "admin_state", "net_profile_id")
        if api.neutron.is_port_profiles_supported()
        else ("net_name", "admin_state")
    )
class CreateSubnetInfoAction(workflows.Action):
    """Second workflow step: optional subnet definition for the network."""
    with_subnet = forms.BooleanField(label=_("Create Subnet"),
                                     initial=True, required=False)
    subnet_name = forms.CharField(max_length=255,
                                  label=_("Subnet Name"),
                                  required=False)
    cidr = forms.IPField(label=_("Network Address"),
                         required=False,
                         initial="",
                         help_text=_("Network address in CIDR format "
                                     "(e.g. 192.168.0.0/24)"),
                         version=forms.IPv4 | forms.IPv6,
                         mask=True)
    ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
                                   label=_("IP Version"))
    gateway_ip = forms.IPField(
        label=_("Gateway IP"),
        required=False,
        initial="",
        help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
                    "The default value is the first IP of the "
                    "network address (e.g. 192.168.0.1 for "
                    "192.168.0.0/24). "
                    "If you use the default, leave blank. "
                    "If you want to use no gateway, "
                    "check 'Disable Gateway' below."),
        version=forms.IPv4 | forms.IPv6,
        mask=False)
    no_gateway = forms.BooleanField(label=_("Disable Gateway"),
                                    initial=False, required=False)

    class Meta:
        name = _("Subnet")
        help_text = _('You can create a subnet associated with the new '
                      'network, in which case "Network Address" must be '
                      'specified. If you wish to create a network WITHOUT a '
                      'subnet, uncheck the "Create Subnet" checkbox.')

    def _check_subnet_data(self, cleaned_data, is_create=True):
        """Validate the subnet fields as a coherent whole.

        :param cleaned_data: the form's cleaned data dict
        :param is_create: False when validating an update, in which case a
            gateway value (or explicit "no gateway") is mandatory
        :raises forms.ValidationError: on any inconsistency
        """
        cidr = cleaned_data.get('cidr')
        ip_version = int(cleaned_data.get('ip_version'))
        gateway_ip = cleaned_data.get('gateway_ip')
        no_gateway = cleaned_data.get('no_gateway')
        if not cidr:
            msg = _('Specify "Network Address" or '
                    'clear "Create Subnet" checkbox.')
            raise forms.ValidationError(msg)
        if cidr:
            subnet = netaddr.IPNetwork(cidr)
            if subnet.version != ip_version:
                msg = _('Network Address and IP version are inconsistent.')
                raise forms.ValidationError(msg)
            # A host-sized prefix (/32 or /128) leaves no room for both a
            # gateway and instance addresses.
            if (ip_version == 4 and subnet.prefixlen == 32) or \
                    (ip_version == 6 and subnet.prefixlen == 128):
                # BUG FIX: interpolate *after* translation; interpolating
                # inside _() defeats the translation catalog lookup.
                msg = _("The subnet in the Network Address is too small "
                        "(/%s).") % subnet.prefixlen
                raise forms.ValidationError(msg)
        if not no_gateway and gateway_ip:
            # BUG FIX: compare IP versions with '!=', not the identity
            # operator 'is not' (the original only worked by accident via
            # CPython's small-int caching).
            if netaddr.IPAddress(gateway_ip).version != ip_version:
                msg = _('Gateway IP and IP version are inconsistent.')
                raise forms.ValidationError(msg)
        if not is_create and not no_gateway and not gateway_ip:
            msg = _('Specify IP address of gateway or '
                    'check "Disable Gateway".')
            raise forms.ValidationError(msg)

    def clean(self):
        cleaned_data = super(CreateSubnetInfoAction, self).clean()
        with_subnet = cleaned_data.get('with_subnet')
        # Skip subnet validation entirely when no subnet is requested.
        if not with_subnet:
            return cleaned_data
        self._check_subnet_data(cleaned_data)
        return cleaned_data
class CreateSubnetInfo(workflows.Step):
    """Workflow step wrapping :class:`CreateSubnetInfoAction`."""
    action_class = CreateSubnetInfoAction
    contributes = ("with_subnet", "subnet_name", "cidr",
                   "ip_version", "gateway_ip", "no_gateway")
class CreateSubnetDetailAction(workflows.Action):
    """Third workflow step: optional subnet attributes (DHCP, pools, DNS,
    host routes)."""
    enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
                                     initial=True, required=False)
    allocation_pools = forms.CharField(
        widget=forms.Textarea(),
        label=_("Allocation Pools"),
        help_text=_("IP address allocation pools. Each entry is: "
                    "start_ip_address,end_ip_address "
                    "(e.g., 192.168.1.100,192.168.1.120) "
                    "and one entry per line."),
        required=False)
    dns_nameservers = forms.CharField(
        widget=forms.widgets.Textarea(),
        label=_("DNS Name Servers"),
        help_text=_("IP address list of DNS name servers for this subnet. "
                    "One entry per line."),
        required=False)
    host_routes = forms.CharField(
        widget=forms.widgets.Textarea(),
        label=_("Host Routes"),
        help_text=_("Additional routes announced to the hosts. "
                    "Each entry is: destination_cidr,nexthop "
                    "(e.g., 192.168.200.0/24,10.56.1.254) "
                    "and one entry per line."),
        required=False)

    class Meta:
        name = _("Subnet Detail")
        help_text = _('You can specify additional attributes for the subnet.')

    def _convert_ip_address(self, ip, field_name):
        """Parse *ip* or raise a field-labelled ValidationError."""
        try:
            return netaddr.IPAddress(ip)
        except (netaddr.AddrFormatError, ValueError):
            # BUG FIX: interpolate *after* _() so the untranslated format
            # string remains the catalog lookup key.
            msg = _('%(field_name)s: Invalid IP address '
                    '(value=%(ip)s)') % dict(field_name=field_name, ip=ip)
            raise forms.ValidationError(msg)

    def _convert_ip_network(self, network, field_name):
        """Parse *network* as CIDR or raise a field-labelled ValidationError."""
        try:
            return netaddr.IPNetwork(network)
        except (netaddr.AddrFormatError, ValueError):
            # BUG FIX: interpolate after _(), as above.
            msg = _('%(field_name)s: Invalid IP address '
                    '(value=%(network)s)') % dict(
                        field_name=field_name, network=network)
            raise forms.ValidationError(msg)

    def _check_allocation_pools(self, allocation_pools):
        """Validate one 'start_ip,end_ip' pool per non-blank line."""
        for p in allocation_pools.split('\n'):
            p = p.strip()
            if not p:
                continue
            pool = p.split(',')
            if len(pool) != 2:
                msg = _('Start and end addresses must be specified '
                        '(value=%s)') % p
                raise forms.ValidationError(msg)
            start, end = [self._convert_ip_address(ip, "allocation_pools")
                          for ip in pool]
            if start > end:
                msg = _('Start address is larger than end address '
                        '(value=%s)') % p
                raise forms.ValidationError(msg)

    def _check_dns_nameservers(self, dns_nameservers):
        """Validate one nameserver IP per non-blank line."""
        for ns in dns_nameservers.split('\n'):
            ns = ns.strip()
            if not ns:
                continue
            self._convert_ip_address(ns, "dns_nameservers")

    def _check_host_routes(self, host_routes):
        """Validate one 'destination_cidr,nexthop' route per non-blank line."""
        for r in host_routes.split('\n'):
            r = r.strip()
            if not r:
                continue
            route = r.split(',')
            if len(route) != 2:
                msg = _('Host Routes format error: '
                        'Destination CIDR and nexthop must be specified '
                        '(value=%s)') % r
                raise forms.ValidationError(msg)
            self._convert_ip_network(route[0], "host_routes")
            self._convert_ip_address(route[1], "host_routes")

    def clean(self):
        cleaned_data = super(CreateSubnetDetailAction, self).clean()
        self._check_allocation_pools(cleaned_data.get('allocation_pools'))
        self._check_host_routes(cleaned_data.get('host_routes'))
        self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
        return cleaned_data
class CreateSubnetDetail(workflows.Step):
    """Workflow step wrapping :class:`CreateSubnetDetailAction`."""
    action_class = CreateSubnetDetailAction
    contributes = ("enable_dhcp", "allocation_pools",
                   "dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
    """Workflow creating a network plus, optionally, one subnet.

    On subnet-creation failure the freshly created network is rolled back
    so the user is not left with a half-configured network.
    """
    slug = "create_network"
    name = _("Create Network")
    finalize_button_name = _("Create")
    success_message = _('Created network "%s".')
    failure_message = _('Unable to create network "%s".')
    default_steps = (CreateNetworkInfo,
                     CreateSubnetInfo,
                     CreateSubnetDetail)
    wizard = True

    def get_success_url(self):
        return reverse("horizon:project:networks:index")

    def get_failure_url(self):
        return reverse("horizon:project:networks:index")

    def format_status_message(self, message):
        # Prefer the user-supplied name; fall back to the id of the network
        # created so far, or '' if creation never got that far.
        name = self.context.get('net_name') or self.context.get('net_id', '')
        return message % name

    def _create_network(self, request, data):
        """Create the network; return it, or False after error handling."""
        try:
            params = {'name': data['net_name'],
                      'admin_state_up': data['admin_state']}
            if api.neutron.is_port_profiles_supported():
                params['net_profile_id'] = data['net_profile_id']
            network = api.neutron.network_create(request, **params)
            network.set_id_as_name_if_empty()
            # Remember the id for format_status_message() and rollback.
            self.context['net_id'] = network.id
            msg = _('Network "%s" was successfully created.') % network.name
            LOG.debug(msg)
            return network
        except Exception as e:
            msg = (_('Failed to create network "%(network)s": %(reason)s') %
                   {"network": data['net_name'], "reason": e})
            LOG.info(msg)
            redirect = self.get_failure_url()
            exceptions.handle(request, msg, redirect=redirect)
            return False

    def _setup_subnet_parameters(self, params, data, is_create=True):
        """Setup subnet parameters

        This methods setups subnet parameters which are available
        in both create and update.
        """
        is_update = not is_create
        params['enable_dhcp'] = data['enable_dhcp']
        # Allocation pools are only sent on create; presumably they cannot
        # be changed on an existing subnet — confirm against the Neutron API.
        if is_create and data['allocation_pools']:
            pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
                     for pool in data['allocation_pools'].split('\n')
                     if pool.strip()]
            params['allocation_pools'] = pools
        # On update the (possibly empty) lists are always sent so that
        # clearing the textarea actually clears them server-side.
        if data['host_routes'] or is_update:
            routes = [dict(zip(['destination', 'nexthop'],
                               route.strip().split(',')))
                      for route in data['host_routes'].split('\n')
                      if route.strip()]
            params['host_routes'] = routes
        if data['dns_nameservers'] or is_update:
            nameservers = [ns.strip()
                           for ns in data['dns_nameservers'].split('\n')
                           if ns.strip()]
            params['dns_nameservers'] = nameservers

    def _create_subnet(self, request, data, network=None, tenant_id=None,
                       no_redirect=False):
        """Create the subnet; return it, or False after error handling."""
        if network:
            network_id = network.id
            network_name = network.name
        else:
            # No network object given: fall back to ids stashed in the
            # workflow context (used by subclasses/other flows).
            network_id = self.context.get('network_id')
            network_name = self.context.get('network_name')
        try:
            params = {'network_id': network_id,
                      'name': data['subnet_name'],
                      'cidr': data['cidr'],
                      'ip_version': int(data['ip_version'])}
            if tenant_id:
                params['tenant_id'] = tenant_id
            # Explicit "no gateway" wins over any gateway address entered.
            if data['no_gateway']:
                params['gateway_ip'] = None
            elif data['gateway_ip']:
                params['gateway_ip'] = data['gateway_ip']
            self._setup_subnet_parameters(params, data)
            subnet = api.neutron.subnet_create(request, **params)
            self.context['subnet_id'] = subnet.id
            msg = _('Subnet "%s" was successfully created.') % data['cidr']
            LOG.debug(msg)
            return subnet
        except Exception as e:
            msg = _('Failed to create subnet "%(sub)s" for network "%(net)s": '
                    ' %(reason)s')
            # no_redirect lets the caller (handle()) roll the network back
            # before any redirect happens.
            if no_redirect:
                redirect = None
            else:
                redirect = self.get_failure_url()
            exceptions.handle(request,
                              msg % {"sub": data['cidr'], "net": network_name,
                                     "reason": e},
                              redirect=redirect)
            return False

    def _delete_network(self, request, network):
        """Delete the created network when subnet creation failed."""
        try:
            api.neutron.network_delete(request, network.id)
            msg = _('Delete the created network "%s" '
                    'due to subnet creation failure.') % network.name
            LOG.debug(msg)
            redirect = self.get_failure_url()
            messages.info(request, msg)
            # NOTE(review): this Http302 is raised inside the try block and
            # looks like it would be caught by the `except Exception` below
            # (exceptions.handle then performs the redirect instead) —
            # confirm whether the except clause was meant to exclude it.
            raise exceptions.Http302(redirect)
            #return exceptions.RecoverableError
        except Exception:
            msg = _('Failed to delete network "%s"') % network.name
            LOG.info(msg)
            redirect = self.get_failure_url()
            exceptions.handle(request, msg, redirect=redirect)

    def handle(self, request, data):
        """Entry point invoked by the workflow framework on submit."""
        network = self._create_network(request, data)
        if not network:
            return False
        # If we do not need to create a subnet, return here.
        if not data['with_subnet']:
            return True
        subnet = self._create_subnet(request, data, network, no_redirect=True)
        if subnet:
            return True
        else:
            # Roll back the network so the user is not left with a
            # subnet-less network they did not ask for.
            self._delete_network(request, network)
            return False
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""a converter from AI0 feature to AJ1 feature"""
# The implementation is very incomplete and very very ugly.
import sys, re
from collections import namedtuple
from enum import Enum
class GsubFragmentType(Enum):
    """Categories of tokens appearing in a parsed GSUB substitution line."""
    UNKNOWN = 0
    # A glyph reference (backslash-prefixed CID in the feature syntax).
    CID = 1
    # The 'from' / 'by' keywords of a substitute rule.
    FROMBY = 2
    # Any other token (class names, keywords, etc.).
    OTHER = 0xbeef
# One parsed token of a GSUB line: `val` holds the payload (a mapped CID
# int, a keyword, or raw text) and `typ` is a GsubFragmentType tag.
GsubFragment = namedtuple('GsubFragment', ["val", "typ"])
def all_comments(lines):
    """Return True when every line is blank or a '#' comment.

    An empty iterable counts as all-comments (returns True).
    """
    return all(
        stripped == "" or stripped.startswith("#")
        for stripped in (raw.strip() for raw in lines)
    )
def preprocess_class_def(line):
    """Replace whitespace runs inside a glyph-class definition with '#'.

    Text matched by ``\\[.*\\]?`` (from the first ``[`` to the closing
    bracket, or to end of line since the bracket is optional) has each run
    of whitespace collapsed to a single ``#`` so a later whitespace split
    does not break the class apart.  Lines without a ``[`` are returned
    unchanged.

    :param line: one feature-file line
    :returns: the line with spaces inside class definitions masked
    """
    # NOTE: the original code guarded with `if not iterator:`, but
    # re.finditer() always returns a (truthy) iterator object, so that
    # branch was dead code and has been removed.  Building the result in a
    # list also avoids repeated string concatenation.
    pieces = []
    prev_end = 0
    for match in re.finditer(r"\[.*\]?", line):
        pieces.append(line[prev_end:match.start()])
        pieces.append(re.sub(r"\s+", "#", match.group()))
        prev_end = match.end()
    pieces.append(line[prev_end:])
    return "".join(pieces)
def preprocess_gsub_line(line, mapf):
    """Parse one GSUB 'sub ...' line into GsubFragment tokens, remapping CIDs.

    Returns None when the rule references a CID with no mapping, meaning the
    whole rule is unusable for the target font.
    """
    # Strip the leading substitute keyword and the trailing ';...' tail.
    body = re.sub(r"\s*;.*$", "", re.sub(r"^\s*sub\S*\s+", "", line))
    fragments = []
    for token in re.split(r"\s+", body):
        if token in ("from", "by"):
            fragments.append(GsubFragment(val=token, typ=GsubFragmentType.FROMBY))
        elif token.startswith("\\"):
            cid = int(token[1:])
            if cid not in mapf:
                # this line can't be used because it contains invalid CID for a new font
                return None
            fragments.append(GsubFragment(val=mapf[cid], typ=GsubFragmentType.CID))
        else:
            fragments.append(GsubFragment(val=token, typ=GsubFragmentType.OTHER))
    return fragments
class LookupProc(object):
    """Collects the body of a `lookup` block and re-emits it with CIDs remapped."""

    def __init__(self, tag, mapf):
        self.tag = tag      # lookup name
        self.mapf = mapf    # old-CID -> new-CID mapping
        self.lines = []     # converted body lines

    def valid(self):
        """A lookup is worth registering once it has collected any line."""
        return bool(self.lines)

    def start(self):
        # Output is deferred to end() so empty lookups can be suppressed.
        pass

    def end(self):
        # Drop lookups whose body converted to nothing but comments.
        if all_comments(self.lines):
            return
        print("lookup %s {" % (self.tag))
        for body_line in self.lines:
            print(body_line)
        print("} %s;" % (self.tag))

    def line(self, line):
        if not re.search(r"^\s*sub", line):
            self.lines.append(line)
            return
        parsed = preprocess_gsub_line(line, self.mapf)
        if not parsed:
            # Rules referencing unmappable CIDs are dropped entirely.
            return
        pieces = [" substitute"]
        for fragm in parsed:
            if fragm.typ == GsubFragmentType.CID:
                pieces.append(" \\%d" % (fragm.val))
            elif fragm.typ in (GsubFragmentType.FROMBY, GsubFragmentType.OTHER):
                pieces.append(" %s" % (fragm.val))
        self.lines.append("".join(pieces) + ";")
##############################
class ClassProc(object):
    """Rebuilds a glyph-class definition (@name = [...]) with remapped CIDs."""

    def __init__(self, tag, mapf, inside_feature=False):
        self.tag = tag                        # class name, including '@'
        self.mapf = mapf                      # old-CID -> new-CID mapping
        self.inside_feature = inside_feature  # emit via the feature, not here
        self.cids = []                        # remapped member CIDs
        self.cls_def = ""                     # rendered definition, set by end()

    def valid(self):
        return bool(self.cids)

    def start(self):
        pass

    def end(self):
        if not self.cids:
            return
        members = " ".join("\\%d" % (cid) for cid in self.cids)
        self.cls_def = " {} = [{}];".format(self.tag, members)
        # Inside a feature the definition is emitted by the feature itself.
        if not self.inside_feature:
            print(self.cls_def)

    def line(self, line):
        for token in re.split(r"\s+", line):
            if token.startswith("\\"):
                cid = int(token[1:])
                # Members with no mapping are silently dropped.
                if cid in self.mapf:
                    self.cids.append(self.mapf[cid])
##############################
class TableProc(object):
    """Default pass-through emitter for a `table ... { ... }` block."""

    def __init__(self, tag, mapf):
        self.tag = tag    # table name, e.g. "GDEF"
        self.mapf = mapf  # CID mapping; unused here, used by subclasses

    def start(self):
        print("table %s {" % (self.tag))

    def end(self):
        print("} %s;" % (self.tag))

    def line(self, line):
        # Body lines are copied through verbatim.
        print(line)
class HheaProc(TableProc):
    """`hhea` table emitter that forces fixed ascender/descender metrics."""

    def __init__(self, mapf):
        super().__init__("hhea", mapf)

    def line(self, line):
        # First matching keyword wins, mirroring an if/elif chain.
        for keyword, pattern, fixed in (
                ("Ascender", r"Ascender\s+([-\d]+)", "Ascender 880"),
                ("Descender", r"Descender\s+([-\d]+)", "Descender -120")):
            if keyword in line:
                print(re.sub(pattern, fixed, line))
                return
        print(line)
class VmtxProc(TableProc):
    """`vmtx` table emitter that remaps the CID in Vert* entries."""

    def __init__(self, mapf):
        super().__init__("vmtx", mapf)

    def line(self, line):
        match = re.search(r"Vert\S+\s+\\(\d+)", line)
        if match is None:
            print(line)
            return
        old_cid = int(match.group(1))
        # Entries for CIDs without a mapping are dropped entirely.
        if old_cid in self.mapf:
            print(re.sub(r"\\\d+", r"\\%d" % (self.mapf[old_cid]), line))
class OS2Proc(TableProc):
    """`OS/2` table emitter that forces fixed winAscent/winDescent metrics."""

    def __init__(self, mapf):
        super().__init__("OS/2", mapf)

    def line(self, line):
        # First matching keyword wins, mirroring an if/elif chain.
        for keyword, pattern, fixed in (
                ("winAscent", r"winAscent\s+([-\d]+)", "winAscent 880"),
                ("winDescent", r"winDescent\s+([-\d]+)", "winDescent 120")):
            if keyword in line:
                print(re.sub(pattern, fixed, line))
                return
        print(line)
##############################
class FeatureProc(object):
    """Default pass-through emitter for a `feature ... { ... }` block."""

    def __init__(self, tag, mapf, lookups=None):
        self.tag = tag          # feature tag, e.g. "liga"
        self.mapf = mapf        # old-CID -> new-CID mapping
        self.lookups = lookups  # names of lookups kept by the conversion

    def start(self):
        print("feature %s {" % (self.tag))

    def end(self):
        print("} %s;" % (self.tag))

    def line(self, line):
        # Body lines are copied through verbatim.
        print(line)
class GeneralGsubProc(FeatureProc):
    """Buffering emitter for plain GSUB features (ccmp, liga, vert, ...).

    Lines are collected first so a feature whose body converts to nothing
    but comments can be suppressed entirely.
    """

    def __init__(self, tag, mapf, lookups):
        super().__init__(tag, mapf, lookups)
        self.lines = []

    def start(self):
        # Output is deferred to end().
        pass

    def end(self):
        if all_comments(self.lines):
            return
        print("feature %s {" % (self.tag))
        for body_line in self.lines:
            print(body_line)
        print("} %s;" % (self.tag))

    def line(self, line):
        ref = re.search(r"^\s*lookup\s+(\S+)\s*;", line)
        if ref:
            # Keep the reference only when the lookup survived conversion.
            if ref.group(1) in self.lookups:
                self.lines.append(line)
            return
        if re.search(r"^\s*sub", line):
            parsed = preprocess_gsub_line(line, self.mapf)
            if parsed:
                pieces = [" substitute"]
                for fragm in parsed:
                    if fragm.typ == GsubFragmentType.CID:
                        pieces.append(" \\%d" % (fragm.val))
                    elif fragm.typ in (GsubFragmentType.FROMBY, GsubFragmentType.OTHER):
                        pieces.append(" %s" % (fragm.val))
                self.lines.append("".join(pieces) + ";")
            return
        self.lines.append(line)
# XXX: very ugly and complicated ...
class LoclProc(FeatureProc):
    """Buffering emitter for the `locl` feature.

    `locl` bodies are grouped under `script`/`language` headers.  Headers are
    held in tmp_script/tmp_lang and only committed to `lines` once their group
    turns out to contain something besides comments, so that empty groups (and
    an entirely empty feature) can be suppressed.
    """
    def __init__(self, tag, mapf, lookups):
        super().__init__(tag, mapf, lookups)
        self.tmp_script = None   # pending `script ...` header line
        self.tmp_lang = None     # pending `language ...` header line
        self.tmp_gsublines = []  # converted lines of the current group
        self.lines = []          # committed output body
    def start(self):
        # Output is deferred to end() so an empty feature can be dropped.
        pass
    def end(self):
        # Flush the last open group (headers first) if it has real content.
        if not all_comments(self.tmp_gsublines):
            if self.tmp_script:
                self.lines.append(self.tmp_script)
            if self.tmp_lang:
                self.lines.append(self.tmp_lang)
            self.lines.extend(self.tmp_gsublines)
        # Emit nothing when only comments survived the conversion.
        if all_comments(self.lines):
            return
        print("feature %s {" % (self.tag))
        for line in self.lines:
            print(line)
        print("} %s;" % (self.tag))
    def line(self, line):
        if re.search(r"^\s*script", line):
            # A new script starts: flush the previous group.
            if all_comments(self.tmp_gsublines):
                # first comments
                # Comments seen before any header are kept as-is; a
                # comment-only group under a header is discarded.
                if not self.tmp_script and not self.tmp_lang:
                    self.lines.extend(self.tmp_gsublines)
            else:
                if self.tmp_script:
                    self.lines.append(self.tmp_script)
                if self.tmp_lang:
                    self.lines.append(self.tmp_lang)
                self.lines.extend(self.tmp_gsublines)
            self.tmp_script = line
            self.tmp_lang = None
            self.tmp_gsublines = []
            return
        if re.search(r"^\s*language", line):
            # A new language starts: flush the previous group; a comment-only
            # group (and its pending headers) is discarded here.
            if not all_comments(self.tmp_gsublines):
                if self.tmp_script:
                    self.lines.append(self.tmp_script)
                if self.tmp_lang:
                    self.lines.append(self.tmp_lang)
                self.lines.extend(self.tmp_gsublines)
            self.tmp_script = None
            self.tmp_lang = line
            self.tmp_gsublines = []
            return
        m = re.search(r"^\s*lookup\s+(\S+)\s*;", line)
        if m:
            lookup = m.group(1)
            # Keep the reference only when the lookup survived conversion.
            if lookup in self.lookups:
                self.tmp_gsublines.append(line)
            return
        if re.search(r"^\s*sub", line):
            parsed_line = preprocess_gsub_line(line, self.mapf)
            # Rules referencing unmappable CIDs (parsed_line is None) are
            # dropped entirely.
            if parsed_line:
                newline = " substitute"
                for fragm in parsed_line:
                    if fragm.typ == GsubFragmentType.CID:
                        newline += " \\%d" % (fragm.val)
                    elif fragm.typ == GsubFragmentType.FROMBY or fragm.typ == GsubFragmentType.OTHER:
                        newline += " %s" % (fragm.val)
                newline += ";"
                self.tmp_gsublines.append(newline)
            return
        self.tmp_gsublines.append(line)
class PaltVpalHaltVhalProc(FeatureProc):
    """Emitter for single-glyph GPOS features (palt/vpal/halt/vhal), remapping CIDs."""

    def __init__(self, tag, mapf):
        super().__init__(tag, mapf)

    def line(self, line):
        match = re.search(r"pos\S*\s+\\(\d+)", line)
        if match is None:
            print(line)
            return
        old_cid = int(match.group(1))
        # Positioning rules for unmapped CIDs are dropped entirely.
        if old_cid in self.mapf:
            print(re.sub(r"\\\d+", r"\\%d" % (self.mapf[old_cid]), line))
class KernVkrnProc(FeatureProc):
    """Buffering emitter for kern/vkrn pair-positioning features.

    A pair rule is kept only when every glyph class it references survived
    conversion and every CID it references has a mapping; otherwise the
    whole rule is dropped.
    """

    def __init__(self, tag, mapf, classes):
        super().__init__(tag, mapf)
        self.classes = classes  # names of classes kept by the conversion
        self.lines = []

    def start(self):
        # Output is deferred to end() so an empty feature can be dropped.
        pass

    def end(self):
        if all_comments(self.lines):
            return
        print("feature %s {" % (self.tag))
        for body_line in self.lines:
            print(body_line)
        print("} %s;" % (self.tag))

    def line(self, line):
        m = re.search(r"^(.*pos\S*)\s+(.*)\s*;", line)
        if not m:
            self.lines.append(line)
            return
        declaration = m.group(1)
        rebuilt = []
        for token in re.split(r"\s+", m.group(2).strip()):
            if token.startswith("@"):
                # A class that did not survive makes the rule unusable.
                if token not in self.classes:
                    return
                rebuilt.append(token)
            elif token.startswith("\\"):
                cid = int(token[1:])
                # An unmapped CID makes the rule unusable.
                if cid not in self.mapf:
                    return
                rebuilt.append("\\%d" % (self.mapf[cid]))
            else:
                rebuilt.append(token)
        self.lines.append("{} {};".format(declaration, " ".join(rebuilt)))
##############################
class Proc(object):
    """Dispatcher owning the per-construct processors plus the registries of
    lookups and glyph classes that survived conversion.

    Fix: the factory for features was misspelled `fearure_factory`; it is now
    `feature_factory`, with the old name kept as a backward-compatible alias.
    """

    def __init__(self, mapf):
        self.mapf = mapf      # old-CID -> new-CID mapping
        self.lookups = set()  # names of lookups kept by the conversion
        self.classes = set()  # names of glyph classes kept by the conversion
        self.cur_look = None  # active LookupProc, if inside a lookup
        self.cur_cls = None   # active ClassProc, if inside a class def
        self.cur_tbl = None   # active TableProc, if inside a table
        self.cur_fea = None   # active FeatureProc, if inside a feature

    def line(self, line):
        """Top-level line outside any construct: pass through verbatim."""
        print(line)

    ###
    def lookup_start(self, tag):
        self.cur_look = Proc.lookup_factory(tag, self.mapf)
        self.cur_look.start()

    def lookup_end(self):
        self.cur_look.end()
        # Register the lookup so later `lookup X;` references are kept.
        if self.cur_look.valid() and self.cur_look.tag not in self.lookups:
            self.lookups.add(self.cur_look.tag)
        self.cur_look = None

    def lookup_line(self, line):
        self.cur_look.line(line)

    ###
    def class_start(self, tag):
        self.cur_cls = Proc.class_factory(tag, self.mapf, True if self.cur_fea else False)
        self.cur_cls.start()

    def class_end(self):
        self.cur_cls.end()
        if self.cur_cls.valid():
            if self.cur_cls.tag not in self.classes:
                self.classes.add(self.cur_cls.tag)
            # XXX: ugly...
            # A class defined inside a feature is emitted through that feature.
            if self.cur_fea:
                self.cur_fea.line(self.cur_cls.cls_def)
        self.cur_cls = None

    def class_line(self, line):
        self.cur_cls.line(line)

    ###
    def table_start(self, tag):
        self.cur_tbl = Proc.table_factory(tag, self.mapf)
        self.cur_tbl.start()

    def table_end(self):
        self.cur_tbl.end()
        self.cur_tbl = None

    def table_line(self, line):
        self.cur_tbl.line(line)

    ###
    def feature_start(self, tag):
        self.cur_fea = Proc.feature_factory(tag, self.mapf, self.lookups, self.classes)
        self.cur_fea.start()

    def feature_end(self):
        self.cur_fea.end()
        self.cur_fea = None

    def feature_line(self, line):
        self.cur_fea.line(line)

    #####
    @staticmethod
    def lookup_factory(tag, mapf):
        return LookupProc(tag, mapf)

    @staticmethod
    def class_factory(tag, mapf, inside_feature):
        return ClassProc(tag, mapf, inside_feature)

    @staticmethod
    def table_factory(tag, mapf):
        """Pick the table processor; hhea/vmtx/OS/2 need special handling."""
        if tag == "hhea":
            return HheaProc(mapf)
        elif tag == "vmtx":
            return VmtxProc(mapf)
        elif tag == "OS/2":
            return OS2Proc(mapf)
        else:
            return TableProc(tag, mapf)

    @staticmethod
    def feature_factory(tag, mapf, lookups, classes):
        """Pick the feature processor appropriate for the feature tag."""
        if tag in ["palt", "vpal", "halt", "vhal"]:
            return PaltVpalHaltVhalProc(tag, mapf)
        elif tag in ["kern", "vkrn"]:
            return KernVkrnProc(tag, mapf, classes)
        elif tag in ["ccmp", "hist", "liga", "dlig", "fwid",
                     "hwid", "pwid", "jp78", "jp83", "jp90",
                     "nlck", "vert", "vrt2"]:
            return GeneralGsubProc(tag, mapf, lookups)
        elif tag == "locl":
            return LoclProc(tag, mapf, lookups)
        else:
            return FeatureProc(tag, mapf, lookups)

    # Backward-compatible alias for the original (misspelled) name.
    fearure_factory = feature_factory
##################################################
class FeatureConverter(object):
    """Drives the conversion: walks the input .fea line by line and routes
    each line to the matching Proc handler (lookup/class/table/feature)."""

    def __init__(self):
        # argv[1]: input feature file, argv[2]: CID mapping file.
        self.fea = sys.argv[1]
        self.mapf = FeatureConverter.readMapFile(sys.argv[2])
        # Names of the constructs currently being parsed (None = not inside).
        self.cur_tbl = None
        self.cur_fea = None
        self.cur_look = None
        self.cur_cls = None

    def run(self):
        self._walk_through_fea()

    @staticmethod
    def readMapFile(map_f):
        """Read CID pairs into {old_cid: new_cid}; the first mapping wins.

        NOTE(review): group(1) is treated as the new CID and group(2) as the
        old one, i.e. a "<new> <old>" line layout -- confirm against the map
        files actually in use.
        """
        map_ = {}
        with open(map_f) as f:
            for line in f.readlines():
                m = re.search(r"(\d+)\s+(\d+)", line)
                if m:
                    cid_to = int(m.group(1))
                    cid_from = int(m.group(2))
                    if cid_from not in map_:
                        map_[cid_from] = cid_to
        return map_

    def _walk_through_fea(self):
        proc = Proc(self.mapf)
        with open(self.fea) as f:
            for line in [l.rstrip() for l in f.readlines()]:
                self._line_proc(line, proc)

    def _line_proc(self, line, proc):
        """Dispatch one line; the first handler that claims it wins."""
        # evaluate lookup case first because it is defined inside feature definition.
        if self._lookup_proc(line, proc):
            pass
        elif self._class_proc(line, proc):
            pass
        elif self._table_proc(line, proc):
            pass
        elif self._feature_proc(line, proc):
            pass
        else:
            proc.line(line)

    def _lookup_proc(self, line, proc):
        """Handle `lookup X { ... } X;` blocks; True when the line was consumed."""
        m = re.search(r"^\s*lookup\s+(\S+)\s*{", line)
        if m:
            self.cur_look = m.group(1)
            proc.lookup_start(self.cur_look)
            return True
        if self.cur_look:
            if re.search(r"^\s*}\s*%s\s*;" % (self.cur_look), line):
                proc.lookup_end()
                self.cur_look = None
                return True
            proc.lookup_line(line)
            return True
        return False

    def _class_proc(self, line, proc):
        """Handle `@name = [...];` definitions, possibly spanning lines."""
        m = re.search(r"^\s*(@[a-zA-Z0-9_]+)\s*=\s*\[(.*)", line)
        if m:
            self.cur_cls = m.group(1)
            latter_half = m.group(2)
            latter_half = re.sub(r"#.*", "", latter_half).replace(";", "").strip()
            proc.class_start(self.cur_cls)
            if latter_half != "":
                a = latter_half.split("]")
                cls_line = a[0]
                if cls_line != "":
                    proc.class_line(cls_line)
                if len(a) > 1:
                    proc.class_end()
                    self.cur_cls = None
            return True
        # BUG FIX: continuation lines of a multi-line class definition were
        # gated on `self.cur_look`, which can never be set here because
        # _lookup_proc runs first and consumes every line inside a lookup.
        # As a result multi-line classes were never closed or registered.
        # Gate on the class actually being open instead.
        if self.cur_cls:
            line = re.sub(r"#.*", "", line).replace(";", "").strip()
            a = line.split("]")
            cls_line = a[0]
            if cls_line != "":
                proc.class_line(cls_line)
            if len(a) > 1:
                proc.class_end()
                self.cur_cls = None
            return True
        return False

    def _table_proc(self, line, proc):
        """Handle `table X { ... } X;` blocks; True when the line was consumed."""
        m = re.search(r"^\s*table\s+(\S+)\s*{", line)
        if m:
            self.cur_tbl = m.group(1)
            proc.table_start(self.cur_tbl)
            return True
        if self.cur_tbl:
            if re.search(r"^\s*}\s*%s\s*;" % (self.cur_tbl), line):
                proc.table_end()
                self.cur_tbl = None
                return True
            proc.table_line(line)
            return True
        return False

    def _feature_proc(self, line, proc):
        """Handle `feature X { ... } X;` blocks; True when the line was consumed."""
        m = re.search(r"^\s*feature\s+(\S+)\s*{", line)
        if m:
            self.cur_fea = m.group(1)
            proc.feature_start(self.cur_fea)
            return True
        if self.cur_fea:
            if re.search(r"^\s*}\s*%s\s*;" % (self.cur_fea), line):
                proc.feature_end()
                self.cur_fea = None
                return True
            proc.feature_line(line)
            return True
        return False
################################################################################
def _main():
    """Script entry point: warn on Python 2, validate argv, run the conversion."""
    if sys.version_info.major < 3:
        # The code relies on Python 3 features (e.g. argument-less super()).
        print("I may not work... :(")
    # Fail with a usage message instead of a raw IndexError on missing args.
    if len(sys.argv) < 3:
        sys.exit("usage: %s <input.fea> <cid-map-file>" % sys.argv[0])
    FeatureConverter().run()


if __name__ == "__main__":
    # Guarding the entry point keeps the module importable without side effects.
    _main()
| |
"""The tests the for GPSLogger device tracker platform."""
from unittest.mock import patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import gpslogger, zone
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
HOME_LATITUDE = 37.239622
HOME_LONGITUDE = -115.815811
# pylint: disable=redefined-outer-name
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
    """Mock device tracker config loading."""
    # Depending on `mock_device_tracker_conf` is the whole point: it
    # presumably replaces the legacy device-tracker YAML handling for every
    # test in this module, so no body is needed.
    pass
@pytest.fixture
async def gpslogger_client(loop, hass, aiohttp_client):
    """Mock client for GPSLogger (unauthenticated)."""
    # persistent_notification is set up alongside the integration
    # (presumably required by its config flow -- confirm).
    assert await async_setup_component(hass, "persistent_notification", {})
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
    await hass.async_block_till_done()
    # Patch legacy known-devices config writes so tests never touch disk.
    with patch("homeassistant.components.device_tracker.legacy.update_config"):
        return await aiohttp_client(hass.http.app)
@pytest.fixture(autouse=True)
async def setup_zones(loop, hass):
    """Set up Zone config in HA."""
    # A "Home" zone around HOME_LATITUDE/HOME_LONGITUDE lets the tests assert
    # STATE_HOME vs STATE_NOT_HOME transitions.
    assert await async_setup_component(
        hass,
        zone.DOMAIN,
        {
            "zone": {
                "name": "Home",
                "latitude": HOME_LATITUDE,
                "longitude": HOME_LONGITUDE,
                "radius": 100,
            }
        },
    )
    await hass.async_block_till_done()
@pytest.fixture
async def webhook_id(hass, gpslogger_client):
    """Initialize the GPSLogger component and get the webhook_id."""
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    # Drive the config flow: the "user" step shows a form; confirming it with
    # an empty dict creates the entry that carries the webhook id.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result

    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    await hass.async_block_till_done()
    return result["result"].data["webhook_id"]
async def test_missing_data(hass, gpslogger_client, webhook_id):
    """Test missing data."""
    url = f"/api/webhook/{webhook_id}"
    data = {"latitude": 1.0, "longitude": 1.1, "device": "123"}

    # An empty payload must be rejected.
    req = await gpslogger_client.post(url)
    await hass.async_block_till_done()
    assert req.status == HTTP_UNPROCESSABLE_ENTITY

    # Dropping any required field must be rejected as well.
    for missing in ("latitude", "device"):
        partial = {key: value for key, value in data.items() if key != missing}
        req = await gpslogger_client.post(url, data=partial)
        await hass.async_block_till_done()
        assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, gpslogger_client, webhook_id):
    """Test when there is a known zone."""
    url = f"/api/webhook/{webhook_id}"
    data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"}

    # Enter the Home
    req = await gpslogger_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(f"{DEVICE_TRACKER_DOMAIN}.{data['device']}").state
    assert state_name == STATE_HOME

    # Enter Home again (state must stay STATE_HOME)
    req = await gpslogger_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(f"{DEVICE_TRACKER_DOMAIN}.{data['device']}").state
    assert state_name == STATE_HOME

    data["longitude"] = 0
    data["latitude"] = 0

    # Enter Somewhere else (outside the Home zone radius)
    req = await gpslogger_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(f"{DEVICE_TRACKER_DOMAIN}.{data['device']}").state
    assert state_name == STATE_NOT_HOME

    # Exactly one device and one entity should exist for device "123",
    # even after three updates.
    dev_reg = dr.async_get(hass)
    assert len(dev_reg.devices) == 1

    ent_reg = er.async_get(hass)
    assert len(ent_reg.entities) == 1
async def test_enter_with_attrs(hass, gpslogger_client, webhook_id):
    """Test when additional attributes are present."""
    url = f"/api/webhook/{webhook_id}"

    # First update: away from home, with a full set of extra attributes.
    data = {
        "latitude": 1.0,
        "longitude": 1.1,
        "device": "123",
        "accuracy": 10.5,
        "battery": 10,
        "speed": 100,
        "direction": 105.32,
        "altitude": 102,
        "provider": "gps",
        "activity": "running",
    }
    req = await gpslogger_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state = hass.states.get(f"{DEVICE_TRACKER_DOMAIN}.{data['device']}")
    assert state.state == STATE_NOT_HOME
    expected = {
        "gps_accuracy": 10.5,
        "battery_level": 10.0,
        "speed": 100.0,
        "direction": 105.32,
        "altitude": 102.0,
        "provider": "gps",
        "activity": "running",
    }
    for attribute, value in expected.items():
        assert state.attributes[attribute] == value

    # Second update: back home, with different attribute values.
    data = {
        "latitude": HOME_LATITUDE,
        "longitude": HOME_LONGITUDE,
        "device": "123",
        "accuracy": 123,
        "battery": 23,
        "speed": 23,
        "direction": 123,
        "altitude": 123,
        "provider": "gps",
        "activity": "idle",
    }
    req = await gpslogger_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state = hass.states.get(f"{DEVICE_TRACKER_DOMAIN}.{data['device']}")
    assert state.state == STATE_HOME
    expected = {
        "gps_accuracy": 123,
        "battery_level": 23,
        "speed": 23,
        "direction": 123,
        "altitude": 123,
        "provider": "gps",
        "activity": "idle",
    }
    for attribute, value in expected.items():
        assert state.attributes[attribute] == value
@pytest.mark.xfail(
    reason="The device_tracker component does not support unloading yet."
)
async def test_load_unload_entry(hass, gpslogger_client, webhook_id):
    """Test that the appropriate dispatch signals are added and removed."""
    url = f"/api/webhook/{webhook_id}"
    data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"}

    # Enter the Home
    req = await gpslogger_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(f"{DEVICE_TRACKER_DOMAIN}.{data['device']}").state
    assert state_name == STATE_HOME
    # Exactly one dispatcher listener for tracker updates after setup.
    assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1

    entry = hass.config_entries.async_entries(DOMAIN)[0]
    # Unloading must remove the dispatcher connection again.
    assert await gpslogger.async_unload_entry(hass, entry)
    await hass.async_block_till_done()
    assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
| |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.pip_requirement import PipRequirement
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
MainSpecification,
PexLayout,
PythonRequirementsField,
PythonResolveField,
parse_requirements_file,
)
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.local_dists import rules as local_dists_rules
from pants.backend.python.util_rules.pex import (
CompletePlatforms,
OptionalPex,
OptionalPexRequest,
PexPlatforms,
PexRequest,
)
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.backend.python.util_rules.pex_requirements import Lockfile, PexRequirements
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
StrippedPythonSourceFiles,
)
from pants.backend.python.util_rules.python_sources import rules as python_sources_rules
from pants.engine.addresses import Address, Addresses
from pants.engine.collection import DeduplicatedCollection
from pants.engine.fs import Digest, DigestContents, GlobMatchErrorBehavior, MergeDigests, PathGlobs
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Target, TransitiveTargets, TransitiveTargetsRequest
from pants.util.docutil import doc_url
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.strutil import bullet_list, path_safe
logger = logging.getLogger(__name__)
@frozen_after_init
@dataclass(unsafe_hash=True)
class PexFromTargetsRequest:
    """Request to create a Pex from the transitive closure of the given addresses.

    See `__init__` for the meaning of each field.
    """

    addresses: Addresses
    output_filename: str
    internal_only: bool
    layout: PexLayout | None
    main: MainSpecification | None
    platforms: PexPlatforms
    complete_platforms: CompletePlatforms
    additional_args: tuple[str, ...]
    additional_lockfile_args: tuple[str, ...]
    include_source_files: bool
    include_requirements: bool
    include_local_dists: bool
    additional_sources: Digest | None
    additional_inputs: Digest | None
    hardcoded_interpreter_constraints: InterpreterConstraints | None
    # This field doesn't participate in comparison (and therefore hashing), as it doesn't affect
    # the result.
    description: str | None = dataclasses.field(compare=False)

    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        output_filename: str,
        internal_only: bool,
        layout: PexLayout | None = None,
        main: MainSpecification | None = None,
        platforms: PexPlatforms = PexPlatforms(),
        complete_platforms: CompletePlatforms = CompletePlatforms(),
        additional_args: Iterable[str] = (),
        additional_lockfile_args: Iterable[str] = (),
        include_source_files: bool = True,
        include_requirements: bool = True,
        include_local_dists: bool = False,
        additional_sources: Digest | None = None,
        additional_inputs: Digest | None = None,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
        description: str | None = None,
    ) -> None:
        """Request to create a Pex from the transitive closure of the given addresses.

        :param addresses: The addresses to use for determining what is included in the Pex. The
            transitive closure of these addresses will be used; you only need to specify the roots.
        :param output_filename: The name of the built Pex file, which typically should end in
            `.pex`.
        :param internal_only: Whether we ever materialize the Pex and distribute it directly
            to end users, such as with the `binary` goal. Typically, instead, the user never
            directly uses the Pex, e.g. with `lint` and `test`. If True, we will use a Pex setting
            that results in faster build time but compatibility with fewer interpreters at runtime.
        :param layout: The filesystem layout to create the PEX with.
        :param main: The main for the built Pex, equivalent to Pex's `-e` or `-c` flag. If
            left off, the Pex will open up as a REPL.
        :param platforms: Which platforms should be supported. Setting this value will cause
            interpreter constraints to not be used because platforms already constrain the valid
            Python versions, e.g. by including `cp36m` in the platform string.
        :param complete_platforms: Complete platform data for the platforms the built Pex
            should support (see `CompletePlatforms` for the exact semantics).
        :param additional_args: Any additional Pex flags.
        :param additional_lockfile_args: Any additional Pex flags that should be used with the
            lockfile.pex. Many Pex args like `--emit-warnings` do not impact the lockfile, and
            setting them would reduce reuse with other call sites. Generally, these should only be
            flags that impact lockfile resolution like `--manylinux`.
        :param include_source_files: Whether to include source files in the built Pex or not.
            Setting this to `False` and loading the source files by instead populating the chroot
            and setting the environment variable `PEX_EXTRA_SYS_PATH` will result in substantially
            fewer rebuilds of the Pex.
        :param include_requirements: Whether to resolve requirements and include them in the Pex.
        :param include_local_dists: Whether to build local dists and include them in the built pex.
        :param additional_sources: Any additional source files to include in the built Pex.
        :param additional_inputs: Any inputs that are not source files and should not be included
            directly in the Pex, but should be present in the environment when building the Pex.
        :param hardcoded_interpreter_constraints: Use these constraints rather than resolving the
            constraints from the input.
        :param description: A human-readable description to render in the dynamic UI when building
            the Pex.
        """
        self.addresses = Addresses(addresses)
        self.output_filename = output_filename
        self.internal_only = internal_only
        self.layout = layout
        self.main = main
        self.platforms = platforms
        self.complete_platforms = complete_platforms
        self.additional_args = tuple(additional_args)
        self.additional_lockfile_args = tuple(additional_lockfile_args)
        self.include_source_files = include_source_files
        self.include_requirements = include_requirements
        self.include_local_dists = include_local_dists
        self.additional_sources = additional_sources
        self.additional_inputs = additional_inputs
        self.hardcoded_interpreter_constraints = hardcoded_interpreter_constraints
        self.description = description

    def to_interpreter_constraints_request(self) -> InterpreterConstraintsRequest:
        """Derive the interpreter-constraints request for this Pex request."""
        return InterpreterConstraintsRequest(
            addresses=self.addresses,
            hardcoded_interpreter_constraints=self.hardcoded_interpreter_constraints,
        )
@frozen_after_init
@dataclass(unsafe_hash=True)
class InterpreterConstraintsRequest:
    """Request to compute interpreter constraints for a set of addresses.

    `hardcoded_interpreter_constraints`, when given, is used verbatim;
    otherwise the constraints are derived from the transitive closure of
    `addresses` (see `interpreter_constraints_for_targets`).
    """

    addresses: Addresses
    hardcoded_interpreter_constraints: InterpreterConstraints | None

    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
    ) -> None:
        self.addresses = Addresses(addresses)
        self.hardcoded_interpreter_constraints = hardcoded_interpreter_constraints
@rule
async def interpreter_constraints_for_targets(
    request: InterpreterConstraintsRequest, python_setup: PythonSetup
) -> InterpreterConstraints:
    """Compute interpreter constraints for the request's addresses.

    Hardcoded constraints win outright; otherwise constraints are calculated
    from the transitive closure, falling back to the global defaults.
    """
    if request.hardcoded_interpreter_constraints:
        return request.hardcoded_interpreter_constraints

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    calculated_constraints = InterpreterConstraints.create_from_targets(
        transitive_targets.closure, python_setup
    )
    # If there are no targets, we fall back to the global constraints. This is relevant,
    # for example, when running `./pants repl` with no specs or only on targets without
    # `interpreter_constraints` (e.g. `python_requirement`).
    interpreter_constraints = calculated_constraints or InterpreterConstraints(
        python_setup.interpreter_constraints
    )
    return interpreter_constraints
@dataclass(frozen=True)
class ChosenPythonResolve:
    """The resolve (by name) and its lockfile path chosen for a set of targets."""

    name: str
    lockfile_path: str
@dataclass(frozen=True)
class ChosenPythonResolveRequest:
    """Request to choose the single resolve shared by these root addresses."""

    addresses: Addresses
# Note: Inspired by `coursier_fetch.py`.
class NoCompatibleResolveException(Exception):
    """No compatible resolve could be found for a set of targets."""

    def __init__(
        self,
        python_setup: PythonSetup,
        msg_prefix: str,
        relevant_targets: Iterable[Target],
        msg_suffix: str = "",
    ) -> None:
        # Group offending target addresses by the resolve each one uses, so
        # the message shows exactly which targets disagree.
        resolves_to_addresses = defaultdict(list)
        for tgt in relevant_targets:
            if tgt.has_field(PythonResolveField):
                resolve = tgt[PythonResolveField].normalized_value(python_setup)
                resolves_to_addresses[resolve].append(tgt.address.spec)

        formatted_resolve_lists = "\n\n".join(
            f"{resolve}:\n{bullet_list(sorted(addresses))}"
            for resolve, addresses in sorted(resolves_to_addresses.items())
        )
        super().__init__(
            f"{msg_prefix}:\n\n"
            f"{formatted_resolve_lists}\n\n"
            "Targets which will be used together must all have the same resolve (from the "
            f"[resolve]({doc_url('reference-python_test#coderesolvecode')}) "
            "field) in common." + (f"\n\n{msg_suffix}" if msg_suffix else "")
        )
@rule
async def choose_python_resolve(
    request: ChosenPythonResolveRequest, python_setup: PythonSetup
) -> ChosenPythonResolve:
    """Choose the single resolve shared by the roots and validate the closure.

    Raises `NoCompatibleResolveException` when roots disagree or when any
    transitive dependency uses a different resolve.
    """
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))

    # First, choose the resolve by inspecting the root targets.
    root_resolves = {
        root[PythonResolveField].normalized_value(python_setup)
        for root in transitive_targets.roots
        if root.has_field(PythonResolveField)
    }
    if not root_resolves:
        # If there are no relevant targets, we fall back to the default resolve. This is relevant,
        # for example, when running `./pants repl` with no specs or only on non-Python targets.
        return ChosenPythonResolve(
            name=python_setup.default_resolve,
            lockfile_path=python_setup.resolves[python_setup.default_resolve],
        )

    if len(root_resolves) > 1:
        raise NoCompatibleResolveException(
            python_setup,
            "The input targets did not have a resolve in common",
            transitive_targets.roots,
        )

    chosen_resolve = next(iter(root_resolves))

    # Then, validate that all transitive deps are compatible.
    for tgt in transitive_targets.dependencies:
        if (
            tgt.has_field(PythonResolveField)
            and tgt[PythonResolveField].normalized_value(python_setup) != chosen_resolve
        ):
            plural = ("s", "their") if len(transitive_targets.roots) > 1 else ("", "its")
            raise NoCompatibleResolveException(
                python_setup,
                (
                    f"The resolve chosen for the root target{plural[0]} was {chosen_resolve}, but "
                    f"some of {plural[1]} dependencies are not compatible with that resolve"
                ),
                transitive_targets.closure,
            )

    return ChosenPythonResolve(
        name=chosen_resolve, lockfile_path=python_setup.resolves[chosen_resolve]
    )
class GlobalRequirementConstraints(DeduplicatedCollection[PipRequirement]):
    """Global constraints specified by the `[python].requirement_constraints` setting, if any."""

    # Empty when the option is unset (see determine_global_requirement_constraints).
@rule
async def determine_global_requirement_constraints(
    python_setup: PythonSetup,
) -> GlobalRequirementConstraints:
    """Load and parse the `[python].requirement_constraints` file, if configured."""
    if not python_setup.requirement_constraints:
        return GlobalRequirementConstraints()

    constraints_file_contents = await Get(
        DigestContents,
        PathGlobs(
            [python_setup.requirement_constraints],
            # Error (rather than warn/ignore) when the configured file is missing.
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `[python].requirement_constraints`",
        ),
    )

    return GlobalRequirementConstraints(
        parse_requirements_file(
            constraints_file_contents[0].content.decode(),
            rel_path=constraints_file_contents[0].path,
        )
    )
@dataclass(frozen=True)
class _PexRequirementsRequest:
    """Determine the requirement strings used transitively.

    This type is private because callers should likely use `RequirementsPexRequest` or
    `PexFromTargetsRequest` instead.
    """

    addresses: Addresses  # roots whose transitive closure is scanned for requirements
@rule
async def determine_requirement_strings_in_closure(
    request: _PexRequirementsRequest, global_requirement_constraints: GlobalRequirementConstraints
) -> PexRequirements:
    """Collect the requirements of every target in the closure that declares them."""
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    return PexRequirements.create_from_requirement_fields(
        (
            tgt[PythonRequirementsField]
            for tgt in transitive_targets.closure
            if tgt.has_field(PythonRequirementsField)
        ),
        constraints_strings=(str(constraint) for constraint in global_requirement_constraints),
    )
@frozen_after_init
@dataclass(unsafe_hash=True)
class _RepositoryPexRequest:
    """Private request for building a repository Pex covering `requirements`
    for the given addresses."""

    addresses: Addresses
    requirements: PexRequirements
    hardcoded_interpreter_constraints: InterpreterConstraints | None
    platforms: PexPlatforms
    complete_platforms: CompletePlatforms
    internal_only: bool
    additional_lockfile_args: tuple[str, ...]

    def __init__(
        self,
        addresses: Iterable[Address],
        requirements: PexRequirements,
        *,
        internal_only: bool,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
        platforms: PexPlatforms = PexPlatforms(),
        complete_platforms: CompletePlatforms = CompletePlatforms(),
        additional_lockfile_args: tuple[str, ...] = (),
    ) -> None:
        self.addresses = Addresses(addresses)
        self.requirements = requirements
        self.internal_only = internal_only
        self.hardcoded_interpreter_constraints = hardcoded_interpreter_constraints
        self.platforms = platforms
        self.complete_platforms = complete_platforms
        self.additional_lockfile_args = additional_lockfile_args

    def to_interpreter_constraints_request(self) -> InterpreterConstraintsRequest:
        """Derive the interpreter-constraints request for this repository Pex."""
        return InterpreterConstraintsRequest(
            addresses=self.addresses,
            hardcoded_interpreter_constraints=self.hardcoded_interpreter_constraints,
        )
@dataclass(frozen=True)
class _ConstraintsRepositoryPexRequest:
    """Request to build the repository pex from the global constraints file."""
    # The underlying repository-pex request whose settings (platforms, addresses,
    # lockfile args) drive the constraints-based resolve.
    repository_pex_request: _RepositoryPexRequest
@rule(level=LogLevel.DEBUG)
async def create_pex_from_targets(request: PexFromTargetsRequest) -> PexRequest:
    """Assemble a `PexRequest` for the given targets: sources, requirements, dists.

    Gathers interpreter constraints, (optionally) source files, local
    distributions and third-party requirements for the transitive closure of
    `request.addresses`, and merges everything into a single `PexRequest`.
    """
    interpreter_constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest,
        request.to_interpreter_constraints_request(),
    )
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    sources_digests = []
    if request.additional_sources:
        sources_digests.append(request.additional_sources)
    if request.include_source_files:
        sources = await Get(PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure))
    else:
        sources = PythonSourceFiles.empty()
    additional_inputs_digests = []
    if request.additional_inputs:
        additional_inputs_digests.append(request.additional_inputs)
    additional_args = request.additional_args
    if request.include_local_dists:
        # Build first-party dists; sources they provide are removed from the
        # remaining source set so they are not duplicated in the pex.
        local_dists = await Get(
            LocalDistsPex,
            LocalDistsPexRequest(
                request.addresses,
                internal_only=request.internal_only,
                interpreter_constraints=interpreter_constraints,
                sources=sources,
            ),
        )
        remaining_sources = local_dists.remaining_sources
        additional_inputs_digests.append(local_dists.pex.digest)
        additional_args += ("--requirements-pex", local_dists.pex.name)
    else:
        remaining_sources = sources
    remaining_sources_stripped = await Get(
        StrippedPythonSourceFiles, PythonSourceFiles, remaining_sources
    )
    sources_digests.append(remaining_sources_stripped.stripped_source_files.snapshot.digest)
    # Merge the two digest groups concurrently.
    merged_sources_digest, additional_inputs = await MultiGet(
        Get(Digest, MergeDigests(sources_digests)),
        Get(Digest, MergeDigests(additional_inputs_digests)),
    )
    description = request.description
    if request.include_requirements:
        requirements = await Get(PexRequirements, _PexRequirementsRequest(request.addresses))
    else:
        requirements = PexRequirements()
    if requirements:
        # Attach the (optional) repository pex so the subset resolve can reuse it.
        repository_pex = await Get(
            OptionalPex,
            _RepositoryPexRequest(
                request.addresses,
                requirements=requirements,
                hardcoded_interpreter_constraints=request.hardcoded_interpreter_constraints,
                platforms=request.platforms,
                complete_platforms=request.complete_platforms,
                internal_only=request.internal_only,
                additional_lockfile_args=request.additional_lockfile_args,
            ),
        )
        requirements = dataclasses.replace(requirements, repository_pex=repository_pex.maybe_pex)
    return PexRequest(
        output_filename=request.output_filename,
        internal_only=request.internal_only,
        layout=request.layout,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        complete_platforms=request.complete_platforms,
        main=request.main,
        sources=merged_sources_digest,
        additional_inputs=additional_inputs,
        additional_args=additional_args,
        description=description,
    )
@rule
async def get_repository_pex(
    request: _RepositoryPexRequest, python_setup: PythonSetup
) -> OptionalPexRequest:
    """Choose how (or whether) to build the repository pex.

    Priority: a configured constraints file wins; explicitly enabling
    `resolve_all_constraints` without one is an error; otherwise, if resolves
    are enabled, the chosen lockfile is used. Falls back to no repository pex.
    """
    interpreter_constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest,
        request.to_interpreter_constraints_request(),
    )
    repository_pex_request: PexRequest | None = None
    if python_setup.requirement_constraints:
        constraints_repository_pex_request = await Get(
            OptionalPexRequest,
            _ConstraintsRepositoryPexRequest(request),
        )
        repository_pex_request = constraints_repository_pex_request.maybe_pex_request
    elif (
        python_setup.resolve_all_constraints
        and python_setup.resolve_all_constraints_was_set_explicitly()
    ):
        raise ValueError(
            "`[python].resolve_all_constraints` is enabled, so "
            "`[python].requirement_constraints` must also be set."
        )
    elif python_setup.enable_resolves:
        chosen_resolve = await Get(
            ChosenPythonResolve, ChosenPythonResolveRequest(request.addresses)
        )
        repository_pex_request = PexRequest(
            description=(
                f"Installing {chosen_resolve.lockfile_path} for the resolve `{chosen_resolve.name}`"
            ),
            output_filename=f"{path_safe(chosen_resolve.name)}_lockfile.pex",
            internal_only=request.internal_only,
            requirements=Lockfile(
                file_path=chosen_resolve.lockfile_path,
                file_path_description_of_origin=(
                    f"the resolve `{chosen_resolve.name}` (from `[python].resolves`)"
                ),
                resolve_name=chosen_resolve.name,
                req_strings=request.requirements.req_strings,
            ),
            interpreter_constraints=interpreter_constraints,
            platforms=request.platforms,
            complete_platforms=request.complete_platforms,
            additional_args=request.additional_lockfile_args,
        )
    return OptionalPexRequest(repository_pex_request)
@rule
async def _setup_constraints_repository_pex(
    constraints_request: _ConstraintsRepositoryPexRequest,
    python_setup: PythonSetup,
    global_requirement_constraints: GlobalRequirementConstraints,
) -> OptionalPexRequest:
    """Build a PexRequest resolving the entire constraints file, when safe.

    Returns `OptionalPexRequest(None)` when resolving the whole constraints
    file is disabled, unsafe (platforms in use), or the constraints file does
    not cover every transitive name requirement.
    """
    request = constraints_request.repository_pex_request
    # NB: it isn't safe to resolve against the whole constraints file if
    # platforms are in use. See https://github.com/pantsbuild/pants/issues/12222.
    if not python_setup.resolve_all_constraints or request.platforms or request.complete_platforms:
        return OptionalPexRequest(None)
    constraints_path = python_setup.requirement_constraints
    assert constraints_path is not None
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    requirements = PexRequirements.create_from_requirement_fields(
        (
            tgt[PythonRequirementsField]
            for tgt in transitive_targets.closure
            if tgt.has_field(PythonRequirementsField)
        ),
        constraints_strings=(str(constraint) for constraint in global_requirement_constraints),
    )
    # In requirement strings, Foo_-Bar.BAZ and foo-bar-baz refer to the same project. We let
    # packaging canonicalize for us.
    # See: https://www.python.org/dev/peps/pep-0503/#normalized-names
    url_reqs = set()  # E.g., 'foobar@ git+https://github.com/foo/bar.git@branch'
    name_reqs = set()  # E.g., foobar>=1.2.3
    name_req_projects = set()
    constraints_file_reqs = set(global_requirement_constraints)
    for req_str in requirements.req_strings:
        req = PipRequirement.parse(req_str)
        if req.url:
            url_reqs.add(req)
        else:
            name_reqs.add(req)
            name_req_projects.add(canonicalize_project_name(req.project_name))
    constraint_file_projects = {
        canonicalize_project_name(req.project_name) for req in constraints_file_reqs
    }
    # Constraints files must only contain name reqs, not URL reqs (those are already
    # constrained by their very nature). See https://github.com/pypa/pip/issues/8210.
    unconstrained_projects = name_req_projects - constraint_file_projects
    if unconstrained_projects:
        # Fix 1: sort for a deterministic message (set iteration order varies).
        # Fix 2: name the current `[python]` option scope, consistent with the
        # other messages in this file (previously said `[python_setup]`).
        logger.warning(
            f"The constraints file {constraints_path} does not contain "
            f"entries for the following requirements: "
            f"{', '.join(sorted(unconstrained_projects))}.\n\n"
            f"Ignoring `[python].resolve_all_constraints` option."
        )
        return OptionalPexRequest(None)
    interpreter_constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest,
        request.to_interpreter_constraints_request(),
    )
    # To get a full set of requirements we must add the URL requirements to the
    # constraints file, since the latter cannot contain URL requirements.
    # NB: We can only add the URL requirements we know about here, i.e., those that
    # are transitive deps of the targets in play. There may be others in the repo.
    # So we may end up creating a few different repository pexes, each with identical
    # name requirements but different subsets of URL requirements. Fortunately since
    # all these repository pexes will have identical pinned versions of everything,
    # this is not a correctness issue, only a performance one.
    all_constraints = {str(req) for req in (constraints_file_reqs | url_reqs)}
    repository_pex = PexRequest(
        description=f"Resolving {constraints_path}",
        output_filename="repository.pex",
        internal_only=request.internal_only,
        requirements=PexRequirements(
            all_constraints,
            constraints_strings=(str(constraint) for constraint in global_requirement_constraints),
            # TODO: See PexRequirements docs.
            is_all_constraints_resolve=True,
        ),
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        complete_platforms=request.complete_platforms,
        additional_args=request.additional_lockfile_args,
    )
    return OptionalPexRequest(repository_pex)
@frozen_after_init
@dataclass(unsafe_hash=True)
class RequirementsPexRequest:
    """Public request for a pex containing only third-party requirements."""
    addresses: tuple[Address, ...]
    internal_only: bool
    hardcoded_interpreter_constraints: InterpreterConstraints | None
    def __init__(
        self,
        addresses: Iterable[Address],
        *,
        internal_only: bool,
        hardcoded_interpreter_constraints: InterpreterConstraints | None = None,
    ) -> None:
        # Normalize to the Addresses collection for stable hashing/equality.
        self.addresses = Addresses(addresses)
        self.internal_only = internal_only
        self.hardcoded_interpreter_constraints = hardcoded_interpreter_constraints
@rule
async def get_requirements_pex(request: RequirementsPexRequest, setup: PythonSetup) -> PexRequest:
    """Build the requirements-only pex request for the given roots.

    With `[python].run_against_entire_lockfile` and an internal-only pex, the
    repository pex request (entire lockfile/constraints resolve) is returned
    directly; otherwise a subset pex is built via `PexFromTargetsRequest`.
    """
    if setup.run_against_entire_lockfile and request.internal_only:
        requirements = await Get(
            PexRequirements, _PexRequirementsRequest(Addresses(request.addresses))
        )
        opt_pex_request = await Get(
            OptionalPexRequest,
            _RepositoryPexRequest(
                # Sorted for a deterministic request (and thus cache key).
                addresses=sorted(request.addresses),
                requirements=requirements,
                internal_only=request.internal_only,
                hardcoded_interpreter_constraints=request.hardcoded_interpreter_constraints,
            ),
        )
        if opt_pex_request.maybe_pex_request is None:
            raise ValueError(
                "[python].run_against_entire_lockfile was set, but could not find a "
                "lockfile or constraints file for this target set. See "
                f"{doc_url('python-third-party-dependencies')} for details."
            )
        return opt_pex_request.maybe_pex_request
    pex_request = await Get(
        PexRequest,
        PexFromTargetsRequest(
            addresses=sorted(request.addresses),
            output_filename="requirements.pex",
            internal_only=request.internal_only,
            include_source_files=False,
            hardcoded_interpreter_constraints=request.hardcoded_interpreter_constraints,
        ),
    )
    return pex_request
def rules():
    """Return this module's rules plus those of its pex-related dependencies."""
    collected = list(collect_rules())
    collected.extend(pex_rules())
    collected.extend(local_dists_rules())
    collected.extend(python_sources_rules())
    return tuple(collected)
| |
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import shutil
import tempfile
from os import path
from subprocess import call
from subprocess import check_output
# ------------------ Constants ------------------------
CHUNK_SIZE = 100  # number of Elasticsearch documents fetched per page
_METADATA_FILE = 'metadata.json'
# metadata fields
_M_HAS_CLOUDIFY_EVENTS = 'has_cloudify_events'
_M_VERSION = 'snapshot_version'
VERSION = '3.2'  # snapshot format version written into the metadata
MANAGER_FILE = 'manager.json'
MANAGER_IP_KEY = 'MANAGEMENT_IP'  # environment variable holding the manager IP
ELASTICSEARCH = 'es_data'  # file name for the Elasticsearch dump
CRED_DIR = 'snapshot-credentials'
CRED_KEY_NAME = 'agent_key'
INFLUXDB = 'influxdb_data'  # file name for the InfluxDB metrics dump
# curl command dumping every InfluxDB series as chunked JSON into {0}
INFLUXDB_DUMP_CMD = ('curl -s -G "http://localhost:8086/db/cloudify/series'
                     '?u=root&p=root&chunked=true" --data-urlencode'
                     ' "q=select * from /.*/" > {0}')
# paginated Elasticsearch search URLs for the storage and events indices
DUMP_STORAGE_TEMPLATE = (
    'http://localhost:9200/'
    'cloudify_storage/'
    '_search?from={start}&size={size}')
DUMP_EVENTS_TEMPLATE = (
    'http://localhost:9200/'
    'cloudify_events/_search?from={start}&size={size}')
# ------------------ Elasticsearch ---------------------
def _get_chunk(cmd):
    """Issue an HTTP GET for *cmd* via curl and return the response body."""
    curl_argv = ['curl', '-s', '-XGET', cmd]
    return check_output(curl_argv, universal_newlines=True)
def _remove_newlines(s):
return s.replace('\n', '').replace('\r', '')
def _convert_to_bulk(chunk):
def patch_node(n):
if n['_type'] == 'execution' and\
'is_system_workflow' not in n['_source']:
n['_source']['is_system_workflow'] = False
return json.dumps(n)
return '\n'.join([_remove_newlines(patch_node(n))
for n in chunk if n['_type'] != 'provider_context'])\
+ '\n'
def _append_to_file(f, js):
    # Write this search page's hits to the dump file in bulk (NDJSON) form.
    f.write(_convert_to_bulk(js['hits']['hits']))
def _dump_chunks(f, template, save=False):
    """Page through an Elasticsearch search and append every hit to *f*.

    :param f: open file object receiving bulk-formatted documents
    :param template: URL template with ``{start}``/``{size}`` placeholders
    :param save: when True, also accumulate and return the raw hit dicts
    :return: list of raw hits when *save* is True, otherwise None
    """
    cmd = template.format(start='0', size=str(CHUNK_SIZE))
    js = json.loads(_get_chunk(cmd))
    if save:
        data = js['hits']['hits']
    _append_to_file(f, js)
    total = int(js['hits']['total'])
    if total > CHUNK_SIZE:
        # Fix: use range() instead of the Python-2-only xrange() so the
        # script also runs (unchanged semantics) under Python 3.
        for i in range(CHUNK_SIZE, total, CHUNK_SIZE):
            cmd = template.format(
                start=str(i),
                size=str(CHUNK_SIZE))
            js = json.loads(_get_chunk(cmd))
            if save:
                data.extend(js['hits']['hits'])
            _append_to_file(f, js)
    if save:
        return data
def dump_elasticsearch(file_path):
    """Dump the cloudify_storage and cloudify_events indices to *file_path*.

    Returns the raw storage hits so the caller can inspect them (e.g. for
    agent credentials); event hits are written to the file but not returned.
    """
    with open(file_path, 'w') as f:
        data = _dump_chunks(f, DUMP_STORAGE_TEMPLATE, save=True)
        _dump_chunks(f, DUMP_EVENTS_TEMPLATE)
    return data
# ------------------ Utils ---------------------
def get_json_objects(f):
    """Yield serialized JSON objects from a stream of concatenated objects.

    The input file may contain many JSON documents back to back with no
    separators. Each complete object is re-serialized with ``json.dumps``
    and yielded as a string.
    """
    def chunks(g):
        # Read the stream in fixed-size pieces; the final '' ends iteration.
        ch = g.read(10000)
        yield ch
        while ch:
            ch = g.read(10000)
            yield ch

    s = ''
    decoder = json.JSONDecoder()
    for ch in chunks(f):
        s += ch
        try:
            while s:
                obj, idx = decoder.raw_decode(s)
                yield json.dumps(obj)
                s = s[idx:]
        except ValueError:
            # Fix: only swallow decode errors (raw_decode raises ValueError /
            # JSONDecodeError on a partial object at a chunk boundary). The
            # previous bare `except:` also swallowed KeyboardInterrupt etc.
            pass
def copy_data(archive_root, config, to_archive=True):
    """Copy blueprint data between the file server and the snapshot archive.

    When *to_archive* is True, data flows file server -> archive; otherwise
    the direction is reversed (restore). Missing sources are skipped.
    """
    folders = [
        (config.file_server_blueprints_folder, 'blueprints'),
        (config.file_server_uploaded_blueprints_folder, 'uploaded-blueprints')
    ]
    # files with constant relative/absolute paths
    for src, dst in folders:
        # Resolve relative paths against their respective roots.
        if src[0] != '/':
            src = path.join(config.file_server_root, src)
        if dst[0] != '/':
            dst = path.join(archive_root, dst)
        if not to_archive:
            src, dst = dst, src
        if not path.exists(src):
            continue
        if path.isfile(src):
            shutil.copy(src, dst)
        else:
            # copytree requires a non-existent destination directory.
            if path.exists(dst):
                shutil.rmtree(dst, ignore_errors=True)
            shutil.copytree(src, dst)
# ------------------ Main ---------------------
def worker(config):
    """Create a Cloudify 3.2 snapshot zip at /tmp/home/snapshot_3_2.zip.

    Collects blueprints, Elasticsearch data, optional InfluxDB metrics and
    agent SSH keys into a temp directory, writes metadata, zips and cleans up.
    """
    metadata = {}
    tempdir = tempfile.mkdtemp('-snapshot-data')
    # files/dirs copy
    copy_data(tempdir, config)
    # elasticsearch
    storage = dump_elasticsearch(path.join(tempdir, ELASTICSEARCH))
    metadata[_M_HAS_CLOUDIFY_EVENTS] = True
    # influxdb
    if config.include_metrics:
        influxdb_file = path.join(tempdir, INFLUXDB)
        influxdb_temp_file = influxdb_file + '.temp'
        # Dump raw (chunked) JSON first, then rewrite as one object per line.
        call(INFLUXDB_DUMP_CMD.format(influxdb_temp_file), shell=True)
        with open(influxdb_temp_file, 'r') as f, open(influxdb_file, 'w') as g:
            for obj in get_json_objects(f):
                g.write(obj + '\n')
        os.remove(influxdb_temp_file)
    # credentials
    archive_cred_path = path.join(tempdir, CRED_DIR)
    os.makedirs(archive_cred_path)
    # Save each agent's SSH key under snapshot-credentials/<node_id>/agent_key.
    for n in filter(lambda x: x['_type'] == 'node', storage):
        props = n['_source']['properties']
        if 'cloudify_agent' in props and 'key' in props['cloudify_agent']:
            node_id = n['_id']
            agent_key_path = props['cloudify_agent']['key']
            os.makedirs(path.join(archive_cred_path, node_id))
            shutil.copy(path.expanduser(agent_key_path),
                        path.join(archive_cred_path, node_id, CRED_KEY_NAME))
    # version
    metadata[_M_VERSION] = VERSION
    manager = {
        MANAGER_IP_KEY: os.environ[MANAGER_IP_KEY]
    }
    with open(path.join(tempdir, MANAGER_FILE), 'w') as f:
        f.write(json.dumps(manager))
    # metadata
    with open(path.join(tempdir, _METADATA_FILE), 'w') as f:
        json.dump(metadata, f)
    # zip
    shutil.make_archive('/tmp/home/snapshot_3_2',
                        'zip',
                        tempdir)
    # end
    shutil.rmtree(tempdir)
if __name__ == '__main__':
    # CLI entry point: paths default to the standard manager file-server layout.
    parser = argparse.ArgumentParser()
    parser.add_argument('--fs-root', dest='file_server_root',
                        default='/opt/manager/resources/')
    parser.add_argument('--fs-blueprints',
                        dest='file_server_blueprints_folder',
                        default='blueprints')
    parser.add_argument('--fs-ublueprints',
                        dest='file_server_uploaded_blueprints_folder',
                        default='uploaded-blueprints')
    parser.add_argument('--include-metrics',
                        dest='include_metrics',
                        action='store_true')
    pargs = parser.parse_args()
    worker(pargs)
| |
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy import Request, Spider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class LogExceptionMiddleware:
    """Log the class name of any exception reaching this middleware, then pass it on."""

    def process_spider_exception(self, response, exception, spider):
        exc_name = exception.__class__.__name__
        spider.logger.info('Middleware: %s exception caught', exc_name)
        return None
# ================================================================================
# (0) recover from an exception on a spider callback
class RecoveryMiddleware:
    """Recover from a callback exception by emitting an item plus a retry request."""

    def process_spider_exception(self, response, exception, spider):
        spider.logger.info('Middleware: %s exception caught', exception.__class__.__name__)
        recovery_item = {'from': 'process_spider_exception'}
        retry = Request(response.url, meta={'dont_fail': True}, dont_filter=True)
        return [recovery_item, retry]
class RecoverySpider(Spider):
    """Spider whose callback raises TabError unless the request is flagged.

    Paired with RecoveryMiddleware: the first response raises, the middleware
    yields a recovery item plus a retry request carrying dont_fail=True.
    """
    name = 'RecoverySpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            RecoveryMiddleware: 10,
        },
    }
    def start_requests(self):
        yield Request(self.mockserver.url('/status?n=200'))
    def parse(self, response):
        yield {'test': 1}
        self.logger.info('DONT_FAIL: %s', response.meta.get('dont_fail'))
        # Fail only on the first pass; the retry carries dont_fail=True.
        if not response.meta.get('dont_fail'):
            raise TabError()
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class FailProcessSpiderInputMiddleware:
    """Middleware whose input hook always fails with IndexError."""

    def process_spider_input(self, response, spider):
        # Announce the failure first so tests can assert on the log output.
        message = 'Middleware: will raise IndexError'
        spider.logger.info(message)
        raise IndexError()
class ProcessSpiderInputSpiderWithoutErrback(Spider):
    """Spider whose request has no errback; the input-chain IndexError is
    expected to reach the process_spider_exception chain instead."""
    name = 'ProcessSpiderInputSpiderWithoutErrback'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            # spider
            FailProcessSpiderInputMiddleware: 8,
            LogExceptionMiddleware: 6,
            # engine
        }
    }
    def start_requests(self):
        yield Request(url=self.mockserver.url('/status?n=200'), callback=self.parse)
    def parse(self, response):
        # Not expected to run: FailProcessSpiderInputMiddleware raises first.
        return {'from': 'callback'}
class ProcessSpiderInputSpiderWithErrback(ProcessSpiderInputSpiderWithoutErrback):
    """Same as the parent, but the request carries an errback, which is
    expected to receive the input-chain failure instead of the middlewares."""
    name = 'ProcessSpiderInputSpiderWithErrback'
    def start_requests(self):
        yield Request(self.mockserver.url('/status?n=200'), self.parse, errback=self.errback)
    def errback(self, failure):
        # The IndexError from the input chain arrives here wrapped in a Failure.
        self.logger.info('Got a Failure on the Request errback')
        return {'from': 'errback'}
# ================================================================================
# (2) exceptions from a spider callback (generator)
class GeneratorCallbackSpider(Spider):
    """Callback is a generator: the two items yielded before the ImportError
    should still be processed."""
    name = 'GeneratorCallbackSpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            LogExceptionMiddleware: 10,
        },
    }
    def start_requests(self):
        yield Request(self.mockserver.url('/status?n=200'))
    def parse(self, response):
        yield {'test': 1}
        yield {'test': 2}
        raise ImportError()
# ================================================================================
# (2.1) exceptions from a spider callback (generator, middleware right after callback)
class GeneratorCallbackSpiderMiddlewareRightAfterSpider(GeneratorCallbackSpider):
    """Variant of GeneratorCallbackSpider with the logging middleware placed
    right next to the spider via a very high order value."""
    name = 'GeneratorCallbackSpiderMiddlewareRightAfterSpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            LogExceptionMiddleware: 100000,
        },
    }
# ================================================================================
# (3) exceptions from a spider callback (not a generator)
class NotGeneratorCallbackSpider(Spider):
    """Callback returns a list: the ZeroDivisionError raised while the list
    literal is evaluated means no items are produced at all."""
    name = 'NotGeneratorCallbackSpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            LogExceptionMiddleware: 10,
        },
    }
    def start_requests(self):
        yield Request(self.mockserver.url('/status?n=200'))
    def parse(self, response):
        # 1 / 0 raises ZeroDivisionError before the list is ever returned.
        return [{'test': 1}, {'test': 1 / 0}]
# ================================================================================
# (3.1) exceptions from a spider callback (not a generator, middleware right after callback)
class NotGeneratorCallbackSpiderMiddlewareRightAfterSpider(NotGeneratorCallbackSpider):
    """Variant of NotGeneratorCallbackSpider with the logging middleware placed
    right next to the spider via a very high order value."""
    name = 'NotGeneratorCallbackSpiderMiddlewareRightAfterSpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            LogExceptionMiddleware: 100000,
        },
    }
# ================================================================================
# (4) exceptions from a middleware process_spider_output method (generator)
class _GeneratorDoNothingMiddleware:
def process_spider_output(self, response, result, spider):
for r in result:
r['processed'].append(f'{self.__class__.__name__}.process_spider_output')
yield r
def process_spider_exception(self, response, exception, spider):
method = f'{self.__class__.__name__}.process_spider_exception'
spider.logger.info('%s: %s caught', method, exception.__class__.__name__)
return None
class GeneratorFailMiddleware:
    """Tag every item, then raise LookupError once the input is exhausted;
    recovery yields a single marker item."""

    def process_spider_output(self, response, result, spider):
        tag = f'{self.__class__.__name__}.process_spider_output'
        for item in result:
            item['processed'].append(tag)
            yield item
        raise LookupError()

    def process_spider_exception(self, response, exception, spider):
        method = f'{self.__class__.__name__}.process_spider_exception'
        spider.logger.info('%s: %s caught', method, exception.__class__.__name__)
        yield {'processed': [method]}
class GeneratorDoNothingAfterFailureMiddleware(_GeneratorDoNothingMiddleware):
    """Pass-through subclass used to observe the chain around the failure point."""
    pass
class GeneratorRecoverMiddleware:
    """Tag items on the way through; on exception, yield a marker recovery item."""

    def process_spider_output(self, response, result, spider):
        tag = f'{self.__class__.__name__}.process_spider_output'
        for item in result:
            item['processed'].append(tag)
            yield item

    def process_spider_exception(self, response, exception, spider):
        method = f'{self.__class__.__name__}.process_spider_exception'
        spider.logger.info('%s: %s caught', method, exception.__class__.__name__)
        yield {'processed': [method]}
class GeneratorDoNothingAfterRecoveryMiddleware(_GeneratorDoNothingMiddleware):
    """Pass-through subclass used to observe the chain after the recovery point."""
    pass
class GeneratorOutputChainSpider(Spider):
    """Spider driving the generator-based process_spider_output chain.

    The order values position Fail/DoNothing/Recover/DoNothing middlewares
    along the chain so the tests can follow where the LookupError travels.
    """
    name = 'GeneratorOutputChainSpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            GeneratorFailMiddleware: 10,
            GeneratorDoNothingAfterFailureMiddleware: 8,
            GeneratorRecoverMiddleware: 5,
            GeneratorDoNothingAfterRecoveryMiddleware: 3,
        },
    }
    def start_requests(self):
        yield Request(self.mockserver.url('/status?n=200'))
    def parse(self, response):
        yield {'processed': ['parse-first-item']}
        yield {'processed': ['parse-second-item']}
# ================================================================================
# (5) exceptions from a middleware process_spider_output method (not generator)
class _NotGeneratorDoNothingMiddleware:
def process_spider_output(self, response, result, spider):
out = []
for r in result:
r['processed'].append(f'{self.__class__.__name__}.process_spider_output')
out.append(r)
return out
def process_spider_exception(self, response, exception, spider):
method = f'{self.__class__.__name__}.process_spider_exception'
spider.logger.info('%s: %s caught', method, exception.__class__.__name__)
return None
class NotGeneratorFailMiddleware:
    """Tag every item, then raise ReferenceError before anything is returned.

    Because the output hook is not a generator, the exception discards all
    items; recovery substitutes a single marker item.
    """

    def process_spider_output(self, response, result, spider):
        out = []
        for r in result:
            r['processed'].append(f'{self.__class__.__name__}.process_spider_output')
            out.append(r)
        # Fix: the original `return out` after this unconditional raise was
        # unreachable dead code and has been removed.
        raise ReferenceError()

    def process_spider_exception(self, response, exception, spider):
        method = f'{self.__class__.__name__}.process_spider_exception'
        spider.logger.info('%s: %s caught', method, exception.__class__.__name__)
        return [{'processed': [method]}]
class NotGeneratorDoNothingAfterFailureMiddleware(_NotGeneratorDoNothingMiddleware):
    """Pass-through subclass used to observe the chain around the failure point."""
    pass
class NotGeneratorRecoverMiddleware:
    """Tag items into a new list; on exception, return a one-item recovery list."""

    def process_spider_output(self, response, result, spider):
        tag = f'{self.__class__.__name__}.process_spider_output'
        collected = []
        for item in result:
            item['processed'].append(tag)
            collected.append(item)
        return collected

    def process_spider_exception(self, response, exception, spider):
        method = f'{self.__class__.__name__}.process_spider_exception'
        spider.logger.info('%s: %s caught', method, exception.__class__.__name__)
        return [{'processed': [method]}]
class NotGeneratorDoNothingAfterRecoveryMiddleware(_NotGeneratorDoNothingMiddleware):
    """Pass-through subclass used to observe the chain after the recovery point."""
    pass
class NotGeneratorOutputChainSpider(Spider):
    """Spider driving the list-based process_spider_output chain.

    Mirrors GeneratorOutputChainSpider but with non-generator middlewares, so
    the ReferenceError discards the callback's items entirely.
    """
    name = 'NotGeneratorOutputChainSpider'
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            NotGeneratorFailMiddleware: 10,
            NotGeneratorDoNothingAfterFailureMiddleware: 8,
            NotGeneratorRecoverMiddleware: 5,
            NotGeneratorDoNothingAfterRecoveryMiddleware: 3,
        },
    }
    def start_requests(self):
        return [Request(self.mockserver.url('/status?n=200'))]
    def parse(self, response):
        return [{'processed': ['parse-first-item']}, {'processed': ['parse-second-item']}]
# ================================================================================
class TestSpiderMiddleware(TestCase):
    """Integration tests for the spider-middleware exception-handling chain,
    run against a local mock HTTP server."""

    @classmethod
    def setUpClass(cls):
        cls.mockserver = MockServer()
        cls.mockserver.__enter__()

    @classmethod
    def tearDownClass(cls):
        cls.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def crawl_log(self, spider):
        """Run a full crawl of *spider* and return the captured log."""
        crawler = get_crawler(spider)
        with LogCapture() as log:
            yield crawler.crawl(mockserver=self.mockserver)
        return log

    @defer.inlineCallbacks
    def test_recovery(self):
        """
        (0) Recover from an exception in a spider callback. The final item count should be 3
        (one yielded from the callback method before the exception is raised, one directly
        from the recovery middleware and one from the spider when processing the request that
        was enqueued from the recovery middleware)
        """
        log = yield self.crawl_log(RecoverySpider)
        self.assertIn("Middleware: TabError exception caught", str(log))
        self.assertEqual(str(log).count("Middleware: TabError exception caught"), 1)
        self.assertIn("'item_scraped_count': 3", str(log))

    @defer.inlineCallbacks
    def test_process_spider_input_without_errback(self):
        """
        (1.1) An exception from the process_spider_input chain should be caught by the
        process_spider_exception chain from the start if the Request has no errback
        """
        log1 = yield self.crawl_log(ProcessSpiderInputSpiderWithoutErrback)
        self.assertIn("Middleware: will raise IndexError", str(log1))
        self.assertIn("Middleware: IndexError exception caught", str(log1))

    @defer.inlineCallbacks
    def test_process_spider_input_with_errback(self):
        """
        (1.2) An exception from the process_spider_input chain should not be caught by the
        process_spider_exception chain if the Request has an errback
        """
        log1 = yield self.crawl_log(ProcessSpiderInputSpiderWithErrback)
        self.assertNotIn("Middleware: IndexError exception caught", str(log1))
        self.assertIn("Middleware: will raise IndexError", str(log1))
        self.assertIn("Got a Failure on the Request errback", str(log1))
        self.assertIn("{'from': 'errback'}", str(log1))
        self.assertNotIn("{'from': 'callback'}", str(log1))
        self.assertIn("'item_scraped_count': 1", str(log1))

    @defer.inlineCallbacks
    def test_generator_callback(self):
        """
        (2) An exception from a spider callback (returning a generator) should
        be caught by the process_spider_exception chain. Items yielded before the
        exception is raised should be processed normally.
        """
        log2 = yield self.crawl_log(GeneratorCallbackSpider)
        self.assertIn("Middleware: ImportError exception caught", str(log2))
        self.assertIn("'item_scraped_count': 2", str(log2))

    @defer.inlineCallbacks
    def test_generator_callback_right_after_callback(self):
        """
        (2.1) Special case of (2): Exceptions should be caught
        even if the middleware is placed right after the spider
        """
        log21 = yield self.crawl_log(GeneratorCallbackSpiderMiddlewareRightAfterSpider)
        self.assertIn("Middleware: ImportError exception caught", str(log21))
        self.assertIn("'item_scraped_count': 2", str(log21))

    @defer.inlineCallbacks
    def test_not_a_generator_callback(self):
        """
        (3) An exception from a spider callback (returning a list) should
        be caught by the process_spider_exception chain. No items should be processed.
        """
        log3 = yield self.crawl_log(NotGeneratorCallbackSpider)
        self.assertIn("Middleware: ZeroDivisionError exception caught", str(log3))
        self.assertNotIn("item_scraped_count", str(log3))

    @defer.inlineCallbacks
    def test_not_a_generator_callback_right_after_callback(self):
        """
        (3.1) Special case of (3): Exceptions should be caught
        even if the middleware is placed right after the spider
        """
        log31 = yield self.crawl_log(NotGeneratorCallbackSpiderMiddlewareRightAfterSpider)
        self.assertIn("Middleware: ZeroDivisionError exception caught", str(log31))
        self.assertNotIn("item_scraped_count", str(log31))

    @defer.inlineCallbacks
    def test_generator_output_chain(self):
        """
        (4) An exception from a middleware's process_spider_output method should be sent
        to the process_spider_exception method from the next middleware in the chain.
        The result of the recovery by the process_spider_exception method should be handled
        by the process_spider_output method from the next middleware.
        The final item count should be 2 (one from the spider callback and one from the
        process_spider_exception chain)
        """
        log4 = yield self.crawl_log(GeneratorOutputChainSpider)
        self.assertIn("'item_scraped_count': 2", str(log4))
        self.assertIn("GeneratorRecoverMiddleware.process_spider_exception: LookupError caught", str(log4))
        self.assertIn(
            "GeneratorDoNothingAfterFailureMiddleware.process_spider_exception: LookupError caught",
            str(log4))
        self.assertNotIn(
            "GeneratorFailMiddleware.process_spider_exception: LookupError caught",
            str(log4))
        self.assertNotIn(
            "GeneratorDoNothingAfterRecoveryMiddleware.process_spider_exception: LookupError caught",
            str(log4))
        item_from_callback = {'processed': [
            'parse-first-item',
            'GeneratorFailMiddleware.process_spider_output',
            'GeneratorDoNothingAfterFailureMiddleware.process_spider_output',
            'GeneratorRecoverMiddleware.process_spider_output',
            'GeneratorDoNothingAfterRecoveryMiddleware.process_spider_output']}
        item_recovered = {'processed': [
            'GeneratorRecoverMiddleware.process_spider_exception',
            'GeneratorDoNothingAfterRecoveryMiddleware.process_spider_output']}
        self.assertIn(str(item_from_callback), str(log4))
        self.assertIn(str(item_recovered), str(log4))
        self.assertNotIn('parse-second-item', str(log4))

    @defer.inlineCallbacks
    def test_not_a_generator_output_chain(self):
        """
        (5) An exception from a middleware's process_spider_output method should be sent
        to the process_spider_exception method from the next middleware in the chain.
        The result of the recovery by the process_spider_exception method should be handled
        by the process_spider_output method from the next middleware.
        The final item count should be 1 (from the process_spider_exception chain, the items
        from the spider callback are lost)
        """
        log5 = yield self.crawl_log(NotGeneratorOutputChainSpider)
        self.assertIn("'item_scraped_count': 1", str(log5))
        # Fix: assert on the exact NotGenerator* middleware names. The previous
        # Generator* strings only matched because they are substrings of the
        # actual "NotGenerator..." log messages, weakening the assertions.
        self.assertIn(
            "NotGeneratorRecoverMiddleware.process_spider_exception: ReferenceError caught",
            str(log5))
        self.assertIn(
            "NotGeneratorDoNothingAfterFailureMiddleware.process_spider_exception: ReferenceError caught",
            str(log5))
        self.assertNotIn(
            "NotGeneratorFailMiddleware.process_spider_exception: ReferenceError caught",
            str(log5))
        self.assertNotIn(
            "NotGeneratorDoNothingAfterRecoveryMiddleware.process_spider_exception: ReferenceError caught",
            str(log5))
        item_recovered = {'processed': [
            'NotGeneratorRecoverMiddleware.process_spider_exception',
            'NotGeneratorDoNothingAfterRecoveryMiddleware.process_spider_output']}
        self.assertIn(str(item_recovered), str(log5))
        self.assertNotIn('parse-first-item', str(log5))
        self.assertNotIn('parse-second-item', str(log5))
| |
#!/usr/bin/env python
# Copyright (C) 2006-2010, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author: James Krycka
"""
This script uses py2exe to create inversion\dist\direfl.exe for Windows.
The resulting executable bundles the DiRefl application, the python runtime
environment, and other required python packages into a single file. Additional
resource files that are needed when DiRefl is run are placed in the dist
directory tree. On completion, the contents of the dist directory tree can be
used by the Inno Setup Compiler (via a separate script) to build a Windows
installer/uninstaller for deployment of the DiRefl application. For testing
purposes, direfl.exe can be run from the dist directory.
"""
import os
import sys
'''
print "*** Python path is:"
for i, p in enumerate(sys.path):
print "%5d %s" %(i, p)
'''
from distutils.core import setup
# Augment the setup interface with the py2exe command and make sure the py2exe
# option is passed to setup.
import py2exe
# Default to the "py2exe" command so the script can be run with no arguments
# (equivalent to "python setup.py py2exe").
if len(sys.argv) == 1:
    sys.argv.append('py2exe')
import matplotlib
# Retrieve the application version string.
from version import version
# A manifest is required to be included in a py2exe image (or accessible as a
# file in the image directory) when wxPython is included so that the Windows XP
# theme is used when rendering wx widgets. The manifest must be matched to the
# version of Python that is being used.
#
# Create a manifest for use with Python 2.5 on Windows XP or Vista. It is
# adapted from the Python manifest file (C:\Python25\pythonw.exe.manifest).
manifest_for_python25 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="1.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
# Create a manifest for use with Python 2.6 or 2.7 on Windows XP or Vista.
manifest_for_python26 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32">
</assemblyIdentity>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="6595b64144ccf1df"
language="*">
</assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
# Select the appropriate manifest to use.
# NOTE: this is a Python 2 script (print-statement syntax below); py2exe only
# supports Python 2, so the guard enforces 2.5 <= version < 3.0.
if sys.version_info >= (3, 0) or sys.version_info < (2, 5):
    print "*** This script only works with Python 2.5, 2.6, or 2.7."
    sys.exit()
elif sys.version_info >= (2, 6):
    # Python 2.6/2.7 executables additionally depend on the VC90 CRT,
    # which this manifest declares.
    manifest = manifest_for_python26
elif sys.version_info >= (2, 5):
    manifest = manifest_for_python25
# Create a list of all files to include along side the executable being built
# in the dist directory tree. Each element of the data_files list is a tuple
# consisting of a path (relative to dist\) and a list of files in that path.
data_files = []
# Add data files from the matplotlib\mpl-data folder and its subfolders.
# For matplotlib prior to version 0.99 see the examples at the end of the file.
# NOTE(review): the empty-list initializer above is immediately overwritten
# here; it is dead code kept for readability only.
data_files = matplotlib.get_py2exe_datafiles()
# Add resource files that need to reside in the same directory as the image.
data_files.append( ('.', [os.path.join('.', 'direfl.ico')]) )
data_files.append( ('.', [os.path.join('.', 'direfl_splash.png')]) )
data_files.append( ('.', [os.path.join('.', 'LICENSE.txt')]) )
data_files.append( ('.', [os.path.join('.', 'README.txt')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_1.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_2.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_3.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'qrd1.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'qrd2.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'surround_air_4.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'surround_d2o_4.refl')]) )
# Add the Microsoft Visual C++ 2008 redistributable kit if we are building with
# Python 2.6 or 2.7.  This kit will be installed on the target system as part
# of the installation process for the frozen image.  Note that the Python 2.5
# interpreter requires msvcr71.dll which is included in the Python25 package,
# however, Python 2.6 and 2.7 require the msvcr90.dll but they do not bundle it
# with the Python26 or Python27 package.  Thus, for Python 2.6 and later, the
# appropriate dll must be present on the target system at runtime.
# NOTE(review): assumes vcredist_x86.exe has been copied next to python.exe;
# the build fails later if it is missing.
if sys.version_info >= (2, 6):
    pypath = os.path.dirname(sys.executable)
    data_files.append( ('.', [os.path.join(pypath, 'vcredist_x86.exe')]) )
# Specify required packages to bundle in the executable image.
packages = ['matplotlib', 'numpy', 'scipy', 'pytz']
# Specify files to include in the executable image.
includes = []
# Specify files to exclude from the executable image.
# - We can safely exclude Tk/Tcl and Qt modules because our app uses wxPython.
# - We do not use ssl services so they are omitted.
# - We can safely exclude the TkAgg matplotlib backend because our app uses
#   "matplotlib.use('WXAgg')" to override the default matplotlib configuration.
# - On the web it is widely recommended to exclude certain lib*.dll modules
#   but this does not seem necessary any more (but adding them does not hurt).
# - Python25 requires mscvr71.dll, however, Win XP includes this file.
# - Since we do not support Win 9x systems, w9xpopen.dll is not needed.
# - For some reason cygwin1.dll gets included by default, but it is not needed.
excludes = ['Tkinter', 'PyQt4', '_ssl', '_tkagg']
dll_excludes = ['libgdk_pixbuf-2.0-0.dll',
                'libgobject-2.0-0.dll',
                'libgdk-win32-2.0-0.dll',
                'tcl84.dll',
                'tk84.dll',
                'QtGui4.dll',
                'QtCore4.dll',
                'msvcr71.dll',
                'msvcp90.dll',
                'w9xpopen.exe',
                'cygwin1.dll']
class Target():
    """Holds distribution metadata as instance attributes.

    Arbitrary keyword arguments become attributes; the module-level version
    string is always attached as ``version``.
    """
    def __init__(self, **kw):
        for key, val in kw.items():
            setattr(self, key, val)
        self.version = version
# Describe the application being frozen; py2exe reads this metadata object.
client = Target(
    name = 'DiRefl',
    description = 'Direct Inversion Reflectometry (DiRefl) application',
    script = 'bin/direfl.py', # module to run on application start
    dest_base = 'direfl', # file name part of the exe file to create
    icon_resources = [(1, 'direfl.ico')], # also need to specify in data_files
    bitmap_resources = [],
    other_resources = [(24, 1, manifest % dict(prog='DiRefl'))] )
# Now we do the work to create a standalone distribution using py2exe.
#
# When the application is run in console mode, a console window will be created
# to receive any logging or error messages and the application will then create
# a separate GUI application window.
#
# When the application is run in windows mode, it will create a GUI application
# window and no console window will be provided.  Output to stderr will be
# written to <app-image-name>.log.
setup(
    #console=[client],
    windows=[client],
    options={'py2exe': {
                   'packages': packages,
                   'includes': includes,
                   'excludes': excludes,
                   'dll_excludes': dll_excludes,
                   'compressed': 1,   # standard compression
                   'optimize': 0,     # no byte-code optimization
                   'dist_dir': "dist",# where to put py2exe results
                   'xref': False,     # display cross reference (as html doc)
                   'bundle_files': 1  # bundle python25.dll in library.zip
                         }
            },
    #zipfile=None,               # None means bundle library.zip in exe
    data_files=data_files        # list of files to copy to dist directory
)
#==============================================================================
# This section is for reference only when using older versions of matplotlib.
# The location of mpl-data files has changed across releases of matplotlib.
# Furthermore, matplotlib.get_py2exe_datafiles() had problems prior to version
# 0.99 (see link below for details), so alternative ways had to be used.
# The various techniques shown below for obtaining matplotlib auxiliary files
# (and previously used by this project) was adapted from the examples and
# discussion on http://www.py2exe.org/index.cgi/MatPlotLib.
#
# The following technique worked for matplotlib 0.91.2.
# Note that glob '*.*' will not find files that have no file extension.
'''
import glob
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
mpl_lst = ('mpl-data', glob.glob(os.path.join(matplotlibdatadir, '*.*')))
data_files.append(mpl_lst)
mpl_lst = ('mpl-data', [os.path.join(matplotlibdatadir, 'matplotlibrc')])
data_files.append(mpl_lst) # pickup file missed by glob
mpl_lst = (r'mpl-data\fonts',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\images',
glob.glob(os.path.join(matplotlibdatadir, r'images\*.*')))
data_files.append(mpl_lst)
'''
# The following technique worked for matplotlib 0.98.5.
# Note that glob '*.*' will not find files that have no file extension.
'''
import glob
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
mpl_lst = ('mpl-data', glob.glob(os.path.join(matplotlibdatadir, '*.*')))
data_files.append(mpl_lst)
mpl_lst = ('mpl-data', [os.path.join(matplotlibdatadir, 'matplotlibrc')])
data_files.append(mpl_lst) # pickup file missed by glob
mpl_lst = (r'mpl-data\fonts\afm',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\afm\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\fonts\pdfcorefonts',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\pdfcorefonts\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\fonts\ttf',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\ttf\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\images',
glob.glob(os.path.join(matplotlibdatadir, r'images\*.*')))
data_files.append(mpl_lst)
'''
# The following technique worked for matplotlib 0.98 and 0.99.
'''
from distutils.filelist import findall
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
matplotlibdata = findall(matplotlibdatadir)
for f in matplotlibdata:
dirname = os.path.join('mpl-data', f[len(matplotlibdatadir)+1:])
data_files.append((os.path.split(dirname)[0], [f]))
'''
| |
"""Support for monitoring the local system."""
from __future__ import annotations
import asyncio
from dataclasses import dataclass
import datetime
from functools import lru_cache
import logging
import os
import socket
import sys
from typing import Any, cast
import psutil
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_TOTAL,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_TYPE,
DATA_GIBIBYTES,
DATA_MEBIBYTES,
DATA_RATE_MEGABYTES_PER_SECOND,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_component import DEFAULT_SCAN_INTERVAL
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_ARG = "arg"
if sys.maxsize > 2 ** 32:
CPU_ICON = "mdi:cpu-64-bit"
else:
CPU_ICON = "mdi:cpu-32-bit"
SENSOR_TYPE_NAME = 0
SENSOR_TYPE_UOM = 1
SENSOR_TYPE_ICON = 2
SENSOR_TYPE_DEVICE_CLASS = 3
SENSOR_TYPE_MANDATORY_ARG = 4
SIGNAL_SYSTEMMONITOR_UPDATE = "systemmonitor_update"
@dataclass
class SysMonitorSensorEntityDescription(SensorEntityDescription):
    """Description for System Monitor sensor entities."""

    # True when this sensor type requires a configured "arg" (CONF_ARG);
    # enforced at config-validation time by check_required_arg().
    mandatory_arg: bool = False
SENSOR_TYPES: dict[str, SysMonitorSensorEntityDescription] = {
"disk_free": SysMonitorSensorEntityDescription(
key="disk_free",
name="Disk free",
native_unit_of_measurement=DATA_GIBIBYTES,
icon="mdi:harddisk",
state_class=STATE_CLASS_TOTAL,
),
"disk_use": SysMonitorSensorEntityDescription(
key="disk_use",
name="Disk use",
native_unit_of_measurement=DATA_GIBIBYTES,
icon="mdi:harddisk",
state_class=STATE_CLASS_TOTAL,
),
"disk_use_percent": SysMonitorSensorEntityDescription(
key="disk_use_percent",
name="Disk use (percent)",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
state_class=STATE_CLASS_TOTAL,
),
"ipv4_address": SysMonitorSensorEntityDescription(
key="ipv4_address",
name="IPv4 address",
icon="mdi:server-network",
mandatory_arg=True,
),
"ipv6_address": SysMonitorSensorEntityDescription(
key="ipv6_address",
name="IPv6 address",
icon="mdi:server-network",
mandatory_arg=True,
),
"last_boot": SysMonitorSensorEntityDescription(
key="last_boot",
name="Last boot",
device_class=DEVICE_CLASS_TIMESTAMP,
),
"load_15m": SysMonitorSensorEntityDescription(
key="load_15m",
name="Load (15m)",
icon=CPU_ICON,
state_class=STATE_CLASS_TOTAL,
),
"load_1m": SysMonitorSensorEntityDescription(
key="load_1m",
name="Load (1m)",
icon=CPU_ICON,
state_class=STATE_CLASS_TOTAL,
),
"load_5m": SysMonitorSensorEntityDescription(
key="load_5m",
name="Load (5m)",
icon=CPU_ICON,
state_class=STATE_CLASS_TOTAL,
),
"memory_free": SysMonitorSensorEntityDescription(
key="memory_free",
name="Memory free",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:memory",
state_class=STATE_CLASS_TOTAL,
),
"memory_use": SysMonitorSensorEntityDescription(
key="memory_use",
name="Memory use",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:memory",
state_class=STATE_CLASS_TOTAL,
),
"memory_use_percent": SysMonitorSensorEntityDescription(
key="memory_use_percent",
name="Memory use (percent)",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:memory",
state_class=STATE_CLASS_TOTAL,
),
"network_in": SysMonitorSensorEntityDescription(
key="network_in",
name="Network in",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:server-network",
state_class=STATE_CLASS_TOTAL_INCREASING,
mandatory_arg=True,
),
"network_out": SysMonitorSensorEntityDescription(
key="network_out",
name="Network out",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:server-network",
state_class=STATE_CLASS_TOTAL_INCREASING,
mandatory_arg=True,
),
"packets_in": SysMonitorSensorEntityDescription(
key="packets_in",
name="Packets in",
icon="mdi:server-network",
state_class=STATE_CLASS_TOTAL_INCREASING,
mandatory_arg=True,
),
"packets_out": SysMonitorSensorEntityDescription(
key="packets_out",
name="Packets out",
icon="mdi:server-network",
state_class=STATE_CLASS_TOTAL_INCREASING,
mandatory_arg=True,
),
"throughput_network_in": SysMonitorSensorEntityDescription(
key="throughput_network_in",
name="Network throughput in",
native_unit_of_measurement=DATA_RATE_MEGABYTES_PER_SECOND,
icon="mdi:server-network",
state_class=STATE_CLASS_TOTAL,
mandatory_arg=True,
),
"throughput_network_out": SysMonitorSensorEntityDescription(
key="throughput_network_out",
name="Network throughput out",
native_unit_of_measurement=DATA_RATE_MEGABYTES_PER_SECOND,
icon="mdi:server-network",
state_class=STATE_CLASS_TOTAL,
mandatory_arg=True,
),
"process": SysMonitorSensorEntityDescription(
key="process",
name="Process",
icon=CPU_ICON,
state_class=STATE_CLASS_TOTAL,
mandatory_arg=True,
),
"processor_use": SysMonitorSensorEntityDescription(
key="processor_use",
name="Processor use",
native_unit_of_measurement=PERCENTAGE,
icon=CPU_ICON,
state_class=STATE_CLASS_TOTAL,
),
"processor_temperature": SysMonitorSensorEntityDescription(
key="processor_temperature",
name="Processor temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_TOTAL,
),
"swap_free": SysMonitorSensorEntityDescription(
key="swap_free",
name="Swap free",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:harddisk",
state_class=STATE_CLASS_TOTAL,
),
"swap_use": SysMonitorSensorEntityDescription(
key="swap_use",
name="Swap use",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:harddisk",
state_class=STATE_CLASS_TOTAL,
),
"swap_use_percent": SysMonitorSensorEntityDescription(
key="swap_use_percent",
name="Swap use (percent)",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
state_class=STATE_CLASS_TOTAL,
),
}
def check_required_arg(value: Any) -> Any:
    """Validate that the required "arg" for the sensor types that need it are set."""
    for sensor_conf in value:
        conf_type = sensor_conf[CONF_TYPE]
        needs_arg = SENSOR_TYPES[conf_type].mandatory_arg
        # Reject configurations where a mandatory argument was omitted.
        if needs_arg and sensor_conf.get(CONF_ARG) is None:
            raise vol.RequiredFieldInvalid(
                f"Mandatory 'arg' is missing for sensor type '{conf_type}'."
            )
    return value
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_RESOURCES, default={CONF_TYPE: "disk_use"}): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_TYPE): vol.In(SENSOR_TYPES),
vol.Optional(CONF_ARG): cv.string,
}
)
],
check_required_arg,
)
}
)
IO_COUNTER = {
"network_out": 0,
"network_in": 1,
"packets_out": 2,
"packets_in": 3,
"throughput_network_out": 0,
"throughput_network_in": 1,
}
IF_ADDRS_FAMILY = {"ipv4_address": socket.AF_INET, "ipv6_address": socket.AF_INET6}
# There might be additional keys to be added for different
# platforms / hardware combinations.
# Taken from last version of "glances" integration before they moved to
# a generic temperature sensor logic.
# https://github.com/home-assistant/core/blob/5e15675593ba94a2c11f9f929cdad317e27ce190/homeassistant/components/glances/sensor.py#L199
CPU_SENSOR_PREFIXES = [
"amdgpu 1",
"aml_thermal",
"Core 0",
"Core 1",
"CPU Temperature",
"CPU",
"cpu-thermal 1",
"cpu_thermal 1",
"exynos-therm 1",
"Package id 0",
"Physical id 0",
"radeon 1",
"soc-thermal 1",
"soc_thermal 1",
"Tctl",
"cpu0-thermal",
]
@dataclass
class SensorData:
    """Data for a sensor."""
    # Configured argument for this sensor (e.g. a mount point for disk_*
    # sensors or a network interface name for the net/packet sensors).
    argument: Any
    # Last computed state; None until the first successful update.
    state: str | None
    # Last raw counter sample (used by the throughput sensors), if any.
    value: Any | None
    # Timestamp of the last counter sample (throughput sensors), if any.
    update_time: datetime.datetime | None
    # Exception raised by the most recent failed update, or None when the
    # last update succeeded (drives entity availability).
    last_exception: BaseException | None
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: Any | None = None,
) -> None:
    """Set up the system monitor sensors."""
    sensor_registry: dict[tuple[str, str], SensorData] = {}
    entities = []
    for resource in config[CONF_RESOURCES]:
        sensor_type = resource[CONF_TYPE]
        # Determine the sensor argument; when none was provided, disk sensors
        # default to "/" (root) to prevent runtime errors.
        if CONF_ARG in resource:
            argument = resource[CONF_ARG]
        elif sensor_type.startswith("disk_"):
            argument = "/"
        else:
            argument = ""
        # Verify that CPU / processor temperatures can actually be read;
        # if not, warn and skip creating the entity.
        if sensor_type == "processor_temperature":
            if await hass.async_add_executor_job(_read_cpu_temperature) is None:
                _LOGGER.warning("Cannot read CPU / processor temperature information")
                continue
        sensor_registry[(sensor_type, argument)] = SensorData(
            argument, None, None, None, None
        )
        entities.append(
            SystemMonitorSensor(sensor_registry, SENSOR_TYPES[sensor_type], argument)
        )
    interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    await async_setup_sensor_registry_updates(hass, sensor_registry, interval)
    async_add_entities(entities)
async def async_setup_sensor_registry_updates(
    hass: HomeAssistant,
    sensor_registry: dict[tuple[str, str], SensorData],
    scan_interval: datetime.timedelta,
) -> None:
    """Update the registry and create polling.

    Runs one blocking update of every registered sensor in the executor on
    each tick of *scan_interval*, stores results/errors on the shared
    SensorData entries, and notifies entities via the dispatcher signal.
    """
    # Guards against overlapping update cycles when one takes longer than
    # the scan interval.
    _update_lock = asyncio.Lock()
    def _update_sensors() -> None:
        """Update sensors and store the result in the registry."""
        for (type_, argument), data in sensor_registry.items():
            try:
                state, value, update_time = _update(type_, data)
            except Exception as ex:  # pylint: disable=broad-except
                # Record the failure; the entity reports unavailable until
                # the next successful update clears last_exception.
                _LOGGER.exception("Error updating sensor: %s (%s)", type_, argument)
                data.last_exception = ex
            else:
                data.state = state
                data.value = value
                data.update_time = update_time
                data.last_exception = None
        # Only fetch these once per iteration as we use the same
        # data source multiple times in _update
        _disk_usage.cache_clear()
        _swap_memory.cache_clear()
        _virtual_memory.cache_clear()
        _net_io_counters.cache_clear()
        _net_if_addrs.cache_clear()
        _getloadavg.cache_clear()
    async def _async_update_data(*_: Any) -> None:
        """Update all sensors in one executor jump."""
        # Skip this tick entirely if the previous cycle is still running.
        if _update_lock.locked():
            _LOGGER.warning(
                "Updating systemmonitor took longer than the scheduled update interval %s",
                scan_interval,
            )
            return
        async with _update_lock:
            await hass.async_add_executor_job(_update_sensors)
        # Tell every SystemMonitorSensor to re-read its registry entry.
        async_dispatcher_send(hass, SIGNAL_SYSTEMMONITOR_UPDATE)
    polling_remover = async_track_time_interval(hass, _async_update_data, scan_interval)
    @callback
    def _async_stop_polling(*_: Any) -> None:
        polling_remover()
    # Stop the interval timer cleanly on Home Assistant shutdown.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop_polling)
    # Prime the registry once so entities have data before the first tick.
    await _async_update_data()
class SystemMonitorSensor(SensorEntity):
    """Implementation of a system monitor sensor.

    Entities are push-only: state lives in the shared sensor registry and
    updates arrive via the systemmonitor dispatcher signal.
    """

    should_poll = False

    def __init__(
        self,
        sensor_registry: dict[tuple[str, str], SensorData],
        sensor_description: SysMonitorSensorEntityDescription,
        argument: str = "",
    ) -> None:
        """Initialize the sensor."""
        self._sensor_registry = sensor_registry
        self._argument: str = argument
        self.entity_description = sensor_description
        display_name = f"{sensor_description.name} {argument}"
        self._attr_name: str = display_name.rstrip()
        self._attr_unique_id: str = slugify(f"{sensor_description.key}_{argument}")

    @property
    def native_value(self) -> str | None:
        """Return the latest state recorded for this sensor."""
        return self.data.state

    @property
    def available(self) -> bool:
        """Report unavailable while the most recent update raised."""
        return self.data.last_exception is None

    @property
    def data(self) -> SensorData:
        """Look up this sensor's entry in the shared registry."""
        return self._sensor_registry[(self.entity_description.key, self._argument)]

    async def async_added_to_hass(self) -> None:
        """Subscribe to registry-update signals once added to hass."""
        await super().async_added_to_hass()
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, SIGNAL_SYSTEMMONITOR_UPDATE, self.async_write_ha_state
            )
        )
def _update( # noqa: C901
    type_: str, data: SensorData
) -> tuple[str | None, str | None, datetime.datetime | None]:
    """Get the latest system information.

    Returns (state, value, update_time): *state* is the entity state,
    while *value*/*update_time* carry the previous raw counter sample
    used only by the throughput sensors.
    """
    state = None
    value = None
    update_time = None
    # Disk sizes are reported in GiB (1024 ** 3), memory/swap in MiB.
    if type_ == "disk_use_percent":
        state = _disk_usage(data.argument).percent
    elif type_ == "disk_use":
        state = round(_disk_usage(data.argument).used / 1024 ** 3, 1)
    elif type_ == "disk_free":
        state = round(_disk_usage(data.argument).free / 1024 ** 3, 1)
    elif type_ == "memory_use_percent":
        state = _virtual_memory().percent
    elif type_ == "memory_use":
        virtual_memory = _virtual_memory()
        state = round((virtual_memory.total - virtual_memory.available) / 1024 ** 2, 1)
    elif type_ == "memory_free":
        state = round(_virtual_memory().available / 1024 ** 2, 1)
    elif type_ == "swap_use_percent":
        state = _swap_memory().percent
    elif type_ == "swap_use":
        state = round(_swap_memory().used / 1024 ** 2, 1)
    elif type_ == "swap_free":
        state = round(_swap_memory().free / 1024 ** 2, 1)
    elif type_ == "processor_use":
        # interval=None compares against the previous call instead of blocking.
        state = round(psutil.cpu_percent(interval=None))
    elif type_ == "processor_temperature":
        state = _read_cpu_temperature()
    elif type_ == "process":
        # ON when a process with the configured name currently exists.
        state = STATE_OFF
        for proc in psutil.process_iter():
            try:
                if data.argument == proc.name():
                    state = STATE_ON
                    break
            except psutil.NoSuchProcess as err:
                # Process vanished between iteration and name lookup.
                _LOGGER.warning(
                    "Failed to load process with ID: %s, old name: %s",
                    err.pid,
                    err.name,
                )
    elif type_ in ("network_out", "network_in"):
        counters = _net_io_counters()
        if data.argument in counters:
            counter = counters[data.argument][IO_COUNTER[type_]]
            state = round(counter / 1024 ** 2, 1)
        else:
            state = None
    elif type_ in ("packets_out", "packets_in"):
        counters = _net_io_counters()
        if data.argument in counters:
            state = counters[data.argument][IO_COUNTER[type_]]
        else:
            state = None
    elif type_ in ("throughput_network_out", "throughput_network_in"):
        counters = _net_io_counters()
        if data.argument in counters:
            counter = counters[data.argument][IO_COUNTER[type_]]
            now = dt_util.utcnow()
            # Rate = delta bytes / delta seconds since the previous sample;
            # note decimal MB/s (1000 ** 2) unlike the MiB totals above.
            # First sample (or counter reset) yields state None.
            if data.value and data.value < counter:
                state = round(
                    (counter - data.value)
                    / 1000 ** 2
                    / (now - (data.update_time or now)).total_seconds(),
                    3,
                )
            else:
                state = None
            update_time = now
            value = counter
        else:
            state = None
    elif type_ in ("ipv4_address", "ipv6_address"):
        addresses = _net_if_addrs()
        if data.argument in addresses:
            # No break: the last address of the matching family wins.
            for addr in addresses[data.argument]:
                if addr.family == IF_ADDRS_FAMILY[type_]:
                    state = addr.address
        else:
            state = None
    elif type_ == "last_boot":
        # Only update on initial setup
        if data.state is None:
            state = dt_util.utc_from_timestamp(psutil.boot_time()).isoformat()
        else:
            state = data.state
    elif type_ == "load_1m":
        state = round(_getloadavg()[0], 2)
    elif type_ == "load_5m":
        state = round(_getloadavg()[1], 2)
    elif type_ == "load_15m":
        state = round(_getloadavg()[2], 2)
    return state, value, update_time
# When we drop python 3.8 support these can be switched to
# @cache https://docs.python.org/3.9/library/functools.html#functools.cache
@lru_cache(maxsize=None)
def _disk_usage(path: str) -> Any:
    """Return psutil disk usage for *path*; cached, cleared every update cycle."""
    return psutil.disk_usage(path)
@lru_cache(maxsize=None)
def _swap_memory() -> Any:
    """Return psutil swap stats; cached, cleared every update cycle."""
    return psutil.swap_memory()
@lru_cache(maxsize=None)
def _virtual_memory() -> Any:
    """Return psutil virtual-memory stats; cached, cleared every update cycle."""
    return psutil.virtual_memory()
@lru_cache(maxsize=None)
def _net_io_counters() -> Any:
    """Return per-NIC psutil I/O counters; cached, cleared every update cycle."""
    return psutil.net_io_counters(pernic=True)
@lru_cache(maxsize=None)
def _net_if_addrs() -> Any:
    """Return per-NIC psutil addresses; cached, cleared every update cycle."""
    return psutil.net_if_addrs()
@lru_cache(maxsize=None)
def _getloadavg() -> tuple[float, float, float]:
return os.getloadavg()
def _read_cpu_temperature() -> float | None:
    """Attempt to read CPU / processor temperature.

    Returns the first reading whose label or sensor name is a known CPU
    sensor prefix, rounded to one decimal; None when nothing matches.
    """
    for name, entries in psutil.sensors_temperatures().items():
        for idx, entry in enumerate(entries, start=1):
            # In case the label is empty (e.g. on Raspberry PI 4),
            # construct it ourself here based on the sensor key name.
            label = entry.label or f"{name} {idx}"
            # check both name and label because some systems embed cpu# in the
            # name, which makes label not match because label adds cpu# at end.
            if label in CPU_SENSOR_PREFIXES or name in CPU_SENSOR_PREFIXES:
                return cast(float, round(entry.current, 1))
    return None
| |
"""
english.py
Tools for NLG. Uses various sub-modules like nouns and verbs.
"""
import re
import copy
from utils import *
import ans
from ans import Pr, Vr, PVr, SbT
import nouns
import verbs
ANYTAG = re.compile(r"(\b[A-Z]#[a-z_/]+\b)")
TAGS = {
"noun": re.compile(r"\bN#([a-z_]+)/([a-z_]+)\b"),
"verb": re.compile(r"\bV#([a-z]+)/([a-z]+)/([a-z_]+)\b"),
#"pronoun": re.compile(r"\bP#[a-z]+/[a-z]+\b"),
#"determined": re.compile(r"\bT#[a-z]+\b"),
}
TR_TYPE = {
"party_member": "person",
"item": "item",
"actor": "entity",
}
TSABR = {
"prs": "present",
"pst": "past",
"inf": "infinitive",
"imp": "imperative",
"prp": "present participle",
"psp": "past participle",
}
NOUN_SCHEMAS = {
"name":
Pr(
"st",
Vr("Node"),
Pr("property",
Pr("name"),
Pr("inst", Vr("Type"), Vr("Key")),
Vr("Name")
)
),
"number":
Pr(
"st",
Vr("Node"),
Pr("property",
Pr("number"),
Pr("inst", Vr("Type"), Vr("Key")),
Vr("Number")
)
),
"gender":
Pr(
"st",
Vr("Node"),
Pr("property",
Pr("gender"),
Pr("inst", Vr("Type"), Vr("Key")),
Vr("Gender")
)
),
"person":
Pr(
"st",
Vr("Node"),
Pr("property",
Pr("person"),
Pr("inst", Vr("Type"), Vr("Key")),
Vr("Person")
)
),
"determined":
Pr(
"st",
Vr("Node"),
Pr("property",
Pr("determined"),
Pr("inst", Vr("Type"), Vr("Key")),
Vr("Determination")
)
),
}
TEXT_SCHEMAS = {
"intro_text":
PVr("txt", "intro_text", Vr("Node"), Vr("Text")),
"potential_text":
PVr("txt", "potential_text", Vr("Node"), Vr("Text")),
"option_text":
PVr("txt", "option_text", Vr("Node"), Pr("option", Vr("Opt")), Vr("Text")),
"action_text":
PVr("txt", "action_text", Vr("Node"), Pr("option", Vr("Opt")), Vr("Text")),
}
SUCCESSOR = Pr("successor", Vr("From"), Pr("option", Vr("Opt")), Vr("To"))
def glean_nouns(story):
    """
    Scan *story* for NOUN_SCHEMAS bindings and return a dict mapping each
    noun key to a nouns.Noun populated with any name/number/gender/person/
    determination properties found among the bindings.
    """
    result = {}
    for sc, binding in ans.bindings(NOUN_SCHEMAS, story):
        n = binding["st.property.inst.Key"].unquoted()
        t = binding["st.property.inst.Type"].unquoted()
        if n not in result:
            # dict.get replaces the manual membership test; unmapped story
            # types fall back to the generic "thing".
            result[n] = nouns.Noun(n, TR_TYPE.get(t, "thing"))
        if sc == "name":
            result[n].name = binding["st.property.Name"].unquoted()
        elif sc == "number":
            result[n].number = binding["st.property.Number"].unquoted()
        elif sc == "gender":
            result[n].gender = binding["st.property.Gender"].unquoted()
        elif sc == "person":
            result[n].person = binding["st.property.Person"].unquoted()
        elif sc == "determined":
            d = binding["st.property.Determination"].unquoted()
            result[n].determined = d == "true"
    return result
def merge_pnslots(pns1, pns2):
    """
    Takes two sets of pronoun slots and merges them such that the result is
    valid for text that might follow text which resulted in either of the
    merged slot sets.
    """
    merged = {}
    for slot in pns1:
        age, referents = pns1[slot]
        other_age, other_referents = pns2[slot]
        if referents == other_referents:
            # Same candidate set either way: keep a copy, aged by the more
            # stale of the two histories.
            merged[slot] = [max(age, other_age), set(referents)]
        else:
            # Any kind of ambiguity results in an empty slot:
            merged[slot] = [0, set()]
    return merged
def merge_txt_states(pilist):
    """
    Takes a list of (pnslots, introduced) pairs and returns a single
    (pnslots, introduced) pair that's valid no matter which of the input
    text states you're coming from.
    """
    result_pnslots = pilist[0][0]
    # BUG FIX: copy the first "introduced" set before intersecting. The
    # in-place &= below used to mutate the caller's pilist[0][1] set.
    result_introduced = set(pilist[0][1])
    for pnslots, introduced in pilist[1:]:
        result_pnslots = merge_pnslots(result_pnslots, pnslots)
        result_introduced &= introduced
    return result_pnslots, result_introduced
def build_text(template, ndict, pnslots=None, introduced=None):
    """
    Takes a text template and builds a filled-in string using the given nouns
    along with pronoun and noun usage information. Returns a tuple of the
    constructed string and the resulting pronoun slots and noun introduction.

    *pnslots* maps pronoun slot names to [age, {candidate nouns}] pairs and
    *introduced* is the set of nouns already mentioned; both default to
    empty state and are copied, never mutated in place.
    """
    if pnslots is None:
        pnslots = {
            "I": [0, set()],
            "we": [0, set()],
            "you": [0, set()],
            "he": [0, set()],
            "she": [0, set()],
            "it": [0, set()],
            "they": [0, set()],
        }
    else:
        pnslots = copy.deepcopy(pnslots)
    if introduced is None:
        introduced = set()
    else:
        introduced = set(introduced)
    bits = re.split(ANYTAG, template)
    result = ""
    for b in bits:
        add = b
        if '.' in b: # TODO: Better sentence-counting! (?)
            # there's the end of a sentence somewhere in this bit: clean out
            # ambiguous and expired slots
            for s in pnslots:
                if len(pnslots[s][1]) > 1 or pnslots[s][0] > 2:
                    pnslots[s] = [0, set()]
                elif len(pnslots[s][1]) > 0:
                    pnslots[s][0] += 1
        for t in TAGS:
            m = TAGS[t].fullmatch(b)
            if m:
                if t == "noun":
                    noun = m.group(1)
                    if noun not in ndict:
                        # TODO: Something about this
                        print("ERROR!")
                        print(template, noun, ndict)
                        exit(1)
                    pro = m.group(2)
                    case, position = nouns.casepos(pro)
                    slot = pnslots[nouns.pnslot(ndict[noun])]
                    if noun in slot[1] and len(slot[1]) == 1:
                        # Unambiguous recent mention: use a pronoun.
                        slot[0] = 0
                        add = nouns.pronoun(ndict[noun], case, position)
                    else:
                        if slot[0] > 0:
                            slot[0] = 0
                            slot[1] = { noun }
                        else:
                            slot[1].add(noun)
                        if noun in introduced:
                            add = nouns.definite(ndict[noun])
                        else:
                            introduced.add(noun)
                            add = nouns.indefinite(ndict[noun])
                elif t == "verb":
                    verb = m.group(1)
                    tense = TSABR[m.group(2)]
                    agree = m.group(3)
                    if agree == "_plural":
                        # BUG FIX: the conjugated form was computed but never
                        # assigned, leaving the raw V# tag in the output.
                        add = verbs.conjugation(verb, tense, "plural", "third")
                    else:
                        add = verbs.conj_ref(ndict[agree], verb, tense)
                # if we matched a tag, don't bother checking the other tags:
                break
        result += add
    return result, pnslots, introduced
def find_node_structure(story):
    """
    Takes a story and looks at successor/3 predicates to determine the structure
    of nodes in the story, returning a dictionary that maps node names to both
    successor and predecessor entries: successor entries being option->node
    mappings and predecessor entries being a list of nodes that have this node
    as a successor.
    """
    structure = {}
    def entry(name):
        # Fetch-or-create the record for a node.
        if name not in structure:
            structure[name] = {"successors":{}, "predecessors":[]}
        return structure[name]
    for pred in story:
        bound = ans.bind(SUCCESSOR, pred)
        if not bound:
            continue
        frm = bound["successor.From"].unquoted()
        opt = bound["successor.option.Opt"].unquoted()
        to = bound["successor.To"].unquoted()
        entry(frm)["successors"][opt] = to
        entry(to)["predecessors"].append(frm)
    return structure
def build_node_text(node, node_structure, nouns, pnslots, introduced):
    """
    Builds text for the given node (should be a dictionary from the
    node_templates map). Returns the resulting text and a dictionary mapping
    options to their outgoing (pnslots, introduced) tuples.
    """
    outgoing = {}
    # TODO: A more rigorous capitalization approach.
    # Pronoun-slot and noun-introduction state threads through each
    # build_text call; the updated state is captured in _pnslots/_introduced.
    intro, _pnslots, _introduced = build_text(
        node["intro"],
        nouns,
        pnslots,
        introduced
    )
    intro = intro.capitalize()
    situation, _pnslots, _introduced = build_text(
        node["situation"],
        nouns,
        _pnslots,
        _introduced
    )
    situation += "."
    situation = situation.capitalize()
    options = ""
    if node["options"]:
        options = "*choice\n"
        for opt in node["options"]:
            # Each option branches from the post-situation state
            # (_pnslots/_introduced), not from a sibling option's state.
            txt, pnout, intout = build_text(
                node["options"][opt],
                nouns,
                _pnslots,
                _introduced
            )
            options += " #{}\n".format(txt.capitalize())
            # The outcome continues from the option text's state.
            txt, pnout, intout = build_text(
                node["outcomes"][opt],
                nouns,
                pnout,
                intout
            )
            options += " {}\n".format(txt.capitalize())
            successors = node_structure[node["name"]]["successors"]
            if opt in successors:
                scc = successors[opt]
                # Record the state that flows into the successor node.
                outgoing[scc] = (pnout, intout)
                # ':' is not valid in ChoiceScript labels.
                options += " *goto {}\n".format(scc.replace(":", "_"))
            else:
                options += " *finish\n"
    else:
        options = "*finish"
    result = """
*label {label}
{intro}
{situation}
{options}
""".format(
        label=node["name"].replace(":", "_"),
        intro=intro,
        situation=situation,
        options=options
    )
    return result, outgoing
def build_story_text(story, root=None):
    """
    Renders an entire story (a collection of answer-set predicates) as
    ChoiceScript. Text templates are first collected per node, then nodes are
    rendered in an order that respects the successor graph so that pronoun and
    noun-introduction state can flow from each node to its successors.
    `root` is currently unused.
    """
    node_templates = {}
    # First, build all of the templates for the entire story:
    for sc, bnd in ans.bindings(TEXT_SCHEMAS, story):
        node = bnd["txt.Node"].unquoted()
        print("Adding {} template for node '{}'.".format(sc, node))
        if node not in node_templates:
            node_templates[node] = {
                "name": node,
                "intro": "",
                "situation": "",
                "options": {},
                "outcomes": {},
                # TODO: state-change text
            }
        txt = bnd["txt.Text"].unquoted()
        if sc == "intro_text":
            node_templates[node]["intro"] = txt
        elif sc == "potential_text":
            # Multiple potential_text entries for a node are joined by " and ".
            if node_templates[node]["situation"]:
                node_templates[node]["situation"] += " and "
            node_templates[node]["situation"] += txt
        elif sc == "option_text":
            opt = bnd["txt.option.Opt"].unquoted()
            node_templates[node]["options"][opt] = txt
        elif sc == "action_text":
            opt = bnd["txt.option.Opt"].unquoted()
            node_templates[node]["outcomes"][opt] = txt
    # Next, use the node structure to recursively render the story text in
    # ChoiceScript:
    nouns = glean_nouns(story)
    node_structure = find_node_structure(story)
    # Each pronoun slot holds [age, {candidate nouns}]; "you" starts bound to
    # the party so second-person references resolve immediately.
    base_pnslots = {
        "I": [0, set()],
        "we": [0, set()],
        "you": [0, { "the_party" }],
        "he": [0, set()],
        "she": [0, set()],
        "it": [0, set()],
        "they": [0, set()],
    }
    base_introduced = { "the_party" }
    # Start with all root nodes on our open list:
    olist = [
        (n, base_pnslots, base_introduced) for n in node_templates.keys()
        if len(node_structure[n]["predecessors"]) == 0
    ]
    print("Root nodes: {}".format([n for (n, bp, bi) in olist]))
    # The ready dictionary keeps track of introduction and pronoun information
    # propagating between non-root nodes and has enough information to know when
    # a node is ready to be rendered:
    ready = {
        n: { pr: None for pr in node_structure[n]["predecessors"]}
        for n in node_templates.keys()
        if len(node_structure[n]["predecessors"]) > 0
    }
    results = []
    while olist:
        target, pnslots, introduced = olist.pop(0)
        print("Processing node: '{}'.".format(target))
        # build node text:
        txt, outgoing = build_node_text(
            node_templates[target],
            node_structure,
            nouns,
            pnslots,
            introduced
        )
        results.append(txt)
        # update our readiness information and propagate nodes to the open list as
        # they're fully ready:
        for n in [x for x in outgoing if x in ready]:
            # DEBUG: a node already marked fully ready should never receive
            # another predecessor update.
            if None not in ready[n].values():
                raise RuntimeError("""
Updating readiness of already-ready node '{}' from node '{}'.
Readiness is: {}\
""".format(n, target, ready[n])
                )
            ready[n][target] = outgoing[n]
            if None not in ready[n].values():
                # All predecessors have reported; merge their outgoing states
                # and queue this node for rendering.
                pns, intr = merge_txt_states(list(ready[n].values()))
                # TODO: Get rid of pnslots merging altogether?
                #olist.append((n, pns, intr))
                olist.append((n, base_pnslots, intr))
    return ("\n\n*comment " + '-'*72 + "\n\n").join(results)
| |
# -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: utils for ngram
"""
def _unigrams(words):
"""
Input: a list of words, e.g., ["I", "am", "Denny"]
Output: a list of unigram
"""
assert type(words) == list
return words
def _bigrams(words, join_string, skip=0):
"""
Input: a list of words, e.g., ["I", "am", "Denny"]
Output: a list of bigram, e.g., ["I_am", "am_Denny"]
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 1:
lst = []
for i in range(L-1):
for k in range(1,skip+2):
if i+k < L:
lst.append( join_string.join([words[i], words[i+k]]) )
else:
# set it as unigram
lst = _unigrams(words)
return lst
def _trigrams(words, join_string, skip=0):
"""
Input: a list of words, e.g., ["I", "am", "Denny"]
Output: a list of trigram, e.g., ["I_am_Denny"]
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 2:
lst = []
for i in range(L-2):
for k1 in range(1,skip+2):
for k2 in range(1,skip+2):
if i+k1 < L and i+k1+k2 < L:
lst.append( join_string.join([words[i], words[i+k1], words[i+k1+k2]]) )
else:
# set it as bigram
lst = _bigrams(words, join_string, skip)
return lst
def _fourgrams(words, join_string):
"""
Input: a list of words, e.g., ["I", "am", "Denny", "boy"]
Output: a list of trigram, e.g., ["I_am_Denny_boy"]
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 3:
lst = []
for i in xrange(L-3):
lst.append( join_string.join([words[i], words[i+1], words[i+2], words[i+3]]) )
else:
# set it as trigram
lst = _trigrams(words, join_string)
return lst
def _uniterms(words):
    # Uniterms are identical to unigrams: the words themselves.
    return _unigrams(words)
def _biterms(words, join_string):
"""
Input: a list of words, e.g., ["I", "am", "Denny", "boy"]
Output: a list of biterm, e.g., ["I_am", "I_Denny", "I_boy", "am_Denny", "am_boy", "Denny_boy"]
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 1:
lst = []
for i in range(L-1):
for j in range(i+1,L):
lst.append( join_string.join([words[i], words[j]]) )
else:
# set it as uniterm
lst = _uniterms(words)
return lst
def _triterms(words, join_string):
"""
Input: a list of words, e.g., ["I", "am", "Denny", "boy"]
Output: a list of triterm, e.g., ["I_am_Denny", "I_am_boy", "I_Denny_boy", "am_Denny_boy"]
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 2:
lst = []
for i in xrange(L-2):
for j in xrange(i+1,L-1):
for k in xrange(j+1,L):
lst.append( join_string.join([words[i], words[j], words[k]]) )
else:
# set it as biterm
lst = _biterms(words, join_string)
return lst
def _fourterms(words, join_string):
"""
Input: a list of words, e.g., ["I", "am", "Denny", "boy", "ha"]
Output: a list of fourterm, e.g., ["I_am_Denny_boy", "I_am_Denny_ha", "I_am_boy_ha", "I_Denny_boy_ha", "am_Denny_boy_ha"]
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 3:
lst = []
for i in xrange(L-3):
for j in xrange(i+1,L-2):
for k in xrange(j+1,L-1):
for l in xrange(k+1,L):
lst.append( join_string.join([words[i], words[j], words[k], words[l]]) )
else:
# set it as triterm
lst = _triterms(words, join_string)
return lst
# Human-readable names for the `ngram` codes accepted by `_ngrams`
# (12 = unigrams + bigrams, 123 = unigrams + bigrams + trigrams).
_ngram_str_map = {
    1: "Unigram",
    2: "Bigram",
    3: "Trigram",
    4: "Fourgram",
    5: "Fivegram",
    12: "UBgram",
    123: "UBTgram",
}
def _ngrams(words, ngram, join_string=" "):
    """
    Dispatch to the matching n-gram builder (see `_ngram_str_map` for codes).
    Codes 12 and 123 combine unigrams with exact-length bi-/trigrams.
    Unsupported codes return None, as before.
    """
    def _exact(grams, size):
        # Keep only grams of exactly `size` words (drops unigram fallbacks).
        return [g for g in grams if len(g.split(join_string)) == size]

    if ngram == 1:
        return _unigrams(words)
    elif ngram == 2:
        return _bigrams(words, join_string)
    elif ngram == 3:
        return _trigrams(words, join_string)
    elif ngram == 4:
        return _fourgrams(words, join_string)
    elif ngram == 12:
        return _unigrams(words) + _exact(_bigrams(words, join_string), 2)
    elif ngram == 123:
        return (_unigrams(words)
                + _exact(_bigrams(words, join_string), 2)
                + _exact(_trigrams(words, join_string), 3))
# Human-readable names for the `nterm` codes accepted by `_nterms`.
_nterm_str_map = {
    1: "Uniterm",
    2: "Biterm",
    3: "Triterm",
    4: "Fourterm",
    5: "Fiveterm",
}
def _nterms(words, nterm, join_string=" "):
    """
    Dispatch to the matching n-term builder (see `_nterm_str_map` for codes).
    Unsupported codes return None, as before.
    """
    builders = {
        1: lambda: _uniterms(words),
        2: lambda: _biterms(words, join_string),
        3: lambda: _triterms(words, join_string),
        4: lambda: _fourterms(words, join_string),
    }
    builder = builders.get(nterm)
    if builder is not None:
        return builder()
if __name__ == "__main__":
    # Smoke tests exercising every public wrapper on the docstring example.
    text = "I am Denny boy ha"
    words = text.split(" ")
    assert _ngrams(words, 1) == ["I", "am", "Denny", "boy", "ha"]
    assert _ngrams(words, 2) == ["I am", "am Denny", "Denny boy", "boy ha"]
    assert _ngrams(words, 3) == ["I am Denny", "am Denny boy", "Denny boy ha"]
    assert _ngrams(words, 4) == ["I am Denny boy", "am Denny boy ha"]
    assert _nterms(words, 1) == ["I", "am", "Denny", "boy", "ha"]
    assert _nterms(words, 2) == ["I am", "I Denny", "I boy", "I ha", "am Denny", "am boy", "am ha", "Denny boy", "Denny ha", "boy ha"]
    assert _nterms(words, 3) == ["I am Denny", "I am boy", "I am ha", "I Denny boy", "I Denny ha", "I boy ha", "am Denny boy", "am Denny ha", "am boy ha", "Denny boy ha"]
    assert _nterms(words, 4) == ["I am Denny boy", "I am Denny ha", "I am boy ha", "I Denny boy ha", "am Denny boy ha"]
| |
# extdiff.py - external diff program support for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
'''
The `extdiff' Mercurial extension allows you to use external programs
to compare revisions, or revision with working dir. The external diff
programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
To enable this extension:
[extensions]
hgext.extdiff =
The `extdiff' extension also allows you to configure new diff commands, so
you do not need to always type "hg extdiff -p kdiff3".
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
#(see http://www.vim.org/scripts/script.php?script_id=102)
# Non-English users: be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
You can use -I/-X and list of file or directory names like normal
"hg diff" command. The `extdiff' extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short
from mercurial import cmdutil, util, commands
import os, shlex, shutil, tempfile
def snapshot_node(ui, repo, files, node, tmproot):
    '''snapshot files as of some revision

    Copies the given files, as stored at revision `node`, into a fresh
    directory under `tmproot` and returns that directory's (relative) name.
    '''
    mf = repo.changectx(node).manifest()
    dirname = os.path.basename(repo.root)
    if dirname == "":
        dirname = "root"
    # Make the snapshot name unique per revision.
    dirname = '%s.%s' % (dirname, short(node))
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    ui.note(_('making snapshot of %d files from rev %s\n') %
            (len(files), short(node)))
    for fn in files:
        if fn not in mf:
            # skipping new file after a merge ?
            continue
        wfn = util.pconvert(fn)
        ui.note(' %s\n' % wfn)
        dest = os.path.join(base, wfn)
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)
        data = repo.wwritedata(wfn, repo.file(wfn).read(mf[wfn]))
        # BUG FIX: the original `open(dest, 'wb').write(data)` leaked the
        # file handle; close it explicitly even if the write fails.
        fp = open(dest, 'wb')
        try:
            fp.write(data)
        finally:
            fp.close()
    return dirname
def snapshot_wdir(ui, repo, files, tmproot):
    '''snapshot files from working directory.
    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files.'''
    repo_root = repo.root
    # Empty basename (e.g. a trailing slash) falls back to "root".
    snap_name = os.path.basename(repo_root) or "root"
    base = os.path.join(tmproot, snap_name)
    os.mkdir(base)
    ui.note(_('making snapshot of %d files from working dir\n') %
            (len(files)))
    copied = []
    for fn in files:
        wfn = util.pconvert(fn)
        ui.note(' %s\n' % wfn)
        dest = os.path.join(base, wfn)
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)
        out = open(dest, 'wb')
        for chunk in util.filechunkiter(repo.wopener(wfn)):
            out.write(chunk)
        out.close()
        # Remember (snapshot path, working path, mtime) so callers can copy
        # back files the diff tool modified.
        copied.append((dest, os.path.join(repo_root, fn),
                       os.path.getmtime(dest)))
    return snap_name, copied
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
    '''Do the actual diff:
    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    node1, node2 = cmdutil.revpair(repo, opts['rev'])
    files, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
    modified, added, removed, deleted, unknown = repo.status(
        node1, node2, files, match=matchfn)[:5]
    if not (modified or added or removed):
        # Nothing to compare.
        return 0
    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    dir2root = ''
    try:
        # Always make a copy of node1
        dir1 = snapshot_node(ui, repo, modified + removed, node1, tmproot)
        changes = len(modified) + len(removed) + len(added)
        fns_and_mtime = []
        # If node2 in not the wc or there is >1 change, copy it
        if node2:
            dir2 = snapshot_node(ui, repo, modified + added, node2, tmproot)
        elif changes > 1:
            #we only actually need to get the files to copy back to the working
            #dir in this case (because the other cases are: diffing 2 revisions
            #or single file -- in which case the file is already directly passed
            #to the diff tool).
            dir2, fns_and_mtime = snapshot_wdir(ui, repo, modified + added, tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ''
            dir2root = repo.root
        # If only one change, diff the files instead of the directories
        if changes == 1 :
            if len(modified):
                dir1 = os.path.join(dir1, util.localpath(modified[0]))
                dir2 = os.path.join(dir2root, dir2, util.localpath(modified[0]))
            elif len(removed) :
                # Removed file: compare against the null device.
                dir1 = os.path.join(dir1, util.localpath(removed[0]))
                dir2 = os.devnull
            else:
                # Added file: the old side is the null device.
                dir1 = os.devnull
                dir2 = os.path.join(dir2root, dir2, util.localpath(added[0]))
        cmdline = ('%s %s %s %s' %
                   (util.shellquote(diffcmd), ' '.join(diffopts),
                    util.shellquote(dir1), util.shellquote(dir2)))
        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        util.system(cmdline, cwd=tmproot)
        # If the external tool edited a working-dir snapshot (detected by an
        # mtime change), copy the edits back into the working directory.
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.path.getmtime(copy_fn) != mtime:
                ui.debug('File changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)
        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
    '''use external program to diff repository (or selected files)

    Show differences between revisions for the specified files, using
    an external program. The default program used is diff, with
    default options "-Npru".

    To select a different program, use the -p option. The program
    will be passed the names of two directories to compare. To pass
    additional options to the program, use the -o option. These will
    be passed before the names of the directories to compare.

    When two revision arguments are given, then changes are
    shown between those revisions. If only one revision is
    specified then that revision is compared to the working
    directory, and, when no revisions are specified, the
    working directory files are compared to its parent.'''
    if opts['program']:
        # An explicit program gets exactly the options the user supplied.
        program = opts['program']
        option = opts['option']
    else:
        # Fall back to GNU diff with its conventional defaults.
        program = 'diff'
        option = opts['option'] or ['-Npru']
    return dodiff(ui, repo, program, option, pats, opts)
# Command table consumed by Mercurial's extension loader; uisetup() appends
# one entry per command configured in the [extdiff] section.
cmdtable = {
    "extdiff":
    (extdiff,
     [('p', 'program', '', _('comparison program to run')),
      ('o', 'option', [], _('pass option to comparison program')),
      ('r', 'rev', [], _('revision')),
     ] + commands.walkopts,
     _('hg extdiff [OPT]... [FILE]...')),
    }
def uisetup(ui):
    """Extension-load hook: turn each [extdiff] config entry into a new
    Mercurial command that runs the configured external diff program."""
    for cmd, path in ui.configitems('extdiff'):
        if cmd.startswith('cmd.'):
            # Old-style config: cmd.NAME = program, opts.NAME = options.
            cmd = cmd[4:]
            if not path: path = cmd
            diffopts = ui.config('extdiff', 'opts.' + cmd, '')
            diffopts = diffopts and [diffopts] or []
        elif cmd.startswith('opts.'):
            # Consumed together with the matching cmd.NAME entry above.
            continue
        else:
            # New-style config: NAME = program [options...].
            if path:
                diffopts = shlex.split(path)
                path = diffopts.pop(0)
            else:
                path, diffopts = cmd, []
        def save(cmd, path, diffopts):
            '''use closure to save diff command to use'''
            # Binding cmd/path/diffopts as arguments avoids the classic
            # late-binding-in-a-loop pitfall.
            def mydiff(ui, repo, *pats, **opts):
                return dodiff(ui, repo, path, diffopts, pats, opts)
            mydiff.__doc__ = '''use %(path)s to diff repository (or selected files)
Show differences between revisions for the specified
files, using the %(path)s program.
When two revision arguments are given, then changes are
shown between those revisions. If only one revision is
specified then that revision is compared to the working
directory, and, when no revisions are specified, the
working directory files are compared to its parent.''' % {
                'path': util.uirepr(path),
            }
            return mydiff
        # Reuse extdiff's option table (minus -p, which is now implied).
        cmdtable[cmd] = (save(cmd, path, diffopts),
                         cmdtable['extdiff'][1][1:],
                         _('hg %s [OPTION]... [FILE]...') % cmd)
| |
# -*- coding: UTF-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Univention Corporate Server (UCS) access module.
Provides the following functions for working with an UCS server.
- ldap_search(filter, base=None, attr=None)
Search the LDAP via Univention's LDAP wrapper (ULDAP)
- config_registry()
Return the UCR registry object
- base_dn()
Return the configured Base DN according to the UCR
- uldap()
Return a handle to the ULDAP LDAP wrapper
- umc_module_for_add(module, container_dn, superordinate=None)
Return a UMC module for creating a new object of the given type
- umc_module_for_edit(module, object_dn, superordinate=None)
Return a UMC module for editing an existing object of the given type
Any other module is not part of the "official" API and may change at any time.
"""
import re
# Public API of this helper module; anything else may change at any time.
__all__ = [
    'ldap_search',
    'config_registry',
    'base_dn',
    'uldap',
    'umc_module_for_add',
    'umc_module_for_edit',
]

# Cache backing `_singleton()`: maps a name to its lazily-built instance.
_singletons = {}
def ldap_module():
    """Return the python-ldap module, importing it lazily on first use."""
    import ldap as _ldap
    return _ldap
def _singleton(name, constructor):
    """Return the cached object registered under `name`, building it with
    `constructor()` on first use."""
    try:
        return _singletons[name]
    except KeyError:
        _singletons[name] = constructor()
        return _singletons[name]
def config_registry():
    """Return the (cached) Univention Config Registry, loaded on first use."""
    def _build():
        import univention.config_registry
        registry = univention.config_registry.ConfigRegistry()
        registry.load()
        return registry
    return _singleton('config_registry', _build)
def base_dn():
    """Return the LDAP base DN configured in the Univention Config Registry."""
    registry = config_registry()
    return registry['ldap/base']
def uldap():
    "Return a configured univention uldap object"
    def construct():
        # Prefer the domain-admin credentials; fall back to the machine
        # account when /etc/ldap.secret cannot be opened.
        try:
            secret_file = open('/etc/ldap.secret', 'r')
            bind_dn = 'cn=admin,{}'.format(base_dn())
        except IOError:  # pragma: no cover
            secret_file = open('/etc/machine.secret', 'r')
            bind_dn = config_registry()["ldap/hostdn"]
        # BUG FIX: close the secret file once the password has been read
        # (the original leaked the file handle).
        try:
            pwd_line = secret_file.readline()
        finally:
            secret_file.close()
        pwd = re.sub('\n', '', pwd_line)
        import univention.admin.uldap
        return univention.admin.uldap.access(
            host=config_registry()['ldap/master'],
            base=base_dn(),
            binddn=bind_dn,
            bindpw=pwd,
            start_tls=1,
        )
    return _singleton('uldap', construct)
def config():
    """Return the cached univention admin config object."""
    def _build():
        import univention.admin.config
        return univention.admin.config.config()
    return _singleton('config', _build)
def init_modules():
    """Run univention.admin.modules.update() exactly once per process."""
    def _build():
        import univention.admin.modules
        univention.admin.modules.update()
        return True
    return _singleton('modules_initialized', _build)
def position_base_dn():
    """Return the cached uldap position object anchored at the base DN."""
    def _build():
        import univention.admin.uldap
        return univention.admin.uldap.position(base_dn())
    return _singleton('position_base_dn', _build)
def ldap_dn_tree_parent(dn, count=1):
    """Return the DN obtained by stripping the first `count` RDNs from `dn`."""
    components = dn.split(',')
    return ','.join(components[count:])
def ldap_search(filter, base=None, attr=None):
    """Replaces uldaps search and uses a generator.
    !! Arguments are not the same.

    Yields raw search entries for `filter` below `base` (defaults to the
    configured base DN), restricted to the attributes in `attr` if given.
    """
    if base is None:
        base = base_dn()
    msgid = uldap().lo.lo.search(
        base,
        ldap_module().SCOPE_SUBTREE,
        filterstr=filter,
        attrlist=attr
    )
    # I used to have a try: finally: here but there seems to be a bug in python
    # which swallows the KeyboardInterrupt
    # The abandon now doesn't make too much sense
    while True:
        result_type, result_data = uldap().lo.lo.result(msgid, all=0)
        if not result_data:
            break
        # BUG FIX: the result-type constants are plain integers; compare with
        # `==` instead of `is`, which only worked via CPython's small-int
        # caching.
        if result_type == ldap_module().RES_SEARCH_RESULT:  # pragma: no cover
            break
        if result_type == ldap_module().RES_SEARCH_ENTRY:
            for res in result_data:
                yield res
    uldap().lo.lo.abandon(msgid)
def module_by_name(module_name_):
    """Returns an initialized UMC module, identified by the given name.

    The module is a module specification according to the udm commandline.
    Example values are:
        * users/user
        * shares/share
        * groups/group

    If the module does not exist, a KeyError is raised.  Modules are cached
    via `_singleton`, so repeated calls do not re-initialize them.
    """
    def _build():
        import univention.admin.modules
        init_modules()
        umc_module = univention.admin.modules.get(module_name_)
        univention.admin.modules.init(uldap(), position_base_dn(), umc_module)
        return umc_module
    return _singleton('module/%s' % module_name_, _build)
def get_umc_admin_objects():
    """Convenience accessor for univention.admin.objects.

    The univention.* modules are imported lazily, so they are not loaded
    until this function is first called.
    """
    import univention.admin as _admin
    return _admin.objects
def umc_module_for_add(module, container_dn, superordinate=None):
    """Returns an UMC module object prepared for creating a new entry.

    The module is a module specification according to the udm commandline.
    Example values are:
        * users/user
        * shares/share
        * groups/group

    The container_dn MUST be the dn of the container (not of the object to
    be created itself!).
    """
    umc_mod = module_by_name(module)
    pos = position_base_dn()
    pos.setDn(container_dn)
    # config, ldap objects from common module
    new_obj = umc_mod.object(config(), uldap(), pos, superordinate=superordinate)
    new_obj.open()
    return new_obj
def umc_module_for_edit(module, object_dn, superordinate=None):
    """Returns an UMC module object prepared for editing an existing entry.

    The module is a module specification according to the udm commandline.
    Example values are:
        * users/user
        * shares/share
        * groups/group

    The object_dn MUST be the dn of the object itself, not the container!
    """
    umc_mod = module_by_name(module)
    admin_objects = get_umc_admin_objects()
    # Position at the object's parent container.
    pos = position_base_dn()
    pos.setDn(ldap_dn_tree_parent(object_dn))
    existing = admin_objects.get(
        umc_mod,
        config(),
        uldap(),
        position=pos,
        superordinate=superordinate,
        dn=object_dn
    )
    existing.open()
    return existing
def create_containers_and_parents(container_dn):
    """Create a container and if needed the parents containers"""
    import univention.admin.uexceptions as uexcp
    # Only cn=... containers are supported.
    assert container_dn.startswith("cn=")
    try:
        parent = ldap_dn_tree_parent(container_dn)
        obj = umc_module_for_add(
            'container/cn',
            parent
        )
        obj['name'] = container_dn.split(',')[0].split('=')[1]
        obj['description'] = "container created by import"
    except uexcp.ldapError:
        # Parent container missing: create the ancestors first, then retry.
        create_containers_and_parents(parent)
        obj = umc_module_for_add(
            'container/cn',
            parent
        )
        obj['name'] = container_dn.split(',')[0].split('=')[1]
        obj['description'] = "container created by import"
    # NOTE(review): `obj` is populated but never persisted (no create()/save
    # call) and not returned -- confirm whether a create step is missing or
    # whether a caller finishes the object elsewhere.
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for describing the structure of a `tf.data` type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import sparse_ops
# Maps Python types to functions converting their instances to `Structure`s;
# populated via `Structure._register_custom_converter()`.
_STRUCTURE_CONVERSION_FUNCTION_REGISTRY = {}
@six.add_metaclass(abc.ABCMeta)
class Structure(object):
"""Represents structural information, such as type and shape, about a value.
A `Structure` generalizes the `tf.Tensor.dtype` and `tf.Tensor.shape`
properties, so that we can define generic containers of objects including:
* `tf.Tensor`
* `tf.SparseTensor`
* Nested structures of the above.
TODO(b/110122868): In the future, a single `Structure` will replace the
`tf.data.Dataset.output_types`, `tf.data.Dataset.output_shapes`,
and `tf.data.Dataset.output_classes`, and similar properties and arguments in
the `tf.data.Iterator` and `Optional` classes.
"""
@abc.abstractproperty
def _flat_shapes(self):
"""A list of shapes matching the shapes of `self._to_tensor_list()`.
Returns:
A list of `tf.TensorShape` objects.
"""
raise NotImplementedError("Structure._flat_shapes")
@abc.abstractproperty
def _flat_types(self):
"""A list of types matching the types of `self._to_tensor_list()`.
Returns:
A list of `tf.DType` objects.
"""
raise NotImplementedError("Structure._flat_shapes")
@abc.abstractmethod
def is_compatible_with(self, other):
"""Returns `True` if `other` is compatible with this structure.
A structure `t` is a "subtype" of `s` if:
* `s` and `t` are instances of the same `Structure` subclass.
* The nested structures (if any) of `s` and `t` are the same, according to
`tf.contrib.framework.nest.assert_same_structure`, and each nested
structure of `t` is a "subtype" of the corresponding nested structure of
`s`.
* Any `tf.DType` components of `t` are the same as the corresponding
components in `s`.
* Any `tf.TensorShape` components of `t` are compatible with the
corresponding components in `s`, according to
`tf.TensorShape.is_compatible_with`.
Args:
other: A `Structure`.
Returns:
`True` if `other` is a subtype of this structure, otherwise `False`.
"""
raise NotImplementedError("Structure.is_compatible_with()")
@abc.abstractmethod
def _to_tensor_list(self, value):
"""Returns a flat list of `tf.Tensor` representing `value`.
This method can be used, along with `self._flat_shapes` and
`self._flat_types` to represent structured values in lower level APIs
(such as plain TensorFlow operations) that do not understand structure.
Requires: `self.is_compatible_with(Structure.from_value(value))`.
Args:
value: A value with compatible structure.
Returns:
A flat list of `tf.Tensor` representing `value`.
"""
raise NotImplementedError("Structure._to_tensor_list()")
@abc.abstractmethod
def _from_tensor_list(self, flat_value):
"""Builds a flat list of `tf.Tensor` into a value matching this structure.
Args:
flat_value: A list of `tf.Tensor` with compatible flat structure.
Returns:
A structured object matching this structure.
Raises:
ValueError: If the shapes and types of the tensors in `flat_value` are not
compatible with `self._flat_shapes` and `self._flat_types` respectively.
"""
raise NotImplementedError("Structure._from_tensor_list()")
def _from_compatible_tensor_list(self, flat_value):
"""A version of `_from_tensor_list()` that may avoid performing checks.
NOTE: This method should be used to avoid checks for performance reasons,
when the validity of `flat_value` has been validated by other means.
The shapes and types of the tensors in `flat_value` must be compatible with
`self._flat_shapes` and `self._flat_types` respectively. The behavior is
undefined if this requirement is not met.
Args:
flat_value: A list of `tf.Tensor` with compatible flat structure.
Returns:
A structured object matching this structure.
"""
return self._from_tensor_list(flat_value)
@staticmethod
def from_value(value):
"""Returns a `Structure` that represents the given `value`.
Args:
value: A potentially structured value.
Returns:
A `Structure` that is compatible with `value`.
Raises:
TypeError: If a structure cannot be built for `value`, because its type
or one of its component types is not supported.
"""
# TODO(b/110122868): Add support for custom types and Dataset to this
# method.
if isinstance(
value,
(sparse_tensor_lib.SparseTensor, sparse_tensor_lib.SparseTensorValue)):
return SparseTensorStructure.from_value(value)
elif isinstance(value, (tuple, dict)):
return NestedStructure.from_value(value)
else:
for converter_type, converter_fn in (
_STRUCTURE_CONVERSION_FUNCTION_REGISTRY.items()):
if isinstance(value, converter_type):
return converter_fn(value)
try:
tensor = ops.convert_to_tensor(value)
except (ValueError, TypeError):
raise TypeError("Could not build a structure for %r" % value)
return TensorStructure.from_value(tensor)
@staticmethod
def _from_legacy_structure(output_types, output_shapes, output_classes):
"""Returns a `Structure` that represents the given legacy structure.
This method provides a way to convert from the existing `Dataset` and
`Iterator` structure-related properties to a `Structure` object.
TODO(b/110122868): Remove this method once `Structure` is used throughout
`tf.data`.
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of a structured value.
output_shapes: A nested structure of `tf.TensorShape` objects
corresponding to each component a structured value.
output_classes: A nested structure of Python `type` objects corresponding
to each component of a structured value.
Returns:
A `Structure`.
Raises:
TypeError: If a structure cannot be built the arguments, because one of
the component classes in `output_classes` is not supported.
"""
flat_types = nest.flatten(output_types)
flat_shapes = nest.flatten(output_shapes)
flat_classes = nest.flatten(output_classes)
flat_ret = []
for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes,
flat_classes):
if issubclass(flat_class, sparse_tensor_lib.SparseTensor):
flat_ret.append(SparseTensorStructure(flat_type, flat_shape))
elif issubclass(flat_class, ops.Tensor):
flat_ret.append(TensorStructure(flat_type, flat_shape))
else:
# NOTE(mrry): Since legacy structures produced by iterators only
# comprise Tensors, SparseTensors, and nests, we do not need to support
# all structure types here.
raise TypeError(
"Could not build a structure for output class %r" % flat_type)
ret = nest.pack_sequence_as(output_classes, flat_ret)
if isinstance(ret, Structure):
return ret
else:
return NestedStructure(ret)
@staticmethod
def _register_custom_converter(type_object, converter_fn):
"""Registers `converter_fn` for converting values of the given type.
Args:
type_object: A Python `type` object representing the type of values
accepted by `converter_fn`.
converter_fn: A function that takes one argument (an instance of the
type represented by `type_object`) and returns a `Structure`.
"""
_STRUCTURE_CONVERSION_FUNCTION_REGISTRY[type_object] = converter_fn
  @abc.abstractmethod
  def _to_legacy_output_types(self):
    # Subclasses must map this `Structure` onto the legacy `output_types` form.
    raise NotImplementedError("Structure._to_legacy_output_types()")
  @abc.abstractmethod
  def _to_legacy_output_shapes(self):
    # Subclasses must map this `Structure` onto the legacy `output_shapes` form.
    raise NotImplementedError("Structure._to_legacy_output_shapes()")
  @abc.abstractmethod
  def _to_legacy_output_classes(self):
    # Subclasses must map this `Structure` onto the legacy `output_classes` form.
    raise NotImplementedError("Structure._to_legacy_output_classes()")
# NOTE(mrry): The following classes make extensive use of non-public methods of
# their base class, so we disable the protected-access lint warning once here.
# pylint: disable=protected-access
class NestedStructure(Structure):
  """Represents a nested structure in which each leaf is a `Structure`."""

  def __init__(self, nested_structure):
    """Builds a `NestedStructure` from a nest of `Structure` leaves.

    Args:
      nested_structure: A (potentially nested) tuple or dictionary whose
        leaves are `Structure` objects.

    Raises:
      TypeError: If any leaf of `nested_structure` is not a `Structure`.
    """
    self._nested_structure = nested_structure
    self._flat_nested_structure = nest.flatten(nested_structure)
    self._flat_shapes_list = []
    self._flat_types_list = []
    for s in nest.flatten(nested_structure):
      if not isinstance(s, Structure):
        raise TypeError("nested_structure must be a (potentially nested) tuple "
                        "or dictionary of Structure objects.")
      # Concatenate each leaf's flat components, in flatten order.
      self._flat_shapes_list.extend(s._flat_shapes)
      self._flat_types_list.extend(s._flat_types)

  @property
  def _flat_shapes(self):
    return self._flat_shapes_list

  @property
  def _flat_types(self):
    return self._flat_types_list

  def is_compatible_with(self, other):
    """True if `other` has the same nest shape and leafwise-compatible leaves."""
    if not isinstance(other, NestedStructure):
      return False
    try:
      # pylint: disable=protected-access
      nest.assert_same_structure(self._nested_structure,
                                 other._nested_structure)
    except (ValueError, TypeError):
      return False
    return all(
        substructure.is_compatible_with(other_substructure)
        for substructure, other_substructure in zip(
            nest.flatten(self._nested_structure),
            nest.flatten(other._nested_structure)))

  def _to_tensor_list(self, value):
    ret = []
    try:
      flat_value = nest.flatten_up_to(self._nested_structure, value)
    except (ValueError, TypeError):
      raise ValueError("The value %r is not compatible with the nested "
                       "structure %r." % (value, self._nested_structure))
    for sub_value, structure in zip(flat_value, self._flat_nested_structure):
      if not structure.is_compatible_with(Structure.from_value(sub_value)):
        raise ValueError("Component value %r is not compatible with the nested "
                         "structure %r." % (sub_value, structure))
      ret.extend(structure._to_tensor_list(sub_value))
    return ret

  def _from_tensor_list(self, flat_value):
    """Rebuilds a nested value from validated flat tensors.

    Raises:
      ValueError: If `flat_value` has the wrong number of components.
    """
    if len(flat_value) != len(self._flat_types):
      raise ValueError("Expected %d flat values in NestedStructure but got %d."
                       % (len(self._flat_types), len(flat_value)))
    flat_ret = []
    offset = 0
    for structure in self._flat_nested_structure:
      # A leaf may be represented by more than one flat tensor (e.g. a leaf
      # that is itself a `NestedStructure`), so consume exactly as many
      # components as the leaf declares instead of assuming one per leaf.
      num_components = len(structure._flat_types)
      flat_ret.append(
          structure._from_tensor_list(
              flat_value[offset:offset + num_components]))
      offset += num_components
    return nest.pack_sequence_as(self._nested_structure, flat_ret)

  def _from_compatible_tensor_list(self, flat_value):
    # Same consumption logic as `_from_tensor_list`, but without validation.
    flat_ret = []
    offset = 0
    for structure in self._flat_nested_structure:
      num_components = len(structure._flat_types)
      flat_ret.append(
          structure._from_compatible_tensor_list(
              flat_value[offset:offset + num_components]))
      offset += num_components
    return nest.pack_sequence_as(self._nested_structure, flat_ret)

  @staticmethod
  def from_value(value):
    flat_nested_structure = [
        Structure.from_value(sub_value) for sub_value in nest.flatten(value)
    ]
    return NestedStructure(nest.pack_sequence_as(value, flat_nested_structure))

  def _to_legacy_output_types(self):
    return nest.map_structure(
        lambda s: s._to_legacy_output_types(), self._nested_structure)

  def _to_legacy_output_shapes(self):
    return nest.map_structure(
        lambda s: s._to_legacy_output_shapes(), self._nested_structure)

  def _to_legacy_output_classes(self):
    return nest.map_structure(
        lambda s: s._to_legacy_output_classes(), self._nested_structure)
class TensorStructure(Structure):
  """Structural information (dtype and static shape) for a dense `tf.Tensor`."""

  def __init__(self, dtype, shape):
    self._dtype = dtypes.as_dtype(dtype)
    self._shape = tensor_shape.as_shape(shape)

  @property
  def _flat_shapes(self):
    return [self._shape]

  @property
  def _flat_types(self):
    return [self._dtype]

  def is_compatible_with(self, other):
    # Only another dense-tensor structure with a compatible dtype and shape
    # is accepted.
    if not isinstance(other, TensorStructure):
      return False
    return (self._dtype.is_compatible_with(other._dtype) and
            self._shape.is_compatible_with(other._shape))

  def _to_tensor_list(self, value):
    value_structure = Structure.from_value(value)
    if not self.is_compatible_with(value_structure):
      raise ValueError("Value %r is not convertible to a tensor with dtype %s "
                       "and shape %s." % (value, self._dtype, self._shape))
    return [value]

  def _from_tensor_list(self, flat_value):
    if len(flat_value) != 1:
      raise ValueError("TensorStructure corresponds to a single tf.Tensor.")
    if not self.is_compatible_with(Structure.from_value(flat_value[0])):
      raise ValueError("Cannot convert %r to a tensor with dtype %s and shape "
                       "%s." % (flat_value[0], self._dtype, self._shape))
    return self._from_compatible_tensor_list(flat_value)

  def _from_compatible_tensor_list(self, flat_value):
    # No conversion needed: the single flat component *is* the tensor.
    return flat_value[0]

  @staticmethod
  def from_value(value):
    return TensorStructure(value.dtype, value.shape)

  def _to_legacy_output_types(self):
    return self._dtype

  def _to_legacy_output_shapes(self):
    return self._shape

  def _to_legacy_output_classes(self):
    return ops.Tensor
class SparseTensorStructure(Structure):
  """Structural information (dtype and dense shape) for a `tf.SparseTensor`."""

  def __init__(self, dtype, dense_shape):
    self._dtype = dtypes.as_dtype(dtype)
    self._dense_shape = tensor_shape.as_shape(dense_shape)

  @property
  def _flat_shapes(self):
    # A sparse tensor flattens to one serialized variant vector of length 3.
    return [tensor_shape.vector(3)]

  @property
  def _flat_types(self):
    return [dtypes.variant]

  def is_compatible_with(self, other):
    if not isinstance(other, SparseTensorStructure):
      return False
    return (self._dtype.is_compatible_with(other._dtype) and
            self._dense_shape.is_compatible_with(other._dense_shape))

  def _to_tensor_list(self, value):
    return [sparse_ops.serialize_sparse(value, out_type=dtypes.variant)]

  def _from_tensor_list(self, flat_value):
    # The length check runs first, so the indexing below is always safe.
    looks_serialized = (
        len(flat_value) == 1 and flat_value[0].dtype == dtypes.variant and
        flat_value[0].shape.is_compatible_with(tensor_shape.vector(3)))
    if not looks_serialized:
      raise ValueError("SparseTensorStructure corresponds to a single "
                       "tf.variant vector of length 3.")
    return self._from_compatible_tensor_list(flat_value)

  def _from_compatible_tensor_list(self, flat_value):
    return sparse_ops.deserialize_sparse(
        flat_value[0], dtype=self._dtype, rank=self._dense_shape.ndims)

  @staticmethod
  def from_value(value):
    sparse_tensor = sparse_tensor_lib.SparseTensor.from_value(value)
    return SparseTensorStructure(
        sparse_tensor.dtype,
        tensor_util.constant_value_as_shape(sparse_tensor.dense_shape))

  def _to_legacy_output_types(self):
    return self._dtype

  def _to_legacy_output_shapes(self):
    return self._dense_shape

  def _to_legacy_output_classes(self):
    return sparse_tensor_lib.SparseTensor
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the RelaxedBernoulli distribution."""
# Dependency imports
import numpy as np
import scipy.special
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class RelaxedBernoulliTest(test_util.TestCase):
  """Tests for `tfd.RelaxedBernoulli` with constant (non-variable) parameters."""
  def testP(self):
    """Tests that parameter P is set correctly. Note that dist.p != dist.pdf."""
    temperature = 1.0
    p = [0.1, 0.4]
    dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
    self.assertAllClose(p, self.evaluate(dist.probs))
  def testLogits(self):
    """Checks round-tripping between `logits` and `probs` parameterizations."""
    temperature = 2.0
    logits = [-42., 42.]
    dist = tfd.RelaxedBernoulli(temperature, logits=logits, validate_args=True)
    self.assertAllClose(logits, self.evaluate(dist.logits))
    self.assertAllClose(scipy.special.expit(logits),
                        self.evaluate(dist.probs_parameter()))
    p = [0.01, 0.99, 0.42]
    dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
    self.assertAllClose(scipy.special.logit(p),
                        self.evaluate(dist.logits_parameter()))
  def testInvalidP(self):
    """Out-of-range probs must raise under validate_args; endpoints are valid."""
    temperature = 1.0
    invalid_ps = [1.01, 2.]
    for p in invalid_ps:
      with self.assertRaisesOpError(
          'Argument `probs` has components greater than 1.'):
        dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
        self.evaluate(dist.probs)
    invalid_ps = [-0.01, -3.]
    for p in invalid_ps:
      with self.assertRaisesOpError(
          'Argument `probs` has components less than 0.'):
        dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
        self.evaluate(dist.probs)
    valid_ps = [0.0, 0.5, 1.0]
    for p in valid_ps:
      dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
      self.assertEqual(p, self.evaluate(dist.probs))
  def testShapes(self):
    # batch_shape follows probs; event_shape is always scalar.
    for batch_shape in ([], [1], [2, 3, 4]):
      temperature = 1.0
      p = np.random.random(batch_shape).astype(np.float32)
      dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
      self.assertAllEqual(batch_shape,
                          tensorshape_util.as_list(dist.batch_shape))
      self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
      self.assertAllEqual([], tensorshape_util.as_list(dist.event_shape))
      self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
  def testZeroTemperature(self):
    """If validate_args, raises error when temperature is 0."""
    temperature = tf.constant(0.0)
    p = tf.constant([0.1, 0.4])
    with self.assertRaisesOpError('`temperature` must be positive.'):
      dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
      sample = dist.sample(seed=test_util.test_seed())
      self.evaluate(sample)
  def testDtype(self):
    # Samples and densities inherit the dtype of the parameters.
    temperature = tf.constant(1.0, dtype=tf.float32)
    p = tf.constant([0.1, 0.4], dtype=tf.float32)
    dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
    self.assertEqual(dist.dtype, tf.float32)
    self.assertEqual(dist.dtype, dist.sample(
        5, seed=test_util.test_seed()).dtype)
    self.assertEqual(dist.probs.dtype, dist.prob([0.0]).dtype)
    self.assertEqual(dist.probs.dtype, dist.log_prob([0.0]).dtype)
    temperature = tf.constant(1.0, dtype=tf.float64)
    p = tf.constant([0.1, 0.4], dtype=tf.float64)
    dist64 = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
    self.assertEqual(dist64.dtype, tf.float64)
    self.assertEqual(dist64.dtype, dist64.sample(
        5, seed=test_util.test_seed()).dtype)
  def testLogProb(self):
    t = np.array(1.0, dtype=np.float64)
    p = np.array(0.1, dtype=np.float64)  # P(x=1)
    dist = tfd.RelaxedBernoulli(t, probs=p, validate_args=True)
    xs = np.array([0.1, 0.3, 0.5, 0.9], dtype=np.float64)
    # analytical density from Maddison et al. 2016
    alpha = np.array(p / (1 - p), dtype=np.float64)
    expected_log_pdf = (
        np.log(t) + np.log(alpha) + (-t - 1) * (np.log(xs) + np.log(1 - xs)) -
        2 * np.log(alpha * np.power(xs, -t) + np.power(1 - xs, -t)))
    log_pdf = self.evaluate(dist.log_prob(xs))
    self.assertAllClose(expected_log_pdf, log_pdf)
  def testBoundaryConditions(self):
    # With degenerate probs (0 or 1) the log-density is +/-inf at the endpoints.
    temperature = 1e-2
    dist = tfd.RelaxedBernoulli(temperature, probs=1.0, validate_args=True)
    self.assertAllClose(-np.inf, self.evaluate(dist.log_prob(0.0)))
    self.assertAllClose(np.inf, self.evaluate(dist.log_prob(1.0)))
    dist = tfd.RelaxedBernoulli(temperature, probs=0.0, validate_args=True)
    self.assertAllClose(np.inf, self.evaluate(dist.log_prob(0.0)))
    self.assertAllClose(-np.inf, self.evaluate(dist.log_prob(1.0)))
  def testSamplesAtBoundaryNotNaN(self):
    temperature = 1e-2
    dist = tfd.RelaxedBernoulli(temperature, probs=1.0, validate_args=True)
    self.assertFalse(np.any(np.isnan(self.evaluate(
        dist.log_prob(dist.sample(10, seed=test_util.test_seed()))))))
    dist = tfd.RelaxedBernoulli(temperature, probs=0.0, validate_args=True)
    self.assertFalse(np.any(np.isnan(self.evaluate(
        dist.log_prob(dist.sample(10, seed=test_util.test_seed()))))))
  def testPdfAtBoundary(self):
    dist = tfd.RelaxedBernoulli(temperature=0.1, logits=[[3., 5.], [3., 2]],
                                validate_args=True)
    pdf_at_boundary = self.evaluate(dist.prob([0., 1.]))
    log_pdf_at_boundary = self.evaluate(dist.log_prob([0., 1.]))
    self.assertAllPositiveInf(pdf_at_boundary)
    self.assertAllPositiveInf(log_pdf_at_boundary)
  def testAssertValidSample(self):
    # Samples outside [0, 1] must be rejected when validate_args=True.
    temperature = 1e-2
    p = [0.2, 0.6, 0.5]
    dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
    with self.assertRaisesOpError('Sample must be non-negative.'):
      self.evaluate(dist.log_cdf([0.3, -0.2, 0.5]))
    with self.assertRaisesOpError('Sample must be less than or equal to `1`.'):
      self.evaluate(dist.prob([0.3, 0.1, 1.2]))
  def testSampleN(self):
    """mean of quantized samples still approximates the Bernoulli mean."""
    temperature = 1e-2
    p = [0.2, 0.6, 0.5]
    dist = tfd.RelaxedBernoulli(temperature, probs=p, validate_args=True)
    n = 10000
    samples = dist.sample(n, seed=test_util.test_seed())
    self.assertEqual(samples.dtype, tf.float32)
    sample_values = self.evaluate(samples)
    self.assertAllInRange(sample_values, 0., 1.)
    # Thresholding at 0.5 recovers approximately Bernoulli(p) frequencies.
    frac_ones_like = np.sum(sample_values >= 0.5, axis=0) / n
    self.assertAllClose(p, frac_ones_like, atol=1e-2)
  def testParamTensorFromLogits(self):
    x = tf.constant([-1., 0.5, 1.])
    d = tfd.RelaxedBernoulli(temperature=1., logits=x, validate_args=True)
    self.assertAllClose(
        *self.evaluate([x, d.logits_parameter()]),
        atol=0, rtol=1e-4)
    self.assertAllClose(
        *self.evaluate([tf.math.sigmoid(x),
                        d.probs_parameter()]),
        atol=0,
        rtol=1e-4)
  def testParamTensorFromProbs(self):
    x = tf.constant([0.1, 0.5, 0.4])
    d = tfd.RelaxedBernoulli(temperature=1., probs=x, validate_args=True)
    logit = lambda x: tf.math.log(x) - tf.math.log1p(-x)
    self.assertAllClose(
        *self.evaluate([logit(x), d.logits_parameter()]),
        atol=0, rtol=1e-4)
    self.assertAllClose(
        *self.evaluate([x, d.probs_parameter()]),
        atol=0, rtol=1e-4)
  def testUnknownShape(self):
    # Partially-defined logits shapes must still allow sampling.
    logits = tf.Variable(np.zeros((1, 5)), shape=tf.TensorShape((1, None)))
    d = tfd.RelaxedBernoulli(0.5, logits, validate_args=True)
    self.evaluate(logits.initializer)
    d.sample(seed=test_util.test_seed())
    if not tf.executing_eagerly():
      logits = tf1.placeholder(tf.float32, shape=(1, None))
      d = tfd.RelaxedBernoulli(0.5, logits=logits, validate_args=True)
      d.sample(seed=test_util.test_seed())
  def testCdfAgreesWithLogProb(self):
    # Shape [N, 1, 1, 1]. This is to support broadcasting and the last dimension
    # for evaluating against the quadrature nodes.
    # Choose probs in a modest range such that the density isn't extremely
    # skewed for quadrature.
    probs = tf.random.uniform(
        minval=0.3,
        maxval=0.7,
        shape=[int(1e2)],
        dtype=np.float64,
        seed=test_util.test_seed())[..., tf.newaxis, tf.newaxis, tf.newaxis]
    temp = np.array([0.5, 1., 2.], dtype=np.float64)[
        ..., np.newaxis, np.newaxis]
    dist = tfd.RelaxedBernoulli(temp, probs=probs, validate_args=True)
    x = np.array([0.1, 0.25, 0.5, 0.75, 0.9], dtype=np.float64)
    # Do quadrature on the probability density from 0 to x.
    nodes, weights = scipy.special.roots_legendre(500)
    rescaled_nodes = x[..., np.newaxis] / 2. * (nodes + 1.)
    expected_log_cdf = (tf.math.log(x / 2.) + tf.math.reduce_logsumexp(
        np.log(weights) + dist.log_prob(rescaled_nodes), axis=-1))[
            ..., tf.newaxis, :]
    actual_log_cdf, expected_log_cdf = self.evaluate(
        [dist.log_cdf(x), expected_log_cdf])
    self.assertAllClose(expected_log_cdf, actual_log_cdf, rtol=2e-2)
  def testCdfNotNanNearEndpoints(self):
    probs = np.linspace(0., 1., 30, dtype=np.float32)[..., None]
    temp = 1e-1
    dist = tfd.RelaxedBernoulli(temp, probs=probs, validate_args=True)
    x = [0., 0.25, 0.5, 0.75, 1.]
    cdf = self.evaluate(dist.cdf(x))
    self.assertFalse(np.any(np.isnan(cdf)))
    # Check that the CDF is non-decreasing for probs not in {0, 1}.
    self.assertTrue(np.all(np.diff(cdf[1:-1]) >= 0.))
    # First row is probs=0 (CDF is 1 everywhere on [0, 1]); last row is
    # probs=1 (CDF is 0 below x=1 and 1 at x=1).
    self.assertAllEqual([1., 1., 1., 1., 1.], cdf[0])
    self.assertAllEqual([0., 0., 0., 0., 1.], cdf[-1])
@test_util.test_all_tf_execution_regimes
class RelaxedBernoulliFromVariableTest(test_util.TestCase):
  """Tests for `tfd.RelaxedBernoulli` parameterized by `tf.Variable`s."""
  @test_util.tf_tape_safety_test
  def testGradientLogits(self):
    # Gradients w.r.t. variable logits must exist and be non-None.
    x = tf.Variable([-1., 1])
    self.evaluate(x.initializer)
    d = tfd.RelaxedBernoulli(0.5, logits=x, validate_args=True)
    with tf.GradientTape() as tape:
      loss = -d.log_prob([0, 1])
    g = tape.gradient(loss, d.trainable_variables)
    self.assertLen(g, 1)
    self.assertAllNotNone(g)
  @test_util.tf_tape_safety_test
  def testGradientProbs(self):
    # Gradients w.r.t. variable probs flow through sampling.
    x = tf.Variable([0.1, 0.7])
    self.evaluate(x.initializer)
    d = tfd.RelaxedBernoulli(0.5, probs=x, validate_args=True)
    with tf.GradientTape() as tape:
      loss = -d.sample(seed=test_util.test_seed())
    g = tape.gradient(loss, d.trainable_variables)
    self.assertLen(g, 1)
    self.assertAllNotNone(g)
  @test_util.tf_tape_safety_test
  def testGradientTemperature(self):
    # Gradients w.r.t. a variable temperature flow through sampling.
    x = tf.Variable([0.2, 2.])
    self.evaluate(x.initializer)
    d = tfd.RelaxedBernoulli(x, probs=[0.8, 0.5], validate_args=True)
    with tf.GradientTape() as tape:
      loss = -d.sample(seed=test_util.test_seed())
    g = tape.gradient(loss, d.trainable_variables)
    self.assertLen(g, 1)
    self.assertAllNotNone(g)
  def testAssertionsProbs(self):
    # Assigning out-of-range probs after construction must trigger assertions.
    x = tf.Variable([0.1, 0.7, 0.0])
    d = tfd.RelaxedBernoulli(0.5, probs=x, validate_args=True)
    self.evaluate(x.initializer)
    self.evaluate(d.sample(seed=test_util.test_seed()))
    with tf.control_dependencies([x.assign([0.1, -0.7, 0.0])]):
      with self.assertRaisesOpError(
          'Argument `probs` has components less than 0.'):
        self.evaluate(d.sample(seed=test_util.test_seed()))
    with tf.control_dependencies([x.assign([0.1, 1.7, 0.0])]):
      with self.assertRaisesOpError(
          'Argument `probs` has components greater than 1.'):
        self.evaluate(d.sample(seed=test_util.test_seed()))
  def testAssertionsTemperature(self):
    # Assigning a non-positive temperature after construction must fail.
    x = tf.Variable(.8)
    probs = [0.1, .35, 0.7]
    d = tfd.RelaxedBernoulli(x, probs=probs, validate_args=True)
    self.evaluate(x.initializer)
    self.evaluate(d.sample(seed=test_util.test_seed()))
    with tf.control_dependencies([x.assign(-1.2)]):
      with self.assertRaisesOpError(
          'Argument `temperature` must be positive.'):
        self.evaluate(d.sample(seed=test_util.test_seed()))
  def testSupportBijectorOutsideRange(self):
    # Inverting the support bijector outside [0, 1] must yield NaN.
    probs = np.array([0.45, 0.07, 0.32, 0.99])
    temp = 1.
    dist = tfd.RelaxedBernoulli(temp, probs=probs, validate_args=True)
    eps = 1e-6
    x = np.array([-2.3, -eps, 1. + eps, 1.4])
    bijector_inverse_x = dist.experimental_default_event_space_bijector(
        ).inverse(x)
    self.assertAllNan(self.evaluate(bijector_inverse_x))
# Run the test suite when this module is executed as a script.
if __name__ == '__main__':
  test_util.main()
| |
"""
desispec.io.brick
=================
I/O routines for working with per-brick files.
See ``doc/DESI_SPECTRO_REDUX/PRODNAME/bricks/BRICKID/*-BRICKID.rst`` in desiDataModel
for a description of the relevant data models.
See :doc:`coadd` and `DESI-doc-1056 <https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=1056>`_
for general information about the coaddition dataflow and algorithms.
"""
import os
import os.path
import numpy as np
import astropy.io.fits
import desispec.io.util
class BrickBase(object):
    """Represents objects in a single brick and possibly also a single band b,r,z.
    The constructor will open an existing file and create a new file and parent
    directory if necessary. The :meth:`close` method must be called for any updates
    or new data to be recorded. Successful completion of the constructor does not
    guarantee that :meth:`close` will succeed.
    Args:
        path(str): Path to the brick file to open.
        mode(str): File access mode to use. Should normally be 'readonly' or 'update'. Use 'update' to create a new file and its parent directory if necessary.
        header: An optional header specification used to create a new file. See :func:`desispec.io.util.fitsheader` for details on allowed values.
    Raises:
        RuntimeError: Invalid mode requested.
        IOError: Unable to open existing file in 'readonly' mode.
        OSError: Unable to create a new parent directory in 'update' mode.
    """
    def __init__(self,path,mode = 'readonly',header = None):
        if mode not in ('readonly','update'):
            raise RuntimeError('Invalid mode %r' % mode)
        self.path = path
        self.mode = mode
        # Create a new file if necessary.
        if self.mode == 'update' and not os.path.exists(self.path):
            # Create the parent directory, if necessary.
            head,tail = os.path.split(self.path)
            if not os.path.exists(head):
                os.makedirs(head)
            # Create empty HDUs. It would be good to refactor io.frame to avoid any duplication here.
            # HDU layout: 0=FLUX, 1=IVAR, 2=WAVELENGTH, 3=RESOLUTION, 4=FIBERMAP.
            hdr = desispec.io.util.fitsheader(header)
            hdr['EXTNAME'] = ('FLUX', 'no dimension')
            hdu0 = astropy.io.fits.PrimaryHDU(header = hdr)
            hdr['EXTNAME'] = ('IVAR', 'no dimension')
            hdu1 = astropy.io.fits.ImageHDU(header = hdr)
            hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
            hdu2 = astropy.io.fits.ImageHDU(header = hdr)
            hdr['EXTNAME'] = ('RESOLUTION', 'no dimension')
            hdu3 = astropy.io.fits.ImageHDU(header = hdr)
            hdr['EXTNAME'] = ('FIBERMAP', 'no dimension')
            # Create an HDU4 using the columns from fibermap with a few extras added.
            # NOTE(review): desispec.io.fibermap is used here but only
            # desispec.io.util is imported at module level — presumably it is
            # available transitively via the desispec.io package; confirm.
            columns = desispec.io.fibermap.fibermap_columns[:]
            columns.extend([
                ('NIGHT','i4'),
                ('EXPID','i4'),
                ('INDEX','i4'),
                ])
            # Zero-length table: rows are appended later by add_objects().
            data = np.empty(shape = (0,),dtype = columns)
            hdr = desispec.io.util.fitsheader(header)
            hdu4 = astropy.io.fits.BinTableHDU(data = data,header = hdr)
            # Add comments for fibermap columns.
            num_fibermap_columns = len(desispec.io.fibermap.fibermap_comments)
            for i in range(1,1+num_fibermap_columns):
                key = 'TTYPE%d' % i
                name = hdu4.header[key]
                comment = desispec.io.fibermap.fibermap_comments[name]
                hdu4.header[key] = (name,comment)
            # Add comments for our additional columns.
            hdu4.header['TTYPE%d' % (1+num_fibermap_columns)] = ('NIGHT','Night of exposure YYYYMMDD')
            hdu4.header['TTYPE%d' % (2+num_fibermap_columns)] = ('EXPID','Exposure ID')
            hdu4.header['TTYPE%d' % (3+num_fibermap_columns)] = ('INDEX','Index of this object in other HDUs')
            self.hdu_list = astropy.io.fits.HDUList([hdu0,hdu1,hdu2,hdu3,hdu4])
        else:
            self.hdu_list = astropy.io.fits.open(path,mode = self.mode)
    def add_objects(self,flux,ivar,wave,resolution):
        """Add a list of objects to this brick file from the same night and exposure.
        Args:
            flux(numpy.ndarray): Array of (nobj,nwave) flux values for nobj objects tabulated at nwave wavelengths.
            ivar(numpy.ndarray): Array of (nobj,nwave) inverse-variance values.
            wave(numpy.ndarray): Array of (nwave,) wavelength values in Angstroms. All objects are assumed to use the same wavelength grid.
            resolution(numpy.ndarray): Array of (nobj,nres,nwave) resolution matrix elements.
        Raises:
            RuntimeError: Can only add objects in update mode.
        """
        if self.mode != 'update':
            raise RuntimeError('Can only add objects in update mode.')
        # Concatenate the new per-object image HDU data or use it to initialize the HDU.
        # HDU2 contains the wavelength grid shared by all objects so we only add it once.
        # hdu_list[0].data is None for a freshly created file.
        if self.hdu_list[0].data is not None:
            self.hdu_list[0].data = np.concatenate((self.hdu_list[0].data,flux,))
            self.hdu_list[1].data = np.concatenate((self.hdu_list[1].data,ivar,))
            assert np.array_equal(self.hdu_list[2].data,wave),'Wavelength arrays do not match.'
            self.hdu_list[3].data = np.concatenate((self.hdu_list[3].data,resolution,))
        else:
            self.hdu_list[0].data = flux
            self.hdu_list[1].data = ivar
            self.hdu_list[2].data = wave
            self.hdu_list[3].data = resolution
    def get_wavelength_grid(self):
        """Return the wavelength grid used in this brick file.
        """
        return self.hdu_list[2].data
    def get_target(self,target_id):
        """Get the spectra and info for one target ID.
        Args:
            target_id(int): Target ID number to lookup.
        Returns:
            tuple: Tuple of numpy arrays (flux,ivar,resolution,info) of data associated
                with this target ID. The flux,ivar,resolution arrays will have one entry
                for each spectrum and the info array will have one entry per exposure.
                The returned arrays are slices into the FITS file HDU data arrays, so this
                call is relatively cheap (and any changes will be saved to the file if it
                was opened in update mode.)
        """
        # INDEX maps fibermap rows (one per exposure) to spectra in HDUs 0-3.
        exposures = (self.hdu_list[4].data['TARGETID'] == target_id)
        index_list = np.unique(self.hdu_list[4].data['INDEX'][exposures])
        return (self.hdu_list[0].data[index_list],self.hdu_list[1].data[index_list],
                self.hdu_list[3].data[index_list],self.hdu_list[4].data[exposures])
    def get_target_ids(self):
        """Return set of unique target IDs in this brick.
        """
        return list(set(self.hdu_list[4].data['TARGETID']))
    def get_num_spectra(self):
        """Get the number of spectra contained in this brick file.
        Returns:
            int: Number of objects contained in this brick file.
        """
        return len(self.hdu_list[0].data)
    def get_num_targets(self):
        """Get the number of distinct targets with at least one spectrum in this brick file.
        Returns:
            int: Number of unique targets represented with spectra in this brick file.
        """
        return len(np.unique(self.hdu_list[4].data['TARGETID']))
    def close(self):
        """Write any updates and close the brick file.
        """
        if self.mode == 'update':
            # NOTE(review): 'clobber' was renamed 'overwrite' in newer astropy
            # releases — update this call if astropy is upgraded.
            self.hdu_list.writeto(self.path,clobber = True)
        self.hdu_list.close()
class Brick(BrickBase):
    """Represents the combined cframe exposures in a single brick and band.
    See :class:`BrickBase` for constructor info.
    """
    def __init__(self,path,mode = 'readonly',header = None):
        BrickBase.__init__(self,path,mode,header)
    def add_objects(self,flux,ivar,wave,resolution,object_data,night,expid):
        """Add a list of objects to this brick file from the same night and exposure.
        Args:
            flux(numpy.ndarray): Array of (nobj,nwave) flux values for nobj objects tabulated at nwave wavelengths.
            ivar(numpy.ndarray): Array of (nobj,nwave) inverse-variance values.
            wave(numpy.ndarray): Array of (nwave,) wavelength values in Angstroms. All objects are assumed to use the same wavelength grid.
            resolution(numpy.ndarray): Array of (nobj,nres,nwave) resolution matrix elements.
            object_data(numpy.ndarray): Record array of fibermap rows for the objects to add.
            night(str): Date string for the night these objects were observed in the format YYYYMMDD.
            expid(int): Exposure number for these objects.
        Raises:
            RuntimeError: Can only add objects in update mode.
        """
        # Append the spectra first; the fibermap rows below are kept in sync
        # via the INDEX column.
        BrickBase.add_objects(self,flux,ivar,wave,resolution)
        # Augment object_data with constant NIGHT and EXPID columns.
        augmented_data = np.empty(shape = object_data.shape,dtype = self.hdu_list[4].data.dtype)
        for column_def in desispec.io.fibermap.fibermap_columns:
            name = column_def[0]
            # Special handling for the fibermap FILTER array, which is not output correctly
            # by astropy.io.fits so we convert it to a comma-separated list.
            if name == 'FILTER' and augmented_data[name].shape != object_data[name].shape:
                for i,filters in enumerate(object_data[name]):
                    augmented_data[name][i] = ','.join(filters)
            else:
                augmented_data[name] = object_data[name]
        # NIGHT is stored as an integer YYYYMMDD even though the arg is a string.
        augmented_data['NIGHT'] = int(night)
        augmented_data['EXPID'] = expid
        # New rows index the spectra just appended to HDUs 0-3.
        begin_index = len(self.hdu_list[4].data)
        end_index = begin_index + len(flux)
        augmented_data['INDEX'] = np.arange(begin_index,end_index,dtype=int)
        # Always concatenate to our table since a new file will be created with a zero-length table.
        self.hdu_list[4].data = np.concatenate((self.hdu_list[4].data,augmented_data,))
class CoAddedBrick(BrickBase):
    """Represents the co-added exposures in a single brick and, possibly, a single band.
    See :class:`BrickBase` for constructor info.
    """
    def __init__(self, path, mode='readonly', header=None):
        # Identical to the base class; this subclass only distinguishes
        # co-added files from per-exposure Brick files by type.
        super(CoAddedBrick, self).__init__(path, mode, header)
| |
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss functions used to train the FILM interpolation model.
The losses for training and test loops are configurable via gin. Training can
use more than one loss function. Test loop can also evaluate one or more loss
functions, each of which can be summarized separately.
"""
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from . import vgg19_loss as vgg19
import gin.tf
import numpy as np
import tensorflow as tf
@gin.configurable('vgg', denylist=['example', 'prediction'])
def vgg_loss(example: Mapping[str, tf.Tensor],
             prediction: Mapping[str, tf.Tensor],
             vgg_model_file: str,
             weights: Optional[List[float]] = None) -> tf.Tensor:
  """Perceptual (VGG19) loss for images in the [0, 1] color range.

  Args:
    example: Dictionary holding the ground-truth image under key 'y'.
    prediction: Dictionary holding the predicted image under key 'image'.
    vgg_model_file: Path to the vgg19 weights in MATLAB format.
    weights: Optional per-layer weight list; when None the defaults are used
      (see vgg19.vgg_loss documentation).

  Returns:
    The perceptual loss.
  """
  predicted_image = prediction['image']
  target_image = example['y']
  return vgg19.vgg_loss(predicted_image, target_image, vgg_model_file, weights)
@gin.configurable('style', denylist=['example', 'prediction'])
def style_loss(example: Mapping[str, tf.Tensor],
               prediction: Mapping[str, tf.Tensor],
               vgg_model_file: str,
               weights: Optional[List[float]] = None) -> tf.Tensor:
  """Style (Gram-matrix) loss for images in the [0, 1] color range.

  Args:
    example: Dictionary holding the ground-truth image under key 'y'.
    prediction: Dictionary holding the predicted image under key 'image'.
    vgg_model_file: Path to the vgg19 weights in MATLAB format.
    weights: Optional per-layer weight list; when None the defaults are used
      (see vgg19.vgg_loss documentation).

  Returns:
    A scalar tf.Tensor: the style loss aggregated over multiple VGG layer
    features.
  """
  predicted_image = prediction['image']
  target_image = example['y']
  return vgg19.style_loss(predicted_image, target_image, vgg_model_file,
                          weights)
def l1_loss(example: Mapping[str, tf.Tensor],
            prediction: Mapping[str, tf.Tensor]) -> tf.Tensor:
  """Mean absolute error between the predicted image and ground truth 'y'."""
  residual = prediction['image'] - example['y']
  return tf.reduce_mean(tf.abs(residual))
def l1_warped_loss(example: Mapping[str, tf.Tensor],
                   prediction: Mapping[str, tf.Tensor]) -> tf.Tensor:
  """Computes an l1 loss using only warped images.

  Args:
    example: A dictionary with the ground truth image as 'y'.
    prediction: The prediction dictionary with the image(s) as 'x0_warped'
      and/or 'x1_warped'.

  Returns:
    A scalar tf.Tensor: the sum of the l1 losses between each present warped
    image and y (0.0 when neither key is present).
  """
  total = tf.constant(0.0, dtype=tf.float32)
  for key in ('x0_warped', 'x1_warped'):
    if key in prediction:
      total += tf.reduce_mean(tf.abs(prediction[key] - example['y']))
  return total
def l2_loss(example: Mapping[str, tf.Tensor],
            prediction: Mapping[str, tf.Tensor]) -> tf.Tensor:
  """Mean squared error between the predicted image and ground truth 'y'."""
  residual = prediction['image'] - example['y']
  return tf.reduce_mean(tf.square(residual))
def ssim_loss(example: Mapping[str, tf.Tensor],
              prediction: Mapping[str, tf.Tensor]) -> tf.Tensor:
  """Mean SSIM between the predicted image and ground truth 'y' (max_val=1)."""
  return tf.reduce_mean(
      tf.image.ssim(prediction['image'], example['y'], max_val=1.0))
def psnr_loss(example: Mapping[str, tf.Tensor],
              prediction: Mapping[str, tf.Tensor]) -> tf.Tensor:
  """Mean PSNR between the predicted image and ground truth 'y' (max_val=1)."""
  predicted_image = prediction['image']
  target_image = example['y']
  return tf.reduce_mean(
      tf.image.psnr(predicted_image, target_image, max_val=1.0))
def get_loss(loss_name: str) -> Callable[[Any, Any], tf.Tensor]:
  """Returns the loss function corresponding to the given name."""
  # Dispatch table keyed by loss name.
  loss_fns = {
      'l1': l1_loss,
      'l2': l2_loss,
      'ssim': ssim_loss,
      'vgg': vgg_loss,
      'style': style_loss,
      'psnr': psnr_loss,
      'l1_warped': l1_warped_loss,
  }
  if loss_name not in loss_fns:
    raise ValueError('Invalid loss function %s' % loss_name)
  return loss_fns[loss_name]
# pylint: disable=unnecessary-lambda
def get_loss_op(loss_name):
  """Returns a function for creating a loss calculation op."""
  loss_fn = get_loss(loss_name)

  # Wrap in a named closure so every returned op is a fresh callable.
  def compute_loss(example, prediction):
    return loss_fn(example, prediction)

  return compute_loss
def get_weight_op(weight_schedule):
  """Returns a function for creating an iteration dependent loss weight op."""

  # Wrap the schedule in a named closure so the returned object is a plain
  # callable taking the iteration count.
  def compute_weight(iterations):
    return weight_schedule(iterations)

  return compute_weight
def create_losses(
    loss_names: List[str], loss_weight_schedules: List[
        tf.keras.optimizers.schedules.LearningRateSchedule]
) -> Dict[str, Tuple[Callable[[Any, Any], tf.Tensor], Callable[[Any],
                                                               tf.Tensor]]]:
  """Returns a dictionary of functions for creating loss and loss_weight ops.

  As an example, create_losses(['l1', 'l2'], [PiecewiseConstantDecay(),
  PiecewiseConstantDecay()]) returns a dictionary with two keys, and each
  value being a tuple of ops for loss calculation and loss_weight sampling.

  Args:
    loss_names: Names of the losses.
    loss_weight_schedules: Instances of loss weight schedules.

  Returns:
    A dictionary that contains the loss and weight schedule ops keyed by the
    names.
  """
  losses = {}
  for loss_name, schedule in zip(loss_names, loss_weight_schedules):
    distinct_weights = np.unique(schedule.values)
    constant_unit_weight = (
        len(distinct_weights) == 1 and distinct_weights[0] == 1.0)
    # 'k*' marks losses whose weight varies with the training iteration;
    # a constant weight of 1.0 keeps the bare name for prettier TensorBoard
    # summaries.
    key = loss_name if constant_unit_weight else 'k*' + loss_name
    losses[key] = (get_loss_op(loss_name), get_weight_op(schedule))
  return losses
@gin.configurable
def training_losses(
    loss_names: List[str],
    loss_weights: Optional[List[float]] = None,
    loss_weight_schedules: Optional[List[
        tf.keras.optimizers.schedules.LearningRateSchedule]] = None,
    loss_weight_parameters: Optional[List[Mapping[str, List[Any]]]] = None
) -> Mapping[str, Tuple[Callable[[Any, Any], tf.Tensor], Callable[[Any],
                                                                  tf.Tensor]]]:
  """Creates the training loss functions and loss weight schedules."""
  if loss_weights:
    # Constant weights: wrap each one in a degenerate piecewise-constant
    # schedule so downstream code can treat every weight as a schedule.
    weight_schedules = [
        tf.keras.optimizers.schedules.PiecewiseConstantDecay(
            boundaries=[0], values=[loss_weight, loss_weight])
        for loss_weight in loss_weights
    ]
  else:
    # Scheduled weights: instantiate each schedule class with its
    # corresponding parameter dict.
    weight_schedules = [
        schedule(**parameters)
        for schedule, parameters in zip(loss_weight_schedules,
                                        loss_weight_parameters)
    ]
  return create_losses(loss_names, weight_schedules)
@gin.configurable
def test_losses(
    loss_names: List[str],
    loss_weights: Optional[List[float]] = None,
    loss_weight_schedules: Optional[List[
        tf.keras.optimizers.schedules.LearningRateSchedule]] = None,
    loss_weight_parameters: Optional[List[Mapping[str, List[Any]]]] = None
) -> Mapping[str, Tuple[Callable[[Any, Any], tf.Tensor], Callable[[Any],
                                                                  tf.Tensor]]]:
  """Creates the test loss functions and loss weight schedules."""
  if loss_weights:
    # Constant weights: wrap each one in a degenerate piecewise-constant
    # schedule so downstream code can treat every weight as a schedule.
    weight_schedules = [
        tf.keras.optimizers.schedules.PiecewiseConstantDecay(
            boundaries=[0], values=[loss_weight, loss_weight])
        for loss_weight in loss_weights
    ]
  else:
    # Scheduled weights: instantiate each schedule class with its
    # corresponding parameter dict.
    weight_schedules = [
        schedule(**parameters)
        for schedule, parameters in zip(loss_weight_schedules,
                                        loss_weight_parameters)
    ]
  return create_losses(loss_names, weight_schedules)
def aggregate_batch_losses(
    batch_losses: List[Mapping[str, float]]) -> Mapping[str, float]:
  """Averages per batch losses into single dictionary for the whole epoch.

  As an example, if batch_losses contained per batch losses:
    batch_losses = [{'l1': 0.2, 'ssim': 0.9}, {'l1': 0.3, 'ssim': 0.8}]
  the returned dictionary would look like: {'l1': 0.25, 'ssim': 0.85}

  Args:
    batch_losses: A list of dictionary objects, with one entry for each loss.

  Returns:
    Single dictionary with the losses aggregated.
  """
  # Transpose: loss name -> list of per-batch values for that loss.
  per_loss_values = {}
  for batch_loss in batch_losses:
    for loss_name, loss_value in batch_loss.items():
      per_loss_values.setdefault(loss_name, []).append(loss_value)
  # Average each loss over every batch it appeared in.
  return {
      loss_name: np.mean(values)
      for loss_name, values in per_loss_values.items()
  }
| |
#!/tools/net/bin/python
# Copyright (c) 1994, 1996, Tony J. Ibbs All rights reserved.
# Copyright (c) 2004, Derek Chen-Becker All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of py-iso8211 nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# $Id: format.py,v 1.6 2004/04/05 17:44:57 d-rock Exp $
"""ISO 8211 format controls - interpretation thereof."""
Changes = """Modifications:
1996/04/12 (or thereabouts) - work started on proper format control parsing.
later: Still under construction.
"""
Version = "0.1 under construction"
import sys
import os
import array
import string
from math import ceil
import misc
# I *like* TRUE and FALSE - so define them!
TRUE = 1
FALSE = 0
# Exceptions
iso8211_format_error = "ISO 8211 format controls"
# ----------------------------------------------------------------------
class Format(object):
    """ISO 8211 format controls.

    Initialisation arguments:
        None

    A format control object contains:
        octets          the octet string
        extended_chars  TRUE if character subfields (A type) contain
                        extended character sets, escapes or whatever
                        (it is assumed that the user will set this by
                        hand in the first instance, since I don't expect
                        it to be used initially)
        controls        a list of Repeat and Control objects
        flatlist        a flattened version of "controls", containing
                        only Control objects (that is, with all explicit
                        repeats iterated out)
        count           the number of explicit format controls in this
                        object (this is identical to "len(flatlist)",
                        but is provided for convenience)
        repeat_from     the index of the entry in "flatlist" at which
                        repetition would start (or None if none)
        repeat          the slice of "flatlist" which is repeated
                        (this is flatlist[repeat_from:])
        current_item    the current Control object
        current_index   the current Control object's index
        unit_size       the unit size of a field, 0 indicates that a
                        field is delimited or has a single value

    Usage:

    1) Create a new Format object:

        fmt = format.Format()

    2) Parse a format control string using this object:

        fmt.parse("(I(1),(2A(%),R(3)))")

       Note that it is possible to parse a new string using the
       same object - the results of the new parse will simply
       overwrite those of the old one.

    3) It is possible to iterate over the format controls:

        for item in fmt:
            print "item %s"%item

       Note that this will never terminate, since format controls
       repeat - in other words, "current_index" can become greater
       than "count". If you want to go over each format item once,
       do:

        for which in range(fmt.count):
            print "item %s"%fmt.item(which)
    """

    def __init__(self):
        # We start off `empty'
        self.unset()
        # We assume that we are simply dealing with standard ASCII
        self.extended_chars = FALSE

    def __del__(self):
        pass

    def __repr__(self):
        # NOTE(review): backquote repr syntax is Python 1.x/2.x only.
        return "Format: %s"%(`self.octets`)

    def __getitem__(self,which):
        """Used in iteration - get the n'th format item.
        Returns Control objects in order, repeating as necessary.
        Note that "which" is 0 upwards, and that we will (in fact)
        continue iterating forever...
        """
        # Simply return the result of "item"
        return self.item(which)

    def unset(self):
        """Unset the format - as if the object had just been created.
        (except that the "extended_chars" value is not touched)
        """
        # Start with an empty list of format controls
        self.octets = ""
        self.controls = []
        self.flatlist = []
        self.repeat_from = None
        self.repeat = []
        self.current_item = None
        self.current_index = -1 # i.e., the first item will be the next
        self.count = 0
        self.unit_size = 0

    def _start_parse(self):
        """Perform processing required before a format is defined."""
        self.unset()

    def start_format(self):
        """Perform processing required before a format is defined `by hand'.
        Call this before any calls to "add", "start_repeat" and "end_repeat"
        are made. Don't forget to call "end_format" when finished...
        """
        self._start_parse()

    def add(self,control):
        """Add a format control to the current format.
        The CONTROL should be a format control, such as A(3) or 2I
        """
        # Ensure we have the initial opening parentheses, or else our comma separator
        if self.octets == "":
            self.octets = "("
        else:
            self.octets = self.octets + ","
        # Remember the control
        self.octets = self.octets + control

    def start_repeat(self,count=1):
        """Start a repeat sequence, repetition count COUNT (default 1)."""
        # Ensure we have the initial opening parentheses, or else our comma separator
        if self.octets == "":
            self.octets = "("
        else:
            self.octets = self.octets + ","
        # Remember the repeat
        # (a count of 1 is implicit and therefore not written out)
        if count != 1:
            self.octets = self.octets + `count`
        self.octets = self.octets + "("

    def end_repeat(self):
        """End a repeat sequence."""
        self.octets = self.octets + ")"

    def end_format(self):
        """Perform processing required when a format has been defined `by hand'.
        Call this after all the appropriate calls to "add", "start_repeat"
        and "end_repeat" have been made. No more such calls should be made
        after calling this.
        """
        self.octets = self.octets + ")"
        self.parse(self.octets)

    def _end_parse(self):
        """Perform processing required when a format has been defined."""
        # Flatten the resulting format
        self._flatten()
        # Which gives us the format length
        self.count = len(self.flatlist)
        # And enables us to work out the repeat slice
        if self.repeat_from != None:
            self.repeat = self.flatlist[self.repeat_from:]
        else:
            self.repeat = []
        # Look for a REPEAT TO END
        self._look_for_REPEAT_TO_END()

    def parse(self,octets):
        """Parse the ISO 8211 format control string OCTETS."""
        # Start the format off
        self._start_parse()
        # Remember the format string
        self.octets = octets
        #print "parsing %s" % misc.printable(octets)
        # And `decode' the format string
        # We use a `FormatParser' object to do the work for us
        parser = FormatParser(self)
        self.controls = parser.parse(octets)
        # Accumulate the fixed unit size of the field
        # (stays 0 for delimited or single-valued fields).
        self.unit_size = 0
        for cnt in self.controls:
            #cnt.show()
            self.unit_size += cnt.byte_width
            #print "Byte width: %s" % cnt.byte_width
        # print " total size = %d" % self.unit_size
        # Finish the formatting
        self._end_parse()

    def _look_for_REPEAT_TO_END(self):
        """Check for a last repeat clause to mark for writing out as REPEAT TO END.
        If the very last item in the controls list is a REPEAT 1, then we can
        mark it as a REPEAT TO END. We do this (rather nastily) by negating
        its repeat count - i.e., setting it to -1.
        """
        # NOTE(review): despite the docstring, the code below sets the
        # "repeat_to_end" flag to TRUE rather than negating the count.
        # Find the last clause in the control list
        last = self.controls[-1]
        # If the top level is not a REPEAT, then we don't have a REPEAT TO END situation
        if last.is_control:
            return
        # Otherwise, we need to look inside this REPEAT to see if IT ends
        # in a REPEAT, and so on...
        #print "Looking for REPEAT TO END in ",`self.controls`
        #print "Last item of list is ",`last`
        while not last.is_control:
            # Hah - it was a repeat clause as well
            # Extract the last item from ITS data
            last_item = last.clause[-1]
            if not last_item.is_control:
                last = last_item
            else:
                break
            #print "Last item of list is ",`last`
        # So that leaves us with "last" as the last clause
        #print "Which leaves us with last item",`last`
        # And if the repeat is 1, tell it that it is a REPEAT TO END
        if last.repeat == 1:
            last.repeat_to_end = TRUE
        #print "Leaving last item as ",`last`

    def item(self,which):
        """Return the Control object with index "which".
        Note that "which" is 0 upwards, and that it may be greater
        than the total number of controls in the format control string
        (in which case format repetition will be used to determine which
        control should be returned, and an IndexError exception will
        be raised if repetition is not enabled for this format control
        string).
        """
        if which < 0:
            raise IndexError,"Index should be 0 or more, not %d"%which
        elif which < len(self.flatlist):
            self.current_index = which
            self.current_item = self.flatlist[which]
        else:
            # OK - we're into repeat territory
            if self.repeat_from == None:
                raise IndexError,"Format `%s' does not repeat"%self.octets
            # Work out our position in the repeat list...
            posn = which - len(self.flatlist)
            posn = posn % len(self.repeat)
            self.current_item = self.repeat[posn]
            self.current_index = which
        return self.current_item

    def next_item(self):
        """Return the next Control object.
        This works out what the next Control should be, and
        expands out repetitions, etc, as necessary.
        """
        return self.item(self.current_index + 1)

    def rewind(self):
        """`Rewind' the format controls.
        After calling this, "next_item()" will return the first
        Control object again.
        """
        self.current_item = None
        self.current_index = -1

    def _flatten_item(self,item):
        """Flatten a given item from the format control list into "flatlist"."""
        # Do what seems indicated by its type
        if item.is_control:
            # It's a format control - simply iterate out the repeat
            for count in range(item.repeat):
                self.flatlist.append(item.control)
        else:
            # It's a repeat clause
            # Note at what index in the flatlist the first entry
            # for the repeat clause will be inserted
            self.repeat_from = len(self.flatlist)
            # And flatten out the clause the appropriate number of times
            for count in range(item.repeat):
                self._flatten_list(item.clause)

    def _flatten_list(self,list):
        """Add a flattened format control list to "flatlist"."""
        # NOTE(review): the parameter shadows the builtin "list".
        for item in list:
            self._flatten_item(item)

    def _flatten(self):
        """Flatten the "controls" list into the "flatlist"."""
        self.flatlist = []
        self.repeat_from = 0 # A reasonable guess
        self._flatten_list(self.controls)

    def _write_nesting(self,dfd,nesting,indent):
        """Write a number of spaces according to the NESTING*INDENT."""
        # Actually, we can just do that
        dfd.write(nesting*indent*" ")

    def _write_item(self,dfd,item,nesting,indent):
        """Write out a representation of the given parsed item."""
        # Do what seems indicated by its type
        if item.is_control:
            # It's a format control
            self._write_nesting(dfd,nesting,indent)
            if item.repeat != 1:
                dfd.write("%2d "%(item.repeat))
            else:
                dfd.write(" ")
            dfd.write("%s\n"%(`item.control`))
        else:
            # It's a repeat clause
            self._write_nesting(dfd,nesting,indent)
            # It is not *quite* explicit in B.2 that omitting a repeat
            # count of 1 is legitimate in this circumstance, but there
            # are examples in B.3 of such practice, and I think it looks
            # neater..
            if item.repeat_to_end:
                dfd.write("REPEAT TO END\n")
            elif item.repeat == 1:
                dfd.write("REPEAT\n")
            else:
                dfd.write("REPEAT %d\n"%item.repeat)
            self._write_list(dfd,item.clause,nesting+1,indent)
            self._write_nesting(dfd,nesting,indent)
            dfd.write("END REPEAT\n")

    def _write_list(self,dfd,list,nesting,indent):
        """Write out a representation of a format control list."""
        # NOTE(review): the parameter shadows the builtin "list".
        for item in list:
            self._write_item(dfd,item,nesting,indent)

    def write_DFD(self,dfd,indent=3):
        """Write out the appropriate DFD data for these format controls.
        DFD    is the file to write to
        INDENT is the number of spaces to indent by - this is multiplied
               by the `REPEAT' depth (that is, inside the first REPEAT
               clause, 2*INDENT is used, etc.). It defaults to 3.
        """
        self._write_list(dfd,self.controls,1,indent)

    def show(self):
        # NOTE(review): this is broken - "padding" is undefined (NameError)
        # and the format string has one "%s" but two arguments; it most
        # likely should read: print "Format: %s"%self.octets
        print "Format: %s"%(padding,self.octets)
# ----------------------------------------------------------------------
class FormatParser:
    """Used to parse a format string.

    WARNING - No checking that we haven't gone off the end of the string.
    WARNING - Various places need to check for IndexError

    Initialisation arguments:
        format      the Format object we are parsing for
                    (this is needed so we can get at its
                    format building functions)

    A FormatParser object contains:
        octets          the octet string we are to parse
        extended_chars  TRUE if we are allowing extended character sets
                        in character (A type) subfields
        next            which octet in "octets" we are looking at next
    """

    def __init__(self,format):
        # NOTE(review): the parameter shadows this module's own name.
        self.format = format
        self.extended_chars = format.extended_chars
        self.octets = None # no format string to parse yet
        self.next = 0 # next character we're looking at

    def __del__(self):
        # Remove any depencies, just in case
        self.format = None

    def _read_repeat_count(self):
        """Read and return a repeat count - no count means 1.
        The current character is looked at first.
        The first non-digit found is left as the current character."""
        # Read the digit string
        repeat = self._read_digit_string()
        # And return the appropriate repeat count
        if repeat == "":
            return 1
        else:
            return int(repeat)

    def _read_digit_string(self):
        """Read and return a digit string.
        The current character is looked at first.
        The first non-digit found is left as the current character.
        """
        # Start with an empty string
        digits = ""
        # Keep adding characters to our digit string
        # until we hit a non-digit (or the end of the string!)
        while self.next < len(self.octets):
            octet = self.octets[self.next]
            if octet in string.digits:
                digits = digits + octet
                self.next = self.next + 1
            else:
                break
        return digits

    def _read_subfield_size(self,control):
        """Read and return a subfield size specification.
        The current character is looked at first.
        If it is an opening parenthesis then we have a subfield size to read.
        If we have a subfield size to read, then it is either a subfield width,
        or a subfield delimiter.
        We return:
            (None,None)     if there is no subfield size
            ("W",width)     if there is a subfield width
            ("D",delimiter) if there is a subfield delimiter
        If we had an opening parenthesis then the character after the closing
        parenthesis is left as the current character, otherwise the current
        character is unchanged.
        """
        # Do we have a subfield size specification?
        if self.octets[self.next] != "(":
            return (None,None) # No - return at once
        else:
            self.next = self.next + 1 # Yes - ignore the "("
        # OK - we do have something to read
        if self.extended_chars and control == "A":
            # If we have extended character sets, we're not allowed
            # fixed width character (A type) subfields...
            width = None
            delim = self._read_subfield_delim()
        else:
            # Otherwise, if it starts with a number then it is a width...
            if self.octets[self.next] in string.digits:
                width = self._read_subfield_width()
                delim = None
            else:
                width = None
                delim = self._read_subfield_delim()
        # Regardless, check we have the required closing parenthesis
        if self.octets[self.next] != ")":
            raise iso8211_format_error,\
                  (FMT_EXC_CLOSEPAREN,self.octets[self.next],self.next,self.octets)
        else:
            self.next = self.next + 1 # we do - just ignore it
        # And return what we must return
        if width == None:
            return ("D",delim)
        else:
            return ("W",width)

    def _read_subfield_delim(self):
        """Read and return a subfield delimiter."""
        # The current octet starts the delimiter
        delim = self.octets[self.next]
        self.next = self.next + 1
        # If we have extended character sets, there might be more octets,
        # but otherwise there can't be...
        if self.extended_chars:
            # The following is not accurate enough, but will do for
            # testing stuff at the moment, I guess...
            # (accumulate octets until the closing parenthesis)
            while self.next < len(self.octets):
                octet = self.octets[self.next]
                if octet != ")":
                    delim = delim + octet
                    self.next = self.next + 1
                else:
                    break
        return delim

    def _read_subfield_width(self):
        """Read a subfield width.
        The current character is looked at first.
        The first non-digit found is left as the current character.
        """
        # Read the digit string
        width = self._read_digit_string()
        # And return it as an integer
        return int(width)

    def _read_binary_form(self):
        """Read and return the format term and width for a binary form."""
        # Which form it IS is determined by the first digit
        what = self.octets[self.next]
        if what not in string.digits or not (1 <= int(what) <= 5):
            raise iso8211_format_error,\
                  (FMT_EXC_BINFORM,what,self.next,self.octets)
        self.next = self.next + 1
        # And its width is determined by the integer after that
        # (a single digit - widths above 9 cannot be expressed here)
        width = self.octets[self.next]
        self.next = self.next + 1
        if width not in string.digits:
            raise iso8211_format_error,\
                  (FMT_EXC_BINWIDTH,width,self.next,self.octets)
        width = int(width)
        # Check they're compatible
        if ((what == "1" or what == "2") and (width != 1 and width !=2 and \
            width != 3 and width != 4)) or \
           ((what == "3" or what == "4" or what == "5") and (width != 4 and width != 8)):
            raise iso8211_format_error,\
                  (FMT_EXC_BININCOMPAT,what,width,self.next-2,self.octets)
        # OK - so return them
        return (what,width)

    def _read_control(self):
        """Read a format control character(s) and return a Control object."""
        # The current character should be the main one...
        octet = self.octets[self.next]
        self.next = self.next + 1
        if octet == "A" or octet == "I" or octet == "R" or octet == "S" or \
           octet == "C" or octet == "X":
            # This is the simple case - we may be followed by a width
            # or user delimiter
            control = octet
            which,size = self._read_subfield_size(octet)
        elif octet == "B":
            # This might be either bit string data, or it might be
            # the MSOF "Btw" binary form. We decide which by looking
            # at what follows it
            control = octet
            if self.octets[self.next] in string.digits:
                # It is the MSOF binary form - read its type and size
                type,size = self._read_binary_form()
                # The `type' is really part of the control...
                control = control + type
                which = "B"
            else:
                # It is bit string data - treat it normally
                which,size = self._read_subfield_size(octet)
                # NOTE(review): leftover debug print below; note also the
                # test is for falsy values (0, ""), not just None.
                if not which or not size:
                    print "BLA %s" % self.octets
        elif octet == "b":
            # This is the LSOF "btw" binary form - read its type and size
            type,size = self._read_binary_form()
            # The `type' is really part of the control...
            control = octet + type
            which = "B"
        else:
            raise iso8211_format_error,\
                  (FMT_EXC_BADCONTROL,octet,self.next-1,self.octets)
        return Control(control,which,size)

    def _read_clause(self):
        """Read a single clause of the format string.
        A clause is defined as the stuff within a set of repeat parentheses.
        A list of Repeat objects is returned.
        """
        # Start with an empty list
        clause = []
        # Loop reading things...
        while self.next < len(self.octets):
            # Check for the end of this clause
            # (Is an empty clause allowed? I can't remember offhand)
            if self.octets[self.next] == ")":
                self.next = self.next + 1 # ignore it
                return clause # and return
            # If we've already read an item, we expect a comma
            if clause != []:
                if self.octets[self.next] == ",":
                    self.next = self.next + 1 # ignore it
                else:
                    raise iso8211_format_error,\
                          (FMT_EXC_COMMA,self.octets[self.next],self.next,self.octets)
            # Then we can generally start off with a repeat count...
            repeat = self._read_repeat_count()
            # And we expect next either a new repeat clause, or
            # a format control
            if self.octets[self.next] == "(":
                # Ignore the "("
                self.next = self.next + 1
                # Read this new clause (recursively)
                new_clause = self._read_clause()
                # And add it to our list as a Repeat object
                repeat_object = Repeat(repeat,clause=new_clause)
                clause.append(repeat_object)
            else:
                # Read the format control into a Control object
                control_object = self._read_control()
                # And add it to our list as a Repeat object
                repeat_object = Repeat(repeat,control=control_object)
                clause.append(repeat_object)
        # If we got here, then we ran out of format string, without a closing ")"
        raise iso8211_format_error,(FMT_EXC_ENDCLOSE,self.octets)

    def parse(self,octets):
        """Parse the format control string in OCTETS and return the parsed form.
        The parsed form is returned as a list of Repeat objects.
        """
        # Set ourselves up to start on this format control string
        self.octets = octets # the format string to parse
        self.next = 0 # the next character we're looking at
        # Check that the first character is an opening parenthesis
        if self.octets[self.next] != "(":
            raise iso8211_format_error,(FMT_EXC_STARTOPEN,octets)
        else:
            self.next = self.next + 1 # if it's there, just ignore it
        # Return the result of parsing the format string
        return self._read_clause()
# ----------------------------------------------------------------------
class Repeat:
    """A repeat object, containing either a repeat clause or a Control object.

    Initialisation arguments:
        count    the repeat count for this clause or Control
        one of:
            clause   a list of Repeat objects forming a repeat clause
            control  a single Control object

    A Repeat object contains:
        repeat          the repeat count for this clause or Control
        one of:
            clause   a list of Repeat objects forming a repeat clause
            control  a single Control object
        is_control      TRUE if this Repeat contains a single Control
        repeat_to_end   TRUE if this Repeat contains a repeat clause which
                        is the last repeat clause in the format controls,
                        and which repeats until the end of the field - that
                        is, it can be written out using REPEAT TO END
        byte_width      The size of the contained controls/repeats, or zero
                        if undetermined/delimited
    """

    def __init__(self, count, clause=None, control=None):
        # Exactly one of "clause" and "control" must be supplied.
        if clause is None and control is None:
            raise ValueError("Repeat object requires either clause or control value")
        if clause is not None and control is not None:
            raise ValueError("Repeat object requires only one of clause and control value")
        # And remember them
        self.repeat = count
        self.clause = clause
        self.control = control
        self.is_control = (self.control is not None)
        self.repeat_to_end = False
        self.byte_width = self.calculate_size()

    def calculate_size(self):
        """Return the total byte width of the contained control or clause."""
        if self.is_control:
            # A single control, repeated "repeat" times.
            return self.repeat * self.control.byte_width
        # NOTE(review): the clause total is *not* multiplied by the repeat
        # count - confirm whether that is intentional (e.g. for REPEAT TO
        # END clauses of unbounded length) before changing it.
        total = 0
        for subrepeat in self.clause:
            total += subrepeat.byte_width
        return total

    def __del__(self):
        # Make some attempt to tidy up
        self.clause = None
        self.control = None

    def __repr__(self):
        if self.is_control:
            return "%d %s" % (self.repeat, self.control)
        elif self.repeat_to_end:
            return "toend %s" % (self.clause)
        else:
            return "%d %s" % (self.repeat, self.clause)

    def show(self):
        print("Repeat: %s" % repr(self))
# ----------------------------------------------------------------------
class Control:
    """A single format control

    Initialisation arguments:
        control     the format control character
                    (the format control character and its type for a binary form)
        form        "D" (Delimited) if it is delimited
                    "W" (Width) if it has an explicit width
                    "B" (Binary) if it is a binary form, "Btw" or "btw"
                    None if none of these applies
                    (so it is terminated by UT or FT, of course)
                    (with the exception of "B" - see 6.4.3.3 g))
        size        a delimiter if "form" is "D",
                    a width if "form" is "W" or "B"
                    and otherwise is None
        byte_width  the width in bytes of this control, if known (not for
                    delimited fields). Otherwise 0.

    A Control object contains:
        control, type, size as above
    """

    def __init__(self,control,form,size):
        #print "Init control:", control, form, size
        self.control = control
        self.form = form
        self.size = size
        # Reject unknown forms up front.
        # NOTE(review): this raises a Python 1.x/2.x "string exception";
        # callers catch it as "iso8211_format_error", so it cannot be
        # modernised without changing the catchable type.
        if form != "D" and form != "W" and form != "B" and form != None:
            raise iso8211_format_error,("Control error: form is `%s'"%form)
        self.byte_width = 0
        if form == "W":
            self.byte_width = self.size
        # special case for binary widths
        if form == "B":
            if self.size:
                #self.byte_width = int(ceil(self.size / 8.0))
                # NOTE(review): the stored size is used as the byte width
                # directly; the commented-out line above suggests it may
                # once have been a bit count - confirm before relying on it.
                self.byte_width = self.size
            else:
                print "Binary control without size."

    def __del__(self):
        pass

    def __repr__(self):
        # Render back into something close to the original format-string
        # notation, depending on the form.
        if self.form == "D":
            return "%s(%s)"%(self.control,self.size)
        elif self.form == "W":
            return "%s(%d)"%(self.control,self.size)
        elif self.form == "B":
            return "%s%d"%(self.control,self.size)
        elif self.form == None:
            return "%s"%(self.control)
        else:
            return "?"

    def show(self):
        print "Control: %s"%(`self`)
# ----------------------------------------------------------------------
# Friendlier error handling
#
# These strings are the "tag" carried as the first element of the tuple
# raised with "iso8211_format_error"; explain() below switches on them to
# print a human-readable description.
FMT_EXC_WRITE_1 = "Write error 1"
FMT_EXC_PRINT_1 = "Print error 1"
FMT_EXC_WRITE_2 = "Write error 2"
FMT_EXC_PRINT_2 = "Print error 2"
FMT_EXC_CLOSEPAREN = "Missing )"
FMT_EXC_BINFORM = "Bad binary form (form)"
FMT_EXC_BINWIDTH = "Bad binary form (width)"
FMT_EXC_BININCOMPAT = "Bad binary form (form and width)"
FMT_EXC_BADCONTROL = "Bad format control"
FMT_EXC_COMMA = "Missing comma"
FMT_EXC_STARTOPEN = "Missing ( at start"
FMT_EXC_ENDCLOSE = "Missing ) at end"
def explain(tuple):
    """Explain an "iso8211_format_error" exception, using the CONTEXT and TUPLE.
    Normally, one would catch a random exception and do something like:
        print "%s: %s"%(sys.exc_type,sys.exc_value)
    to print out its name and value(s). This function can be used to
    `translate' an iso8211 format error exception into something a bit
    more helpful, by outputting a proper explanation for the exception.
    For instance:
        fmt = format.Format()
        try:
            fmt.parse(control_string)
        except format.iso8211_format_error,details:
            print "Error parsing format control string\n"
            format.explain(details)
    The first, short, method may be more appropriate in production systems,
    where one expects (!) things to work, but this routine may be more
    useful in diagnostic situations, or where the user is less au fait with
    ISO/IEC 8211 itself.
    """
    # NOTE(review): the parameter shadows the builtin "tuple".
    # There are various different format errors
    # - the first item in the tuple should distinguish them
    which = tuple[0]
    rest = tuple[1:]
    if which == FMT_EXC_WRITE_1:
        print "Internal error 1 writing format controls item data\n" \
              "The tuple",rest[0],"contains an unknown tuple code `%s'"%(rest[1])
    elif which == FMT_EXC_PRINT_1:
        print "Internal error 1 printing format controls item data\n" \
              "The tuple",rest[0],"contains an unknown tuple code `%s'"%(rest[1])
    elif which == FMT_EXC_WRITE_2:
        print "Internal error 2 writing format controls item\n" \
              "The tuple",rest[0],"contains an unknown tuple code `%s'"%(rest[1])
    elif which == FMT_EXC_PRINT_2:
        print "Internal error 2 printing format controls item\n" \
              "The tuple",rest[0],"contains an unknown tuple code `%s'"%(rest[1])
    elif which == FMT_EXC_CLOSEPAREN:
        print "Missing `)' to end a subfield size specification\n" \
              "A `%s' was found at offset %d in `%s',\n" \
              "when a `)' was expected"%(rest)
    elif which == FMT_EXC_BINFORM:
        print "Unknown binary form `%s'\n"\
              "The binary form given at offset %d in `%s'\n" \
              "is not 1 to 5"%(rest)
    elif which == FMT_EXC_BINWIDTH:
        print "The width of a binary form must be a digit\n" \
              "The value `%s' given at offset %d in `%s' is not"%(rest)
    elif which == FMT_EXC_BININCOMPAT:
        print "The binary form and width are incompatible\n" \
              "The binary form %s and width %d cannot go together\n" \
              "(at offset %d in `%s')"%(rest)
    elif which == FMT_EXC_BADCONTROL:
        print "Unrecognised format control `%s'\n" \
              "The control character at offset %d in `%s'\n" \
              "is not one of A,I,R,S,C,B,X or b"%(rest)
    elif which == FMT_EXC_COMMA:
        print "Missing comma\n" \
              "Found `%s' instead of comma at offset %d in `%s'"%(rest)
    elif which == FMT_EXC_STARTOPEN:
        print "The format string does not start with a `('\n" \
              "The first character in `%s' is not a `('"%(rest)
    elif which == FMT_EXC_ENDCLOSE:
        print "The format string does not end with a `)' to match the opening `('\n" \
              "The last character in `%s' is not a `)'"%(rest)
    else:
        # Otherwise, just print out the tuple
        print tuple
def test_show(format):
    """Print out various stuff for the given Format object."""
    # NOTE(review): the parameter shadows this module's own name ("format").
    print "Format ",format.octets
    print "Controls",format.controls
    print "Flatlist",format.flatlist
    print "Repeat from",format.repeat_from
    print "Repeat is ",format.flatlist[format.repeat_from:]
    print "Length is ",len(format.flatlist)
# Sample format control strings exercised by test() below.
test_string_1 = "(I(1),I(2),I(3),(A(a),A(b)))"
test_string_2 = "(2(I(4),A,3A(#),R),(I(2)))"
test_string_3 = "(2(I(4),A,3A(#),R),(2(I(2)),B12))"

def test(what=None):
    """A simple test of parsing, flattening, etc.
    WHAT may be either a number in the range 1..3 (in which case
    a sample format control string is used), or a format control
    string to `test'.
    """
    if type(what) == type(1):
        if what == 1:
            fmt = test_string_1
        elif what == 2:
            fmt = test_string_2
        elif what == 3:
            fmt = test_string_3
        else:
            # NOTE(review): falls through with "fmt" unbound, so the
            # x.parse(fmt) call below raises NameError - a "return"
            # after this print looks intended.
            print "There is no test string #%d"%what
    elif type(what) == type("string"):
        fmt = what
    else:
        print "test() needs either an integer 1..3 or a string"
        return
    x = Format()
    x.parse(fmt)
    test_show(x)
    print "Writing out DFD:"
    x.write_DFD(sys.stdout)
    print "Iterating over x:"
    count = 0
    # Formats repeat forever, so cap the demonstration at 21 items.
    for item in x:
        print " Item %d is %s"%(x.current_index,item)
        count = count + 1
        if count > 20:
            break
| |
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from django_bootstrap3view.django_bootstrap3view_app.utils.render import render, render_string
from django_bootstrap3view.django_bootstrap3view_app.utils.python import convert_to_bool
class BaseService(object):
    """Thin service layer wrapped around a Django model.

    Subclasses are expected to define ``entity`` (the model class).  Any
    attribute not found on the service is delegated to ``entity.objects``,
    so manager methods such as ``filter``/``all``/``get_or_create`` can be
    called directly on the service instance.
    """

    # Shortcut to the model's default manager.
    _repo = property(fget=lambda self: self.entity.objects)
    _page_size = 10
    # Filters merged into every delegated manager call.  Class-level mutable:
    # subclasses should override it, not mutate it in place.
    default_query_params = {}

    def __getattr__(self, name):
        """
        Delegates automatically all undefined methods on the repository entity.
        """
        def decorator(*args, **kwargs):
            method = getattr(self._repo, name)
            if method is None:
                raise AttributeError("'%s' has no attribute '%s'" % (self.__class__.__name__, name))
            # Pass without_filters=True to skip default_query_params for one call.
            if not kwargs.pop("without_filters", False):
                for key, value in self.default_query_params.iteritems():
                    kwargs.setdefault(key, value)
            return method(*args, **kwargs)
        return decorator

    def get_page(self, page=0, size=None, min_page=None, **kwargs):
        """Return one page of objects (``page`` is zero-based).

        When ``min_page`` is given, the slice spans pages ``min_page``
        through ``page`` inclusive.
        """
        if size is None:
            size = self._page_size
        page = int(page)
        if min_page is not None:
            min_page = int(min_page)
            limit = (page + 1) * size
            offset = min_page * size
        else:
            limit = (page + 1) * size
            offset = size * page
        return self._get_objects(self._get_page_query(offset, limit, **kwargs))

    def _get_page_query(self, offset, limit, **kwargs):
        """Build the sliced queryset for a page request."""
        return self.all()[offset:limit]

    def list(self, start, size, **kwargs):
        """Return ``size`` objects starting at absolute offset ``start``."""
        page = int(start / size)
        return self.get_page(page=page, size=size, min_page=None, **kwargs)

    def _get_objects(self, objects):
        """ Override to add behaviour """
        return objects

    def get_one(self, *args, **kwargs):
        """Return the first object matching the filters, or None."""
        objects = self.filter(*args, **kwargs)
        return objects[0] if objects else None

    def new(self, *args, **kwargs):
        """Instantiate (but do not save) a new entity."""
        return self.entity(*args, **kwargs)

    def _get_or_new(self, *args, **kwargs):
        """Return ``(obj, created)``; fall back to an unsaved instance when
        ``get_or_create`` fails (e.g. non-field kwargs, integrity errors)."""
        try:
            obj, created = self.get_or_create(*args, **kwargs)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.
            obj, created = self.entity(*args, **kwargs), True
        return obj, created

    def get_or_new(self, *args, **kwargs):
        """Like ``_get_or_new`` but return only the object."""
        obj, _ = self._get_or_new(*args, **kwargs)
        return obj

    def update_or_create(self, pre_create_function=None, pre_update_function=None, *args, **kwargs):
        """Update the entity identified by kwargs['id'], or create a new one.

        The optional hooks may mutate ``kwargs`` before the entity is
        updated/created.  The saved entity is returned.
        """
        entity_id = kwargs.pop("id", None)
        if entity_id:
            if pre_update_function is not None:
                pre_update_function(kwargs)
            entity = self.get(id=entity_id)
            for key, value in kwargs.iteritems():
                setattr(entity, key, value)
        else:
            if pre_create_function is not None:
                pre_create_function(kwargs)
            entity = self.new(**kwargs)
        entity.save()
        return entity

    def get_or_new_created(self, *args, **kwargs):
        """Return ``(obj, created)`` — alias for ``_get_or_new``."""
        return self._get_or_new(*args, **kwargs)

    def get_form(self):
        """Override to supply a form class for this service."""
        return None

    def _get_data(self, request, *args, **kwargs):
        """Collect POST data (minus the CSRF token) plus additional data."""
        data = dict([(key, value) for key, value in request.POST.iteritems() if key != "csrfmiddlewaretoken"])
        data.update(self._get_additional_data(request))
        return data

    def _get_additional_data(self, request, *args, **kwargs):
        """Override to inject extra data into ``_get_data``."""
        return {}

    def _get_entity(self, request, *args, **kwargs):
        """Fetch or build the entity targeted by this request."""
        return self.get_or_new(**self._get_data(request))

    def _set_data(self, entity, request, *args, **kwargs):
        """Copy the request data onto ``entity`` attributes."""
        data = self._get_data(request)
        for key, value in data.iteritems():
            setattr(entity, key, value)
        return entity

    def set_attrs(self, entity, attrs):
        """Set every key/value of ``attrs`` as an attribute on ``entity``."""
        for key, value in attrs.iteritems():
            setattr(entity, key, value)

    def save_entity(self, entity, *args, **kwargs):
        """Persist the entity (override to customise saving)."""
        entity.save()

    def save(self, request, *args, **kwargs):
        """Full request-driven save pipeline; returns the saved entity."""
        entity = self._get_entity(request, *args, **kwargs)
        self._set_data(entity, request, *args, **kwargs)
        self.save_entity(entity, *args, **kwargs)
        self._post_save(entity, request, *args, **kwargs)
        return entity

    def _post_save(self, entity, request, *args, **kwargs):
        """Hook executed after ``save`` — override as needed."""
        pass

    def render(self, template, context):
        """Render a template file with ``context``."""
        return render(template, context)

    def render_string(self, string, context):
        """Render a template given as a string with ``context``."""
        return render_string(string, context)

    def get_object_or_404(self, **kwargs):
        """Shortcut to Django's get_object_or_404 for this entity."""
        return get_object_or_404(self.entity, **kwargs)

    def delete(self, *args, **kwargs):
        """Delete matching objects; ``logical=True`` deactivates instead.

        Returns True when at least one object matched.
        """
        logical_delete = kwargs.pop("logical", False)
        objs = self.filter(*args, **kwargs)
        if not objs:
            return False
        for obj in objs:
            if not logical_delete:
                obj.delete()
            else:
                obj.active = False
                obj.save()
        return True

    def get_formated_sum(self, value):
        """Format a numeric value (None -> 0) with two decimals."""
        if value is None:
            value = 0
        return "%.2f" % value

    def _render_row_value(self, row_data, render):
        """Resolve a cell value: ``render`` is a key/attribute name or callable."""
        if isinstance(render, str):
            if isinstance(row_data, dict):
                return str(row_data[render])
            else:
                return str(getattr(row_data, render))
        else:
            return str(render(row_data))

    def get_params(self, data, params):
        """Extract ``params`` keys from ``data`` into a new dict."""
        dict_params = {}
        for param in params:
            dict_params[param] = data.get(param)
        return dict_params

    def convert_to_bool(self, data, params):
        """In-place conversion of the given keys to booleans."""
        convert_to_bool(data, params)

    def to_bool(self, param):
        """Convert '0'/'1' style strings to bool."""
        return bool(int(param))

    def get_action_params(self, request, params_names, prefix="", bar_action=True):
        """Read prefixed POST params and return them with the prefix stripped."""
        complete_names = ["%s%s" % (prefix, param) for param in params_names]
        params = self.get_params(request.POST, complete_names)
        if bar_action:
            boolean_params = ["%s%s" % (prefix, param) for param in ["is_main_action", "is_side_action"]]
            self.convert_to_bool(params, boolean_params)
        final_params = {}
        for key, value in params.iteritems():
            new_key = key.replace(prefix, "")
            final_params[new_key] = value
        return final_params

    def check_nullables(self, data, params):
        """Normalise falsy values of the given keys to None."""
        for param in params:
            if not data.get(param):
                data[param] = None
| |
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
def parse_input_graph(input_graph_def):
    """Index the nodes of a GraphDef by name.

    On duplicate names the first occurrence wins and a message is printed.

    :param input_graph_def: input graph definition
    :return: dict mapping node name -> node
    """
    name_to_node = {}
    for node_def in input_graph_def.node:
        if node_def.name in name_to_node:
            print('Duplicate node name {}'.format(node_def.name))
        else:
            name_to_node[node_def.name] = node_def
    return name_to_node
def get_valid_log(max_min_log):
    """Collect semicolon-delimited records from a min/max log file.

    Lines without a ';' are dropped.  A line with exactly two semicolons is
    one record; a line with an even count > 2 holds several concatenated
    records and is split apart; odd counts are reported and skipped.
    """
    with open(max_min_log) as log_file:
        candidates = [raw.strip() for raw in log_file if ';' in raw]
    records = []
    for line in candidates:
        n_semis = line.count(';')
        if n_semis == 2:
            records.append(line)
            continue
        if n_semis % 2 != 0:
            print("Invalid line")
            continue
        pieces = int(n_semis / 2)
        positions = [pos for pos, ch in enumerate(line) if ch == ";"]
        for k in range(pieces - 1):
            records.append(line[positions[k * 2]: positions[k * 2 + 2]])
        records.append(line[positions[pieces * 2 - 2]:])
    return records
def parse_requantization_ranges(max_min_log):
    """
    Parse the max_min log to get requantization values
    :param max_min_log: input min max log file
    :return: dict saved the result (node name -> [5th-pct min, 95th-pct max])
    """
    print_suffix = "__print__"
    post_fix = "__requant_min_max"
    marker = print_suffix + ";" + post_fix
    mins_by_name = {}
    maxs_by_name = {}
    for record in get_valid_log(max_min_log):
        if record.find(marker) == -1:
            continue
        payload = record.split(marker)[-1]
        low = payload.split('][')[0].split('[')[1]
        high = payload.split('][')[1].split(']')[0]
        node_name = record.split(';')[1].strip()[:-len(print_suffix)]
        mins_by_name.setdefault(node_name, []).append(float(low))
        maxs_by_name.setdefault(node_name, []).append(float(high))
    res = {}
    # Keep a robust range: 5th percentile of the mins, 95th of the maxes.
    for node_name, values in mins_by_name.items():
        idx = int(round(len(values) * 0.05))
        if idx < 0:
            idx = 0
        res.setdefault(node_name, []).append(sorted(values)[idx])
    for node_name, values in maxs_by_name.items():
        idx = int(round(len(values) * 0.95))
        if idx > len(values) - 1:
            idx = len(values) - 1
        res[node_name].append(sorted(values)[idx])
    return res
def parse_max_min_log(max_min_log, fetch_max=True):
    """
    Parse the max_ming log file
    :param max_min_log: max_min log file
    :param fetch_max: parse for freeze_max or not
    :return: get the node name and value mapping (95th percentile value)
    """
    print_suffix = "__print__"
    marker = print_suffix + ";" + ("__max:" if fetch_max else "__min:")
    collected = {}
    for record in get_valid_log(max_min_log):
        if record.find(marker) == -1:
            continue
        fields = record.split(';')
        node_name = fields[1][:-len(print_suffix)]
        raw_value = fields[-1].split('[')[-1].split(']')[0]
        # Only quantized ("eightbit") nodes are of interest here.
        if "eightbit" in node_name:
            collected.setdefault(node_name, []).append(float(raw_value))
    res = {}
    for node_name, values in collected.items():
        idx = int(len(values) * 0.95)
        if idx > len(values) - 1:
            idx = len(values) - 1
        res[node_name] = sorted(values)[idx]
    return res
def generate_output_graph_ranges(input_node_map, range_info):
    """Replace collected requantization ranges with frozen Const nodes.

    For every node with an entry in range_info two float32 Const nodes
    (<name>/frozen_min and <name>/frozen_max) replace the original node, and
    consumers referencing its ":0"/":1" outputs are rewired to the constants.

    :param input_node_map: dict mapping node name -> NodeDef
    :param range_info: dict mapping node name -> [min, max]
    :return: transformed GraphDef
    """
    output_graph_def = graph_pb2.GraphDef()
    inputs_to_rename = {}
    for node in input_node_map:
        if node in range_info:
            min_node = node_def_pb2.NodeDef()
            min_node.op = "Const"
            min_node.name = node + "/frozen_min"
            inputs_to_rename[node + ":0"] = min_node.name + ":0"
            min_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
            min_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
                tensor=tensor_util.make_tensor_proto(float(range_info[node][0]), dtypes.float32, [])))
            max_node = node_def_pb2.NodeDef()
            max_node.op = "Const"
            max_node.name = node + "/frozen_max"
            inputs_to_rename[node + ":1"] = max_node.name + ":0"
            max_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
            max_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
                tensor=tensor_util.make_tensor_proto(float(range_info[node][1]), dtypes.float32, [])))
            output_graph_def.node.extend([min_node, max_node])
        else:
            new_node = node_def_pb2.NodeDef()
            new_node.CopyFrom(input_node_map[node])
            output_graph_def.node.extend([new_node])
    for node in output_graph_def.node:
        # O(1) dict membership instead of the original O(n) scan over the
        # keys of inputs_to_rename; behavior is identical.
        for input_index, input_name in enumerate(node.input):
            if input_name in inputs_to_rename:
                node.input[input_index] = inputs_to_rename[input_name]
    return output_graph_def
def generate_output_graph(input_node_map, max_name_value, is_max=True):
    """
    Generate transformed graph for freeze_max/freeze_min transformation.
    :param input_node_map: input node name and nodedef mapping
    :param max_name_value: target values
    :param is_max: freeze_max flag
    :return: transformed graph
    """
    output_graph_def = graph_pb2.GraphDef()
    inputs_to_rename = {}
    for node in input_node_map:
        if node in max_name_value:
            new_node = node_def_pb2.NodeDef()
            new_node.op = "Const"
            new_node_postfix = "/frozen_max_only" if is_max else "/frozen_min_only"
            new_node.name = node + new_node_postfix
            inputs_to_rename[node] = new_node.name + ":0"
            new_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
            new_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
                tensor=tensor_util.make_tensor_proto(float(max_name_value[node]), dtypes.float32, [])))
        else:
            new_node = node_def_pb2.NodeDef()
            new_node.CopyFrom(input_node_map[node])
        output_graph_def.node.extend([new_node])
    for node in output_graph_def.node:
        # O(1) dict membership instead of the original O(n) scan with
        # found/found_index/found_value bookkeeping.  As in the original,
        # only the FIRST matching input of each node is rewired.
        for input_index, input_name in enumerate(node.input):
            if input_name in inputs_to_rename:
                node.input[input_index] = inputs_to_rename[input_name]
                break
    return output_graph_def
def freeze_requantization_range(input_graph_def, max_min_log):
    """
    Freeze requantization range graph transformation
    :param input_graph_def: input graphdef
    :param max_min_log: max_min_log file
    :return: transformed graph
    """
    node_map = parse_input_graph(input_graph_def)
    ranges = parse_requantization_ranges(max_min_log)
    return generate_output_graph_ranges(node_map, ranges)
def freeze_max(input_graph_def, max_min_log):
    """
    Freeze max graph transformation
    :param input_graph_def: input graphdef
    :param max_min_log: max_min_log
    :return: transformed graph
    """
    return generate_output_graph(parse_input_graph(input_graph_def),
                                 parse_max_min_log(max_min_log, True),
                                 True)
def freeze_min(input_graph_def, max_min_log):
    """
    Freeze min graph transformation.
    :param input_graph_def: input graphdef
    :param max_min_log: max_min_log file
    :return: transformed graph
    """
    return generate_output_graph(parse_input_graph(input_graph_def),
                                 parse_max_min_log(max_min_log, False),
                                 False)
| |
# django imports
from django.db import IntegrityError
from django.db.models import Q
from django.db import connection
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
# permissions imports
from permissions.exceptions import Unauthorized
from permissions.models import ObjectPermission
from permissions.models import ObjectPermissionInheritanceBlock
from permissions.models import Permission
from permissions.models import PrincipalRoleRelation
from permissions.models import Role
# Roles ######################################################################
def add_role(principal, role):
    """Adds a global role to a principal.

    Returns True when a new relation was created, False when the principal
    already had the role.

    **Parameters:**

    principal
        The principal (user or group) which gets the role added.

    role
        The role which is assigned.
    """
    kind = "user" if isinstance(principal, User) else "group"
    try:
        PrincipalRoleRelation.objects.get(
            role=role, content_id=None, content_type=None, **{kind: principal})
    except PrincipalRoleRelation.DoesNotExist:
        PrincipalRoleRelation.objects.create(role=role, **{kind: principal})
        return True
    return False
def add_local_role(obj, principal, role):
    """Adds a local role to a principal.

    Returns True when a new relation was created, False when the principal
    already had the role on the object.

    **Parameters:**

    obj
        The object for which the principal gets the role.

    principal
        The principal (user or group) which gets the role.

    role
        The role which is assigned.
    """
    ctype = ContentType.objects.get_for_model(obj)
    kind = "user" if isinstance(principal, User) else "group"
    try:
        PrincipalRoleRelation.objects.get(
            role=role, content_id=obj.id, content_type=ctype, **{kind: principal})
    except PrincipalRoleRelation.DoesNotExist:
        PrincipalRoleRelation.objects.create(role=role, content=obj, **{kind: principal})
        return True
    return False
def remove_role(principal, role):
    """Removes role from passed principal.

    Returns True when a relation was deleted, False when none existed.

    **Parameters:**

    principal
        The principal (user or group) from which the role is removed.

    role
        The role which is removed.
    """
    kind = "user" if isinstance(principal, User) else "group"
    try:
        relation = PrincipalRoleRelation.objects.get(
            role=role, content_id=None, content_type=None, **{kind: principal})
    except PrincipalRoleRelation.DoesNotExist:
        return False
    relation.delete()
    return True
def remove_local_role(obj, principal, role):
    """Removes role from passed object and principle.

    Returns True when a relation was deleted, False when none existed.

    **Parameters:**

    obj
        The object from which the role is removed.

    principal
        The principal (user or group) from which the role is removed.

    role
        The role which is removed.
    """
    ctype = ContentType.objects.get_for_model(obj)
    kind = "user" if isinstance(principal, User) else "group"
    try:
        relation = PrincipalRoleRelation.objects.get(
            role=role, content_id=obj.id, content_type=ctype, **{kind: principal})
    except PrincipalRoleRelation.DoesNotExist:
        return False
    relation.delete()
    return True
def remove_roles(principal):
    """Removes all roles passed principal (user or group).

    Returns True when at least one relation existed and was deleted.

    **Parameters:**

    principal
        The principal (user or group) from which all roles are removed.
    """
    kind = "user" if isinstance(principal, User) else "group"
    relations = PrincipalRoleRelation.objects.filter(
        content_id=None, content_type=None, **{kind: principal})
    if not relations:
        return False
    relations.delete()
    return True
def remove_local_roles(obj, principal):
    """Removes all local roles from passed object and principal (user or
    group).

    Returns True when at least one relation existed and was deleted.

    **Parameters:**

    obj
        The object from which the roles are removed.

    principal
        The principal (user or group) from which the roles are removed.
    """
    ctype = ContentType.objects.get_for_model(obj)
    kind = "user" if isinstance(principal, User) else "group"
    relations = PrincipalRoleRelation.objects.filter(
        content_id=obj.id, content_type=ctype, **{kind: principal})
    if not relations:
        return False
    relations.delete()
    return True
def get_roles(user, obj=None):
    """Returns *all* roles of the passed user.

    This takes direct roles and roles via the user's groups into account.
    If an object is passed local roles will also added. Then all local roles
    from all ancestors and all user's groups are also taken into account.
    This is the method to use if one want to know whether the passed user
    has a role in general (for the passed object).

    **Parameters:**

    user
        The user for which the roles are returned.

    obj
        The object for which local roles will returned.
    """
    roles = []
    groups = user.groups.all()
    groups_ids_str = ", ".join([str(g.id) for g in groups])
    # "''" keeps the SQL "IN (...)" clause syntactically valid when the
    # user belongs to no groups (it can never match a numeric group_id).
    groups_ids_str = groups_ids_str or "''"
    # Global roles for user and the user's groups.
    # NOTE(review): the SQL below is built with %-interpolation.  The
    # interpolated values are ORM-provided integer ids, so injection is not
    # reachable from user-supplied strings here, but parameterized queries
    # would still be safer — confirm before widening the inputs.
    cursor = connection.cursor()
    cursor.execute("""SELECT role_id
                      FROM permissions_principalrolerelation
                      WHERE (user_id=%s OR group_id IN (%s))
                      AND content_id is NULL""" % (user.id, groups_ids_str))
    for row in cursor.fetchall():
        roles.append(get_role(row[0]))
    # Local roles for user and the user's groups and all ancestors of the
    # passed object (walks up via get_parent_for_permissions until the top).
    while obj:
        ctype = ContentType.objects.get_for_model(obj)
        cursor.execute("""SELECT role_id
                          FROM permissions_principalrolerelation
                          WHERE (user_id='%s' OR group_id IN (%s))
                          AND content_id='%s'
                          AND content_type_id='%s'""" % (user.id, groups_ids_str, obj.id, ctype.id))
        for row in cursor.fetchall():
            roles.append(get_role(row[0]))
        try:
            obj = obj.get_parent_for_permissions()
        except AttributeError:
            # Objects without a permission parent terminate the walk.
            obj = None
    return roles
def get_global_roles(principal):
    """Returns *direct* global roles of passed principal (user or group).
    """
    if isinstance(principal, User):
        relations = PrincipalRoleRelation.objects.filter(
            user=principal, content_id=None, content_type=None)
    else:
        # A single Group is normalised to an iterable for group__in.
        groups = (principal,) if isinstance(principal, Group) else principal
        relations = PrincipalRoleRelation.objects.filter(
            group__in=groups, content_id=None, content_type=None)
    return [relation.role for relation in relations]
def get_local_roles(obj, principal):
    """Returns *direct* local roles for passed principal and content object.
    """
    ctype = ContentType.objects.get_for_model(obj)
    kind = "user" if isinstance(principal, User) else "group"
    relations = PrincipalRoleRelation.objects.filter(
        content_id=obj.id, content_type=ctype, **{kind: principal})
    return [relation.role for relation in relations]
# Permissions ################################################################
def check_permission(obj, user, codename, roles=None):
    """Checks whether passed user has passed permission for passed obj.

    Raises Unauthorized when the permission is missing.

    **Parameters:**

    obj
        The object for which the permission should be checked.

    codename
        The permission's codename which should be checked.

    user
        The user for which the permission should be checked.

    roles
        If given these roles will be assigned to the user temporarily before
        the permissions are checked.
    """
    # Bug fix: `roles` was documented but silently dropped — it is now
    # forwarded to has_permission as intended.
    if not has_permission(obj, user, codename, roles):
        raise Unauthorized("User '%s' doesn't have permission '%s' for object '%s' (%s)"
            % (user, codename, obj.slug, obj.__class__.__name__))
def grant_permission(obj, role, permission):
    """Grants passed permission to passed role. Returns True if the permission
    was able to be added, otherwise False.

    **Parameters:**

    obj
        The content object for which the permission should be granted.

    role
        The role for which the permission should be granted.

    permission
        The permission which should be granted. Either a permission
        object or the codename of a permission.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False
    ct = ContentType.objects.get_for_model(obj)
    try:
        ObjectPermission.objects.get(
            role=role, content_type=ct, content_id=obj.id, permission=permission)
    except ObjectPermission.DoesNotExist:
        # Not granted yet -> create the relation.
        ObjectPermission.objects.create(role=role, content=obj, permission=permission)
    return True
def remove_permission(obj, role, permission):
    """Removes passed permission from passed role and object. Returns True if
    the permission has been removed.

    **Parameters:**

    obj
        The content object for which a permission should be removed.

    role
        The role for which a permission should be removed.

    permission
        The permission which should be removed. Either a permission object
        or the codename of a permission.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False
    ct = ContentType.objects.get_for_model(obj)
    try:
        grant = ObjectPermission.objects.get(
            role=role, content_type=ct, content_id=obj.id, permission=permission)
    except ObjectPermission.DoesNotExist:
        return False
    grant.delete()
    return True
def has_permission(obj, user, codename, roles=None):
    """Checks whether the passed user has passed permission for passed object.

    The check walks up the object's permission ancestors until a grant is
    found or inheritance is blocked.  Results are cached on the user object.

    **Parameters:**

    obj
        The object for which the permission should be checked.

    codename
        The permission's codename which should be checked.

    user
        The user for which the permission should be checked.

    roles
        If given these roles will be assigned to the user temporarily before
        the permissions are checked.
    """
    ct = ContentType.objects.get_for_model(obj)
    cache_key = "%s-%s-%s" % (ct, obj.id, codename)
    result = _get_cached_permission(user, cache_key)
    if result is not None:
        return result

    # Superusers always pass (note: this short-circuit is not cached).
    if user.is_superuser:
        return True

    # Bug fix: copy instead of extending the caller's list in place — the
    # original mutated a passed-in `roles` argument.
    roles = [] if roles is None else list(roles)
    if not user.is_anonymous():
        roles.extend(get_roles(user, obj))

    result = False
    while obj is not None:
        p = ObjectPermission.objects.filter(
            content_type=ct, content_id=obj.id, role__in=roles,
            permission__codename=codename).values("id")
        if len(p) > 0:
            result = True
            break
        if not is_inherited(obj, codename):
            result = False
            break
        try:
            obj = obj.get_parent_for_permissions()
            ct = ContentType.objects.get_for_model(obj)
        except AttributeError:
            # No permission parent -> permission is denied.
            result = False
            break

    _cache_permission(user, cache_key, result)
    return result
# Inheritance ################################################################
def add_inheritance_block(obj, permission):
    """Adds an inheritance for the passed permission on the passed obj.

    Returns True on success (or if the block already existed), False when
    the permission is unknown or the insert fails.

    **Parameters:**

    permission
        The permission for which an inheritance block should be added.
        Either a permission object or the codename of a permission.

    obj
        The content object for which an inheritance block should be added.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False
    ct = ContentType.objects.get_for_model(obj)
    try:
        ObjectPermissionInheritanceBlock.objects.get(
            content_type=ct, content_id=obj.id, permission=permission)
    except ObjectPermissionInheritanceBlock.DoesNotExist:
        try:
            ObjectPermissionInheritanceBlock.objects.create(content=obj, permission=permission)
        except IntegrityError:
            return False
    return True
def remove_inheritance_block(obj, permission):
    """Removes a inheritance block for the passed permission from the passed
    object.

    Returns True when a block was removed, otherwise False.

    **Parameters:**

    obj
        The content object for which an inheritance block should be removed.

    permission
        The permission for which an inheritance block should be removed.
        Either a permission object or the codename of a permission.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False
    ct = ContentType.objects.get_for_model(obj)
    try:
        block = ObjectPermissionInheritanceBlock.objects.get(
            content_type=ct, content_id=obj.id, permission=permission)
    except ObjectPermissionInheritanceBlock.DoesNotExist:
        return False
    block.delete()
    return True
def is_inherited(obj, codename):
    """Returns True if the passed permission is inherited for passed object.

    **Parameters:**

    obj
        The content object for which the permission should be checked.

    codename
        The permission which should be checked. Must be the codename of
        the permission.
    """
    ct = ContentType.objects.get_for_model(obj)
    try:
        # An existing block record means inheritance is switched off.
        ObjectPermissionInheritanceBlock.objects.get(
            content_type=ct, content_id=obj.id, permission__codename=codename)
    except ObjectDoesNotExist:
        return True
    return False
def get_group(id):
    """Returns the group with passed id or None.
    """
    matches = Group.objects.filter(pk=id)
    return matches[0] if matches else None
def get_role(id):
    """Returns the role with passed id or None.
    """
    matches = Role.objects.filter(pk=id)
    return matches[0] if matches else None
def get_user(id):
    """Returns the user with passed id or None.
    """
    matches = User.objects.filter(pk=id)
    return matches[0] if matches else None
def has_group(user, group):
    """Returns True if passed user has passed group.

    ``group`` may be a Group instance or a group name.
    """
    resolved = Group.objects.get(name=group) if isinstance(group, str) else group
    return resolved in user.groups.all()
def reset(obj):
    """Resets all permissions and inheritance blocks of passed object.
    """
    ctype = ContentType.objects.get_for_model(obj)
    # Delete inheritance blocks first, then the permissions themselves.
    for model in (ObjectPermissionInheritanceBlock, ObjectPermission):
        model.objects.filter(content_id=obj.id, content_type=ctype).delete()
# Registering ################################################################
def register_permission(name, codename, ctypes=None):
    """Registers a permission to the framework. Returns the permission if the
    registration was successfully, otherwise False.

    **Parameters:**

    name
        The unique name of the permission. This is displayed to the
        customer.

    codename
        The unique codename of the permission. This is used internally to
        identify the permission.

    ctypes
        The content types for which the permission is active. This can be
        used to display only reasonable permissions for an object. These
        must be Django model classes.
    """
    # Bug fix: the original used a shared mutable default argument
    # (``ctypes=[]``); ``None`` as sentinel is equivalent and safe.
    if ctypes is None:
        ctypes = []
    try:
        p = Permission.objects.create(name=name, codename=codename)
        content_types = [ContentType.objects.get_for_model(ctype) for ctype in ctypes]
        if content_types:
            p.content_types = content_types
            p.save()
    except IntegrityError:
        return False
    return p
def unregister_permission(codename):
    """Unregisters a permission from the framework

    Returns True when the permission existed and was deleted.

    **Parameters:**

    codename
        The unique codename of the permission.
    """
    try:
        Permission.objects.get(codename=codename).delete()
    except Permission.DoesNotExist:
        return False
    return True
def register_role(name):
    """Registers a role with passed name to the framework. Returns the new
    role if the registration was successfully, otherwise False.

    **Parameters:**

    name
        The unique role name.
    """
    try:
        return Role.objects.create(name=name)
    except IntegrityError:
        return False
def unregister_role(name):
    """Unregisters the role with passed name.

    Returns True when the role existed and was deleted.

    **Parameters:**

    name
        The unique role name.
    """
    try:
        Role.objects.get(name=name).delete()
    except Role.DoesNotExist:
        return False
    return True
def register_group(name):
    """Registers a group with passed name to the framework. Returns the new
    group if the registration was successfully, otherwise False.

    Actually this creates just a default Django Group.

    **Parameters:**

    name
        The unique group name.
    """
    try:
        return Group.objects.create(name=name)
    except IntegrityError:
        return False
def unregister_group(name):
    """Unregisters the group with passed name. Returns True if the
    unregistration was succesfull otherwise False.

    Actually this deletes just a default Django Group.

    **Parameters:**

    name
        The unique group name.
    """
    try:
        Group.objects.get(name=name).delete()
    except Group.DoesNotExist:
        return False
    return True
def _cache_permission(user, cache_key, data):
"""Stores the passed data on the passed user object.
**Parameters:**
user
The user on which the data is stored.
cache_key
The key under which the data is stored.
data
The data which is stored.
"""
if not getattr(user, "permissions", None):
user.permissions = {}
user.permissions[cache_key] = data
def _get_cached_permission(user, cache_key):
"""Returns the stored data from passed user object for passed cache_key.
**Parameters:**
user
The user from which the data is retrieved.
cache_key
The key under which the data is stored.
"""
permissions = getattr(user, "permissions", None)
if permissions:
return user.permissions.get(cache_key, None)
| |
import collections
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, assert_dask_graph, assert_max_deps
def groupby_internal_repr():
    """Exercise the internal wrappers dask builds around pandas groupby.

    NOTE(review): not collected by pytest (no ``test_`` prefix) — confirm
    whether that is intentional before renaming.
    """
    # Bug fix: 'x' had only 9 values against 10 in 'y', which makes the
    # DataFrame constructor raise ValueError; the missing leading 0 is
    # restored so the columns are the same length.
    pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 6, 7, 8, 9, 10],
                        'y': list('abcbabbcda')})
    ddf = dd.from_pandas(pdf, 3)

    gp = pdf.groupby('y')
    dp = ddf.groupby('y')
    assert isinstance(dp, dd.groupby.DataFrameGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)
    assert isinstance(dp.obj, dd.DataFrame)
    assert_eq(dp.obj, gp.obj)

    gp = pdf.groupby('y')['x']
    dp = ddf.groupby('y')['x']
    assert isinstance(dp, dd.groupby.SeriesGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.Series)
    assert_eq(dp.obj, gp.obj)

    gp = pdf.groupby('y')[['x']]
    dp = ddf.groupby('y')[['x']]
    assert isinstance(dp, dd.groupby.DataFrameGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.DataFrame)
    assert_eq(dp.obj, gp.obj)

    gp = pdf.groupby(pdf.y)['x']
    dp = ddf.groupby(ddf.y)['x']
    assert isinstance(dp, dd.groupby.SeriesGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.Series)
    assert_eq(dp.obj, gp.obj)

    gp = pdf.groupby(pdf.y)[['x']]
    dp = ddf.groupby(ddf.y)[['x']]
    assert isinstance(dp, dd.groupby.DataFrameGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.DataFrame)
    assert_eq(dp.obj, gp.obj)
def groupby_error():
    # NOTE(review): missing the ``test_`` prefix -- pytest never collects
    # this, so the assertions below never run.  It also uses the deprecated
    # ``pandas.util.testing`` assertRaises helpers, and the 'x'/'y' columns
    # have mismatched lengths (9 vs 10) -- confirm intent before enabling.
    """Unknown group keys and unknown selected columns raise KeyError."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7, 8, 9, 10],
                        'y': list('abcbabbcda')})
    ddf = dd.from_pandas(pdf, 3)
    with tm.assertRaises(KeyError):
        ddf.groupby('A')
    with tm.assertRaises(KeyError):
        ddf.groupby(['x', 'A'])
    dp = ddf.groupby('y')
    msg = 'Column not found: '
    with tm.assertRaisesRegexp(KeyError, msg):
        dp['A']
    with tm.assertRaisesRegexp(KeyError, msg):
        dp[['x', 'A']]
def groupby_internal_head():
    # NOTE(review): missing the ``test_`` prefix -- pytest never runs this.
    """_head() should behave like a pandas groupby over the head() rows."""
    pdf = pd.DataFrame({'A': [1, 2] * 10,
                        'B': np.random.randn(20),
                        'C': np.random.randn(20)})
    ddf = dd.from_pandas(pdf, 3)
    # group by column name, by a series, and by a derived series
    assert_eq(ddf.groupby('A')._head().sum(),
              pdf.head().groupby('A').sum())
    assert_eq(ddf.groupby(ddf['A'])._head().sum(),
              pdf.head().groupby(pdf['A']).sum())
    assert_eq(ddf.groupby(ddf['A'] + 1)._head().sum(),
              pdf.head().groupby(pdf['A'] + 1).sum())
def test_full_groupby():
    """groupby + apply on a dask frame matches pandas; bad keys raise."""
    frame = pd.DataFrame(
        {'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
         'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
        index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    dframe = dd.from_pandas(frame, npartitions=3)

    # unknown column names / attributes must raise
    pytest.raises(Exception, lambda: frame.groupby('does_not_exist'))
    pytest.raises(Exception, lambda: frame.groupby('a').does_not_exist)
    assert 'b' in dir(frame.groupby('a'))

    def demean(part):
        # subtract the per-group mean of column b, in place
        part['b'] = part.b - part.b.mean()
        return part

    assert_eq(frame.groupby('a').apply(demean),
              dframe.groupby('a').apply(demean))
def test_groupby_dir():
    """dir() on a groupby exposes only columns that are valid identifiers."""
    data = pd.DataFrame({'a': range(10), 'b c d e': range(10)})
    grouped = dd.from_pandas(data, npartitions=2).groupby('a')
    assert 'a' in dir(grouped)
    assert 'b c d e' not in dir(grouped)
def test_groupby_on_index():
    """Grouping by a column matches grouping the indexed frame by its index."""
    full = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
                        index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    d = dd.from_pandas(full, npartitions=3)
    e = d.set_index('a')
    efull = full.set_index('a')
    assert_eq(d.groupby('a').b.mean(), e.groupby(e.index).b.mean())

    def func(df):
        # demean column b within each group (label-based assignment)
        df.loc[:, 'b'] = df.b - df.b.mean()
        return df

    # column-groupby on d should equal index-groupby on the reindexed frames
    assert_eq(d.groupby('a').apply(func).set_index('a'),
              e.groupby(e.index).apply(func))
    assert_eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
    assert_eq(d.groupby('a').apply(func).set_index('a'),
              full.groupby('a').apply(func).set_index('a'))
    assert_eq(efull.groupby(efull.index).apply(func),
              e.groupby(e.index).apply(func))
def test_groupby_multilevel_getitem():
    """Column selection on single- and multi-key groupbys mirrors pandas."""
    pdf = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
                        'b': [1, 2, 1, 4, 2, 1],
                        'c': [1, 3, 2, 1, 1, 2],
                        'd': [1, 2, 1, 1, 2, 2]})
    dddf = dd.from_pandas(pdf, 2)
    cases = [
        (dddf.groupby('a')['b'], pdf.groupby('a')['b']),
        (dddf.groupby(['a', 'b']), pdf.groupby(['a', 'b'])),
        (dddf.groupby(['a', 'b'])['c'], pdf.groupby(['a', 'b'])['c']),
        (dddf.groupby('a')[['b', 'c']], pdf.groupby('a')[['b', 'c']]),
        (dddf.groupby('a')[['b']], pdf.groupby('a')[['b']]),
        (dddf.groupby(['a', 'b', 'c']), pdf.groupby(['a', 'b', 'c'])),
    ]
    for dask_grouped, pandas_grouped in cases:
        assert isinstance(dask_grouped, dd.groupby._GroupBy)
        assert isinstance(pandas_grouped, pd.core.groupby.GroupBy)
        # exact-match reductions
        for method in ('sum', 'min', 'max', 'count'):
            assert_eq(getattr(dask_grouped, method)(),
                      getattr(pandas_grouped, method)())
        # mean: dask always produces floats, so cast the pandas result
        assert_eq(dask_grouped.mean(), pandas_grouped.mean().astype(float))
def test_groupby_multilevel_agg():
    """mean() with single- and multi-column keys matches pandas."""
    frame = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
                          'b': [1, 2, 1, 4, 2, 1],
                          'c': [1, 3, 2, 1, 1, 2],
                          'd': [1, 2, 1, 1, 2, 2]})
    dframe = dd.from_pandas(frame, 2)
    for keys in (['a'], ['a', 'c']):
        assert_eq(dframe.groupby(keys).mean(), frame.groupby(keys).mean())
def test_groupby_get_group():
    """get_group on frames and series matches pandas for several key types."""
    # hand-built three-partition graph; divisions are [0, 4, 9, 9]
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 2, 6], 'b': [3, 3, 1]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
                                  index=[9, 9, 9])}
    meta = dsk[('x', 0)]
    d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
    full = d.compute()
    # group by column name, by a series, and by a derived series
    for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
                         (d.b + 1, full.b + 1)]:
        ddgrouped = d.groupby(ddkey)
        pdgrouped = full.groupby(pdkey)
        # DataFrame
        assert_eq(ddgrouped.get_group(2), pdgrouped.get_group(2))
        assert_eq(ddgrouped.get_group(3), pdgrouped.get_group(3))
        # Series
        assert_eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))
        assert_eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))
def test_dataframe_groupby_nunique():
    """Series nunique() within a groupby matches pandas."""
    keys = list('aaabbccccdddeee')
    values = np.random.randn(len(keys))
    pframe = pd.DataFrame({'strings': keys, 'data': values})
    dframe = dd.from_pandas(pframe, npartitions=3)
    expected = pframe.groupby('strings')['data'].nunique()
    assert_eq(dframe.groupby('strings')['data'].nunique(), expected)
def test_dataframe_groupby_nunique_across_group_same_value():
    """nunique() stays correct when values repeat across partitions."""
    keys = list('aaabbccccdddeee')
    values = [int(c) for c in '123111223323412']
    pframe = pd.DataFrame({'strings': keys, 'data': values})
    dframe = dd.from_pandas(pframe, npartitions=3)
    expected = pframe.groupby('strings')['data'].nunique()
    assert_eq(dframe.groupby('strings')['data'].nunique(), expected)
def test_series_groupby_propagates_names():
    """apply() keeps the series name when meta supplies it."""
    pdf = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    ddf = dd.from_pandas(pdf, 2)

    def total_y(frame):
        return frame['y'].sum()

    result = ddf.groupby('x').apply(total_y, meta=('y', 'i8'))
    expected = pdf.groupby('x').apply(total_y)
    expected.name = 'y'
    assert_eq(result, expected)
def test_series_groupby():
    """A series grouped by itself (and by a shifted copy) matches pandas.

    BUG FIX: the second pair in the comparison list was ordered
    (pandas, dask) while the first was (dask, pandas).  ``assert_eq``
    tolerated the swap, but the pairs are now ordered consistently so
    ``dg`` is always the dask object and ``pdg`` the pandas one.
    """
    s = pd.Series([1, 2, 2, 1, 1])
    pd_group = s.groupby(s)
    ss = dd.from_pandas(s, npartitions=2)
    dask_group = ss.groupby(ss)
    pd_group2 = s.groupby(s + 1)
    dask_group2 = ss.groupby(ss + 1)
    for dg, pdg in [(dask_group, pd_group), (dask_group2, pd_group2)]:
        assert_eq(dg.count(), pdg.count())
        assert_eq(dg.sum(), pdg.sum())
        assert_eq(dg.min(), pdg.min())
        assert_eq(dg.max(), pdg.max())
        assert_eq(dg.size(), pdg.size())
def test_series_groupby_errors():
    """dask raises the same errors as pandas for invalid series groupbys."""
    s = pd.Series([1, 2, 2, 1, 1])
    ss = dd.from_pandas(s, npartitions=2)
    # grouper list of the wrong length
    msg = "Grouper for '1' not 1-dimensional"
    with tm.assertRaisesRegexp(ValueError, msg):
        s.groupby([1, 2])  # pandas
    with tm.assertRaisesRegexp(ValueError, msg):
        ss.groupby([1, 2])  # dask should raise the same error
    msg = "Grouper for '2' not 1-dimensional"
    with tm.assertRaisesRegexp(ValueError, msg):
        s.groupby([2])  # pandas
    with tm.assertRaisesRegexp(ValueError, msg):
        ss.groupby([2])  # dask should raise the same error
    # empty key list
    msg = "No group keys passed!"
    with tm.assertRaisesRegexp(ValueError, msg):
        s.groupby([])  # pandas
    with tm.assertRaisesRegexp(ValueError, msg):
        ss.groupby([])  # dask should raise the same error
    # grouping by a series with different partitioning is unsupported
    sss = dd.from_pandas(s, npartitions=3)
    pytest.raises(NotImplementedError, lambda: ss.groupby(sss))
    # unknown string key
    with tm.assertRaises(KeyError):
        s.groupby('x')  # pandas
    with tm.assertRaises(KeyError):
        ss.groupby('x')  # dask should raise the same error
def test_groupby_index_array():
    """Grouping a series by a derived index attribute (month) works."""
    frame = tm.makeTimeDataFrame()
    dframe = dd.from_pandas(frame, npartitions=2)
    expected = frame.A.groupby(frame.index.month).nunique()
    actual = dframe.A.groupby(dframe.index.month).nunique()
    assert_eq(expected, actual, check_names=False)
def test_groupby_set_index():
    """as_index=False is not supported by dask groupby and must raise."""
    frame = tm.makeTimeDataFrame()
    dframe = dd.from_pandas(frame, npartitions=2)
    with pytest.raises(NotImplementedError):
        dframe.groupby(frame.index.month, as_index=False)
def test_split_apply_combine_on_series():
    """Exhaustive check that groupby reductions on dask frames and series
    match pandas for column-name, series, derived-series, and boolean
    groupers, and that the expected graph-key names are produced."""
    pdf = pd.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7],
                        'b': [4, 2, 7, 3, 3, 1, 1, 1, 2]},
                       index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    ddf = dd.from_pandas(pdf, npartitions=3)
    # group by column name, by a series, and by a derived series
    for ddkey, pdkey in [('b', 'b'), (ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:
        assert_eq(ddf.groupby(ddkey).a.min(), pdf.groupby(pdkey).a.min())
        assert_eq(ddf.groupby(ddkey).a.max(), pdf.groupby(pdkey).a.max())
        assert_eq(ddf.groupby(ddkey).a.count(), pdf.groupby(pdkey).a.count())
        assert_eq(ddf.groupby(ddkey).a.mean(), pdf.groupby(pdkey).a.mean())
        assert_eq(ddf.groupby(ddkey).a.nunique(), pdf.groupby(pdkey).a.nunique())
        assert_eq(ddf.groupby(ddkey).a.size(), pdf.groupby(pdkey).a.size())
        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddkey).a.var(ddof),
                      pdf.groupby(pdkey).a.var(ddof))
            assert_eq(ddf.groupby(ddkey).a.std(ddof),
                      pdf.groupby(pdkey).a.std(ddof))
        assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())
        assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())
        assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())
        assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())
        assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean())
        assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())
        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddkey).var(ddof),
                      pdf.groupby(pdkey).var(ddof), check_dtype=False)
            assert_eq(ddf.groupby(ddkey).std(ddof),
                      pdf.groupby(pdkey).std(ddof), check_dtype=False)
    # pre-selected series grouped by another series
    for ddkey, pdkey in [(ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:
        assert_eq(ddf.a.groupby(ddkey).sum(), pdf.a.groupby(pdkey).sum(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).max(), pdf.a.groupby(pdkey).max(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).count(), pdf.a.groupby(pdkey).count(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).mean(), pdf.a.groupby(pdkey).mean(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).nunique(), pdf.a.groupby(pdkey).nunique(), check_names=False)
        for ddof in [0, 1, 2]:
            assert_eq(ddf.a.groupby(ddkey).var(ddof),
                      pdf.a.groupby(pdkey).var(ddof))
            assert_eq(ddf.a.groupby(ddkey).std(ddof),
                      pdf.a.groupby(pdkey).std(ddof))
    # boolean groupers at several thresholds
    for i in [0, 4, 7]:
        assert_eq(ddf.groupby(ddf.b > i).a.sum(), pdf.groupby(pdf.b > i).a.sum())
        assert_eq(ddf.groupby(ddf.b > i).a.min(), pdf.groupby(pdf.b > i).a.min())
        assert_eq(ddf.groupby(ddf.b > i).a.max(), pdf.groupby(pdf.b > i).a.max())
        assert_eq(ddf.groupby(ddf.b > i).a.count(), pdf.groupby(pdf.b > i).a.count())
        assert_eq(ddf.groupby(ddf.b > i).a.mean(), pdf.groupby(pdf.b > i).a.mean())
        assert_eq(ddf.groupby(ddf.b > i).a.nunique(), pdf.groupby(pdf.b > i).a.nunique())
        assert_eq(ddf.groupby(ddf.b > i).a.size(), pdf.groupby(pdf.b > i).a.size())
        assert_eq(ddf.groupby(ddf.a > i).b.sum(), pdf.groupby(pdf.a > i).b.sum())
        assert_eq(ddf.groupby(ddf.a > i).b.min(), pdf.groupby(pdf.a > i).b.min())
        assert_eq(ddf.groupby(ddf.a > i).b.max(), pdf.groupby(pdf.a > i).b.max())
        assert_eq(ddf.groupby(ddf.a > i).b.count(), pdf.groupby(pdf.a > i).b.count())
        assert_eq(ddf.groupby(ddf.a > i).b.mean(), pdf.groupby(pdf.a > i).b.mean())
        assert_eq(ddf.groupby(ddf.a > i).b.nunique(), pdf.groupby(pdf.a > i).b.nunique())
        assert_eq(ddf.groupby(ddf.b > i).b.size(), pdf.groupby(pdf.b > i).b.size())
        assert_eq(ddf.groupby(ddf.b > i).sum(), pdf.groupby(pdf.b > i).sum())
        assert_eq(ddf.groupby(ddf.b > i).min(), pdf.groupby(pdf.b > i).min())
        assert_eq(ddf.groupby(ddf.b > i).max(), pdf.groupby(pdf.b > i).max())
        assert_eq(ddf.groupby(ddf.b > i).count(), pdf.groupby(pdf.b > i).count())
        assert_eq(ddf.groupby(ddf.b > i).mean(), pdf.groupby(pdf.b > i).mean())
        assert_eq(ddf.groupby(ddf.b > i).size(), pdf.groupby(pdf.b > i).size())
        assert_eq(ddf.groupby(ddf.a > i).sum(), pdf.groupby(pdf.a > i).sum())
        assert_eq(ddf.groupby(ddf.a > i).min(), pdf.groupby(pdf.a > i).min())
        assert_eq(ddf.groupby(ddf.a > i).max(), pdf.groupby(pdf.a > i).max())
        assert_eq(ddf.groupby(ddf.a > i).count(), pdf.groupby(pdf.a > i).count())
        assert_eq(ddf.groupby(ddf.a > i).mean(), pdf.groupby(pdf.a > i).mean())
        assert_eq(ddf.groupby(ddf.a > i).size(), pdf.groupby(pdf.a > i).size())
        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddf.b > i).std(ddof),
                      pdf.groupby(pdf.b > i).std(ddof))
    # grouping by column 'a' and by variants derived from it
    for ddkey, pdkey in [('a', 'a'), (ddf.a, pdf.a),
                         (ddf.a + 1, pdf.a + 1), (ddf.a > 3, pdf.a > 3)]:
        assert_eq(ddf.groupby(ddkey).b.sum(), pdf.groupby(pdkey).b.sum())
        assert_eq(ddf.groupby(ddkey).b.min(), pdf.groupby(pdkey).b.min())
        assert_eq(ddf.groupby(ddkey).b.max(), pdf.groupby(pdkey).b.max())
        assert_eq(ddf.groupby(ddkey).b.count(), pdf.groupby(pdkey).b.count())
        assert_eq(ddf.groupby(ddkey).b.mean(), pdf.groupby(pdkey).b.mean())
        assert_eq(ddf.groupby(ddkey).b.nunique(), pdf.groupby(pdkey).b.nunique())
        assert_eq(ddf.groupby(ddkey).b.size(), pdf.groupby(pdkey).b.size())
        assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())
        assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())
        assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())
        assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())
        assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean().astype(float))
        assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())
        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddkey).b.std(ddof),
                      pdf.groupby(pdkey).b.std(ddof))
    # building the same expression twice yields identical (deterministic) keys
    assert (sorted(ddf.groupby('b').a.sum().dask) ==
            sorted(ddf.groupby('b').a.sum().dask))
    assert (sorted(ddf.groupby(ddf.a > 3).b.mean().dask) ==
            sorted(ddf.groupby(ddf.a > 3).b.mean().dask))
    # test raises with incorrect key
    pytest.raises(KeyError, lambda: ddf.groupby('x'))
    pytest.raises(KeyError, lambda: ddf.groupby(['a', 'x']))
    pytest.raises(KeyError, lambda: ddf.groupby('a')['x'])
    pytest.raises(KeyError, lambda: ddf.groupby('a')['b', 'x'])
    pytest.raises(KeyError, lambda: ddf.groupby('a')[['b', 'x']])
    # test graph node labels
    assert_dask_graph(ddf.groupby('b').a.sum(), 'series-groupby-sum')
    assert_dask_graph(ddf.groupby('b').a.min(), 'series-groupby-min')
    assert_dask_graph(ddf.groupby('b').a.max(), 'series-groupby-max')
    assert_dask_graph(ddf.groupby('b').a.count(), 'series-groupby-count')
    assert_dask_graph(ddf.groupby('b').a.var(), 'series-groupby-var')
    # mean is composed from sum and count operations
    assert_dask_graph(ddf.groupby('b').a.mean(), 'series-groupby-sum')
    assert_dask_graph(ddf.groupby('b').a.mean(), 'series-groupby-count')
    assert_dask_graph(ddf.groupby('b').a.nunique(), 'series-groupby-nunique')
    assert_dask_graph(ddf.groupby('b').a.size(), 'series-groupby-size')
    assert_dask_graph(ddf.groupby('b').sum(), 'dataframe-groupby-sum')
    assert_dask_graph(ddf.groupby('b').min(), 'dataframe-groupby-min')
    assert_dask_graph(ddf.groupby('b').max(), 'dataframe-groupby-max')
    assert_dask_graph(ddf.groupby('b').count(), 'dataframe-groupby-count')
    # mean is composed from sum and count operations
    assert_dask_graph(ddf.groupby('b').mean(), 'dataframe-groupby-sum')
    assert_dask_graph(ddf.groupby('b').mean(), 'dataframe-groupby-count')
    assert_dask_graph(ddf.groupby('b').size(), 'dataframe-groupby-size')
def test_groupby_reduction_split_every():
    """split_every changes the reduction tree (and therefore the key
    names) but not the computed result, for dataframe and series groupbys."""
    pdf = pd.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7] * 100,
                        'b': [4, 2, 7, 3, 3, 1, 1, 1, 2] * 100})
    ddf = dd.from_pandas(pdf, npartitions=15)

    def call(g, m, **kwargs):
        # invoke the reduction named ``m`` on groupby object ``g``
        return getattr(g, m)(**kwargs)

    # DataFrame
    for m in ['sum', 'min', 'max', 'count', 'mean', 'size', 'var', 'std']:
        res = call(ddf.groupby('b'), m, split_every=2)
        sol = call(pdf.groupby('b'), m)
        assert_eq(res, sol)
        # a different split_every must produce a different graph name
        assert call(ddf.groupby('b'), m)._name != res._name
    res = call(ddf.groupby('b'), 'var', split_every=2, ddof=2)
    sol = call(pdf.groupby('b'), 'var', ddof=2)
    assert_eq(res, sol)
    assert call(ddf.groupby('b'), 'var', ddof=2)._name != res._name
    # Series, post select
    for m in ['sum', 'min', 'max', 'count', 'mean', 'nunique', 'size',
              'var', 'std']:
        res = call(ddf.groupby('b').a, m, split_every=2)
        sol = call(pdf.groupby('b').a, m)
        assert_eq(res, sol)
        assert call(ddf.groupby('b').a, m)._name != res._name
    res = call(ddf.groupby('b').a, 'var', split_every=2, ddof=2)
    sol = call(pdf.groupby('b').a, 'var', ddof=2)
    assert_eq(res, sol)
    assert call(ddf.groupby('b').a, 'var', ddof=2)._name != res._name
    # Series, pre select
    for m in ['sum', 'min', 'max', 'count', 'mean', 'nunique', 'size',
              'var', 'std']:
        res = call(ddf.a.groupby(ddf.b), m, split_every=2)
        sol = call(pdf.a.groupby(pdf.b), m)
        # There's a bug in pandas 0.18.0 with `pdf.a.groupby(pdf.b).count()`
        # not forwarding the series name. Skip name checks here for now.
        assert_eq(res, sol, check_names=False)
        assert call(ddf.a.groupby(ddf.b), m)._name != res._name
    res = call(ddf.a.groupby(ddf.b), 'var', split_every=2, ddof=2)
    sol = call(pdf.a.groupby(pdf.b), 'var', ddof=2)
    assert_eq(res, sol)
    assert call(ddf.a.groupby(ddf.b), 'var', ddof=2)._name != res._name
def test_apply_shuffle():
    """groupby(...).apply matches pandas for column, series, and derived
    groupers, on full frames, series selections, and column slices."""
    pdf = pd.DataFrame({'A': [1, 2, 3, 4] * 5,
                        'B': np.random.randn(20),
                        'C': np.random.randn(20),
                        'D': np.random.randn(20)})
    ddf = dd.from_pandas(pdf, 3)
    # DataFrameGroupBy
    assert_eq(ddf.groupby('A').apply(lambda x: x.sum()),
              pdf.groupby('A').apply(lambda x: x.sum()))
    assert_eq(ddf.groupby(ddf['A']).apply(lambda x: x.sum()),
              pdf.groupby(pdf['A']).apply(lambda x: x.sum()))
    assert_eq(ddf.groupby(ddf['A'] + 1).apply(lambda x: x.sum()),
              pdf.groupby(pdf['A'] + 1).apply(lambda x: x.sum()))
    # SeriesGroupBy
    assert_eq(ddf.groupby('A')['B'].apply(lambda x: x.sum()),
              pdf.groupby('A')['B'].apply(lambda x: x.sum()))
    assert_eq(ddf.groupby(ddf['A'])['B'].apply(lambda x: x.sum()),
              pdf.groupby(pdf['A'])['B'].apply(lambda x: x.sum()))
    assert_eq(ddf.groupby(ddf['A'] + 1)['B'].apply(lambda x: x.sum()),
              pdf.groupby(pdf['A'] + 1)['B'].apply(lambda x: x.sum()))
    # DataFrameGroupBy with column slice
    assert_eq(ddf.groupby('A')[['B', 'C']].apply(lambda x: x.sum()),
              pdf.groupby('A')[['B', 'C']].apply(lambda x: x.sum()))
    assert_eq(ddf.groupby(ddf['A'])[['B', 'C']].apply(lambda x: x.sum()),
              pdf.groupby(pdf['A'])[['B', 'C']].apply(lambda x: x.sum()))
    assert_eq(ddf.groupby(ddf['A'] + 1)[['B', 'C']].apply(lambda x: x.sum()),
              pdf.groupby(pdf['A'] + 1)[['B', 'C']].apply(lambda x: x.sum()))
def test_numeric_column_names():
    """All-numeric column labels must not break groupby.

    pandas quirk: df.groupby(0)[df.columns] fails when every column label
    is a number, so dask casts column iterables to list internally; this
    exercises that path.
    """
    frame = pd.DataFrame({0: [0, 1, 0, 1],
                          1: [1, 2, 3, 4]})
    dframe = dd.from_pandas(frame, npartitions=2)

    def identity(part):
        return part

    assert_eq(dframe.groupby(0).sum(), frame.groupby(0).sum())
    assert_eq(dframe.groupby(0).apply(identity),
              frame.groupby(0).apply(identity))
def test_groupby_apply_tasks():
    """groupby-apply under the task-based shuffle avoids partd (disk) keys."""
    df = pd.util.testing.makeTimeDataFrame()
    # coarsen to float "bins" so groups span partitions
    df['A'] = df.A // 0.1
    df['B'] = df.B // 0.1
    ddf = dd.from_pandas(df, npartitions=10)
    with dask.set_options(shuffle='tasks'):
        # group by a column name and by a series
        for ind in [lambda x: 'A', lambda x: x.A]:
            a = df.groupby(ind(df)).apply(len)
            b = ddf.groupby(ind(ddf)).apply(len)
            assert_eq(a, b.compute())
            # no disk-based (partd) shuffle keys should appear in the graph
            assert not any('partd' in k[0] for k in b.dask)
            a = df.groupby(ind(df)).B.apply(len)
            b = ddf.groupby(ind(ddf)).B.apply(len)
            assert_eq(a, b.compute())
            assert not any('partd' in k[0] for k in b.dask)
def test_groupby_multiprocessing():
    """groupby-apply on object-dtype columns works with the multiprocessing get."""
    from dask.multiprocessing import get
    frame = pd.DataFrame({'A': [1, 2, 3, 4, 5],
                          'B': ['1', '1', 'a', 'a', 'a']})
    dframe = dd.from_pandas(frame, npartitions=3)

    def identity(part):
        return part

    with dask.set_options(get=get):
        assert_eq(dframe.groupby('B').apply(identity),
                  frame.groupby('B').apply(identity))
def test_groupby_normalize_index():
    """Groupby keys are normalized to plain column names where possible."""
    full = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
                        index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    d = dd.from_pandas(full, npartitions=3)
    # a column name or a direct column selection normalizes to the name
    assert d.groupby('a').index == 'a'
    assert d.groupby(d['a']).index == 'a'
    # a derived series cannot be normalized; it is kept as a series key
    assert d.groupby(d['a'] > 2).index._name == (d['a'] > 2)._name
    # lists of names / columns normalize element-wise
    assert d.groupby(['a', 'b']).index == ['a', 'b']
    assert d.groupby([d['a'], d['b']]).index == ['a', 'b']
    assert d.groupby([d['a'], 'b']).index == ['a', 'b']
@pytest.mark.parametrize('spec', [
    # NOTE(review): the inner dict below repeats the key 'a'; the first
    # entry ('a': 'max') is silently discarded by the dict literal --
    # confirm whether e.g. {'a': 'max', 'b': 'min'} was intended.
    {'b': {'c': 'mean'}, 'c': {'a': 'max', 'a': 'min'}},
    {'b': 'mean', 'c': ['min', 'max']},
    {'b': np.sum, 'c': ['min', np.max, np.std, np.var]},
    ['sum', 'mean', 'min', 'max', 'count', 'size', 'std', 'var'],
    'var',
])
@pytest.mark.parametrize('split_every', [False, None])
@pytest.mark.parametrize('grouper', [
    lambda df: 'a',
    lambda df: ['a', 'd'],
    lambda df: [df['a'], df['d']],
    lambda df: df['a'],
    lambda df: df['a'] > 2,
])
def test_aggregate__examples(spec, split_every, grouper):
    """agg() with a variety of spec shapes and groupers matches pandas."""
    pdf = pd.DataFrame({'a': [1, 2, 3, 1, 1, 2, 4, 3, 7] * 10,
                        'b': [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,
                        'c': [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,
                        'd': [3, 2, 1, 3, 2, 1, 2, 6, 4] * 10},
                       columns=['c', 'b', 'a', 'd'])
    ddf = dd.from_pandas(pdf, npartitions=10)
    assert_eq(pdf.groupby(grouper(pdf)).agg(spec),
              ddf.groupby(grouper(ddf)).agg(spec, split_every=split_every))
@pytest.mark.parametrize('spec', [
    {'b': 'sum', 'c': 'min', 'd': 'max'},
    ['sum'],
    ['sum', 'mean', 'min', 'max', 'count', 'size', 'std', 'var'],
    'sum', 'size',
])
@pytest.mark.parametrize('split_every', [False, None])
@pytest.mark.parametrize('grouper', [
    # legacy call-style xfail marker (pre-pytest-3 API); newer pytest
    # spells this as pytest.param(..., marks=pytest.mark.xfail(...))
    pytest.mark.xfail(reason="Grouper for '{0}' not 1-dimensional")(lambda df: [df['a'], df['d']]),
    lambda df: df['a'],
    lambda df: df['a'] > 2,
])
def test_series_aggregate__examples(spec, split_every, grouper):
    """Series groupby agg() matches pandas for several spec shapes."""
    pdf = pd.DataFrame({'a': [1, 2, 3, 1, 1, 2, 4, 3, 7] * 10,
                        'b': [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,
                        'c': [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,
                        'd': [3, 2, 1, 3, 2, 1, 2, 6, 4] * 10},
                       columns=['c', 'b', 'a', 'd'])
    ps = pdf['c']
    ddf = dd.from_pandas(pdf, npartitions=10)
    ds = ddf['c']
    assert_eq(ps.groupby(grouper(pdf)).agg(spec),
              ds.groupby(grouper(ddf)).agg(spec, split_every=split_every))
@pytest.mark.parametrize('spec', [
    'sum', 'min', 'max', 'count', 'size',
    'std',  # NOTE: for std the result is not recast to the original dtype
    pytest.mark.xfail(reason="pandas recast to original type")('var'),
    pytest.mark.xfail(reason="pandas recast to original type")('mean')
])
def test_aggregate__single_element_groups(spec):
    """agg() is correct when every (a, d) group contains a single row."""
    pdf = pd.DataFrame({'a': [1, 1, 3, 3],
                        'b': [4, 4, 16, 16],
                        'c': [1, 1, 4, 4],
                        'd': [1, 1, 3, 3]},
                       columns=['c', 'b', 'a', 'd'])
    ddf = dd.from_pandas(pdf, npartitions=3)
    assert_eq(pdf.groupby(['a', 'd']).agg(spec),
              ddf.groupby(['a', 'd']).agg(spec))
def test_aggregate_build_agg_args__reuse_of_intermediates():
    """Aggregate reuses intermediates. For example, with sum, count, and mean
    the sums and counts are only calculated once across the graph and reused to
    compute the mean.
    """
    from dask.dataframe.groupby import _build_agg_args
    no_mean_spec = [
        ('foo', 'sum', 'input'),
        ('bar', 'count', 'input'),
    ]
    with_mean_spec = [
        ('foo', 'sum', 'input'),
        ('bar', 'count', 'input'),
        ('baz', 'mean', 'input'),
    ]
    no_mean_chunks, no_mean_aggs, no_mean_finalizers = _build_agg_args(no_mean_spec)
    with_mean_chunks, with_mean_aggs, with_mean_finalizers = _build_agg_args(with_mean_spec)
    # adding 'mean' must not add chunk/agg steps: sum and count are reused
    assert len(no_mean_chunks) == len(with_mean_chunks)
    assert len(no_mean_aggs) == len(with_mean_aggs)
    # only the finalizer list grows, one entry per requested output
    assert len(no_mean_finalizers) == len(no_mean_spec)
    assert len(with_mean_finalizers) == len(with_mean_spec)
def test_aggregate__dask():
    """agg() produces deterministic key names and a spec-independent graph
    size when split_every is fixed."""
    dask_holder = collections.namedtuple('dask_holder', ['dask'])
    # wrap just the aggregate-* tasks of a result's graph
    get_agg_dask = lambda obj: dask_holder({
        k: v for (k, v) in obj.dask.items() if k[0].startswith('aggregate')
    })
    specs = [
        # NOTE(review): the inner dict below repeats the key 'a'; the first
        # entry is silently discarded by the dict literal -- confirm intent.
        {'b': {'c': 'mean'}, 'c': {'a': 'max', 'a': 'min'}},
        {'b': 'mean', 'c': ['min', 'max']},
        ['sum', 'mean', 'min', 'max', 'count', 'size', 'std', 'var'],
        'sum', 'mean', 'min', 'max', 'count', 'std', 'var',
        # NOTE: the 'size' spec is special since it bypasses aggregate
        #'size'
    ]
    pdf = pd.DataFrame({'a': [1, 2, 3, 1, 1, 2, 4, 3, 7] * 100,
                        'b': [4, 2, 7, 3, 3, 1, 1, 1, 2] * 100,
                        'c': [0, 1, 2, 3, 4, 5, 6, 7, 8] * 100,
                        'd': [3, 2, 1, 3, 2, 1, 2, 6, 4] * 100},
                       columns=['c', 'b', 'a', 'd'])
    ddf = dd.from_pandas(pdf, npartitions=100)
    for spec in specs:
        result1 = ddf.groupby(['a', 'b']).agg(spec, split_every=2)
        result2 = ddf.groupby(['a', 'b']).agg(spec, split_every=2)
        agg_dask1 = get_agg_dask(result1)
        agg_dask2 = get_agg_dask(result2)
        # finalize tasks carry the meta object, which cannot be compared
        # with ==, so exclude them from the determinism comparison below
        core_agg_dask1 = {k: v for (k, v) in agg_dask1.dask.items()
                          if not k[0].startswith('aggregate-finalize')}
        core_agg_dask2 = {k: v for (k, v) in agg_dask2.dask.items()
                          if not k[0].startswith('aggregate-finalize')}
        # check that the number of partitions used is fixed by split_every
        assert_max_deps(agg_dask1, 2)
        assert_max_deps(agg_dask2, 2)
        # check for deterministic key names
        assert core_agg_dask1 == core_agg_dask2
        # the length of the dask does not depend on the passed spec
        for other_spec in specs:
            other = ddf.groupby(['a', 'b']).agg(other_spec, split_every=2)
            assert len(other.dask) == len(result1.dask)
            assert len(other.dask) == len(result2.dask)
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from trove.common import exception
from trove.common import pagination
from trove.common import wsgi
from trove.common.utils import correct_id_with_req
from trove.extensions.mysql.common import populate_validated_databases
from trove.extensions.mysql.common import populate_users
from trove.extensions.mysql.common import unquote_user_host
from trove.extensions.mysql import models
from trove.extensions.mysql import views
from trove.guestagent.db import models as guest_models
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
import trove.common.apischema as apischema
LOG = logging.getLogger(__name__)
class RootController(wsgi.Controller):
    """Controller exposing root-enablement operations for an instance."""

    def index(self, req, tenant_id, instance_id):
        """Report whether the root user is enabled on the given instance."""
        LOG.info(_("Getting root enabled for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        enabled = models.Root.load(context, instance_id)
        view = views.RootEnabledView(enabled)
        return wsgi.Result(view.data(), 200)

    def create(self, req, tenant_id, instance_id):
        """Enable the root user for the db instance."""
        LOG.info(_("Enabling root for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        # record which user requested root enablement
        enabled_root = models.Root.create(context, instance_id,
                                          context.user)
        return wsgi.Result(views.RootCreatedView(enabled_root).data(), 200)
class UserController(wsgi.Controller):
    """Controller for per-instance database user management."""
    schemas = apischema.user

    @classmethod
    def get_schema(cls, action, body):
        """Return the validation schema for ``action``.

        For 'update_all' the request body's single top-level key (e.g.
        'users') selects which sub-schema applies.
        """
        action_schema = super(UserController, cls).get_schema(action, body)
        if 'update_all' == action:
            # BUG FIX: body.keys()[0] fails on Python 3 because dict views
            # are not indexable; next(iter(body)) is equivalent on both
            # Python 2 and 3.
            update_type = next(iter(body))
            action_schema = action_schema.get(update_type, {})
        return action_schema

    def index(self, req, tenant_id, instance_id):
        """Return all users."""
        LOG.info(_("Listing users for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        users, next_marker = models.Users.load(context, instance_id)
        view = views.UsersView(users)
        paged = pagination.SimplePaginatedDataView(req.url, 'users', view,
                                                   next_marker)
        return wsgi.Result(paged.data(), 200)

    def create(self, req, body, tenant_id, instance_id):
        """Creates a set of users."""
        LOG.info(_("Creating users for instance '%s'") % instance_id)
        # mask_password keeps credentials out of the logs
        LOG.info(logging.mask_password(_("req : '%s'\n\n") % req))
        LOG.info(logging.mask_password(_("body : '%s'\n\n") % body))
        context = req.environ[wsgi.CONTEXT_KEY]
        users = body['users']
        try:
            model_users = populate_users(users)
            models.User.create(context, instance_id, model_users)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        return wsgi.Result(None, 202)

    def delete(self, req, tenant_id, instance_id, id):
        """Delete one user; ``id`` is a (possibly quoted) 'name@host'."""
        LOG.info(_("Deleting user for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        id = correct_id_with_req(id, req)
        username, host = unquote_user_host(id)
        user = None
        try:
            user = guest_models.MySQLUser()
            user.name = username
            user.host = host
            # verify the user actually exists before issuing the delete
            found_user = models.User.load(context, instance_id, username,
                                          host)
            if not found_user:
                user = None
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not user:
            raise exception.UserNotFound(uuid=id)
        models.User.delete(context, instance_id, user.serialize())
        return wsgi.Result(None, 202)

    def show(self, req, tenant_id, instance_id, id):
        """Return a single user."""
        LOG.info(_("Showing a user for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        id = correct_id_with_req(id, req)
        username, host = unquote_user_host(id)
        user = None
        try:
            user = models.User.load(context, instance_id, username, host)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not user:
            raise exception.UserNotFound(uuid=id)
        view = views.UserView(user)
        return wsgi.Result(view.data(), 200)

    def update(self, req, body, tenant_id, instance_id, id):
        """Change attributes (name/host/password) for one user."""
        LOG.info(_("Updating user attributes for instance '%s'") % instance_id)
        LOG.info(logging.mask_password(_("req : '%s'\n\n") % req))
        context = req.environ[wsgi.CONTEXT_KEY]
        id = correct_id_with_req(id, req)
        username, hostname = unquote_user_host(id)
        user = None
        user_attrs = body['user']
        try:
            user = models.User.load(context, instance_id, username, hostname)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not user:
            raise exception.UserNotFound(uuid=id)
        try:
            models.User.update_attributes(context, instance_id, username,
                                          hostname, user_attrs)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        return wsgi.Result(None, 202)

    def update_all(self, req, body, tenant_id, instance_id):
        """Change the password of one or more users."""
        LOG.info(_("Updating user passwords for instance '%s'") % instance_id)
        LOG.info(logging.mask_password(_("req : '%s'\n\n") % req))
        context = req.environ[wsgi.CONTEXT_KEY]
        users = body['users']
        model_users = []
        # validate every entry before applying any password change
        for user in users:
            try:
                mu = guest_models.MySQLUser()
                mu.name = user['name']
                mu.host = user.get('host')
                mu.password = user['password']
                found_user = models.User.load(context, instance_id,
                                              mu.name, mu.host)
                if not found_user:
                    user_and_host = mu.name
                    if mu.host:
                        user_and_host += '@' + mu.host
                    raise exception.UserNotFound(uuid=user_and_host)
                model_users.append(mu)
            except (ValueError, AttributeError) as e:
                raise exception.BadRequest(msg=str(e))
        models.User.change_password(context, instance_id, model_users)
        return wsgi.Result(None, 202)
class UserAccessController(wsgi.Controller):
    """Controller for adding and removing database access for a user."""
    schemas = apischema.user

    @classmethod
    def get_schema(cls, action, body):
        """Return the 'databases' sub-schema for update_all, else empty."""
        schema = {}
        if 'update_all' == action:
            schema = cls.schemas.get(action).get('databases')
        return schema

    def _get_user(self, context, instance_id, user_id):
        """Load and return the user, raising BadRequest on malformed ids
        and UserNotFound when the user does not exist.

        Never returns a falsy value, so callers need no extra check.
        """
        username, hostname = unquote_user_host(user_id)
        try:
            user = models.User.load(context, instance_id, username, hostname)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not user:
            raise exception.UserNotFound(uuid=user_id)
        return user

    def index(self, req, tenant_id, instance_id, user_id):
        """Show permissions for the given user."""
        LOG.info(_("Showing user access for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        # Make sure this user exists.  _get_user raises UserNotFound itself,
        # so the former `if not user:` re-check was unreachable and removed.
        user_id = correct_id_with_req(user_id, req)
        self._get_user(context, instance_id, user_id)
        username, hostname = unquote_user_host(user_id)
        access = models.User.access(context, instance_id, username, hostname)
        view = views.UserAccessView(access.databases)
        return wsgi.Result(view.data(), 200)

    def update(self, req, body, tenant_id, instance_id, user_id):
        """Grant access for a user to one or more databases."""
        LOG.info(_("Granting user access for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        user_id = correct_id_with_req(user_id, req)
        # existence check only; raises UserNotFound when absent
        self._get_user(context, instance_id, user_id)
        username, hostname = unquote_user_host(user_id)
        databases = [db['name'] for db in body['databases']]
        models.User.grant(context, instance_id, username, hostname, databases)
        return wsgi.Result(None, 202)

    def delete(self, req, tenant_id, instance_id, user_id, id):
        """Revoke access for a user."""
        LOG.info(_("Revoking user access for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        user_id = correct_id_with_req(user_id, req)
        # existence check only; raises UserNotFound when absent
        self._get_user(context, instance_id, user_id)
        username, hostname = unquote_user_host(user_id)
        access = models.User.access(context, instance_id, username, hostname)
        databases = [db.name for db in access.databases]
        if id not in databases:
            raise exception.DatabaseNotFound(uuid=id)
        models.User.revoke(context, instance_id, username, hostname, id)
        return wsgi.Result(None, 202)
class SchemaController(wsgi.Controller):
    """Controller for database-schema resources on an instance."""

    # JSON schema used by the wsgi layer to validate request bodies.
    schemas = apischema.dbschema

    def index(self, req, tenant_id, instance_id):
        """Return all schemas."""
        LOG.info(_("Listing schemas for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        db_schemas, next_marker = models.Schemas.load(context, instance_id)
        schema_view = views.SchemasView(db_schemas)
        paged = pagination.SimplePaginatedDataView(
            req.url, 'databases', schema_view, next_marker)
        return wsgi.Result(paged.data(), 200)

    def create(self, req, body, tenant_id, instance_id):
        """Creates a set of schemas."""
        LOG.info(_("Creating schema for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("body : '%s'\n\n") % body)
        context = req.environ[wsgi.CONTEXT_KEY]
        requested = body['databases']
        validated = populate_validated_databases(requested)
        models.Schema.create(context, instance_id, validated)
        return wsgi.Result(None, 202)

    def delete(self, req, tenant_id, instance_id, id):
        """Drop a single schema, identified by name."""
        LOG.info(_("Deleting schema for instance '%s'") % instance_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        try:
            target = guest_models.ValidatedMySQLDatabase()
            target.name = id
            models.Schema.delete(context, instance_id, target.serialize())
        except (ValueError, AttributeError) as e:
            # Name validation failures surface to the caller as a 400.
            raise exception.BadRequest(msg=str(e))
        return wsgi.Result(None, 202)

    def show(self, req, tenant_id, instance_id, id):
        # Individual schema details are not exposed by this API.
        raise webob.exc.HTTPNotImplemented()
| |
#!/usr/bin/python
from datetime import date,datetime
import sqlalchemy
import json
import pprint
import uuid
import logging
import os
import utils
from config import ComicStreamerConfig
from comicstreamerlib.folders import AppFolders
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import deferred
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, DateTime, LargeBinary, Table, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine, func
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.associationproxy import _AssociationList
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
SCHEMA_VERSION=3
Base = declarative_base()
Session = sessionmaker()
def resultSetToJson(rset, listname="aaData", total=None):
    # Serialize a result set to a JSON string via the custom encoder
    # (handles ORM objects and association proxies).  check_circular is
    # disabled because the encoder tracks visited objects itself.
    return json.dumps(resultSetToDict(rset, listname, total), cls=alchemy_encoder(), check_circular=False)
def resultSetToDict(rset, listname="aaData", total=None):
    """Wrap an iterable of result rows in a paging envelope.

    :param rset: iterable of rows (ORM objects or plain values).
    :param listname: key under which the rows are stored.
    :param total: overall count across all pages; defaults to the number
        of rows in this page when not given.
    :returns: dict with the rows plus ``page_count`` and ``total_count``.
    """
    rows = list(rset)
    return {
        listname: rows,
        'page_count': len(rows),
        'total_count': len(rows) if total is None else total,
    }
def alchemy_encoder():
    """Build a JSONEncoder subclass able to serialize SQLAlchemy rows.

    A fresh class is returned on every call so the visited-object list
    used for cycle breaking is never shared between dumps.
    """
    seen = []
    # Attribute names that are never serialized (raw relationships,
    # binary blobs, SQLAlchemy plumbing, redundant fields).
    skipped = frozenset(["metadata", "persons", "roles", "issue_num",
                         "file", "folder", "thumbnail"])

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, _AssociationList):
                # Association proxies serialize as plain lists.
                return list(obj)
            if isinstance(obj.__class__, DeclarativeMeta):
                if obj in seen:
                    return None  # break reference cycles
                seen.append(obj)
                fields = {}
                for name in dir(obj):
                    if (name.startswith('_') or name.endswith('_raw')
                            or name in skipped):
                        continue
                    value = getattr(obj, name)
                    # Dates (and datetimes) become ISO-ish strings.
                    if isinstance(value, date):
                        value = str(value)
                    fields[name] = value if value is not None else ""
                return fields
            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder
# Junction table
comics_characters_table = Table('comics_characters', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('character_id', Integer, ForeignKey('characters.id'))
)
# Junction table
comics_teams_table = Table('comics_teams', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('team_id', Integer, ForeignKey('teams.id'))
)
# Junction table
comics_locations_table = Table('comics_locations', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('location_id', Integer, ForeignKey('locations.id'))
)
# Junction table
comics_storyarcs_table = Table('comics_storyarcs', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('storyarc_id', Integer, ForeignKey('storyarcs.id'))
)
# Junction table
comics_generictags_table = Table('comics_generictags', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('generictags_id', Integer, ForeignKey('generictags.id'))
)
# Junction table
comics_genres_table = Table('comics_genres', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('genre_id', Integer, ForeignKey('genres.id'))
)
"""
# Junction table
readinglists_comics_table = Table('readinglists_comics', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('readinglist_id', Integer, ForeignKey('readinglists.id'))
)
"""
class CreditComparator(RelationshipProperty.Comparator):
    # NOTE(review): appears unused in this module; ``self.person()`` is
    # presumably meant to reach the related Person -- confirm before use.
    def __eq__(self, other):
        return self.person() == other
class MyComparator(ColumnProperty.Comparator):
    """Comparator making ``==`` on name columns a case-insensitive match.

    Equality comparisons against columns using this factory are rewritten
    as ILIKE expressions, so API filters match regardless of case.
    """
    def __eq__(self, other):
        #return func.lower(self.__clause_element__()) == func.lower(other)
        # for the children objects, make all equal comparisons be likes
        # NOTE: ``unicode`` ties this module to Python 2.
        return self.__clause_element__().ilike(func.lower(unicode(other)))
class Comic(Base):
    """ORM model for a single comic file plus its scraped metadata."""
    __tablename__ = 'comics'
    __table_args__ = {'sqlite_autoincrement': True}
    id = Column(Integer, primary_key=True)
    path = Column(String, unique=True)
    folder = Column(String)
    file = Column(String)
    series = Column(String)
    issue = Column(String)
    issue_num = Column(Float)
    date = Column(DateTime) # will be a composite of month,year,day for sorting/filtering
    day = Column(Integer)
    month = Column(Integer)
    year = Column(Integer)
    volume = Column(Integer)
    page_count = Column(Integer)
    comments = Column(String)
    publisher = Column(String)
    title = Column(String)
    imprint = Column(String)
    weblink = Column(String)
    filesize = Column(Integer)
    hash = Column(String)
    deleted_ts = Column(DateTime)
    lastread_ts = Column(DateTime)
    lastread_page = Column(Integer)
    # deferred: thumbnails are large blobs, loaded only on explicit access
    thumbnail = deferred(Column(LargeBinary))
    added_ts = Column(DateTime, default=datetime.utcnow) # when the comic was added to the DB
    mod_ts = Column(DateTime) # the last modified date of the file
    # Raw relationship collections; the JSON encoder skips *_raw names.
    credits_raw = relationship('Credit',
                    cascade="all, delete")
    characters_raw = relationship('Character', secondary=comics_characters_table,
                    cascade="save-update,delete")
    teams_raw = relationship('Team', secondary=comics_teams_table,
                    cascade="save-update,delete")
    locations_raw = relationship('Location', secondary=comics_locations_table,
                    cascade="save-update,delete")
    storyarcs_raw = relationship('StoryArc', secondary=comics_storyarcs_table,
                    cascade="save-update,delete")
    generictags_raw = relationship('GenericTag', secondary=comics_generictags_table,
                    cascade="save-update,delete")
    genres_raw = relationship('Genre', secondary=comics_genres_table,
                    cascade="save-update,delete")
    # Read-only views joining through the Credit association table.
    persons_raw = relationship("Person",
                    secondary="join(Credit, Person, Credit.person_id == Person.id)",
                    primaryjoin="and_(Comic.id == Credit.comic_id)",
                    viewonly=True
                    )
    roles_raw = relationship("Role",
                    secondary="join(Credit, Role, Credit.role_id == Role.id)",
                    primaryjoin="and_(Comic.id == Credit.comic_id)",
                    viewonly=True
                    )
    # Convenience proxies exposing just the ``name`` strings.
    characters = association_proxy('characters_raw', 'name')
    teams = association_proxy('teams_raw', 'name')
    locations = association_proxy('locations_raw', 'name')
    storyarcs = association_proxy('storyarcs_raw', 'name')
    generictags = association_proxy('generictags_raw', 'name')
    persons = association_proxy('persons_raw', 'name')
    roles = association_proxy('roles_raw', 'name')
    genres = association_proxy('genres_raw', 'name')

    def __repr__(self):
        # NOTE(review): assumes folder/file are set -- None would raise.
        out = u"<Comic(id={0}, path={1},\n series={2}, issue={3}, year={4} pages={5}\n{6}".format(
            self.id, self.folder+self.file,self.series,self.issue,self.year,self.page_count,self.characters)
        return out

    @property
    def credits(self):
        """Merge credits together into a dict with role name as key, and lists of persons"""
        out_dict = {}
        for c in self.credits_raw:
            if c.role and c.person:
                # BUG fix: dict.has_key() is Python-2-only; setdefault is
                # equivalent and works on both 2 and 3.
                out_dict.setdefault(c.role.name, []).append(c.person.name)
        return out_dict
class Credit(Base):
    """Association object linking a Comic to a Person in a given Role.

    The composite primary key (comic, role, person) makes each credit
    unique per comic.
    """
    __tablename__ = 'credits'
    comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
    role_id = Column(Integer, ForeignKey('roles.id'), primary_key=True)
    person_id = Column(Integer, ForeignKey('persons.id'), primary_key=True)
    # Deleting a credit cascades to its Person/Role rows.
    person = relationship("Person", cascade="all, delete")
    role = relationship("Role" , cascade="all, delete")
    def __init__(self, person=None, role=None):
        self.person = person
        self.role = role
class Role(Base):
__tablename__ = "roles"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Person(Base):
__tablename__ = "persons"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Character(Base):
__tablename__ = "characters"
id = Column(Integer, primary_key=True)
#name = Column(String, unique=True)
name = ColumnProperty(
Column('name', String, unique = True),
#comparator_factory=MyComparator
)
def __repr__(self):
out = u"<Character(id={0},name='{1}')>".format(self.id, self.name)
return out
class Team(Base):
__tablename__ = "teams"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Location(Base):
__tablename__ = "locations"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class StoryArc(Base):
__tablename__ = "storyarcs"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class GenericTag(Base):
__tablename__ = "generictags"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Genre(Base):
__tablename__ = "genres"
id = Column(Integer, primary_key=True)
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class DeletedComic(Base):
__tablename__ = "deletedcomics"
id = Column(Integer, primary_key=True)
comic_id = Column(Integer)
ts = Column(DateTime, default=datetime.utcnow)
def __unicode__(self):
out = u"DeletedComic: {0}:{1}".format(self.id, self.comic_id)
return out
"""
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String)
password_digest = Column(String)
class Bookmark(Base):
__tablename__ = "bookmarks"
__table_args__ = {'sqlite_autoincrement': True}
#id = Column(Integer, primary_key=True)
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
page = Column(Integer)
updated = Column(DateTime)
class Favorite(Base):
__tablename__ = "favorites"
id = Column(Integer, primary_key=True)
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
class ReadingList(Base):
__tablename__ = "readinglists"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
name = Column(String)
comics = relationship('Comic', secondary=readinglists_comics_table,
#cascade="delete", #, backref='comics')
)
"""
class SchemaInfo(Base):
__tablename__ = "schemainfo"
id = Column(Integer, primary_key=True)
schema_version = Column(Integer)
class DatabaseInfo(Base):
__tablename__ = "dbid"
id = Column(Integer, primary_key=True)
uuid = Column(String)
created = Column(DateTime, default=datetime.utcnow)
last_updated = Column(DateTime)
def __str__(self):
out = u"{0}".format(self.uuid)
return out
class SchemaVersionException(Exception):
pass
class DataManager():
    """Owns the SQLite database file, engine and session factory."""
    def __init__(self):
        self.dbfile = os.path.join(AppFolders.appData(), "comicdb.sqlite")
        self.engine = create_engine('sqlite:///'+ self.dbfile, echo=False)
        session_factory = sessionmaker(bind=self.engine)
        self.Session = scoped_session(session_factory)
    def delete(self):
        """Remove the database file from disk, if present."""
        if os.path.exists( self.dbfile ):
            os.unlink( self.dbfile )
    def create(self):
        """Create all tables and seed the schema-version and uuid rows.

        Raises SchemaVersionException if an existing database was written
        with a different schema version.
        """
        # if we don't have a UUID for this DB, add it.
        Base.metadata.create_all(self.engine)
        session = self.Session()
        results = session.query(SchemaInfo).first()
        if results is None:
            schemainfo = SchemaInfo()
            schemainfo.schema_version = SCHEMA_VERSION
            session.add(schemainfo)
            # BUG fix: the old message had no "{0}" placeholder, so the
            # version number was never interpolated (and "scheme" typo).
            logging.debug("Setting schema version {0}".format(schemainfo.schema_version))
            session.commit()
        else:
            if results.schema_version != SCHEMA_VERSION:
                raise SchemaVersionException
        results = session.query(DatabaseInfo).first()
        if results is None:
            dbinfo = DatabaseInfo()
            dbinfo.uuid = unicode(uuid.uuid4().hex)
            dbinfo.last_updated = datetime.utcnow()
            session.add(dbinfo)
            session.commit()
            # BUG fix: missing "{0}" placeholder, uuid was never logged.
            logging.debug("Added new uuid {0}".format(dbinfo.uuid))
        """
        # Eventually, there will be multi-user support, but for now,
        # just have a single user entry
        results = session.query(User).first()
        if results is None:
            user = User()
            user.name = ""
            user.password_digest = utils.getDigest("")
            session.add(user)
            session.commit()
        """
if __name__ == "__main__":
    # Manual smoke test: create (or version-check) the database file.
    dm = DataManager()
    dm.create()
| |
import pickle
import datetime
import pytest
import six
from pyrsistent import (
PRecord, field, InvariantException, ny, pset, PSet, CheckedPVector,
PTypeError, pset_field, pvector_field, pmap_field, pmap, PMap,
pvector, PVector, v, m)
class ARecord(PRecord):
    # Shared fixture record: ``x`` accepts int or float, ``y`` anything.
    x = field(type=(int, float))
    y = field()
def test_create():
r = ARecord(x=1, y='foo')
assert r.x == 1
assert r.y == 'foo'
assert isinstance(r, ARecord)
def test_correct_assignment():
r = ARecord(x=1, y='foo')
r2 = r.set('x', 2.0)
r3 = r2.set('y', 'bar')
assert r2 == {'x': 2.0, 'y': 'foo'}
assert r3 == {'x': 2.0, 'y': 'bar'}
assert isinstance(r3, ARecord)
def test_direct_assignment_not_possible():
with pytest.raises(AttributeError):
ARecord().x = 1
def test_cannot_assign_undeclared_fields():
with pytest.raises(AttributeError):
ARecord().set('z', 5)
def test_cannot_assign_wrong_type_to_fields():
try:
ARecord().set('x', 'foo')
assert False
except PTypeError as e:
assert e.source_class == ARecord
assert e.field == 'x'
assert e.expected_types == set([int, float])
assert e.actual_type is type('foo')
def test_cannot_construct_with_undeclared_fields():
with pytest.raises(AttributeError):
ARecord(z=5)
def test_cannot_construct_with_fields_of_wrong_type():
with pytest.raises(TypeError):
ARecord(x='foo')
def test_support_record_inheritance():
class BRecord(ARecord):
z = field()
r = BRecord(x=1, y='foo', z='bar')
assert isinstance(r, BRecord)
assert isinstance(r, ARecord)
assert r == {'x': 1, 'y': 'foo', 'z': 'bar'}
def test_single_type_spec():
class A(PRecord):
x = field(type=int)
r = A(x=1)
assert r.x == 1
with pytest.raises(TypeError):
r.set('x', 'foo')
def test_remove():
r = ARecord(x=1, y='foo')
r2 = r.remove('y')
assert isinstance(r2, ARecord)
assert r2 == {'x': 1}
def test_remove_non_existing_member():
r = ARecord(x=1, y='foo')
with pytest.raises(KeyError):
r.remove('z')
def test_field_invariant_must_hold():
class BRecord(PRecord):
x = field(invariant=lambda x: (x > 1, 'x too small'))
y = field(mandatory=True)
try:
BRecord(x=1)
assert False
except InvariantException as e:
assert e.invariant_errors == ('x too small',)
assert e.missing_fields == ('BRecord.y',)
def test_global_invariant_must_hold():
class BRecord(PRecord):
__invariant__ = lambda r: (r.x <= r.y, 'y smaller than x')
x = field()
y = field()
BRecord(x=1, y=2)
try:
BRecord(x=2, y=1)
assert False
except InvariantException as e:
assert e.invariant_errors == ('y smaller than x',)
assert e.missing_fields == ()
def test_set_multiple_fields():
a = ARecord(x=1, y='foo')
b = a.set(x=2, y='bar')
assert b == {'x': 2, 'y': 'bar'}
def test_initial_value():
class BRecord(PRecord):
x = field(initial=1)
y = field(initial=2)
a = BRecord()
assert a.x == 1
assert a.y == 2
def test_type_specification_must_be_a_type():
with pytest.raises(TypeError):
class BRecord(PRecord):
x = field(type=1)
def test_initial_must_be_of_correct_type():
with pytest.raises(TypeError):
class BRecord(PRecord):
x = field(type=int, initial='foo')
def test_invariant_must_be_callable():
with pytest.raises(TypeError):
class BRecord(PRecord):
x = field(invariant='foo')
def test_global_invariants_are_inherited():
class BRecord(PRecord):
__invariant__ = lambda r: (r.x % r.y == 0, 'modulo')
x = field()
y = field()
class CRecord(BRecord):
__invariant__ = lambda r: (r.x > r.y, 'size')
try:
CRecord(x=5, y=3)
assert False
except InvariantException as e:
assert e.invariant_errors == ('modulo',)
def test_global_invariants_must_be_callable():
with pytest.raises(TypeError):
class CRecord(PRecord):
__invariant__ = 1
def test_repr():
r = ARecord(x=1, y=2)
assert repr(r) == 'ARecord(x=1, y=2)' or repr(r) == 'ARecord(y=2, x=1)'
def test_factory():
class BRecord(PRecord):
x = field(type=int, factory=int)
assert BRecord(x=2.5) == {'x': 2}
def test_factory_must_be_callable():
with pytest.raises(TypeError):
class BRecord(PRecord):
x = field(type=int, factory=1)
def test_nested_record_construction():
class BRecord(PRecord):
x = field(int, factory=int)
class CRecord(PRecord):
a = field()
b = field(type=BRecord)
r = CRecord.create({'a': 'foo', 'b': {'x': '5'}})
assert isinstance(r, CRecord)
assert isinstance(r.b, BRecord)
assert r == {'a': 'foo', 'b': {'x': 5}}
def test_pickling():
x = ARecord(x=2.0, y='bar')
y = pickle.loads(pickle.dumps(x, -1))
assert x == y
assert isinstance(y, ARecord)
def test_all_invariant_errors_reported():
class BRecord(PRecord):
x = field(factory=int, invariant=lambda x: (x >= 0, 'x negative'))
y = field(mandatory=True)
class CRecord(PRecord):
a = field(invariant=lambda x: (x != 0, 'a zero'))
b = field(type=BRecord)
try:
CRecord.create({'a': 0, 'b': {'x': -5}})
assert False
except InvariantException as e:
assert set(e.invariant_errors) == set(['x negative', 'a zero'])
assert e.missing_fields == ('BRecord.y',)
def test_precord_factory_method_is_idempotent():
class BRecord(PRecord):
x = field()
y = field()
r = BRecord(x=1, y=2)
assert BRecord.create(r) is r
def test_serialize():
class BRecord(PRecord):
d = field(type=datetime.date,
factory=lambda d: datetime.datetime.strptime(d, "%d%m%Y").date(),
serializer=lambda format, d: d.strftime('%Y-%m-%d') if format == 'ISO' else d.strftime('%d%m%Y'))
assert BRecord(d='14012015').serialize('ISO') == {'d': '2015-01-14'}
assert BRecord(d='14012015').serialize('other') == {'d': '14012015'}
def test_nested_serialize():
class BRecord(PRecord):
d = field(serializer=lambda format, d: format)
class CRecord(PRecord):
b = field()
serialized = CRecord(b=BRecord(d='foo')).serialize('bar')
assert serialized == {'b': {'d': 'bar'}}
assert isinstance(serialized, dict)
def test_serializer_must_be_callable():
with pytest.raises(TypeError):
class CRecord(PRecord):
x = field(serializer=1)
def test_transform_without_update_returns_same_precord():
r = ARecord(x=2.0, y='bar')
assert r.transform([ny], lambda x: x) is r
class Application(PRecord):
name = field(type=(six.text_type,) + six.string_types)
image = field(type=(six.text_type,) + six.string_types)
class ApplicationVector(CheckedPVector):
__type__ = Application
class Node(PRecord):
applications = field(type=ApplicationVector)
def test_nested_create_serialize():
node = Node(applications=[Application(name='myapp', image='myimage'),
Application(name='b', image='c')])
node2 = Node.create({'applications': [{'name': 'myapp', 'image': 'myimage'},
{'name': 'b', 'image': 'c'}]})
assert node == node2
serialized = node.serialize()
restored = Node.create(serialized)
assert restored == node
def test_pset_field_initial_value():
"""
``pset_field`` results in initial value that is empty.
"""
class Record(PRecord):
value = pset_field(int)
assert Record() == Record(value=[])
def test_pset_field_custom_initial():
"""
A custom initial value can be passed in.
"""
class Record(PRecord):
value = pset_field(int, initial=(1, 2))
assert Record() == Record(value=[1, 2])
def test_pset_field_factory():
"""
``pset_field`` has a factory that creates a ``PSet``.
"""
class Record(PRecord):
value = pset_field(int)
record = Record(value=[1, 2])
assert isinstance(record.value, PSet)
def test_pset_field_checked_set():
"""
``pset_field`` results in a set that enforces its type.
"""
class Record(PRecord):
value = pset_field(int)
record = Record(value=[1, 2])
with pytest.raises(TypeError):
record.value.add("hello")
def test_pset_field_type():
"""
``pset_field`` enforces its type.
"""
class Record(PRecord):
value = pset_field(int)
record = Record()
with pytest.raises(TypeError):
record.set("value", None)
def test_pset_field_mandatory():
"""
``pset_field`` is a mandatory field.
"""
class Record(PRecord):
value = pset_field(int)
record = Record(value=[1])
with pytest.raises(InvariantException):
record.remove("value")
def test_pset_field_default_non_optional():
"""
By default ``pset_field`` is non-optional, i.e. does not allow
``None``.
"""
class Record(PRecord):
value = pset_field(int)
with pytest.raises(TypeError):
Record(value=None)
def test_pset_field_explicit_non_optional():
"""
If ``optional`` argument is ``False`` then ``pset_field`` is
non-optional, i.e. does not allow ``None``.
"""
class Record(PRecord):
value = pset_field(int, optional=False)
with pytest.raises(TypeError):
Record(value=None)
def test_pset_field_optional():
"""
If ``optional`` argument is true, ``None`` is acceptable alternative
to a set.
"""
class Record(PRecord):
value = pset_field(int, optional=True)
assert ((Record(value=[1, 2]).value, Record(value=None).value) ==
(pset([1, 2]), None))
def test_pset_field_name():
"""
The created set class name is based on the type of items in the set.
"""
class Something(object):
pass
class Record(PRecord):
value = pset_field(Something)
value2 = pset_field(int)
assert ((Record().value.__class__.__name__,
Record().value2.__class__.__name__) ==
("SomethingPSet", "IntPSet"))
def test_pvector_field_initial_value():
"""
``pvector_field`` results in initial value that is empty.
"""
class Record(PRecord):
value = pvector_field(int)
assert Record() == Record(value=[])
def test_pvector_field_custom_initial():
"""
A custom initial value can be passed in.
"""
class Record(PRecord):
value = pvector_field(int, initial=(1, 2))
assert Record() == Record(value=[1, 2])
def test_pvector_field_factory():
"""
``pvector_field`` has a factory that creates a ``PVector``.
"""
class Record(PRecord):
value = pvector_field(int)
record = Record(value=[1, 2])
assert isinstance(record.value, PVector)
def test_pvector_field_checked_vector():
"""
``pvector_field`` results in a vector that enforces its type.
"""
class Record(PRecord):
value = pvector_field(int)
record = Record(value=[1, 2])
with pytest.raises(TypeError):
record.value.append("hello")
def test_pvector_field_type():
"""
``pvector_field`` enforces its type.
"""
class Record(PRecord):
value = pvector_field(int)
record = Record()
with pytest.raises(TypeError):
record.set("value", None)
def test_pvector_field_mandatory():
"""
``pvector_field`` is a mandatory field.
"""
class Record(PRecord):
value = pvector_field(int)
record = Record(value=[1])
with pytest.raises(InvariantException):
record.remove("value")
def test_pvector_field_default_non_optional():
"""
By default ``pvector_field`` is non-optional, i.e. does not allow
``None``.
"""
class Record(PRecord):
value = pvector_field(int)
with pytest.raises(TypeError):
Record(value=None)
def test_pvector_field_explicit_non_optional():
"""
If ``optional`` argument is ``False`` then ``pvector_field`` is
non-optional, i.e. does not allow ``None``.
"""
class Record(PRecord):
value = pvector_field(int, optional=False)
with pytest.raises(TypeError):
Record(value=None)
def test_pvector_field_optional():
"""
If ``optional`` argument is true, ``None`` is acceptable alternative
to a sequence.
"""
class Record(PRecord):
value = pvector_field(int, optional=True)
assert ((Record(value=[1, 2]).value, Record(value=None).value) ==
(pvector([1, 2]), None))
def test_pvector_field_name():
"""
The created set class name is based on the type of items in the set.
"""
class Something(object):
pass
class Record(PRecord):
value = pvector_field(Something)
value2 = pvector_field(int)
assert ((Record().value.__class__.__name__,
Record().value2.__class__.__name__) ==
("SomethingPVector", "IntPVector"))
def test_pvector_field_create_from_nested_serialized_data():
    """``create`` on serialized data round-trips nested pvector fields."""
    class Foo(PRecord):
        foo = field(type=str)
    class Bar(PRecord):
        bar = pvector_field(Foo)
    data = Bar(bar=v(Foo(foo="foo")))
    # BUG fix: the comparison result was silently discarded -- the test
    # could never fail. Assert it.
    assert Bar.create(data.serialize()) == data
def test_pmap_field_initial_value():
"""
``pmap_field`` results in initial value that is empty.
"""
class Record(PRecord):
value = pmap_field(int, int)
assert Record() == Record(value={})
def test_pmap_field_factory():
"""
``pmap_field`` has a factory that creates a ``PMap``.
"""
class Record(PRecord):
value = pmap_field(int, int)
record = Record(value={1: 1234})
assert isinstance(record.value, PMap)
def test_pmap_field_checked_map_key():
"""
``pmap_field`` results in a map that enforces its key type.
"""
class Record(PRecord):
value = pmap_field(int, type(None))
record = Record(value={1: None})
with pytest.raises(TypeError):
record.value.set("hello", None)
def test_pmap_field_checked_map_value():
"""
``pmap_field`` results in a map that enforces its value type.
"""
class Record(PRecord):
value = pmap_field(int, type(None))
record = Record(value={1: None})
with pytest.raises(TypeError):
record.value.set(2, 4)
def test_pmap_field_mandatory():
"""
``pmap_field`` is a mandatory field.
"""
class Record(PRecord):
value = pmap_field(int, int)
record = Record()
with pytest.raises(InvariantException):
record.remove("value")
def test_pmap_field_default_non_optional():
"""
By default ``pmap_field`` is non-optional, i.e. does not allow
``None``.
"""
class Record(PRecord):
value = pmap_field(int, int)
# Ought to be TypeError, but pyrsistent doesn't quite allow that:
with pytest.raises(AttributeError):
Record(value=None)
def test_pmap_field_explicit_non_optional():
"""
If ``optional`` argument is ``False`` then ``pmap_field`` is
non-optional, i.e. does not allow ``None``.
"""
class Record(PRecord):
value = pmap_field(int, int, optional=False)
# Ought to be TypeError, but pyrsistent doesn't quite allow that:
with pytest.raises(AttributeError):
Record(value=None)
def test_pmap_field_optional():
    """
    If ``optional`` argument is true, ``None`` is acceptable alternative
    to a map.
    """
    class Record(PRecord):
        value = pmap_field(int, int, optional=True)
    # BUG fix: a misplaced closing paren made this
    # ``assert (<bool>, None)`` -- a non-empty tuple, hence always true.
    # Parenthesize like the pset/pvector variants of this test.
    assert ((Record(value={1: 2}).value, Record(value=None).value) ==
            (pmap({1: 2}), None))
def test_pmap_field_name():
"""
The created map class name is based on the types of items in the map.
"""
class Something(object):
pass
class Another(object):
pass
class Record(PRecord):
value = pmap_field(Something, Another)
value2 = pmap_field(int, float)
assert ((Record().value.__class__.__name__,
Record().value2.__class__.__name__) ==
("SomethingAnotherPMap", "IntFloatPMap"))
def test_pmap_field_invariant():
"""
The ``invariant`` parameter is passed through to ``field``.
"""
class Record(PRecord):
value = pmap_field(
int, int,
invariant=(
lambda pmap: (len(pmap) == 1, "Exactly one item required.")
)
)
with pytest.raises(InvariantException):
Record(value={})
with pytest.raises(InvariantException):
Record(value={1: 2, 3: 4})
assert Record(value={1: 2}).value == {1: 2}
def test_pmap_field_create_from_nested_serialized_data():
    """``create`` on serialized data round-trips nested pmap fields."""
    class Foo(PRecord):
        foo = field(type=str)
    class Bar(PRecord):
        bar = pmap_field(str, Foo)
    data = Bar(bar=m(foo_key=Foo(foo="foo")))
    # BUG fix: the comparison result was silently discarded -- the test
    # could never fail. Assert it.
    assert Bar.create(data.serialize()) == data
| |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import time
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from oslo_utils import strutils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(client_base.Client):
    def __init__(self, volume_list=None, **kwargs):
        """Create a Data ONTAP client.

        :param volume_list: optional list of volume names to which LUN
            listing is restricted.
        :param kwargs: passed through to the base client; ``vfiler`` (if
            present) scopes the connection to that vFiler unit.
        """
        super(Client, self).__init__(**kwargs)
        vfiler = kwargs.get('vfiler', None)
        self.connection.set_vfiler(vfiler)
        # Fetch the live ONTAPI version (bypass cache) so the feature
        # detection below is accurate for this connection.
        (major, minor) = self.get_ontapi_version(cached=False)
        self.connection.set_api_version(major, minor)
        self.volume_list = volume_list
        self._init_features()
    def _init_features(self):
        """Register feature flags that depend on the ONTAPI version."""
        super(Client, self)._init_features()
        ontapi_version = self.get_ontapi_version()  # major, minor
        # System metrics support requires ONTAPI 1.20 or later.
        ontapi_1_20 = ontapi_version >= (1, 20)
        self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20)
    def send_ems_log_message(self, message_dict):
        """Sends a message to the Data ONTAP EMS log."""
        # NOTE(cknight): Cannot use deepcopy on the connection context
        node_client = copy.copy(self)
        node_client.connection = copy.copy(self.connection)
        node_client.connection.set_timeout(25)
        try:
            # Clear vFiler scoping -- NOTE(review): presumably EMS
            # requests must go to the physical controller; confirm.
            node_client.connection.set_vfiler(None)
            node_client.send_request('ems-autosupport-log', message_dict)
            LOG.debug('EMS executed successfully.')
        except netapp_api.NaApiError as e:
            # Best-effort: EMS failures are logged, never propagated.
            LOG.warning(_LW('Failed to invoke EMS. %s'), e)
def get_iscsi_target_details(self):
"""Gets the iSCSI target portal details."""
iscsi_if_iter = netapp_api.NaElement('iscsi-portal-list-info')
result = self.connection.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
portal_list_entries = result.get_child_by_name(
'iscsi-portal-list-entries')
if portal_list_entries:
portal_list = portal_list_entries.get_children()
for iscsi_if in portal_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
tgt_list.append(d)
return tgt_list
def check_iscsi_initiator_exists(self, iqn):
"""Returns True if initiator exists."""
initiator_exists = True
try:
auth_list = netapp_api.NaElement('iscsi-initiator-auth-list-info')
auth_list.add_new_child('initiator', iqn)
self.connection.invoke_successfully(auth_list, True)
except netapp_api.NaApiError:
initiator_exists = False
return initiator_exists
def get_fc_target_wwpns(self):
"""Gets the FC target details."""
wwpns = []
port_name_list_api = netapp_api.NaElement('fcp-port-name-list-info')
result = self.connection.invoke_successfully(port_name_list_api)
port_names = result.get_child_by_name('fcp-port-names')
if port_names:
for port_name_info in port_names.get_children():
wwpn = port_name_info.get_child_content('port-name').lower()
wwpns.append(wwpn)
return wwpns
def get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = netapp_api.NaElement('iscsi-node-get-name')
result = self.connection.invoke_successfully(iscsi_service_iter, True)
return result.get_child_content('node-name')
def set_iscsi_chap_authentication(self, iqn, username, password):
"""Provides NetApp host's CHAP credentials to the backend."""
command = ("iscsi security add -i %(iqn)s -s CHAP "
"-p %(password)s -n %(username)s") % {
'iqn': iqn,
'password': password,
'username': username,
}
LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn})
try:
ssh_pool = self.ssh_client.ssh_pool
with ssh_pool.item() as ssh:
self.ssh_client.execute_command(ssh, command)
except Exception as e:
msg = _('Failed to set CHAP authentication for target IQN '
'%(iqn)s. Details: %(ex)s') % {
'iqn': iqn,
'ex': e,
}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def get_lun_list(self):
"""Gets the list of LUNs on filer."""
lun_list = []
if self.volume_list:
for vol in self.volume_list:
try:
luns = self._get_vol_luns(vol)
if luns:
lun_list.extend(luns)
except netapp_api.NaApiError:
LOG.warning(_LW("Error finding LUNs for volume %s."
" Verify volume exists."), vol)
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
return lun_list
def _get_vol_luns(self, vol_name):
"""Gets the LUNs for a volume."""
api = netapp_api.NaElement('lun-list-info')
if vol_name:
api.add_new_child('volume-name', vol_name)
result = self.connection.invoke_successfully(api, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def get_igroup_by_initiators(self, initiator_list):
"""Get igroups exactly matching a set of initiators."""
igroup_list = []
if not initiator_list:
return igroup_list
initiator_set = set(initiator_list)
igroup_list_info = netapp_api.NaElement('igroup-list-info')
result = self.connection.invoke_successfully(igroup_list_info, True)
initiator_groups = result.get_child_by_name(
'initiator-groups') or netapp_api.NaElement('none')
for initiator_group_info in initiator_groups.get_children():
initiator_set_for_igroup = set()
initiators = initiator_group_info.get_child_by_name(
'initiators') or netapp_api.NaElement('none')
for initiator_info in initiators.get_children():
initiator_set_for_igroup.add(
initiator_info.get_child_content('initiator-name'))
if initiator_set == initiator_set_for_igroup:
igroup = {'initiator-group-os-type':
initiator_group_info.get_child_content(
'initiator-group-os-type'),
'initiator-group-type':
initiator_group_info.get_child_content(
'initiator-group-type'),
'initiator-group-name':
initiator_group_info.get_child_content(
'initiator-group-name')}
igroup_list.append(igroup)
return igroup_list
def clone_lun(self, path, clone_path, name, new_name,
space_reserved='true', src_block=0,
dest_block=0, block_count=0, source_snapshot=None):
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
for _call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
zapi_args = {
'source-path': path,
'destination-path': clone_path,
'no-snap': 'true',
}
if source_snapshot:
zapi_args['snapshot-name'] = source_snapshot
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start', **zapi_args)
if block_count > 0:
block_ranges = netapp_api.NaElement("block-ranges")
# zAPI can only handle 2^24 block ranges
bc_limit = 2 ** 24 # 8GB
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for _segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range =\
netapp_api.NaElement.create_node_with_children(
'block-range',
**{'source-block-number':
six.text_type(src_block),
'destination-block-number':
six.text_type(dest_block),
'block-count':
six.text_type(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_start.add_child_elem(block_ranges)
result = self.connection.invoke_successfully(clone_start, True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
if vol_uuid:
self._check_clone_status(clone_id, vol_uuid, name, new_name)
def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
"""Checks for the job till completed."""
clone_status = netapp_api.NaElement('clone-list-status')
cl_id = netapp_api.NaElement('clone-id')
clone_status.add_child_elem(cl_id)
cl_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_id,
'volume-uuid': vol_uuid})
running = True
clone_ops_info = None
while running:
result = self.connection.invoke_successfully(clone_status, True)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
for info in ops_info:
if info.get_child_content('clone-state') == 'running':
time.sleep(1)
break
else:
running = False
clone_ops_info = info
break
else:
if clone_ops_info:
fmt = {'name': name, 'new_name': new_name}
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s completed", fmt)
else:
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s failed", fmt)
raise netapp_api.NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
def get_lun_by_args(self, **args):
"""Retrieves LUNs with specified args."""
lun_info = netapp_api.NaElement.create_node_with_children(
'lun-list-info', **args)
result = self.connection.invoke_successfully(lun_info, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def get_filer_volumes(self, volume=None):
"""Returns list of filer volumes in API format."""
vol_request = netapp_api.NaElement('volume-list-info')
res = self.connection.invoke_successfully(vol_request, True)
volumes = res.get_child_by_name('volumes')
if volumes:
return volumes.get_children()
return []
def get_lun_map(self, path):
lun_map_list = netapp_api.NaElement.create_node_with_children(
'lun-map-list-info',
**{'path': path})
return self.connection.invoke_successfully(lun_map_list, True)
def set_space_reserve(self, path, enable):
"""Sets the space reserve info."""
space_res = netapp_api.NaElement.create_node_with_children(
'lun-set-space-reservation-info',
**{'path': path, 'enable': enable})
self.connection.invoke_successfully(space_res, True)
def get_actual_path_for_export(self, export_path):
"""Gets the actual path on the filer for export path."""
storage_path = netapp_api.NaElement.create_node_with_children(
'nfs-exportfs-storage-path', **{'pathname': export_path})
result = self.connection.invoke_successfully(storage_path,
enable_tunneling=True)
if result.get_child_content('actual-pathname'):
return result.get_child_content('actual-pathname')
raise exception.NotFound(_('No storage path found for export path %s')
% (export_path))
def clone_file(self, src_path, dest_path, source_snapshot=None):
LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
{'src_path': src_path, 'dest_path': dest_path})
zapi_args = {
'source-path': src_path,
'destination-path': dest_path,
'no-snap': 'true',
}
if source_snapshot:
zapi_args['snapshot-name'] = source_snapshot
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start', **zapi_args)
result = self.connection.invoke_successfully(clone_start,
enable_tunneling=True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
if vol_uuid:
try:
self._wait_for_clone_finish(clone_id, vol_uuid)
except netapp_api.NaApiError as e:
if e.code != 'UnknownCloneId':
self._clear_clone(clone_id)
raise
def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
"""Waits till a clone operation is complete or errored out."""
clone_ls_st = netapp_api.NaElement('clone-list-status')
clone_id = netapp_api.NaElement('clone-id')
clone_ls_st.add_child_elem(clone_id)
clone_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_op_id,
'volume-uuid': vol_uuid})
task_running = True
while task_running:
result = self.connection.invoke_successfully(clone_ls_st,
enable_tunneling=True)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
state = ops_info[0].get_child_content('clone-state')
if state == 'completed':
task_running = False
elif state == 'failed':
code = ops_info[0].get_child_content('error')
reason = ops_info[0].get_child_content('reason')
raise netapp_api.NaApiError(code, reason)
else:
time.sleep(1)
else:
raise netapp_api.NaApiError(
'UnknownCloneId',
'No clone operation for clone id %s found on the filer'
% (clone_id))
def _clear_clone(self, clone_id):
"""Clear the clone information.
Invoke this in case of failed clone.
"""
clone_clear = netapp_api.NaElement.create_node_with_children(
'clone-clear',
**{'clone-id': clone_id})
retry = 3
while retry:
try:
self.connection.invoke_successfully(clone_clear,
enable_tunneling=True)
break
except netapp_api.NaApiError:
# Filer might be rebooting
time.sleep(5)
retry = retry - 1
def get_file_usage(self, path):
"""Gets the file unique bytes."""
LOG.debug('Getting file usage for %s', path)
file_use = netapp_api.NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self.connection.invoke_successfully(file_use)
bytes = res.get_child_content('unique-bytes')
LOG.debug('file-usage for path %(path)s is %(bytes)s',
{'path': path, 'bytes': bytes})
return bytes
def get_ifconfig(self):
ifconfig = netapp_api.NaElement('net-ifconfig-get')
return self.connection.invoke_successfully(ifconfig)
def get_flexvol_capacity(self, flexvol_path):
"""Gets total capacity and free capacity, in bytes, of the flexvol."""
api_args = {'volume': flexvol_path, 'verbose': 'false'}
result = self.send_request('volume-list-info', api_args)
flexvol_info_list = result.get_child_by_name('volumes')
flexvol_info = flexvol_info_list.get_children()[0]
size_total = float(flexvol_info.get_child_content('size-total'))
size_available = float(
flexvol_info.get_child_content('size-available'))
return {
'size-total': size_total,
'size-available': size_available,
}
def get_performance_instance_names(self, object_name):
"""Get names of performance instances for a node."""
api_args = {'objectname': object_name}
result = self.send_request('perf-object-instance-list-info',
api_args,
enable_tunneling=False)
instance_names = []
instances = result.get_child_by_name(
'instances') or netapp_api.NaElement('None')
for instance_info in instances.get_children():
instance_names.append(instance_info.get_child_content('name'))
return instance_names
def get_performance_counters(self, object_name, instance_names,
counter_names):
"""Gets or or more 7-mode Data ONTAP performance counters."""
api_args = {
'objectname': object_name,
'instances': [
{'instance': instance} for instance in instance_names
],
'counters': [
{'counter': counter} for counter in counter_names
],
}
result = self.send_request('perf-object-get-instances',
api_args,
enable_tunneling=False)
counter_data = []
timestamp = result.get_child_content('timestamp')
instances = result.get_child_by_name(
'instances') or netapp_api.NaElement('None')
for instance in instances.get_children():
instance_name = instance.get_child_content('name')
counters = instance.get_child_by_name(
'counters') or netapp_api.NaElement('None')
for counter in counters.get_children():
counter_name = counter.get_child_content('name')
counter_value = counter.get_child_content('value')
counter_data.append({
'instance-name': instance_name,
'timestamp': timestamp,
counter_name: counter_value,
})
return counter_data
def get_system_name(self):
"""Get the name of the 7-mode Data ONTAP controller."""
result = self.send_request('system-get-info',
{},
enable_tunneling=False)
system_info = result.get_child_by_name('system-info')
system_name = system_info.get_child_content('system-name')
return system_name
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
snapshot_list_info = netapp_api.NaElement('snapshot-list-info')
snapshot_list_info.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(snapshot_list_info,
enable_tunneling=True)
snapshots = result.get_child_by_name('snapshots')
if not snapshots:
msg = _('No snapshots could be found on volume %s.')
raise exception.VolumeBackendAPIException(data=msg % volume_name)
snapshot_list = snapshots.get_children()
snapshot = None
for s in snapshot_list:
if (snapshot_name == s.get_child_content('name')) and (snapshot
is None):
snapshot = {
'name': s.get_child_content('name'),
'volume': s.get_child_content('volume'),
'busy': strutils.bool_from_string(
s.get_child_content('busy')),
}
snapshot_owners_list = s.get_child_by_name(
'snapshot-owners-list') or netapp_api.NaElement('none')
snapshot_owners = set([snapshot_owner.get_child_content(
'owner') for snapshot_owner in
snapshot_owners_list.get_children()])
snapshot['owners'] = snapshot_owners
elif (snapshot_name == s.get_child_content('name')) and (
snapshot is not None):
msg = _('Could not find unique snapshot %(snap)s on '
'volume %(vol)s.')
msg_args = {'snap': snapshot_name, 'vol': volume_name}
raise exception.VolumeBackendAPIException(data=msg % msg_args)
if not snapshot:
raise exception.SnapshotNotFound(snapshot_id=snapshot_name)
return snapshot
def get_snapshots_marked_for_deletion(self, volume_list=None):
"""Get a list of snapshots marked for deletion."""
snapshots = []
for volume_name in volume_list:
api_args = {
'target-name': volume_name,
'target-type': 'volume',
'terse': 'true',
}
result = self.send_request('snapshot-list-info', api_args)
snapshots.extend(
self._parse_snapshot_list_info_result(result, volume_name))
return snapshots
def _parse_snapshot_list_info_result(self, result, volume_name):
snapshots = []
snapshots_elem = result.get_child_by_name(
'snapshots') or netapp_api.NaElement('none')
snapshot_info_list = snapshots_elem.get_children()
for snapshot_info in snapshot_info_list:
snapshot_name = snapshot_info.get_child_content('name')
snapshot_busy = strutils.bool_from_string(
snapshot_info.get_child_content('busy'))
snapshot_id = snapshot_info.get_child_content(
'snapshot-instance-uuid')
if (not snapshot_busy and
snapshot_name.startswith(client_base.DELETED_PREFIX)):
snapshots.append({
'name': snapshot_name,
'instance_id': snapshot_id,
'volume_name': volume_name,
})
return snapshots
| |
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
# Public API of this module.  NOTE: this was previously spelled
# ``__ALL__``, which Python ignores -- only ``__all__`` controls
# ``from <module> import *`` exports.
__all__ = [
    "mean_absolute_error",
    "mean_squared_error",
    "r2_score",
    "explained_variance_score"
]
def _check_reg_targets(y_true, y_pred):
    """Check that y_true and y_pred belong to the same regression task

    Parameters
    ----------
    y_true : array-like,

    y_pred : array-like,

    Returns
    -------
    type_true : one of {'continuous', continuous-multioutput'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``

    y_true : array-like of shape = [n_samples, n_outputs]
        Ground truth (correct) target values.

    y_pred : array-like of shape = [n_samples, n_outputs]
        Estimated target values.
    """
    check_consistent_length(y_true, y_pred)

    # Validate and promote both arrays to 2-D column form.
    y_true = check_array(y_true, ensure_2d=False)
    y_pred = check_array(y_pred, ensure_2d=False)

    if y_true.ndim == 1:
        y_true = y_true[:, np.newaxis]
    if y_pred.ndim == 1:
        y_pred = y_pred[:, np.newaxis]

    n_outputs_true = y_true.shape[1]
    n_outputs_pred = y_pred.shape[1]
    if n_outputs_true != n_outputs_pred:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(n_outputs_true, n_outputs_pred))

    y_type = ('continuous' if n_outputs_true == 1
              else 'continuous-multioutput')
    return y_type, y_true, y_pred
def _average_and_variance(values, sample_weight=None):
"""
Compute the (weighted) average and variance.
Parameters
----------
values : array-like of shape = [n_samples] or [n_samples, n_outputs]
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average : float
The weighted average
variance : float
The weighted variance
"""
values = np.asarray(values)
if values.ndim == 1:
values = values.reshape((-1, 1))
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 1:
sample_weight = sample_weight.reshape((-1, 1))
average = np.average(values, weights=sample_weight)
variance = np.average((values - average)**2, weights=sample_weight)
return average, variance
def mean_absolute_error(y_true, y_pred, sample_weight=None):
    """Mean absolute error regression loss

    Parameters
    ----------
    y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
        Ground truth (correct) target values.

    y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
        Estimated target values.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    """
    y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
    # Per-sample error averaged over outputs, then (weighted) over samples.
    per_sample_errors = np.abs(y_pred - y_true).mean(axis=1)
    return np.average(per_sample_errors, weights=sample_weight)
def mean_squared_error(y_true, y_pred, sample_weight=None):
    """Mean squared error regression loss

    Parameters
    ----------
    y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
        Ground truth (correct) target values.

    y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
        Estimated target values.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)  # doctest: +ELLIPSIS
    0.708...
    """
    y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
    # Squared residuals averaged over outputs, then (weighted) over samples.
    per_sample_errors = np.mean((y_pred - y_true) ** 2, axis=1)
    return np.average(per_sample_errors, weights=sample_weight)
def explained_variance_score(y_true, y_pred, sample_weight=None):
    """Explained variance regression score function

    Best possible score is 1.0, lower values are worse.

    Parameters
    ----------
    y_true : array-like
        Ground truth (correct) target values.

    y_pred : array-like
        Estimated target values.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The explained variance.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.957...
    """
    y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)

    # Only single-output targets are supported by this metric.
    if y_type != "continuous":
        raise ValueError("{0} is not supported".format(y_type))

    _, numerator = _average_and_variance(y_true - y_pred, sample_weight)
    _, denominator = _average_and_variance(y_true, sample_weight)

    if denominator == 0.0:
        # Constant y_true: perfect score only if the residuals also have
        # zero variance; otherwise return 0.0 rather than -inf.
        return 1.0 if numerator == 0.0 else 0.0
    return 1 - numerator / denominator
def r2_score(y_true, y_pred, sample_weight=None):
    """R^2 (coefficient of determination) regression score function.

    Best possible score is 1.0, lower values are worse.

    Parameters
    ----------
    y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
        Ground truth (correct) target values.

    y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
        Estimated target values.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    z : float
        The R^2 score.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, R^2 score may be negative (it need not actually
    be the square of a quantity R).

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.938...
    """
    y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        # Column-shaped so it broadcasts over multioutput residuals.
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.

    # Weighted residual sum of squares vs. total sum of squares.
    residual_ss = (weight * (y_true - y_pred) ** 2).sum(dtype=np.float64)
    y_true_mean = np.average(y_true, axis=0, weights=sample_weight)
    total_ss = (weight * (y_true - y_true_mean) ** 2).sum(dtype=np.float64)

    if total_ss == 0.0:
        # Constant y_true: perfect score only with zero residuals;
        # otherwise return 0.0 rather than -inf.
        return 1.0 if residual_ss == 0.0 else 0.0
    return 1 - residual_ss / total_ss
| |
# import unittest
import unittest
from unittest.mock import Mock, MagicMock
import os
import sys
import asyncio
import xmlrunner
from test import common
from younit import set_test_hang_alarm
from younit import clear_test_hang_alarm
from younit import close_all_threads
from younit import asyncio_test, AsyncMock
import mooq
# @unittest.skip("skipped")
class InMemoryDirectProduceConsumeTest(common.TransportTestCase):
    """Produce/consume round-trips through a direct exchange on the
    in-memory broker."""
    async def async_setUp(self):
        # Fixture: broker + connection + channel + a direct-exchange
        # producer that the individual tests publish through.
        # NOTE(review): unlike FanoutTestCase, this does not call
        # super().async_setUp() -- confirm that is intentional.
        await self.GIVEN_InMemoryBrokerStarted("localhost",1234)
        await self.GIVEN_ConnectionResourceCreated("localhost",1234,"in_memory")
        await self.GIVEN_ChannelResourceCreated()
        await self.GIVEN_ProducerRegistered(exchange_name="fake_exch",
                                            exchange_type="direct")
    async def async_tearDown(self):
        await self.CloseBroker()
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_callback_is_run(self):
        """A message with a matching routing key reaches the callback."""
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue",
                                            exchange_name="fake_exch",
                                            exchange_type="direct",
                                            routing_keys=["fake_routing_key"],
                                            callback = self.fake_callback)
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_message",
                                          routing_key="fake_routing_key")
        await self.WHEN_ProcessEventsNTimes(2)
        self.THEN_CallbackReceivesMessage("fake_message")
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_callback_is_not_run_if_routing_key_mismatch(self):
        """A message published with a different routing key is not delivered."""
        fake_callback = Mock()
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue",
                                            exchange_name="fake_exch",
                                            exchange_type="direct",
                                            routing_keys=["fake_routing_key"],
                                            callback = fake_callback)
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_message",
                                          routing_key="another_routing_key")
        await self.WHEN_ProcessEventsOnce()
        self.THEN_CallbackIsNotRun(fake_callback)
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_callback_run_if_multiple_routing_keys(self):
        """Any of a consumer's routing keys routes a message to it."""
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue",
                                            exchange_name="fake_exch",
                                            exchange_type="direct",
                                            routing_keys=["fake_routing_key","fake_routing_key2"],
                                            callback = self.fake_callback)
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_message",
                                          routing_key="fake_routing_key2")
        await self.WHEN_ProcessEventsOnce()
        self.THEN_CallbackReceivesMessage("fake_message")
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_callback_run_if_two_exclusive_queues_registered(self):
        """Two exclusive (auto-named) queues both receive the message."""
        await self.GIVEN_ConsumerRegistered(queue_name = None,
                                            exchange_name="fake_exch",
                                            exchange_type="direct",
                                            routing_keys=["fake_routing_key"],
                                            callback = self.fake_callback)
        await self.GIVEN_ConsumerRegistered(queue_name = None,
                                            exchange_name="fake_exch",
                                            exchange_type="direct",
                                            routing_keys=["fake_routing_key"],
                                            callback = self.fake_callback2)
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_message",
                                          routing_key="fake_routing_key")
        await self.WHEN_ProcessEventsOnce()
        self.THEN_CallbackReceivesMessage("fake_message")
        self.THEN_Callback2ReceivesMessage("fake_message")
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_bad_exchange(self):
        """Registering a consumer with a mismatched exchange type raises."""
        fake_callback = Mock()
        with self.assertRaises(mooq.BadExchange):
            await self.WHEN_ConsumerRegistered( queue_name="fake_consumer_queue",
                                                exchange_name="fake_exch",
                                                exchange_type="fanout",
                                                routing_keys=["fake_routing_key"],
                                                callback = fake_callback)
# @unittest.skip("skipped")
class DirectBadExchangeTest(common.TransportTestCase):
    """Re-registering an existing direct exchange with a different type
    must raise BadExchange."""
    async def async_setUp(self):
        await self.GIVEN_InMemoryBrokerStarted("localhost",1234)
        await self.GIVEN_ConnectionResourceCreated("localhost",1234,"in_memory")
        await self.GIVEN_ChannelResourceCreated()
        await self.GIVEN_ProducerRegistered(exchange_name="fake_exch",
                                            exchange_type="direct")
    async def async_tearDown(self):
        await self.CloseBroker()
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_bad_exchange(self):
        """Producer registration with a conflicting exchange type raises."""
        with self.assertRaises(mooq.BadExchange):
            await self.WHEN_ProducerRegistered(exchange_name="fake_exch",
                                               exchange_type="fanout")
class TopicTestCase(common.TransportTestCase):
    """Shared fixture for topic-exchange tests: in-memory broker,
    connection, channel and a topic-exchange producer."""
    async def async_setUp(self):
        await self.GIVEN_InMemoryBrokerStarted("localhost",1234)
        await self.GIVEN_ConnectionResourceCreated("localhost",1234,"in_memory")
        await self.GIVEN_ChannelResourceCreated()
        await self.GIVEN_ProducerRegistered(exchange_name="fake_exch",
                                            exchange_type="topic")
    async def async_tearDown(self):
        await self.CloseBroker()
# @unittest.skip("skipped")
class InMemoryTopicBallColourTest(TopicTestCase):
    """Topic routing with the patterns 'ball.*' and '*.red'."""
    async def async_setUp(self):
        await super().async_setUp()
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue",
                                            exchange_name="fake_exch",
                                            exchange_type="topic",
                                            routing_keys=["ball.*","*.red"],
                                            callback = self.fake_callback)
    async def async_tearDown(self):
        await super().async_tearDown()
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_ball_yellow(self):
        # Matches 'ball.*'.
        await self.GWT_BallColour_RunsCallback("ball.yellow")
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_ball_red(self):
        # Matches both patterns.
        await self.GWT_BallColour_RunsCallback("ball.red")
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_apple_red(self):
        # Matches '*.red'.
        await self.GWT_BallColour_RunsCallback("apple.red")
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_apple_yellow(self):
        # Matches neither pattern.
        await self.GWT_BallColour_DoesntRunCallback("apple.yellow")
    async def GWT_BallColour(self,*,routing_key,callback_run):
        """Given/When/Then helper: publish with routing_key, then assert
        whether the callback received the message."""
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_msg",
                                          routing_key=routing_key)
        await self.WHEN_ProcessEventsOnce()
        if callback_run:
            self.THEN_CallbackReceivesMessage("fake_msg")
        else:
            self.THEN_CallbackDoesntReceiveMessage()
    async def GWT_BallColour_RunsCallback(self,routing_key):
        await self.GWT_BallColour(routing_key=routing_key,callback_run=True)
    async def GWT_BallColour_DoesntRunCallback(self,routing_key):
        await self.GWT_BallColour(routing_key=routing_key,callback_run=False)
# @unittest.skip("skipped")
class InMemoryTopicAllWildcardsTest(TopicTestCase):
    """A '*.*' topic binding matches any two-part routing key."""
    async def async_setUp(self):
        await super().async_setUp()
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue",
                                            exchange_name="fake_exch",
                                            exchange_type="topic",
                                            routing_keys=["*.*"],
                                            callback = self.fake_callback)
    async def async_tearDown(self):
        await super().async_tearDown()
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_runs_callback(self):
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_msg",
                                          routing_key="absolutely.everything")
        await self.WHEN_ProcessEventsOnce()
        self.THEN_CallbackReceivesMessage("fake_msg")
# @unittest.skip("skipped")
class InMemoryTopicRunTwiceTest(TopicTestCase):
    """A message matching two bindings of the same consumer triggers the
    callback once per matching binding."""
    async def async_setUp(self):
        await super().async_setUp()
        self.fake_callback = AsyncMock()
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue",
                                            exchange_name="fake_exch",
                                            exchange_type="topic",
                                            routing_keys=["*.*","ball.red"],
                                            callback = self.fake_callback)
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_runs_callback_twice(self):
        # 'ball.red' matches both '*.*' and 'ball.red'.
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_msg",
                                          routing_key="ball.red")
        await self.WHEN_ProcessEventsNTimes(3)
        self.THEN_CallbackIsRun(self.fake_callback,num_times=2)
# @unittest.skip("skipped")
class FanoutTestCase(common.TransportTestCase):
    """A fanout exchange delivers every message to all bound queues,
    ignoring routing keys."""
    async def async_setUp(self):
        await super().async_setUp()
        await self.GIVEN_InMemoryBrokerStarted("localhost",1234)
        await self.GIVEN_ConnectionResourceCreated("localhost",1234,"in_memory")
        await self.GIVEN_ChannelResourceCreated()
        await self.GIVEN_ProducerRegistered(exchange_name="fake_exch",
                                            exchange_type="fanout")
    async def async_tearDown(self):
        await self.CloseBroker()
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_routes_to_all_consumer_queues(self):
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue1",
                                            exchange_name="fake_exch",
                                            exchange_type="fanout",
                                            routing_keys="",
                                            callback = self.fake_callback)
        await self.GIVEN_ConsumerRegistered(queue_name="fake_consumer_queue2",
                                            exchange_name="fake_exch",
                                            exchange_type="fanout",
                                            routing_keys="",
                                            callback = self.fake_callback2)
        await self.GIVEN_MessagePublished(exchange_name="fake_exch",
                                          msg="fake_msg",
                                          routing_key="")
        await self.WHEN_ProcessEventsNTimes(1)
        self.THEN_CallbackReceivesMessage("fake_msg")
        self.THEN_Callback2ReceivesMessage("fake_msg")
# @unittest.skip("skipped")
class ExchangeDoesntExistTest(common.TransportTestCase):
    """Publishing to a never-declared exchange must raise BadExchange."""
    async def async_setUp(self):
        await super().async_setUp()
        # Note: no producer is registered, so 'fake_exch' never exists.
        await self.GIVEN_InMemoryBrokerStarted("localhost",1234)
        await self.GIVEN_ConnectionResourceCreated("localhost",1234,"in_memory")
        await self.GIVEN_ChannelResourceCreated()
    async def async_tearDown(self):
        await self.CloseBroker()
    # @unittest.skip("skipped")
    @asyncio_test
    async def test_publish_to_exchange_that_doesnt_exist(self):
        with self.assertRaises(mooq.BadExchange):
            await self.WHEN_MessagePublished(exchange_name="fake_exch",
                                             msg="fake_message",
                                             routing_key="fake_routing_key")
if __name__ == '__main__':
    # Emit JUnit-style XML results (for CI consumption) into test-reports/.
    unittest.main(
        testRunner=xmlrunner.XMLTestRunner(output='test-reports'),
        # these make sure that some options that are not applicable
        # remain hidden from the help menu.
        failfast=False, buffer=False, catchbreak=False)
| |
#!/usr/bin/env python
"""Tests for host table in search view."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestHostTable(gui_test_lib.SearchClientTestBase):
  """Tests the main content view."""

  def setUp(self):
    super(TestHostTable, self).setUp()
    # Ten fake clients; individual tests address them by index.
    self.client_ids = self.SetupClients(10)

  def testUserLabelIsShownAsBootstrapSuccessLabel(self):
    """User-added labels render with the green 'label-success' style."""
    self.AddClientLabel(self.client_ids[0], self.token.username, u"foo")

    self.Open("/#/search?q=.")

    self.WaitUntil(
        self.IsVisible, "css=tr:contains('%s') "
        "span.label-success:contains('foo')" % self.client_ids[0])

  def testSystemLabelIsShownAsRegularBootstrapLabel(self):
    """System (GRR-owned) labels render without the success styling."""
    self.AddClientLabel(self.client_ids[0], u"GRR", u"bar")

    self.Open("/#/search?q=.")

    self.WaitUntil(
        self.IsVisible, "css=tr:contains('%s') "
        "span.label:not(.label-success):contains('bar')" % self.client_ids[0])

  def testLabelButtonIsDisabledByDefault(self):
    """'Add Labels' stays disabled while nothing is selected."""
    self.Open("/#/search?q=.")
    self.WaitUntil(self.IsVisible, "css=button[name=AddLabels][disabled]")

  def testLabelButtonIsEnabledWhenClientIsSelected(self):
    """Checking a client checkbox enables the 'Add Labels' button."""
    self.Open("/#/search?q=.")

    self.WaitUntil(self.IsVisible, "css=button[name=AddLabels][disabled]")
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.WaitUntilNot(self.IsVisible, "css=button[name=AddLabels][disabled]")

  def testAddClientsLabelsDialogShowsListOfSelectedClients(self):
    """The labels dialog lists exactly the clients that were checked."""
    self.Open("/#/search?q=.")

    # Select 3 clients and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[2])
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[6])
    self.Click("css=button[name=AddLabels]:not([disabled])")

    # Check that all 3 client ids are shown in the dialog.
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[0])
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[2])
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[6])

  def testLabelIsAppliedCorrectlyViaAddClientsLabelsDialog(self):
    """Applying a label via the dialog makes it appear in the host table."""
    self.Open("/#/search?q=.")

    # Select 1 client and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=button[name=AddLabels]:not([disabled])")

    # Type label name.
    self.Type("css=*[name=AddClientsLabelsDialog] input[name=labelBox]",
              "issue 42")

    # Click proceed and check that success message is displayed and that
    # proceed button is replaced with close button.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Label was successfully added")
    self.WaitUntilNot(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog] "
        "button[name=Proceed]")

    # Click on "Close" button and check that dialog has disappeared.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=*[name=AddClientsLabelsDialog]")

    # Check that label has appeared in the clients list.
    self.WaitUntil(
        self.IsVisible, "css=tr:contains('%s') "
        "span.label-success:contains('issue 42')" % self.client_ids[0])

  def testAppliedLabelBecomesSearchableImmediately(self):
    """Label add/remove updates the search index without delay."""
    self.Open("/#/search?q=.")

    # Select 2 clients and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[1])
    self.Click("css=button[name=AddLabels]:not([disabled])")

    # Type label name.
    self.Type("css=*[name=AddClientsLabelsDialog] input[name=labelBox]",
              "issue 42")

    # Click proceed and check that success message is displayed and that
    # proceed button is replaced with close button.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Label was successfully added")
    self.WaitUntilNot(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog] "
        "button[name=Proceed]")

    # Click on "Close" button and check that dialog has disappeared.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=*[name=AddClientsLabelsDialog]")

    # Search using the new label and check that the labeled clients are shown.
    self.Open("/#main=HostTable&q=label:\"issue 42\"")
    self.WaitUntil(self.IsTextPresent, "%s" % self.client_ids[0])
    self.WaitUntil(self.IsTextPresent, "%s" % self.client_ids[1])

    # Now we test if we can remove the label and if the search index is updated.
    # Select 1 client and click 'Remove Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=button[name=RemoveLabels]:not([disabled])")

    # The label should already be prefilled in the dropdown.
    self.WaitUntil(self.IsTextPresent, "issue 42")

    self.Click("css=*[name=RemoveClientsLabelsDialog] button[name=Proceed]")

    # Open client search with label and check that labeled client is not shown
    # anymore.
    self.Open("/#main=HostTable&q=label:\"issue 42\"")

    self.WaitUntil(self.IsTextPresent, self.client_ids[1])
    # This client must not be in the results anymore.
    self.assertFalse(self.IsTextPresent(self.client_ids[0]))

  def testSelectionIsPreservedWhenAddClientsLabelsDialogIsCancelled(self):
    """Cancelling the dialog keeps the client checkbox checked."""
    self.Open("/#/search?q=.")

    # Select 1 client and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=button[name=AddLabels]:not([disabled])")

    # Click on "Cancel" button and check that dialog has disappeared.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Cancel]")
    self.WaitUntilNot(self.IsVisible, "css=*[name=AddClientsLabelsDialog]")

    # Ensure that checkbox is still checked
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:checked" % self.client_ids[0])

  def testSelectionIsResetWhenLabelIsAppliedViaAddClientsLabelsDialog(self):
    """A successful label application clears the selection."""
    self.Open("/#/search?q=.")

    # Select 1 client and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=button[name=AddLabels]:not([disabled])")

    # Type label name, click on "Proceed" and "Close" buttons.
    self.Type("css=*[name=AddClientsLabelsDialog] input[name=labelBox]",
              "issue 42")
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Proceed]")
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Close]")

    # Ensure that checkbox is not checked anymore.
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[0])

  def testCheckAllCheckboxSelectsAllClients(self):
    """The header 'select all' checkbox toggles every row's checkbox."""
    self.Open("/#/search?q=.")

    self.WaitUntil(self.IsTextPresent, self.client_ids[0])

    # Check that checkboxes for certain clients are unchecked.
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[0])
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[3])
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[6])

    # Click on 'check all checkbox'
    self.Click("css=input.client-checkbox.select-all")

    # Check that checkboxes for certain clients are now checked.
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:checked" % self.client_ids[0])
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:checked" % self.client_ids[3])
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:checked" % self.client_ids[6])

    # Click once more on 'check all checkbox'.
    self.Click("css=input.client-checkbox.select-all")

    # Check that checkboxes for certain clients are now again unchecked.
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[0])
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[3])
    self.WaitUntil(
        self.IsVisible, "css=input.client-checkbox["
        "client_id='%s']:not(:checked)" % self.client_ids[6])

  def testClientsSelectedWithSelectAllAreShownInAddClientsLabelsDialog(self):
    """'Select all' selections propagate into the labels dialog."""
    self.Open("/#/search?q=.")

    self.WaitUntil(self.IsTextPresent, self.client_ids[0])

    # Click on 'check all checkbox'.
    self.Click("css=input.client-checkbox.select-all")

    # Click on 'Apply Label' button.
    self.Click("css=button[name=AddLabels]:not([disabled])")

    # Check that client ids are shown in the dialog.
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[0])
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[3])
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[6])
if __name__ == "__main__":
  # GRR GUI tests run through the absl app entry point.
  app.run(test_lib.main)
| |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder.openstack.common import log as logging
from cinder import utils
# oslo.config options shared by the v1/v2 volume API controllers.
api_common_opts = [
    cfg.IntOpt('osapi_max_limit',
               default=1000,
               help='The maximum number of items that a collection '
                    'resource returns in a single response'),
    cfg.StrOpt('osapi_volume_base_URL',
               default=None,
               help='Base URL that will be presented to users in links '
                    'to the OpenStack Volume API',
               deprecated_name='osapi_compute_link_prefix'),
]

CONF = cfg.CONF
CONF.register_opts(api_common_opts)

LOG = logging.getLogger(__name__)

# XML namespace for v1 volume API responses.
XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1'
# Regex that matches alphanumeric characters, periods, hyphens,
# colons and underscores:
# ^ assert position at start of the string
# [\w\.\-\:\_] match expression
# $ assert position at end of the string
VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE)


def validate_key_names(key_names_list):
    """Check that every entry of *key_names_list* matches the key-name regex.

    A valid key name consists solely of word characters, periods, hyphens,
    colons and underscores.  An empty list is considered valid.
    """
    return all(bool(VALID_KEY_NAME_REGEX.match(name))
               for name in key_names_list)
def get_pagination_params(request):
    """Return marker, limit tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either marker or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.
    """
    params = {}
    # Each supported query parameter has its own extractor/validator.
    for name, extract in (('limit', _get_limit_param),
                          ('marker', _get_marker_param)):
        if name in request.GET:
            params[name] = extract(request)
    return params
def _get_limit_param(request):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET['limit'])
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
                    GET variables. 'offset' is where to start in the list,
                    and 'limit' is the maximum number of items to return. If
                    'limit' is not specified, 0, or > max_limit, we default
                    to max_limit. Negative values for either offset or limit
                    will cause exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        msg = _('offset param must be an integer')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        msg = _('limit param must be an integer')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # Note: limit is validated before offset, preserving which error a
    # request with both values negative will see.
    if limit < 0:
        msg = _('limit param must be positive')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if offset < 0:
        msg = _('offset param must be positive')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # A limit of 0 means "use the default"; never exceed max_limit.
    effective_limit = min(max_limit, limit or max_limit)
    return items[offset:offset + effective_limit]
def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to the requested marker and limit."""
    params = get_pagination_params(request)

    limit = min(max_limit, params.get('limit', max_limit))
    marker = params.get('marker')

    start_index = 0
    if marker:
        start_index = -1
        for index, item in enumerate(items):
            # Flavors are matched on 'flavorid'; everything else on
            # 'id' with 'uuid' as a fallback.
            if 'flavorid' in item:
                found = item['flavorid'] == marker
            else:
                found = item['id'] == marker or item.get('uuid') == marker
            if found:
                start_index = index + 1
                break
        if start_index < 0:
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)

    return items[start_index:start_index + limit]
def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.cinder.com/v1.1/123'
    Returns: 'http://www.cinder.com/123'

    Given: 'http://www.cinder.com/v1.1'
    Returns: 'http://www.cinder.com'

    Raises ValueError when the href carries no version component.
    """
    split_url = urlparse.urlsplit(href)
    # Only the first path component can be the version; keep the rest intact.
    path_parts = split_url.path.split('/', 2)

    # NOTE: this should match vX.X or vX
    version_re = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    if version_re.match(path_parts[1]):
        del path_parts[1]

    stripped_path = '/'.join(path_parts)
    if stripped_path == split_url.path:
        msg = _('href %s does not contain version') % href
        LOG.debug(msg)
        raise ValueError(msg)

    rebuilt = list(split_url)
    rebuilt[2] = stripped_path
    return urlparse.urlunsplit(rebuilt)
def dict_to_query_str(params):
    """Serialize *params* into a 'k=v&k2=v2' query string.

    Values are stringified with str() and NOT URL-encoded.
    # TODO(throughnothing): we should just use urllib.urlencode instead of
    # this, but currently we don't work with urlencoded url's.
    """
    # dict.items() works on both Python 2 and 3; the previous
    # params.iteritems() was Python-2-only and raised AttributeError on 3.
    return '&'.join('='.join([str(key), str(val)])
                    for key, val in params.items())
class ViewBuilder(object):
    """Model API responses as dictionaries."""

    # Subclasses must override with the plural resource name
    # (e.g. "volumes"); used to build hrefs.
    _collection_name = None

    def _get_links(self, request, identifier):
        """Return the standard "self" and "bookmark" link dicts."""
        return [{"rel": "self",
                 "href": self._get_href_link(request, identifier), },
                {"rel": "bookmark",
                 "href": self._get_bookmark_link(request, identifier), }]

    def _get_next_link(self, request, identifier, collection_name):
        """Return href string with proper limit and marker params."""
        # Carry over the caller's query params, replacing only the marker.
        params = request.params.copy()
        params["marker"] = identifier
        prefix = self._update_link_prefix(request.application_url,
                                          CONF.osapi_volume_base_URL)
        url = os.path.join(prefix,
                           request.environ["cinder.context"].project_id,
                           collection_name)
        return "%s?%s" % (url, dict_to_query_str(params))

    def _get_href_link(self, request, identifier):
        """Return an href string pointing to this object."""
        prefix = self._update_link_prefix(request.application_url,
                                          CONF.osapi_volume_base_URL)
        return os.path.join(prefix,
                            request.environ["cinder.context"].project_id,
                            self._collection_name,
                            str(identifier))

    def _get_bookmark_link(self, request, identifier):
        """Create a URL that refers to a specific resource."""
        # Bookmark links are version-less, so strip the /vX.X prefix first.
        base_url = remove_version_from_href(request.application_url)
        base_url = self._update_link_prefix(base_url,
                                            CONF.osapi_volume_base_URL)
        return os.path.join(base_url,
                            request.environ["cinder.context"].project_id,
                            self._collection_name,
                            str(identifier))

    def _get_collection_links(self, request, items, collection_name,
                              id_key="uuid"):
        """Retrieve 'next' link, if applicable.

        The next link is included if:
        1) 'limit' param is specified and equals the number of volumes.
        2) 'limit' param is specified but it exceeds CONF.osapi_max_limit,
        in this case the number of volumes is CONF.osapi_max_limit.
        3) 'limit' param is NOT specified but the number of volumes is
        CONF.osapi_max_limit.
        :param request: API request
        :param items: List of collection items
        :param collection_name: Name of collection, used to generate the
        next link for a pagination query
        :param id_key: Attribute key used to retrieve the unique ID, used
        to generate the next link marker for a pagination query
        :returns links
        """
        links = []
        # A full page implies there may be more results to fetch.
        max_items = min(
            int(request.params.get("limit", CONF.osapi_max_limit)),
            CONF.osapi_max_limit)
        if max_items and max_items == len(items):
            last_item = items[-1]
            if id_key in last_item:
                last_item_id = last_item[id_key]
            else:
                last_item_id = last_item["id"]
            links.append({
                "rel": "next",
                "href": self._get_next_link(request, last_item_id,
                                            collection_name),
            })
        return links

    def _update_link_prefix(self, orig_url, prefix):
        """Swap scheme and netloc of *orig_url* for those of *prefix*."""
        if not prefix:
            return orig_url
        url_parts = list(urlparse.urlsplit(orig_url))
        prefix_parts = list(urlparse.urlsplit(prefix))
        # Replace only scheme and netloc; path/query/fragment are kept.
        url_parts[0:2] = prefix_parts[0:2]
        return urlparse.urlunsplit(url_parts)
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializes a full <metadata> XML document into a request body."""

    def deserialize(self, text):
        """Parse *text* and return {'body': {'metadata': {...}}}."""
        dom = utils.safe_minidom_parse_string(text)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializes a single <meta> XML element into a request body."""

    def deserialize(self, text):
        """Parse *text* and return {'body': {'meta': {...}}}."""
        dom = utils.safe_minidom_parse_string(text)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
    """XML deserializer for metadata create/update request bodies."""

    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request."""
        if metadata_node is None:
            return {}
        metadata = {}
        for meta_node in self.find_children_named(metadata_node, "meta"):
            key = meta_node.getAttribute("key")
            metadata[key] = self.extract_text(meta_node)
        return metadata

    def _extract_metadata_container(self, datastring):
        # Shared helper for create/update_all: both take a full
        # <metadata> container document.
        dom = utils.safe_minidom_parse_string(datastring)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}

    def create(self, datastring):
        return self._extract_metadata_container(datastring)

    def update_all(self, datastring):
        return self._extract_metadata_container(datastring)

    def update(self, datastring):
        # Single-item update: the document root is the <meta> element itself.
        dom = utils.safe_minidom_parse_string(datastring)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}
# Default XML namespace map used by the metadata serializer templates below.
metadata_nsmap = {None: xmlutil.XMLNS_V11}
class MetaItemTemplate(xmlutil.TemplateBuilder):
    """Builds the serializer template for a single <meta> element."""

    def construct(self):
        # Selector pulls the first (key, value) pair out of the 'meta' dict.
        sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
        root = xmlutil.TemplateElement('meta', selector=sel)
        root.set('key', 0)
        root.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
class MetadataTemplateElement(xmlutil.TemplateElement):
    """Template element that always renders, even for empty metadata."""

    def will_render(self, datum):
        return True
class MetadataTemplate(xmlutil.TemplateBuilder):
    """Builds the serializer template for a <metadata> container element."""

    def construct(self):
        root = MetadataTemplateElement('metadata', selector='metadata')
        # One <meta key="..."> child is emitted per dictionary item.
        elem = xmlutil.SubTemplateElement(root, 'meta',
                                          selector=xmlutil.get_items)
        elem.set('key', 0)
        elem.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
| |
#!/usr/bin/env python
# Copyright 2010-2015 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
r"""Read and write BGZF compressed files (the GZIP variant used in BAM).
The SAM/BAM file format (Sequence Alignment/Map) comes in a plain text
format (SAM), and a compressed binary format (BAM). The latter uses a
modified form of gzip compression called BGZF (Blocked GNU Zip Format),
which can be applied to any file format to provide compression with
efficient random access. BGZF is described together with the SAM/BAM
file format at http://samtools.sourceforge.net/SAM1.pdf
Please read the text below about 'virtual offsets' before using BGZF
files for random access.
Aim of this module
------------------
The Python gzip library can be used to read BGZF files, since for
decompression they are just (specialised) gzip files. What this
module aims to facilitate is random access to BGZF files (using the
'virtual offset' idea), and writing BGZF files (which means using
suitably sized gzip blocks and writing the extra 'BC' field in the
gzip headers). As in the gzip library, the zlib library is used
internally.
In addition to being required for random access to and writing of
BAM files, the BGZF format can also be used on other sequential
data (in the sense of one record after another), such as most of
the sequence data formats supported in Bio.SeqIO (like FASTA,
FASTQ, GenBank, etc) or large MAF alignments.
The Bio.SeqIO indexing functions use this module to support BGZF files.
Technical Introduction to BGZF
------------------------------
The gzip file format allows multiple compressed blocks, each of which
could be a stand alone gzip file. As an interesting bonus, this means
you can use Unix "cat" to combine two gzip files into one by
concatenating them. Also, each block can have one of several compression
levels (including uncompressed, which actually takes up a little bit
more space due to the gzip header).
What the BAM designers realised was that while random access to data
stored in traditional gzip files was slow, breaking the file into
gzip blocks would allow fast random access to each block. To access
a particular piece of the decompressed data, you just need to know
which block it starts in (the offset of the gzip block start), and
how far into the (decompressed) contents of the block you need to
read.
One problem with this is finding the gzip block sizes efficiently.
You can do it with a standard gzip file, but it requires every block
to be decompressed -- and that would be rather slow. Additionally
typical gzip files may use very large blocks.
All that differs in BGZF is that compressed size of each gzip block
is limited to 2^16 bytes, and an extra 'BC' field in the gzip header
records this size. Traditional decompression tools can ignore this,
and unzip the file just like any other gzip file.
The point of this is you can look at the first BGZF block, find out
how big it is from this 'BC' header, and thus seek immediately to
the second block, and so on.
The BAM indexing scheme records read positions using a 64 bit
'virtual offset', comprising coffset << 16 | uoffset, where coffset
is the file offset of the BGZF block containing the start of the read
(unsigned integer using up to 64-16 = 48 bits), and uoffset is the
offset within the (decompressed) block (unsigned 16 bit integer).
This limits you to BAM files where the last block starts by 2^48
bytes, or 256 petabytes, and the decompressed size of each block
is at most 2^16 bytes, or 64kb. Note that this matches the BGZF
'BC' field size which limits the compressed size of each block to
2^16 bytes, allowing for BAM files to use BGZF with no gzip
compression (useful for intermediate files in memory to reduced
CPU load).
Warning about namespaces
------------------------
It is considered a bad idea to use "from XXX import ``*``" in Python, because
it pollutes the namespace. This is a real issue with Bio.bgzf (and the
standard Python library gzip) because they contain a function called open
i.e. Suppose you do this:
>>> from Bio.bgzf import *
>>> print(open.__module__)
Bio.bgzf
Or,
>>> from gzip import *
>>> print(open.__module__)
gzip
Notice that the open function has been replaced. You can "fix" this if you
need to by importing the built-in open function:
>>> try:
... from __builtin__ import open # Python 2
... except ImportError:
... from builtins import open # Python 3
...
However, what we recommend instead is to use the explicit namespace, e.g.
>>> from Bio import bgzf
>>> print(bgzf.open.__module__)
Bio.bgzf
Example
-------
This is an ordinary GenBank file compressed using BGZF, so it can
be decompressed using gzip,
>>> import gzip
>>> handle = gzip.open("GenBank/NC_000932.gb.bgz", "r")
>>> assert 0 == handle.tell()
>>> line = handle.readline()
>>> assert 80 == handle.tell()
>>> line = handle.readline()
>>> assert 143 == handle.tell()
>>> data = handle.read(70000)
>>> assert 70143 == handle.tell()
>>> handle.close()
We can also access the file using the BGZF reader - but pay
attention to the file offsets which will be explained below:
>>> handle = BgzfReader("GenBank/NC_000932.gb.bgz", "r")
>>> assert 0 == handle.tell()
>>> print(handle.readline().rstrip())
LOCUS NC_000932 154478 bp DNA circular PLN 15-APR-2009
>>> assert 80 == handle.tell()
>>> print(handle.readline().rstrip())
DEFINITION Arabidopsis thaliana chloroplast, complete genome.
>>> assert 143 == handle.tell()
>>> data = handle.read(70000)
>>> assert 987828735 == handle.tell()
>>> print(handle.readline().rstrip())
f="GeneID:844718"
>>> print(handle.readline().rstrip())
CDS complement(join(84337..84771,85454..85843))
>>> offset = handle.seek(make_virtual_offset(55074, 126))
>>> print(handle.readline().rstrip())
68521 tatgtcattc gaaattgtat aaagacaact cctatttaat agagctattt gtgcaagtat
>>> handle.close()
Notice the handle's offset looks different as a BGZF file. This
brings us to the key point about BGZF, which is the block structure:
>>> handle = open("GenBank/NC_000932.gb.bgz", "rb")
>>> for values in BgzfBlocks(handle):
... print("Raw start %i, raw length %i; data start %i, data length %i" % values)
Raw start 0, raw length 15073; data start 0, data length 65536
Raw start 15073, raw length 17857; data start 65536, data length 65536
Raw start 32930, raw length 22144; data start 131072, data length 65536
Raw start 55074, raw length 22230; data start 196608, data length 65536
Raw start 77304, raw length 14939; data start 262144, data length 43478
Raw start 92243, raw length 28; data start 305622, data length 0
>>> handle.close()
In this example the first three blocks are 'full' and hold 65536 bytes
of uncompressed data. The fourth block isn't full and holds 43478 bytes.
Finally there is a special empty fifth block which takes 28 bytes on
disk and serves as an 'end of file' (EOF) marker. If this is missing,
it is possible your BGZF file is incomplete.
By reading ahead 70,000 bytes we moved into the second BGZF block,
and at that point the BGZF virtual offsets start to look different
to a simple offset into the decompressed data as exposed by the gzip
library.
As an example, consider seeking to the decompressed position 196734.
Since 196734 = 65536 + 65536 + 65536 + 126 = 65536*3 + 126, this
is equivalent to jumping the first three blocks (which in this
specific example are all size 65536 after decompression - which
does not always hold) and starting at byte 126 of the fourth block
(after decompression). For BGZF, we need to know the fourth block's
offset of 55074 and the offset within the block of 126 to get the
BGZF virtual offset.
>>> print(55074 << 16 | 126)
3609329790
>>> print(bgzf.make_virtual_offset(55074, 126))
3609329790
Thus for this BGZF file, decompressed position 196734 corresponds
to the virtual offset 3609329790. However, another BGZF file with
different contents would have compressed more or less efficiently,
so the compressed blocks would be different sizes. What this means
is the mapping between the uncompressed offset and the compressed
virtual offset depends on the BGZF file you are using.
If you are accessing a BGZF file via this module, just use the
handle.tell() method to note the virtual offset of a position you
may later want to return to using handle.seek().
The catch with BGZF virtual offsets is while they can be compared
(which offset comes first in the file), you cannot safely subtract
them to get the size of the data between them, nor add/subtract
a relative offset.
Of course you can parse this file with Bio.SeqIO using BgzfReader,
although there isn't any benefit over using gzip.open(...), unless
you want to index BGZF compressed sequence files:
>>> from Bio import SeqIO
>>> handle = BgzfReader("GenBank/NC_000932.gb.bgz")
>>> record = SeqIO.read(handle, "genbank")
>>> handle.close()
>>> print(record.id)
NC_000932.1
"""
from __future__ import print_function
import sys
import zlib
import struct
from Bio._py3k import _as_bytes, _as_string
from Bio._py3k import open as _open
# For Python 2 can just use: _bgzf_magic = '\x1f\x8b\x08\x04'
# but need to use bytes on Python 3
# First four bytes of every BGZF block: gzip magic plus the FEXTRA flag.
_bgzf_magic = b"\x1f\x8b\x08\x04"
# Fixed gzip header written at the start of each BGZF block; the trailing
# bytes are the extra-field length and the 'BC' (\x42\x43) subfield tag.
_bgzf_header = b"\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00\x42\x43\x02\x00"
# Special 28-byte empty block appended as the BGZF end-of-file marker.
_bgzf_eof = b"\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00BC\x02\x00\x1b\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00"
# 'BC' subfield identifier searched for when parsing block headers.
_bytes_BC = b"BC"
def open(filename, mode="rb"):
    """Open a BGZF file for reading, writing or appending.

    Any mode containing 'r' yields a BgzfReader; 'w' or 'a' yields a
    BgzfWriter; anything else raises ValueError.
    """
    normalized = mode.lower()
    if "r" in normalized:
        return BgzfReader(filename, mode)
    if "w" in normalized or "a" in normalized:
        return BgzfWriter(filename, mode)
    raise ValueError("Bad mode %r" % mode)
def make_virtual_offset(block_start_offset, within_block_offset):
    """Compute a BGZF virtual offset from block start and within block offsets.

    The BAM indexing scheme records read positions using a 64 bit
    'virtual offset', comprising in C terms:

    block_start_offset << 16 | within_block_offset

    Here block_start_offset is the file offset of the BGZF block
    start (unsigned integer using up to 64-16 = 48 bits), and
    within_block_offset within the (decompressed) block (unsigned
    16 bit integer).

    >>> make_virtual_offset(0, 0)
    0
    >>> make_virtual_offset(0, 1)
    1
    >>> 65536 == make_virtual_offset(1, 0)
    True
    >>> 6553600010 == make_virtual_offset(100000, 10)
    True
    >>> make_virtual_offset(0, 2**16)
    Traceback (most recent call last):
    ...
    ValueError: Require 0 <= within_block_offset < 2**16, got 65536
    >>> make_virtual_offset(2**48, 0)
    Traceback (most recent call last):
    ...
    ValueError: Require 0 <= block_start_offset < 2**48, got 281474976710656
    """
    # Range-check both components before packing them into 64 bits.
    if not 0 <= within_block_offset < 65536:
        raise ValueError("Require 0 <= within_block_offset < 2**16, got %i" %
                         within_block_offset)
    if not 0 <= block_start_offset < 281474976710656:
        raise ValueError("Require 0 <= block_start_offset < 2**48, got %i" %
                         block_start_offset)
    return (block_start_offset << 16) | within_block_offset
def split_virtual_offset(virtual_offset):
    """Divides a 64-bit BGZF virtual offset into block start & within block offsets.

    >>> (100000, 0) == split_virtual_offset(6553600000)
    True
    >>> (100000, 10) == split_virtual_offset(6553600010)
    True
    """
    # Top 48 bits are the block start; low 16 bits the within-block offset.
    return virtual_offset >> 16, virtual_offset & 0xFFFF
def BgzfBlocks(handle):
    """Low level debugging function to inspect BGZF blocks.

    Expects a BGZF compressed file opened in binary read mode using
    the builtin open function. Do not use a handle from this bgzf
    module or the gzip module's open function which will decompress
    the file.

    Returns the block start offset (see virtual offsets), the block
    length (add these for the start of the next block), and the
    decompressed length of the blocks contents (limited to 65536 in
    BGZF), as an iterator - one tuple per BGZF block.

    >>> try:
    ...     from __builtin__ import open  # Python 2
    ... except ImportError:
    ...     from builtins import open  # Python 3
    ...
    >>> handle = open("SamBam/ex1.bam", "rb")
    >>> for values in BgzfBlocks(handle):
    ...     print("Raw start %i, raw length %i; data start %i, data length %i" % values)
    Raw start 0, raw length 18239; data start 0, data length 65536
    Raw start 18239, raw length 18223; data start 65536, data length 65536
    Raw start 36462, raw length 18017; data start 131072, data length 65536
    Raw start 54479, raw length 17342; data start 196608, data length 65536
    Raw start 71821, raw length 17715; data start 262144, data length 65536
    Raw start 89536, raw length 17728; data start 327680, data length 65536
    Raw start 107264, raw length 17292; data start 393216, data length 63398
    Raw start 124556, raw length 28; data start 456614, data length 0
    >>> handle.close()

    Indirectly we can tell this file came from an old version of
    samtools because all the blocks (except the final one and the
    dummy empty EOF marker block) are 65536 bytes. Later versions
    avoid splitting a read between two blocks, and give the header
    its own block (useful to speed up replacing the header). You
    can see this in ex1_refresh.bam created using samtools 0.1.18:
    samtools view -b ex1.bam > ex1_refresh.bam

    >>> handle = open("SamBam/ex1_refresh.bam", "rb")
    >>> for values in BgzfBlocks(handle):
    ...     print("Raw start %i, raw length %i; data start %i, data length %i" % values)
    Raw start 0, raw length 53; data start 0, data length 38
    Raw start 53, raw length 18195; data start 38, data length 65434
    Raw start 18248, raw length 18190; data start 65472, data length 65409
    Raw start 36438, raw length 18004; data start 130881, data length 65483
    Raw start 54442, raw length 17353; data start 196364, data length 65519
    Raw start 71795, raw length 17708; data start 261883, data length 65411
    Raw start 89503, raw length 17709; data start 327294, data length 65466
    Raw start 107212, raw length 17390; data start 392760, data length 63854
    Raw start 124602, raw length 28; data start 456614, data length 0
    >>> handle.close()

    The above example has no embedded SAM header (thus the first block
    is very small at just 38 bytes of decompressed data), while the next
    example does (a larger block of 103 bytes). Notice that the rest of
    the blocks show the same sizes (they contain the same read data):

    >>> handle = open("SamBam/ex1_header.bam", "rb")
    >>> for values in BgzfBlocks(handle):
    ...     print("Raw start %i, raw length %i; data start %i, data length %i" % values)
    Raw start 0, raw length 104; data start 0, data length 103
    Raw start 104, raw length 18195; data start 103, data length 65434
    Raw start 18299, raw length 18190; data start 65537, data length 65409
    Raw start 36489, raw length 18004; data start 130946, data length 65483
    Raw start 54493, raw length 17353; data start 196429, data length 65519
    Raw start 71846, raw length 17708; data start 261948, data length 65411
    Raw start 89554, raw length 17709; data start 327359, data length 65466
    Raw start 107263, raw length 17390; data start 392825, data length 63854
    Raw start 124653, raw length 28; data start 456679, data length 0
    >>> handle.close()
    """
    data_start = 0
    while True:
        start_offset = handle.tell()
        try:
            block_length, data = _load_bgzf_block(handle)
        except StopIteration:
            # _load_bgzf_block signals end of file with StopIteration.
            # Under PEP 479 (Python 3.7+) a StopIteration escaping a
            # generator body is converted to RuntimeError, so we must
            # catch it and return cleanly instead of letting it escape.
            return
        data_len = len(data)
        yield start_offset, block_length, data_start, data_len
        data_start += data_len
def _load_bgzf_block(handle, text_mode=False):
    """Load the next BGZF block of compressed data (PRIVATE).

    Returns a tuple of the block's total size on disk and its
    decompressed contents (decoded to a string when text_mode is
    set). Raises StopIteration at end of file, ValueError on a bad
    magic number, and AssertionError on other malformed data.
    """
    magic = handle.read(4)
    if not magic:
        # End of file - signalled via StopIteration; callers embedding
        # this in a generator must catch it (PEP 479).
        raise StopIteration
    if magic != _bgzf_magic:
        raise ValueError(r"A BGZF (e.g. a BAM file) block should start with "
                         r"%r, not %r; handle.tell() now says %r"
                         % (_bgzf_magic, magic, handle.tell()))
    gzip_mod_time, gzip_extra_flags, gzip_os, extra_len = \
        struct.unpack("<LBBH", handle.read(8))
    # Scan the gzip extra subfields for the BC field, which records the
    # total BGZF block size minus one.
    block_size = None
    x_len = 0
    while x_len < extra_len:
        subfield_id = handle.read(2)
        subfield_len = struct.unpack("<H", handle.read(2))[0]  # uint16_t
        subfield_data = handle.read(subfield_len)
        x_len += subfield_len + 4
        if subfield_id == _bytes_BC:
            assert subfield_len == 2, "Wrong BC payload length"
            assert block_size is None, "Two BC subfields?"
            block_size = struct.unpack("<H", subfield_data)[0] + 1  # uint16_t
    assert x_len == extra_len, (x_len, extra_len)
    assert block_size is not None, "Missing BC, this isn't a BGZF file!"
    # Now comes the compressed data, CRC, and length of uncompressed data.
    # 19 = 12 byte gzip header already read + 8 byte CRC/size footer - 1.
    deflate_size = block_size - 1 - extra_len - 19
    d = zlib.decompressobj(-15)  # Negative window size means no headers
    data = d.decompress(handle.read(deflate_size)) + d.flush()
    expected_crc = handle.read(4)
    expected_size = struct.unpack("<I", handle.read(4))[0]
    assert expected_size == len(data), \
        "Decompressed to %i, not %i" % (len(data), expected_size)
    # Should cope with a mix of Python platforms... (on Python 2 some
    # platforms returned a signed value from zlib.crc32)
    crc = zlib.crc32(data)
    if crc < 0:
        crc = struct.pack("<i", crc)
    else:
        crc = struct.pack("<I", crc)
    assert expected_crc == crc, \
        "CRC is %s, not %s" % (crc, expected_crc)
    if text_mode:
        return block_size, _as_string(data)
    else:
        return block_size, data
class BgzfReader(object):
    r"""BGZF reader, acts like a read only handle but seek/tell differ.

    Let's use the BgzfBlocks function to have a peak at the BGZF blocks
    in an example BAM file,

    >>> try:
    ...     from __builtin__ import open  # Python 2
    ... except ImportError:
    ...     from builtins import open  # Python 3
    ...
    >>> handle = open("SamBam/ex1.bam", "rb")
    >>> for values in BgzfBlocks(handle):
    ...     print("Raw start %i, raw length %i; data start %i, data length %i" % values)
    Raw start 0, raw length 18239; data start 0, data length 65536
    Raw start 18239, raw length 18223; data start 65536, data length 65536
    Raw start 36462, raw length 18017; data start 131072, data length 65536
    Raw start 54479, raw length 17342; data start 196608, data length 65536
    Raw start 71821, raw length 17715; data start 262144, data length 65536
    Raw start 89536, raw length 17728; data start 327680, data length 65536
    Raw start 107264, raw length 17292; data start 393216, data length 63398
    Raw start 124556, raw length 28; data start 456614, data length 0
    >>> handle.close()

    Now let's see how to use this block information to jump to
    specific parts of the decompressed BAM file:

    >>> handle = BgzfReader("SamBam/ex1.bam", "rb")
    >>> assert 0 == handle.tell()
    >>> magic = handle.read(4)
    >>> assert 4 == handle.tell()

    So far nothing so strange, we got the magic marker used at the
    start of a decompressed BAM file, and the handle position makes
    sense. Now however, let's jump to the end of this block and 4
    bytes into the next block by reading 65536 bytes,

    >>> data = handle.read(65536)
    >>> len(data)
    65536
    >>> assert 1195311108 == handle.tell()

    Expecting 4 + 65536 = 65540 were you? Well this is a BGZF 64-bit
    virtual offset, which means:

    >>> split_virtual_offset(1195311108)
    (18239, 4)

    You should spot 18239 as the start of the second BGZF block, while
    the 4 is the offset into this block. See also make_virtual_offset,

    >>> make_virtual_offset(18239, 4)
    1195311108

    Let's jump back to almost the start of the file,

    >>> make_virtual_offset(0, 2)
    2
    >>> handle.seek(2)
    2
    >>> handle.close()

    Note that you can use the max_cache argument to limit the number of
    BGZF blocks cached in memory. The default is 100, and since each
    block can be up to 64kb, the default cache could take up to 6MB of
    RAM. The cache is not important for reading through the file in one
    pass, but is important for improving performance of random access.
    """

    def __init__(self, filename=None, mode="r", fileobj=None, max_cache=100):
        """Open for reading from a filename or a binary mode file object.

        mode "r" gives text output (decoded blocks), "rb" gives bytes.
        max_cache bounds how many decompressed blocks are kept in memory.
        """
        # TODO - Assuming we can seek, check for 28 bytes EOF empty block
        # and if missing warn about possible truncation (as in samtools)?
        if max_cache < 1:
            raise ValueError("Use max_cache with a minimum of 1")
        # Must open the BGZF file in binary mode, but we may want to
        # treat the contents as either text or binary (unicode or
        # bytes under Python 3)
        if fileobj:
            assert filename is None
            handle = fileobj
            assert "b" in handle.mode.lower()
        else:
            if "w" in mode.lower() or "a" in mode.lower():
                raise ValueError("Must use read mode (default), not write or append mode")
            handle = _open(filename, "rb")
        self._text = "b" not in mode.lower()
        if self._text:
            self._newline = "\n"
        else:
            self._newline = b"\n"
        self._handle = handle
        self.max_cache = max_cache
        # Cache of decompressed blocks, keyed by block start offset.
        self._buffers = {}
        self._block_start_offset = None
        self._block_raw_length = None
        self._load_block(handle.tell())

    def _load_block(self, start_offset=None):
        # Load (or fetch from cache) the block at start_offset; with no
        # argument, advance to the block following the current one.
        if start_offset is None:
            # If the file is being read sequentially, then _handle.tell()
            # should be pointing at the start of the next block.
            # However, if seek has been used, we can't assume that.
            start_offset = self._block_start_offset + self._block_raw_length
        if start_offset == self._block_start_offset:
            self._within_block_offset = 0
            return
        elif start_offset in self._buffers:
            # Already in cache
            self._buffer, self._block_raw_length = self._buffers[start_offset]
            self._within_block_offset = 0
            self._block_start_offset = start_offset
            return
        # Must hit the disk... first check cache limits,
        while len(self._buffers) >= self.max_cache:
            # TODO - Implement LRU cache removal?
            self._buffers.popitem()
        # Now load the block
        handle = self._handle
        if start_offset is not None:
            handle.seek(start_offset)
        self._block_start_offset = handle.tell()
        try:
            block_size, self._buffer = _load_bgzf_block(handle, self._text)
        except StopIteration:
            # EOF - represented as an empty block of raw length zero.
            block_size = 0
            if self._text:
                self._buffer = ""
            else:
                self._buffer = b""
        self._within_block_offset = 0
        self._block_raw_length = block_size
        # Finally save the block in our cache,
        self._buffers[self._block_start_offset] = self._buffer, block_size

    def tell(self):
        """Returns a 64-bit unsigned BGZF virtual offset."""
        if 0 < self._within_block_offset and \
                self._within_block_offset == len(self._buffer):
            # Special case where we're right at the end of a (non empty) block.
            # For non-maximal blocks could give two possible virtual offsets,
            # but for a maximal block can't use 65536 as the within block
            # offset. Therefore for consistency, use the next block and a
            # within block offset of zero.
            return (self._block_start_offset + self._block_raw_length) << 16
        else:
            # return make_virtual_offset(self._block_start_offset,
            #                            self._within_block_offset)
            # TODO - Include bounds checking as in make_virtual_offset?
            return (self._block_start_offset << 16) | self._within_block_offset

    def seek(self, virtual_offset):
        """Seek to a 64-bit unsigned BGZF virtual offset."""
        # Do this inline to avoid a function call,
        # start_offset, within_block = split_virtual_offset(virtual_offset)
        start_offset = virtual_offset >> 16
        within_block = virtual_offset ^ (start_offset << 16)
        if start_offset != self._block_start_offset:
            # Don't need to load the block if already there
            # (this avoids a function call since _load_block would do nothing)
            self._load_block(start_offset)
            assert start_offset == self._block_start_offset
        if within_block > len(self._buffer):
            # Seeking past the end of the block is only OK for an empty
            # (EOF) block with a zero within-block offset.
            if not (within_block == 0 and len(self._buffer) == 0):
                raise ValueError("Within offset %i but block size only %i"
                                 % (within_block, len(self._buffer)))
        self._within_block_offset = within_block
        # assert virtual_offset == self.tell(), \
        #     "Did seek to %i (%i, %i), but tell says %i (%i, %i)" \
        #     % (virtual_offset, start_offset, within_block,
        #        self.tell(), self._block_start_offset,
        #        self._within_block_offset)
        return virtual_offset

    def read(self, size=-1):
        # Read up to size bytes/characters; reading a whole file at once
        # is deliberately not supported.
        if size < 0:
            raise NotImplementedError("Don't be greedy, that could be massive!")
        elif size == 0:
            if self._text:
                return ""
            else:
                return b""
        elif self._within_block_offset + size <= len(self._buffer):
            # This may leave us right at the end of a block
            # (lazy loading, don't load the next block unless we have too)
            data = self._buffer[self._within_block_offset:self._within_block_offset + size]
            self._within_block_offset += size
            assert data  # Must be at least 1 byte
            return data
        else:
            data = self._buffer[self._within_block_offset:]
            size -= len(data)
            self._load_block()  # will reset offsets
            # TODO - Test with corner case of an empty block followed by
            # a non-empty block
            if not self._buffer:
                return data  # EOF
            elif size:
                # TODO - Avoid recursion
                return data + self.read(size)
            else:
                # Only needed the end of the last block
                return data

    def readline(self):
        # Return the next line, reading across block boundaries as needed.
        i = self._buffer.find(self._newline, self._within_block_offset)
        # Three cases to consider,
        if i == -1:
            # No newline, need to read in more data
            data = self._buffer[self._within_block_offset:]
            self._load_block()  # will reset offsets
            if not self._buffer:
                return data  # EOF
            else:
                # TODO - Avoid recursion
                return data + self.readline()
        elif i + 1 == len(self._buffer):
            # Found new line, but right at end of block (SPECIAL)
            data = self._buffer[self._within_block_offset:]
            # Must now load the next block to ensure tell() works
            self._load_block()  # will reset offsets
            assert data
            return data
        else:
            # Found new line, not at end of block (easy case, no IO)
            data = self._buffer[self._within_block_offset:i + 1]
            self._within_block_offset = i + 1
            # assert data.endswith(self._newline)
            return data

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    if sys.version_info[0] < 3:
        def next(self):
            """Python 2 style alias for Python 3 style __next__ method."""
            return self.__next__()

    def __iter__(self):
        return self

    def close(self):
        # Drop references to the buffers so they can be garbage collected.
        self._handle.close()
        self._buffer = None
        self._block_start_offset = None
        self._buffers = None

    def seekable(self):
        return True

    def isatty(self):
        return False

    def fileno(self):
        return self._handle.fileno()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
class BgzfWriter(object):
    """Write a BGZF compressed file, one block at a time.

    Data passed to write() is buffered internally and compressed into
    BGZF blocks holding up to 65536 bytes of uncompressed data each.
    Call close() to flush the final partial block and append the
    28 byte BGZF EOF marker block that samtools looks for.
    """

    def __init__(self, filename=None, mode="w", fileobj=None, compresslevel=6):
        """Initialise from a filename or an existing binary file object.

        Arguments:
         - filename - file to open for writing (mutually exclusive
           with fileobj).
         - mode - "w" to (over)write, "a" to append; a "b" suffix is
           accepted and noted but output is always binary.
         - fileobj - binary mode file-like object to write to instead.
         - compresslevel - zlib compression level, 1 (fast) to 9 (best).
        """
        if fileobj:
            assert filename is None
            handle = fileobj
        else:
            if "w" not in mode.lower() and "a" not in mode.lower():
                raise ValueError("Must use write or append mode, not %r" % mode)
            if "a" in mode.lower():
                handle = _open(filename, "ab")
            else:
                handle = _open(filename, "wb")
        self._text = "b" not in mode.lower()
        self._handle = handle
        self._buffer = b""
        self.compresslevel = compresslevel

    def _write_block(self, block):
        """Compress up to 64kb of data and write one BGZF block (PRIVATE)."""
        assert len(block) <= 65536
        # Giving a negative window bits means no gzip/zlib headers,
        # -15 used in samtools
        c = zlib.compressobj(self.compresslevel,
                             zlib.DEFLATED,
                             -15,
                             zlib.DEF_MEM_LEVEL,
                             0)
        compressed = c.compress(block) + c.flush()
        del c
        assert len(compressed) < 65536, \
            "TODO - Didn't compress enough, try less data in this block"
        bsize = struct.pack("<H", len(compressed) + 25)  # includes -1
        # Mask so the CRC always packs as an unsigned 32 bit value (on
        # some old Python 2 platforms zlib.crc32 returned a signed int).
        # Note: the original code first computed a sign-dependent packing
        # of the CRC and then immediately overwrote it with this masked
        # version; that dead first computation has been removed.
        crc = struct.pack("<I", zlib.crc32(block) & 0xffffffff)
        uncompressed_length = struct.pack("<I", len(block))
        # Fixed 16 bytes,
        # gzip magic bytes (4) mod time (4),
        # gzip flag (1), os (1), extra length which is six (2),
        # sub field which is BC (2), sub field length of two (2),
        # Variable data,
        # 2 bytes: block length as BC sub field (2)
        # X bytes: the data
        # 8 bytes: crc (4), uncompressed data length (4)
        data = _bgzf_header + bsize + compressed + crc + uncompressed_length
        self._handle.write(data)

    def write(self, data):
        """Buffer data, writing out full 64kb BGZF blocks as they fill up."""
        # TODO - Check bytes vs unicode
        data = _as_bytes(data)
        self._buffer += data
        # (The loop is simply skipped while the buffer is under 64kb,
        # so no separate fast path is needed.)
        while len(self._buffer) >= 65536:
            self._write_block(self._buffer[:65536])
            self._buffer = self._buffer[65536:]

    def flush(self):
        """Write all buffered data out as BGZF blocks and flush the handle."""
        # Slices of 65535 (not 65536) bytes here so that even data which
        # does not compress is certain to fit within one BGZF block.
        while len(self._buffer) >= 65536:
            self._write_block(self._buffer[:65535])
            self._buffer = self._buffer[65535:]
        self._write_block(self._buffer)
        self._buffer = b""
        self._handle.flush()

    def close(self):
        """Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.

        samtools will look for a magic EOF marker, just a 28 byte empty BGZF
        block, and if it is missing warns the BAM file may be truncated. In
        addition to samtools writing this block, so too does bgzip - so this
        implementation does too.
        """
        if self._buffer:
            self.flush()
        self._handle.write(_bgzf_eof)
        self._handle.flush()
        self._handle.close()

    def tell(self):
        """Returns a BGZF 64-bit virtual offset."""
        return make_virtual_offset(self._handle.tell(), len(self._buffer))

    def seekable(self):
        # Not seekable, but we do support tell...
        return False

    def isatty(self):
        return False

    def fileno(self):
        return self._handle.fileno()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
if __name__ == "__main__":
    # Command line usage: act as a BGZF compression filter, stdin -> stdout.
    if len(sys.argv) > 1:
        print("Call this with no arguments and pipe uncompressed data in on stdin")
        print("and it will produce BGZF compressed data on stdout. e.g.")
        print("")
        print("./bgzf.py < example.fastq > example.fastq.bgz")
        print("")
        print("The extension convention of *.bgz is to distinugish these from *.gz")
        print("used for standard gzipped files without the block structure of BGZF.")
        print("You can use the standard gunzip command to decompress BGZF files,")
        print("if it complains about the extension try something like this:")
        print("")
        print("cat example.fastq.bgz | gunzip > example.fastq")
        print("")
        print("See also the tool bgzip that comes with samtools")
        sys.exit(0)

    sys.stderr.write("Producing BGZF output from stdin...\n")
    # NOTE(review): on Python 3 writing bytes presumably requires
    # sys.stdout.buffer / sys.stdin.buffer here - confirm before use.
    w = BgzfWriter(fileobj=sys.stdout)
    while True:
        data = sys.stdin.read(65536)
        w.write(data)
        if not data:
            break
    # Calling close will write an empty BGZF block as the EOF marker:
    w.close()
    sys.stderr.write("BGZF data produced\n")
| |
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListInterDCInterface(command.Lister):
    """List Inter Data Center interfaces, optionally filtered."""

    # parsed_args attributes that map one-to-one onto API search options;
    # used by take_action to build the filter dict without repetition.
    _FILTER_KEYS = (
        'description', 'gw_vipv4', 'gw_vipv6', 'id', 'interdc_gw_id',
        'name', 'netmask', 'primary_ipv4', 'primary_ipv6',
        'secondary_ipv4', 'secondary_ipv6', 'status', 'vrid',
    )

    def get_parser(self, prog_name):
        """Add one optional filter argument per supported search option."""
        parser = super(ListInterDCInterface, self).get_parser(prog_name)
        parser.add_argument(
            '--description',
            metavar="description",
            help="filter by description")
        parser.add_argument(
            '--gw_vipv4',
            metavar="gw_vipv4",
            help="filter by gateway ipv4")
        parser.add_argument(
            '--gw_vipv6',
            metavar="gw_vipv6",
            help="filter by gateway ipv6")
        parser.add_argument(
            '--id',
            metavar="id",
            help="filter by id")
        parser.add_argument(
            '--interdc_gw_id',
            metavar="interdc_gw_id",
            # Fixed copy-paste: this filters by InterDC (not internet)
            # gateway id, matching the option name and the create command.
            help="filter by interdc gateway id")
        parser.add_argument(
            '--name',
            metavar="name",
            help="filter by name")
        parser.add_argument(
            '--netmask',
            metavar="netmask",
            help="filter by netmask")
        parser.add_argument(
            '--primary_ipv4',
            metavar="primary_ipv4",
            help="filter by primary ipv4")
        parser.add_argument(
            '--primary_ipv6',
            metavar="primary_ipv6",
            help="filter by primary ipv6")
        parser.add_argument(
            '--secondary_ipv4',
            metavar="secondary_ipv4",
            help="filter by secondary ipv4")
        parser.add_argument(
            '--secondary_ipv6',
            metavar="secondary_ipv6",
            help="filter by secondary ipv6")
        parser.add_argument(
            '--status',
            metavar="status",
            help="filter by status")
        parser.add_argument(
            '--vrid',
            metavar="vrid",
            help="filter by vrid")
        return parser

    def take_action(self, parsed_args):
        """Fetch matching interfaces and return rows for display."""
        network_client = self.app.client_manager.network
        columns = (
            'id',
            'name',
            'status',
        )
        column_headers = (
            'ID',
            'Name',
            'Status',
        )
        # Collect only the filters the user actually supplied (replaces
        # the previous 13 near-identical if-statements).
        search_opts = {}
        for key in self._FILTER_KEYS:
            value = getattr(parsed_args, key, None)
            if value:
                search_opts[key] = value
        data = [
            to_obj.InterDCInterface(idcif)
            for idcif in network_client.list_interdc_interfaces(
                **search_opts).get('interdc_interfaces')
        ]
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                ) for s in data))
class ShowInterDCInterface(command.ShowOne):
    """Show details of a single Inter Data Center interface."""

    def get_parser(self, prog_name):
        """Require the interface ID as a positional argument."""
        parser = super(ShowInterDCInterface, self).get_parser(prog_name)
        parser.add_argument(
            'interdc_interface_id',
            metavar="INTERDC_INTERFACE_ID",
            help="ID of Inter Data Center Interface to show."
        )
        return parser

    def take_action(self, parsed_args):
        """Fetch the interface and return its properties for display."""
        client = self.app.client_manager.network
        response = client.show_interdc_interface(
            parsed_args.interdc_interface_id)
        record = response.get('interdc_interface')
        columns = utils.get_columns(record)
        row = utils.get_item_properties(
            to_obj.InterDCInterface(record), columns,)
        return columns, row
class CreateInterDCInterface(command.ShowOne):
    """Create a new Inter Data Center interface."""

    def get_parser(self, prog_name):
        parser = super(CreateInterDCInterface, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<string>',
            help='Name of InterDC interface to create.')
        parser.add_argument(
            '--description',
            metavar='<string>',
            help='Description of InterDC interface to create.')
        parser.add_argument(
            '--interdc_gw_id',
            metavar='INTERDC_GATEWAY_ID',
            required=True,
            help='InterDC Gateway ID of Gateway interface to create.')
        parser.add_argument(
            '--netmask',
            metavar='NETMASK',
            type=int,
            required=True,
            help='Netmask of Gateway interface to create.')
        parser.add_argument(
            '--primary_ipv4',
            metavar='<ipv4>',
            required=True,
            help='Primary IPv4 of Gateway interface to create.')
        parser.add_argument(
            '--secondary_ipv4',
            metavar='<ipv4>',
            required=True,
            help='Secondary IPv4 of Gateway interface to create.')
        parser.add_argument(
            '--gw_vipv4',
            metavar='<ipv4>',
            required=True,
            # Fixed copy-paste: this is the gateway virtual IP, not the
            # secondary IP (duplicated help text from --secondary_ipv4).
            help='Gateway virtual IPv4 of Gateway interface to create.')
        parser.add_argument(
            '--vrid',
            metavar='VRID',
            type=int,
            required=True,
            help='VRRP ID of InterDC interface to create.')
        parser.add_argument(
            '--primary_ipv6',
            metavar='<ipv6>',
            help='Primary IPv6 of Gateway interface to create.')
        parser.add_argument(
            '--secondary_ipv6',
            metavar='<ipv6>',
            help='Secondary IPv6 of Gateway interface to create.')
        parser.add_argument(
            '--gw_vipv6',
            metavar='<ipv6>',
            # Fixed copy-paste as for --gw_vipv4 above.
            help='Gateway virtual IPv6 of Gateway interface to create.')
        return parser

    def take_action(self, parsed_args):
        """Build the request body from parsed args and create the interface."""
        network_client = self.app.client_manager.network
        body = {'interdc_interface': {}}
        utils.update_dict(
            parsed_args,
            body['interdc_interface'],
            ['name', 'description', 'netmask',
             'vrid', 'interdc_gw_id',
             'primary_ipv4', 'secondary_ipv4', 'gw_vipv4',
             'primary_ipv6', 'secondary_ipv6', 'gw_vipv6'])
        dic = network_client.create_interdc_interface(body).get('interdc_interface')
        columns = utils.get_columns(dic)
        obj = to_obj.InterDCInterface(dic)
        data = utils.get_item_properties(
            obj, columns,)
        return columns, data
class SetInterDCInterface(command.ShowOne):
    """Update name and/or description of an Inter Data Center interface."""

    def get_parser(self, prog_name):
        """Require the interface ID; name and description are optional."""
        parser = super(SetInterDCInterface, self).get_parser(prog_name)
        parser.add_argument(
            'interdc_interface_id',
            metavar='<string>',
            help='ID of InterDC Interface to update.')
        parser.add_argument(
            '--name',
            metavar='<string>',
            help='Name of InterDC interface to update.')
        parser.add_argument(
            '--description',
            metavar='<string>',
            help='Description of InterDC interface to update.')
        return parser

    def take_action(self, parsed_args):
        """Send the update request and return the refreshed properties."""
        client = self.app.client_manager.network
        body = {'interdc_interface': {}}
        # Copy only the updatable fields the user supplied.
        utils.update_dict(
            parsed_args,
            body['interdc_interface'],
            ['name', 'description'])
        record = client.update_interdc_interface(
            parsed_args.interdc_interface_id,
            body=body).get('interdc_interface')
        columns = utils.get_columns(record)
        row = utils.get_item_properties(
            to_obj.InterDCInterface(record), columns,)
        return columns, row
class DeleteInterDCInterface(command.Command):
    """Delete one or more Inter Data Center interfaces."""

    def get_parser(self, prog_name):
        """Accept one or more interface IDs as positional arguments."""
        parser = super(DeleteInterDCInterface, self).get_parser(prog_name)
        parser.add_argument(
            'interdc_interface_id',
            metavar="<uuid>",
            nargs="+",
            help="ID(s) of InterDC Interface to delete."
        )
        return parser

    def take_action(self, parsed_args):
        """Issue one delete request per supplied interface ID."""
        client = self.app.client_manager.network
        for interface_id in parsed_args.interdc_interface_id:
            client.delete_interdc_interface(interface_id)
| |
# Copyright (c) 2016, MapR Technologies
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Share driver for MapR-FS distributed file system.
"""
import math
import os
from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
from manila import context
from manila import exception
from manila.i18n import _
from manila.share import api
from manila.share import driver
from manila.share.drivers.maprfs import driver_util as mapru
LOG = log.getLogger(__name__)
# Configuration options for the MapR-FS native share driver; registered
# globally below so they can be set per backend in manila.conf.
maprfs_native_share_opts = [
    cfg.ListOpt('maprfs_clinode_ip',
                help='The list of IPs or hostnames of nodes where mapr-core '
                     'is installed.'),
    cfg.PortOpt('maprfs_ssh_port',
                default=22,
                help='CLDB node SSH port.'),
    cfg.StrOpt('maprfs_ssh_name',
               default="mapr",
               help='Cluster admin user ssh login name.'),
    cfg.StrOpt('maprfs_ssh_pw',
               help='Cluster node SSH login password, '
                    'This parameter is not necessary, if '
                    '\'maprfs_ssh_private_key\' is configured.'),
    cfg.StrOpt('maprfs_ssh_private_key',
               help='Path to SSH private '
                    'key for login.'),
    cfg.StrOpt('maprfs_base_volume_dir',
               default='/',
               help='Path in MapRFS where share volumes must be created.'),
    cfg.ListOpt('maprfs_zookeeper_ip',
                help='The list of IPs or hostnames of ZooKeeper nodes.'),
    cfg.ListOpt('maprfs_cldb_ip',
                help='The list of IPs or hostnames of CLDB nodes.'),
    cfg.BoolOpt('maprfs_rename_managed_volume',
                default=True,
                help='Specify whether existing volume should be renamed when'
                     ' start managing.'),
]

CONF = cfg.CONF
CONF.register_opts(maprfs_native_share_opts)
class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""MapR-FS Share Driver.
Executes commands relating to shares.
driver_handles_share_servers must be False because this driver does not
support creating or managing virtual storage servers (share servers)
API version history:
1.0 - Initial Version
"""
    def __init__(self, *args, **kwargs):
        """Initialize driver state from the backend configuration.

        driver_handles_share_servers is hard-coded to False because this
        driver does not create or manage share servers.
        """
        super(MapRFSNativeShareDriver, self).__init__(False, *args, **kwargs)
        self.configuration.append_config_values(maprfs_native_share_opts)
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or 'MapR-FS-Native'
        self._base_volume_dir = self.configuration.safe_get(
            'maprfs_base_volume_dir') or '/'
        # The CLI/SSH helper is created later in do_setup().
        self._maprfs_util = None
        self._maprfs_base_path = "maprfs://"
        self.cldb_ip = self.configuration.maprfs_cldb_ip or []
        self.zookeeper_ip = self.configuration.maprfs_zookeeper_ip or []
        self.rename_volume = self.configuration.maprfs_rename_managed_volume
        self.api = api.API()
    def do_setup(self, context):
        """Do initialization while the share driver starts."""
        super(MapRFSNativeShareDriver, self).do_setup(context)
        # NOTE(review): presumably picks a CLI helper matching the
        # installed MapR core version - confirm against driver_util.
        self._maprfs_util = mapru.get_version_handler(self.configuration)
def _share_dir(self, share_name):
return os.path.join(self._base_volume_dir, share_name)
    def _volume_name(self, share_name):
        # The backing MapR-FS volume is simply named after the share.
        return share_name
    def _get_share_path(self, share):
        # The stored export location string is used directly as the path.
        return share['export_location']
def _get_snapshot_path(self, snapshot):
share_dir = snapshot['share_instance']['export_location'].split(
' ')[0][len(self._maprfs_base_path):]
return os.path.join(share_dir, '.snapshot',
snapshot['provider_location'] or snapshot['name'])
def _get_volume_name(self, context, share):
metadata = self.api.get_share_metadata(context,
{'id': share['share_id']})
return metadata.get('_name', self._volume_name(share['name']))
def _get_share_export_locations(self, share, path=None):
"""Return share path on storage provider."""
cluster_name = self._maprfs_util.get_cluster_name()
path = '%(path)s -C %(cldb)s -Z %(zookeeper)s -N %(name)s' % {
'path': self._maprfs_base_path + (
path or self._share_dir(share['name'])),
'cldb': ' '.join(self.cldb_ip),
'zookeeper': ' '.join(self.zookeeper_ip),
'name': cluster_name
}
export_list = [{
"path": path,
"is_admin_only": False,
"metadata": {
"cldb": ','.join(self.cldb_ip),
"zookeeper": ','.join(self.zookeeper_ip),
"cluster-name": cluster_name,
},
}]
return export_list
    def _create_share(self, share, metadata, context):
        """Create the MapR-FS volume backing a share (PRIVATE).

        Raises MapRFSException for a non-maprfs protocol or if volume
        creation fails on the cluster.
        """
        if share['share_proto'].lower() != 'maprfs':
            msg = _('Only MapRFS protocol supported!')
            LOG.error(msg)
            raise exception.MapRFSException(msg=msg)
        # Metadata keys prefixed with '_' are volume creation options
        # (e.g. _path, _name); strip the prefix before passing them on.
        options = {k[1:]: v for k, v in metadata.items() if k[0] == '_'}
        share_dir = options.pop('path', self._share_dir(share['name']))
        volume_name = options.pop('name', self._volume_name(share['name']))
        try:
            self._maprfs_util.create_volume(volume_name, share_dir,
                                            share['size'],
                                            **options)
            # posix permissions should be 777, ACEs are used as a restriction
            self._maprfs_util.maprfs_chmod(share_dir, '777')
        except exception.ProcessExecutionError:
            # NOTE(review): '_name' is set to the sentinel 'error' on
            # failure, presumably so later lookups don't find a
            # half-created volume - confirm intent.
            self.api.update_share_metadata(context,
                                           {'id': share['share_id']},
                                           {'_name': 'error'})
            msg = (_('Failed to create volume in MapR-FS for the '
                     'share %(share_name)s.') % {'share_name': share['name']})
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
    def _set_share_size(self, share, size):
        """Set the space quota of the share's backing volume (PRIVATE).

        Raises ShareShrinkingPossibleDataLoss when shrinking below the
        space already used, or MapRFSException on a CLI failure.
        """
        volume_name = self._get_volume_name(context.get_admin_context(), share)
        try:
            if share['size'] > size:
                # Shrinking: refuse if used space already exceeds the new
                # quota. The units.Ki factor suggests 'totalused' is in a
                # smaller unit than 'size' - TODO confirm exact units.
                info = self._maprfs_util.get_volume_info(volume_name)
                used = info['totalused']
                if int(used) >= int(size) * units.Ki:
                    raise exception.ShareShrinkingPossibleDataLoss(
                        share_id=share['id'])
            self._maprfs_util.set_volume_size(volume_name, size)
        except exception.ProcessExecutionError:
            msg = (_('Failed to set space quota for the share %(share_name)s.')
                   % {'share_name': share['name']})
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
def get_network_allocations_number(self):
return 0
def create_share(self, context, share, share_server=None):
"""Create a MapRFS volume which acts as a share."""
metadata = self.api.get_share_metadata(context,
{'id': share['share_id']})
self._create_share(share, metadata, context)
return self._get_share_export_locations(share,
path=metadata.get('_path'))
    def ensure_share(self, context, share, share_server=None):
        """Update the export location if it has changed.

        Returns a new export location list when the volume's mount
        directory no longer matches the stored location, or None
        (implicitly) when nothing changed. Raises ShareResourceNotFound
        if the backing volume is missing.
        """
        volume_name = self._get_volume_name(context, share)
        if self._maprfs_util.volume_exists(volume_name):
            info = self._maprfs_util.get_volume_info(volume_name)
            path = info['mountdir']
            old_location = share['export_locations'][0]
            new_location = self._get_share_export_locations(
                share, path=path)
            if new_location[0]['path'] != old_location['path']:
                return new_location
        else:
            raise exception.ShareResourceNotFound(share_id=share['share_id'])
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        """Creates a share from snapshot.

        Creates a fresh volume for the new share, copies the snapshot
        contents into it, and returns the export locations.  Fails if the
        snapshot's source share belongs to a different tenant user.
        """
        metadata = self.api.get_share_metadata(context,
                                               {'id': share['share_id']})
        # tenant user recorded on the snapshot's source share, if any
        sn_share_tenant = self.api.get_share_metadata(context, {
            'id': snapshot['share_instance']['share_id']}).get('_tenantuser')
        if sn_share_tenant and sn_share_tenant != metadata.get('_tenantuser'):
            msg = (
                _('Cannot create share from snapshot %(snapshot_name)s '
                  'with name %(share_name)s. Error: Tenant user should not '
                  'differ from tenant of the source snapshot.') %
                {'snapshot_name': snapshot['name'],
                 'share_name': share['name']})
            LOG.error(msg)
            raise exception.MapRFSException(msg=msg)
        share_dir = metadata.get('_path', self._share_dir(share['name']))
        snapshot_path = self._get_snapshot_path(snapshot)
        # the destination volume must exist before the copy
        self._create_share(share, metadata, context)
        try:
            # only copy when the snapshot actually contains data
            if self._maprfs_util.dir_not_empty(snapshot_path):
                self._maprfs_util.maprfs_cp(snapshot_path + '/*', share_dir)
        except exception.ProcessExecutionError:
            msg = (
                _('Failed to create share from snapshot %(snapshot_name)s '
                  'with name %(share_name)s.') % {
                    'snapshot_name': snapshot['name'],
                    'share_name': share['name']})
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
        return self._get_share_export_locations(share,
                                                path=metadata.get('_path'))
def create_snapshot(self, context, snapshot, share_server=None):
"""Creates a snapshot."""
volume_name = self._get_volume_name(context, snapshot['share'])
snapshot_name = snapshot['name']
try:
self._maprfs_util.create_snapshot(snapshot_name, volume_name)
return {'provider_location': snapshot_name}
except exception.ProcessExecutionError:
msg = (
_('Failed to create snapshot %(snapshot_name)s for the share '
'%(share_name)s.') % {'snapshot_name': snapshot_name,
'share_name': snapshot['share_name']})
LOG.exception(msg)
raise exception.MapRFSException(msg=msg)
def delete_share(self, context, share, share_server=None):
"""Deletes share storage."""
volume_name = self._get_volume_name(context, share)
if volume_name == "error":
LOG.info("Skipping deleting share with name %s, as it does not"
" exist on the backend", share['name'])
return
try:
self._maprfs_util.delete_volume(volume_name)
except exception.ProcessExecutionError:
msg = (_('Failed to delete share %(share_name)s.') %
{'share_name': share['name']})
LOG.exception(msg)
raise exception.MapRFSException(msg=msg)
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot."""
snapshot_name = snapshot['provider_location'] or snapshot['name']
volume_name = self._get_volume_name(context, snapshot['share'])
try:
self._maprfs_util.delete_snapshot(snapshot_name, volume_name)
except exception.ProcessExecutionError:
msg = (_('Failed to delete snapshot %(snapshot_name)s.') %
{'snapshot_name': snapshot['name']})
LOG.exception(msg)
raise exception.MapRFSException(msg=msg)
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Update access rules for given share.

        Only the 'user' access type is supported.  When add/delete deltas
        are provided they are applied incrementally; otherwise the volume
        ACEs are reset to exactly ``access_rules``.
        """
        for access in access_rules:
            if access['access_type'].lower() != 'user':
                msg = _("Only 'user' access type allowed!")
                LOG.error(msg)
                raise exception.InvalidShareAccess(reason=msg)
        volume_name = self._get_volume_name(context, share)
        try:
            # 'update_access' is called before share is removed, so this
            # method shouldn`t raise exception if share does
            # not exist actually
            if not self._maprfs_util.volume_exists(volume_name):
                LOG.warning('Can not get share %s.', share['name'])
                return
            # check update
            if add_rules or delete_rules:
                # incremental change: drop stale rules, then add new ones
                self._maprfs_util.remove_volume_ace_rules(volume_name,
                                                          delete_rules)
                self._maprfs_util.add_volume_ace_rules(volume_name, add_rules)
            else:
                # full resync of the access list
                self._maprfs_util.set_volume_ace(volume_name, access_rules)
        except exception.ProcessExecutionError:
            msg = (_('Failed to update access for share %(name)s.') %
                   {'name': share['name']})
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
def extend_share(self, share, new_size, share_server=None):
"""Extend share storage."""
self._set_share_size(share, new_size)
def shrink_share(self, share, new_size, share_server=None):
"""Shrink share storage."""
self._set_share_size(share, new_size)
def _check_maprfs_state(self):
try:
return self._maprfs_util.check_state()
except exception.ProcessExecutionError:
msg = _('Failed to check MapRFS state.')
LOG.exception(msg)
raise exception.MapRFSException(msg=msg)
    def check_for_setup_error(self):
        """Return an error if the prerequisites are not met.

        Validates the configured cluster nodes, the cluster health state,
        and the existence of the base volume directory in MapR-FS.
        """
        # a list of cluster client nodes is mandatory
        if not self.configuration.maprfs_clinode_ip:
            msg = _(
                'MapR cluster has not been specified in the configuration. '
                'Add the ip or list of ip of nodes with mapr-core installed '
                'in the "maprfs_clinode_ip" configuration parameter.')
            LOG.error(msg)
            raise exception.MapRFSException(msg=msg)
        # CLDB/Zookeeper node lists are only warned about, not required
        if not self.configuration.maprfs_cldb_ip:
            LOG.warning('CLDB nodes are not specified!')
        if not self.configuration.maprfs_zookeeper_ip:
            LOG.warning('Zookeeper nodes are not specified!')
        if not self._check_maprfs_state():
            msg = _('MapR-FS is not in healthy state.')
            LOG.error(msg)
            raise exception.MapRFSException(msg=msg)
        # verify the configured base volume directory exists in MapR-FS
        try:
            self._maprfs_util.maprfs_ls(
                os.path.join(self._base_volume_dir, ''))
        except exception.ProcessExecutionError:
            msg = _('Invalid "maprfs_base_volume_name". No such directory.')
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
    def manage_existing(self, share, driver_options):
        """Bring an existing MapR-FS volume under manila management.

        Returns the share size and export locations.  The 'rename' driver
        option controls whether the backing volume is renamed to the
        manila share name; otherwise the original name is stored in the
        share metadata.
        """
        try:
            # retrieve share path from export location, maprfs:// prefix and
            # metadata (-C -Z -N) should be casted away
            share_path = share['export_location'].split(
            )[0][len(self._maprfs_base_path):]
            info = self._maprfs_util.get_volume_info_by_path(
                share_path, check_if_exists=True)
            if not info:
                msg = _("Share %s not found") % share[
                    'export_location']
                LOG.error(msg)
                raise exception.ManageInvalidShare(reason=msg)
            # quota/totalused are divided by Ki here — presumably reported
            # in MiB and converted to GiB; TODO confirm against maprcli
            size = math.ceil(float(info['quota']) / units.Ki)
            used = math.ceil(float(info['totalused']) / units.Ki)
            volume_name = info['volumename']
            # the driver option overrides the configured rename behavior
            should_rename = self.rename_volume
            rename_option = driver_options.get('rename')
            if rename_option:
                should_rename = strutils.bool_from_string(rename_option)
            if should_rename:
                self._maprfs_util.rename_volume(volume_name, share['name'])
            else:
                # remember the original backend volume name in metadata
                self.api.update_share_metadata(context.get_admin_context(),
                                               {'id': share['share_id']},
                                               {'_name': volume_name})
            location = self._get_share_export_locations(share, path=share_path)
            if size == 0:
                # no quota set: fall back to current usage as the size
                size = used
                msg = (
                    'Share %s has no size quota. Total used value will be'
                    ' used as share size')
                LOG.warning(msg, share['name'])
            return {'size': size, 'export_locations': location}
        except (ValueError, KeyError, exception.ProcessExecutionError):
            msg = _('Failed to manage share.')
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
    def manage_existing_snapshot(self, snapshot, driver_options):
        """Bring an existing MapR-FS snapshot under manila management.

        Verifies the snapshot exists on the share's volume and returns its
        size computed from disk usage, rounded up to whole GiB.
        """
        volume_name = self._get_volume_name(context.get_admin_context(),
                                            snapshot['share'])
        snapshot_path = self._get_snapshot_path(snapshot)
        try:
            snapshot_list = self._maprfs_util.get_snapshot_list(
                volume_name=volume_name)
            snapshot_name = snapshot['provider_location']
            if snapshot_name not in snapshot_list:
                msg = _("Snapshot %s not found") % snapshot_name
                LOG.error(msg)
                raise exception.ManageInvalidShareSnapshot(reason=msg)
            # du result divided by Gi — presumably bytes to GiB
            size = math.ceil(float(self._maprfs_util.maprfs_du(
                snapshot_path)) / units.Gi)
            return {'size': size}
        except exception.ProcessExecutionError:
            msg = _("Manage existing share snapshot failed.")
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
def _update_share_stats(self):
"""Retrieves stats info of share directories group."""
try:
total, free = self._maprfs_util.fs_capacity()
except exception.ProcessExecutionError:
msg = _('Failed to check MapRFS capacity info.')
LOG.exception(msg)
raise exception.MapRFSException(msg=msg)
total_capacity_gb = int(math.ceil(float(total) / units.Gi))
free_capacity_gb = int(math.floor(float(free) / units.Gi))
data = {
'share_backend_name': self.backend_name,
'storage_protocol': 'MAPRFS',
'driver_handles_share_servers': self.driver_handles_share_servers,
'vendor_name': 'MapR Technologies',
'driver_version': '1.0',
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
}
super(MapRFSNativeShareDriver, self)._update_share_stats(data)
| |
"""
A simple logger.
Simple usage:
import logger
log = logger.Log('my_log.log', logger.Log.DEBUG)
log('A line in the log at the default level (DEBUG)') # simple
log('A log line at WARN level', logger.Log.WARN) # hard to use
log.info('log line issued at INFO level') # best if using level
Based on the 'borg' recipe from [http://code.activestate.com/recipes/66531/].
Log levels styled on the Python 'logging' module.
Log output includes the module and line # of the log() call.
"""
import os
import sys
import datetime
import traceback
################################################################################
# A simple (?) logger.
################################################################################
class Log(object):
    """A shared-state ('borg') logger writing timestamped lines to one file.

    All instances share the same __dict__, so the first constructed
    instance configures the log file and level for every later one.
    Levels are styled on the stdlib 'logging' module.
    """

    __shared_state = {}       # this __dict__ shared by ALL instances

    # the predefined logging levels
    CRITICAL = 50
    ERROR = 40
    WARN = 30
    INFO = 20
    DEBUG = 10
    NOTSET = 0

    MAXLEVEL = CRITICAL
    MINLEVEL = NOTSET

    # dict to convert logging level back to symbolic name
    _level_num_to_name = {
        NOTSET: 'NOTSET',
        DEBUG: 'DEBUG',
        INFO: 'INFO',
        WARN: 'WARN',
        ERROR: 'ERROR',
        CRITICAL: 'CRITICAL',
    }

    # default maximum length of filename (enforced)
    DefaultMaxFname = 15

    def __init__(self, logfile=None, level=NOTSET, append=False,
                 max_fname=DefaultMaxFname):
        """Initialise the logging object.

        logfile    the path to the log file
        level      logging level - don't log below this level
        append     True if log file is appended to
        max_fname  maximum width of the caller-filename column
        """
        # make sure we have same state as all other log objects
        self.__dict__ = Log.__shared_state

        # set some initial state
        self.max_fname = max_fname
        self.sym_level = 'NOTSET'       # updated below by set_level()
        self.level = self.check_level(level)

        # if not given logfile name, make one up
        if logfile is None:
            logfile = '%s.log' % __name__

        # get correct options for rewrite or append of logfile
        log_options = 'a' if append else 'w'

        # probe the file; on failure assume a readonly filesystem and fall
        # back to a writable location
        try:
            probe = open(logfile, log_options)
            probe.close()
        except IOError:
            basefile = os.path.basename(logfile)
            if sys.platform == 'win32':
                logfile = os.path.join('C:\\', basefile)
            else:
                # BUG FIX: expand '~' so the fallback really lands in $HOME
                logfile = os.path.join(os.path.expanduser('~'), basefile)

        # BUG FIX: the original closed the probe descriptor and never
        # reopened it on the success path, so every later write failed.
        # Keep a descriptor open for the lifetime of the logger.
        self.logfd = open(logfile, log_options)
        self.logfile = logfile

        # announce time+date of opening logging and logging level
        self.debug('=' * 55)
        self.debug('Log started on %s, log level=%s'
                   % (datetime.datetime.now().ctime(),
                      self._sym_name(self.level)))
        self.debug('-' * 55)

        # finally, set some internal state
        self.set_level(self.level)

    def _sym_name(self, level):
        """Return the symbolic name for a (validated) numeric level.

        Non-standard but legal levels are rendered as 'XXXX+n',
        e.g. 15 -> 'DEBUG+5'.
        """
        sym = self._level_num_to_name.get(level)
        if sym is None:
            base = int(10 * (level // 10))
            sym = '%s+%d' % (self._level_num_to_name[base], level - base)
        return sym

    def check_level(self, level):
        """Check the level value for legality.

        level  a numeric logging level

        If 'level' is invalid, raise Exception.  If valid, return it as int.
        """
        try:
            level = int(level)
        except (ValueError, TypeError):
            msg = "Logging level invalid: '%s'" % str(level)
            print(msg)
            raise Exception(msg)
        if not self.NOTSET <= level <= self.CRITICAL:
            msg = "Logging level invalid: '%s'" % str(level)
            print(msg)
            raise Exception(msg)
        return level

    def set_level(self, level):
        """Set logging level (numeric); updates the symbolic name too."""
        level = self.check_level(level)
        self.level = level
        self.sym_level = self._sym_name(level)
        self.critical('Logging level set to %02d (%s)'
                      % (level, self.sym_level))

    def __call__(self, msg=None, level=None):
        """Log 'msg' at 'level' (default: the logger's current level)."""
        if level is None:
            level = self.level

        # don't log below the current level
        if level < self.level or self.level < 0:
            return
        if msg is None:
            msg = ''

        now = datetime.datetime.now()

        # caller information - look back for first module != <this module name>
        frames = traceback.extract_stack()
        frames.reverse()
        try:
            (_, mod_name) = __name__.rsplit('.', 1)
        except ValueError:
            mod_name = __name__
        fname = ''
        lnum = 0
        for (fpath, lnum, mname, _) in frames:
            # BUG FIX: rsplit() returns a list - always unwrap the stem,
            # even when the filename contains no '.'
            fname = os.path.basename(fpath).rsplit('.', 1)[0]
            if fname != mod_name:
                break

        loglevel = self._sym_name(level)
        fname = fname[:self.max_fname]
        self.logfd.write('%02d:%02d:%02d.%06d|%8s|%*s:%-4d|%s\n'
                         % (now.hour, now.minute, now.second, now.microsecond,
                            loglevel, self.max_fname, fname, lnum, msg))
        self.logfd.flush()

    def critical(self, msg):
        """Log a message at CRITICAL level."""
        self(msg, self.CRITICAL)

    def error(self, msg):
        """Log a message at ERROR level."""
        self(msg, self.ERROR)

    def warn(self, msg):
        """Log a message at WARN level."""
        self(msg, self.WARN)

    def info(self, msg):
        """Log a message at INFO level."""
        self(msg, self.INFO)

    def debug(self, msg):
        """Log a message at DEBUG level."""
        self(msg, self.DEBUG)
| |
# -*- coding: utf-8 -*-
# @Author: massimo
# @Date: 2016-03-10 20:16:59
# @Last Modified by: massimo
# @Last Modified time: 2016-03-10 23:29:56
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.nnet.conv import conv2d
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from collections import OrderedDict
from data_loader import load_mnist
import time
import os
class ConvNet:
    """Convolutional neural network for image classification (Theano).

    NOTE(review): this is an exercise skeleton (Python 2) - several
    methods are TODO stubs and `init` contains a syntactically incomplete
    assignment, so the class cannot run until the TODOs are filled in.
    """
    def __init__(self,
                 input_shape,
                 n_classes=10,
                 optim='adagrad',
                 activation='relu',
                 num_kernels=None,
                 hidden_layers=None,
                 batch_size=128,
                 dropout_p_input=0.0,
                 dropout_p_conv=0.0,
                 dropout_p_hidden=0.0,
                 learning_rate=0.01,
                 momentum=0.5,
                 numpy_rng=None,
                 theano_rng=None):
        self.input_shape = input_shape  # (num input feature maps, image height, image width)
        if num_kernels is None:  # number of kernels per layer
            num_kernels = [20, 50]
        if hidden_layers is None:  # number of fully connected hidden layers
            hidden_layers = [128]
        self.hidden_layers = hidden_layers
        self.num_kernels = num_kernels
        self.n_classes = n_classes
        self.dropout_p_input = dropout_p_input
        self.dropout_p_conv = dropout_p_conv
        self.dropout_p_hidden = dropout_p_hidden
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.batch_size = batch_size
        # dispatch tables for the supported optimizers and activations
        optims = {
            'sgd': self.sgd,
            'adagrad': self.adagrad,
            'rmsprop': self.rmsprop
        }
        activs = {
            'tanh': self.tanh,
            'sigmoid': self.sigmoid,
            'relu': self.relu
        }
        assert optim in optims, 'Unknown optimization "{}"'.format(optim)
        self.optimization = optim
        self.optimization_fn = optims[optim]
        assert activation in activs, 'Unknown activation "{}"'.format(activation)
        self.activation = activation
        self.activation_fn = activs[self.activation]
        if numpy_rng is None:
            numpy_rng = np.random.RandomState(2**30)
        self.numpy_rng = numpy_rng
        if theano_rng is None:
            theano_rng = RandomStreams(2**30)
        self.theano_rng = theano_rng
        # compiled theano functions, built lazily in fit()/predict()
        self.train_fn, self.pred_fn = None, None
    def get_conv_output_shape(self, input_shape, filter_shape, stride=1):
        """Return the output shape of a convolution (TODO stub)."""
        # TODO: return the output shape of the convolution given the input data shape,
        # the filter shape and stride
        pass
    def init(self):
        """Initialize model parameters and return them as a list.

        NOTE(review): incomplete - the `hid_n_inputs =` line below is a
        syntax error, and the returned list references self.W1/self.W2/
        self.b1/self.b2 which are never assigned in this skeleton.
        """
        input_feat_maps, input_height, input_width = self.input_shape
        #TODO: Initialize the filters for the convolutional part of the DNN
        # Fully connected layers
        hid_n_inputs = # TODO: right number of hidden units here
        self.W, self.b = [], []
        for i in range(len(self.hidden_layers)):
            print 'Hidden{}: num. units {}'.format(i+1, self.hidden_layers[i])
            self.W.append(self.init_weights((hid_n_inputs if i == 0 else self.hidden_layers[i-1], self.hidden_layers[i]), sigma=0, name='W_{}'.format(i)))
            self.b.append(theano.shared(value=np.zeros((self.hidden_layers[i],), dtype=theano.config.floatX), borrow=True, name='b_{}'.format(i)))
        # Final Logistic Regression layer
        self.Wy = self.init_weights((self.hidden_layers[-1], self.n_classes), sigma=0, name='Wy')
        self.by = theano.shared(value=np.zeros((self.n_classes,), dtype=theano.config.floatX), borrow=True, name='by')
        params = [self.W1, self.W2, self.b1, self.b2, self.Wy, self.by] + self.W + self.b
        return params
    def model(self, X, dropout_p_input=0.0, dropout_p_conv=0.0, dropout_p_hidden=0.0):
        """Build the symbolic network output (TODO stub)."""
        #TODO: define the model of the convolutional neural network
        pass
    def softmax(self, x):
        """Row-wise softmax over a 2-D tensor."""
        # numerically stable version of softmax (it avoids exp to blowup due to high values of x)
        exp_x = T.exp(x - T.max(x, axis=1, keepdims=True))
        return exp_x / T.sum(exp_x, axis=1, keepdims=True)
    # ACTIVATION FUNCTIONS #
    def tanh(self, x):
        return T.tanh(x)
    def sigmoid(self, x):
        return T.nnet.sigmoid(x)
    def relu(self, x):
        return x * (x > 0)
    # DROPOUT #
    def dropout(self, x, p=0):
        """Randomly zero units with probability p (inverted dropout)."""
        if p > 0:
            retain_p = 1.0 - p
            x *= self.theano_rng.binomial(x.shape, p=retain_p, dtype=theano.config.floatX)
            # rescale at train time so no scaling is needed at test time
            x /= retain_p
        return x
    # LOSS FUNCTION #
    def categorical_cross_entropy(self, y, y_hat):
        """Mean negative log-likelihood of the true classes."""
        return T.mean(-T.log(y_hat[T.arange(y.shape[0]), y]), axis=0)
    def get_params(self):
        return self.params
    def floatX(self, arr):
        """Cast an array to theano's configured float dtype."""
        return np.asarray(arr, dtype=theano.config.floatX)
    def init_weights(self, shape, sigma=0.01, name=''):
        """Shared weight matrix: Glorot-style uniform when sigma == 0,
        otherwise Gaussian with std sigma."""
        if sigma == 0:
            W_bound = np.sqrt(6. / (shape[0] + shape[1]))
            return theano.shared(self.floatX(self.numpy_rng.uniform(low=-W_bound, high=W_bound, size=shape)), borrow=True, name=name)
        return theano.shared(self.floatX(self.numpy_rng.randn(*shape) * sigma), borrow=True, name=name)
    def init_filter(self, filter_shape, input_shape=None, output_shape=None, sigma=0.01, name=''):
        """Shared convolution filter tensor.

        NOTE(review): the condition below looks inverted - when BOTH
        shapes are None it dereferences input_shape[1:]/output_shape[1:]
        (TypeError); presumably `is not None` was intended - confirm.
        """
        if input_shape is None and output_shape is None:
            # use the heuristic: sample filter weigths from uniform(sqrt(6.0 / (fan_in + fan_out)))
            fan_in = np.prod(input_shape[1:])
            fan_out = np.prod(output_shape[1:])
            W_bound = np.sqrt(6. / (fan_in + fan_out))
            return theano.shared(self.floatX(self.numpy_rng.uniform(low=-W_bound, high=W_bound, size=filter_shape)), borrow=True, name=name)
        else:
            # sample weights from normal distribution with 0 mean and sigma std
            return theano.shared(self.floatX(self.numpy_rng.randn(*filter_shape) * sigma), borrow=True, name=name)
    def sgd(self, cost, params, learning_rate=0.1):
        """Vanilla stochastic gradient descent updates."""
        # compute the gradients of each parameter w.r.t. the loss
        pgrads = T.grad(cost, wrt=params)
        # define the sgd updates
        updates = OrderedDict([(p, p - learning_rate * g) for p, g in zip(params, pgrads)])
        return updates
    def adagrad(self, cost, params, learning_rate=0.1, epsilon=1e-6):
        """Adagrad updates (TODO stub - currently returns no updates)."""
        # compute the gradients of each parameter w.r.t. the loss
        pgrads = T.grad(cost, wrt=params)
        updates = OrderedDict()
        # TODO: compute the updates over params in adagrad
        return updates
    def rmsprop(self, cost, params, learning_rate=1.0, decay=0.99, epsilon=1e-6):
        """RMSProp updates (TODO stub - currently returns no updates)."""
        # compute the gradients of each parameter w.r.t. the loss
        pgrads = T.grad(cost, wrt=params)
        updates = OrderedDict()
        # TODO: compute the updates over params in rmsprop
        return updates
    def apply_momentum(self, updates, momentum=0.5):
        """Wrap plain parameter updates with classical momentum."""
        updates = OrderedDict(updates)
        for p in updates.keys():
            velocity = theano.shared(p.get_value(borrow=True) * 0., borrow=True)
            # updates[p] = p - learning_rate * dp
            p_new = momentum * velocity + updates[p]  # p + momentum * velocity - learning_rate * dp
            updates[velocity] = p_new - p  # momentum * velocity - learning_rate * dp
            updates[p] = p_new
        return updates
    def fit(self, x, y, epochs=10, shuffle_training=True):
        """Train on (x, y) for `epochs`; return the per-batch cost history
        (or None if a NaN cost aborts training)."""
        if self.train_fn is None:
            print 'Compiling the training functions'
            # the input variable now is a 4d tensor
            X = T.tensor4()
            y_sym = T.ivector()
            # initialize filter and hidden units weights
            self.params = self.init()
            # build the model and the output variables
            y_hat = self.model(X, self.dropout_p_input, self.dropout_p_conv, self.dropout_p_hidden)
            cost = self.categorical_cross_entropy(y_sym, y_hat)
            updates = self.optimization_fn(cost, self.params, self.learning_rate)
            if self.momentum > 0.:
                updates = self.apply_momentum(updates, self.momentum)
            self.train_fn = theano.function(inputs=[X, y_sym], outputs=cost, updates=updates)
        if shuffle_training:
            shuffle_idx = self.numpy_rng.permutation(x.shape[0])
            x = x[shuffle_idx]
            y = y[shuffle_idx]
        # ceil division: the last batch may be smaller
        num_train_batches = -(-x.shape[0] // self.batch_size)
        cost_history = []
        print 'Training started'
        for e in range(epochs):
            avg_cost = 0
            for bidx in range(num_train_batches):
                batch_x = x[bidx * self.batch_size: (bidx + 1) * self.batch_size]
                batch_y = y[bidx * self.batch_size: (bidx + 1) * self.batch_size]
                batch_cost = self.train_fn(batch_x, batch_y)
                cost_history.append(batch_cost)
                if np.isnan(batch_cost):
                    print 'NaN cost detected. Abort'
                    return
                avg_cost += batch_cost
            avg_cost /= num_train_batches
            print 'Epoch: {} Loss: {:.8f}'.format(e + 1, avg_cost)
        return cost_history
    def predict(self, x):
        """Return predicted class labels for x, computed batch by batch."""
        if self.pred_fn is None:
            X = T.tensor4()
            # dropout disabled at prediction time
            y_hat_pred = self.model(X, 0.0, 0.0, 0.0)
            y_pred = T.argmax(y_hat_pred, axis=1)
            self.pred_fn = theano.function(inputs=[X], outputs=y_pred)
        preds = np.asarray([])
        num_batches = -(-x.shape[0] // self.batch_size)
        for bidx in range(num_batches):
            batch_x = x[bidx * self.batch_size: (bidx + 1) * self.batch_size]
            batch_y_pred = self.pred_fn(batch_x)
            preds = np.concatenate((preds, batch_y_pred))
        return preds
if __name__ == '__main__':
    # load the MNIST train/validation/test splits
    dataset = load_mnist('../../data/mnist.pkl.gz')
    train_x, train_y = dataset[0]
    valid_x, valid_y = dataset[1]
    test_x, test_y = dataset[2]
    # reshape the input data to be compliant with the input shape expected by the CNN
    n_train_x = train_x.shape[0]
    n_valid_x = valid_x.shape[0]
    n_test_x = test_x.shape[0]
    # (N, channels, height, width) = (N, 1, 28, 28) for MNIST
    train_x = train_x.reshape(n_train_x, 1, 28, 28)
    valid_x = valid_x.reshape(n_valid_x, 1, 28, 28)
    test_x = test_x.reshape(n_test_x, 1, 28, 28)
    model = ConvNet(
        input_shape=train_x.shape[1:],
        n_classes=10,
        optim='rmsprop',
        activation='relu',
        num_kernels=[20, 50],
        hidden_layers=[256],
        dropout_p_input=0.0,
        dropout_p_conv=0.5,
        dropout_p_hidden=0.5,
        learning_rate=0.001,
        momentum=0.0)
    # WARNING: If possible run on CUDA enabled GPU, otherwise it may take a long time to complete on CPU
    t0 = time.time()
    tch = model.fit(train_x, train_y, epochs=25)
    print 'Training completed in {:.2f} sec'.format(time.time() - t0)
    valid_y_pred = model.predict(valid_x)
    valid_accuracy = np.sum(valid_y_pred == valid_y, dtype=np.float32) / valid_y.shape[0]
    print 'Validation accuracy: {:.2f}'.format(valid_accuracy * 100)  # you should get around 99%
    test_y_pred = model.predict(test_x)
    test_accuracy = np.sum(test_y_pred == test_y, dtype=np.float32) / test_y.shape[0]
    print 'Test accuracy: {:.2f}'.format(test_accuracy * 100)  # you should get around 99%
    # dest_dir = 'convnet_train'
    # if not os.path.exists(dest_dir):
    #     os.makedirs(dest_dir)
    # fname = os.path.join(dest_dir,
    #                      'convnet_{}_{}_k{}_l{}_lr{}_m{}_di{}_dc{}_dh{}.npz'.format(
    #                          model.optimization,
    #                          model.activation,
    #                          '-'.join(map(str, model.num_kernels)),
    #                          '-'.join(map(str, model.hidden_layers)),
    #                          model.learning_rate,
    #                          model.momentum,
    #                          model.dropout_p_input,
    #                          model.dropout_p_conv,
    #                          model.dropout_p_hidden))
    # np.savez_compressed(fname, train_cost=tch, valid_accuracy=valid_accuracy, test_accuracy=test_accuracy)
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a general beam search decoder API for RNN based decoders.
The purpose of this API is to allow users to highly customize the behavior
within their RNN decoder(vanilla RNN, LSTM, attention + LSTM, future etc.),
without using the low level API such as while ops.
This API is still under active development and may change drastically.
"""
import contextlib
import numpy as np
from ... import layers
from ...framework import Variable
from ... import core
from ... import framework, unique_name
from ...layer_helper import LayerHelper
__all__ = ['InitState', 'StateCell', 'TrainingDecoder', 'BeamSearchDecoder']
class _DecoderType:
    """Enum-like tags distinguishing the two decoder modes."""
    TRAINING = 1
    BEAM_SEARCH = 2
class InitState(object):
    """
    Initial hidden state for an RNN decoder cell; consumed by `StateCell`.

    Either wraps an existing variable (``init``) or, when ``init`` is None,
    creates a constant-filled tensor shaped like ``init_boot``.

    Args:
        init (Variable): existing initial-state variable, or None.
        shape (tuple|list): shape for the created variable when init is None.
        value (float): fill value for the created variable. Default 0.0.
        init_boot (Variable): template variable the created tensor is shaped
            after; required when init is None.
        need_reorder (bool): sort the init by lod rank within its batches
            (use when batch_size > 1).
        dtype (np.dtype|core.VarDesc.VarType|str): dtype of the created
            variable.

    Raises:
        ValueError: when neither ``init`` nor ``init_boot`` is given.
    """

    def __init__(self,
                 init=None,
                 shape=None,
                 value=0.0,
                 init_boot=None,
                 need_reorder=False,
                 dtype='float32'):
        if init is not None:
            self._init = init
        else:
            if init_boot is None:
                raise ValueError(
                    'init_boot must be provided to infer the shape of InitState .\n'
                )
            self._init = layers.fill_constant_batch_size_like(
                input=init_boot, value=value, shape=shape, dtype=dtype)
        self._shape = shape
        self._value = value
        self._need_reorder = need_reorder
        self._dtype = dtype

    @property
    def value(self):
        """The underlying state variable."""
        return self._init

    @property
    def need_reorder(self):
        """Whether the state must be reordered by lod rank."""
        return self._need_reorder
class _MemoryState(object):
    """Hidden-state holder backed by a DynamicRNN memory (training mode)."""

    def __init__(self, state_name, rnn_obj, init_state):
        self._state_name = state_name  # each is a rnn.memory
        self._rnn_obj = rnn_obj
        self._state_mem = rnn_obj.memory(init=init_state.value,
                                         need_reorder=init_state.need_reorder)

    def get_state(self):
        """Return the memory variable holding the current step's state."""
        return self._state_mem

    def update_state(self, state):
        """Feed the new state back into the RNN memory for the next step."""
        self._rnn_obj.update_memory(self._state_mem, state)
class _ArrayState(object):
    """Hidden-state holder backed by a LoDTensorArray (beam-search mode).

    The state history is kept in a tensor array indexed by a step counter
    that is incremented on every update.
    """

    def __init__(self, state_name, block, init_state):
        self._state_name = state_name
        self._block = block

        # array holding one state tensor per decoding step
        self._state_array = self._block.create_var(
            name=unique_name.generate('array_state_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=init_state.value.dtype)

        # scalar step counter, pinned to CPU via force_cpu below
        self._counter = self._block.create_var(
            name=unique_name.generate('array_state_counter'),
            type=core.VarDesc.VarType.LOD_TENSOR,
            dtype='int64')

        # initialize counter
        self._block.append_op(
            type='fill_constant',
            inputs={},
            outputs={'Out': [self._counter]},
            attrs={
                'shape': [1],
                'dtype': self._counter.dtype,
                'value': float(0.0),
                'force_cpu': True
            })

        self._counter.stop_gradient = True

        # write initial state
        block.append_op(
            type='write_to_array',
            inputs={'X': init_state.value,
                    'I': self._counter},
            outputs={'Out': self._state_array})

    def get_state(self):
        """Read the state for the current step from the array."""
        state = layers.array_read(array=self._state_array, i=self._counter)
        return state

    def update_state(self, state):
        """Advance the step counter and append the new state."""
        layers.increment(x=self._counter, value=1, in_place=True)
        layers.array_write(state, array=self._state_array, i=self._counter)
class StateCell(object):
    """
    The state cell class stores the hidden state of the RNN cell. A typical RNN
    cell has one or more hidden states, and one or more step inputs. This class
    allows you to defines the name of hidden states as well as step inputs, and
    their associated variables.

    Args:
        inputs (dict): A feeding dict of {name(str) : Variable}. It specifies
            the names of step inputs for RNN cell, and the associated variables.
            The variable could initially be None and set manually during each
            RNN step.
        states (dict): A feeding dict of {name(str) : InitState object}. It
            specifies the names of hidden states and their initialized state.
        out_state (str): A string that specifies the name of hidden state that
            will be used to compute the score in beam search process.
        name (str): The name of the RNN cell. Default None.

    Raises:
        `ValueError`: If the initial state is not an instance of InitState, or
            the out_state is not in the dict of states.

    Returns:
        StateCell: The initialized StateCell object.

    Examples:
        .. code-block:: python
          hidden_state = InitState(init=encoder_out, need_reorder=True)
          state_cell = StateCell(
              inputs={'current_word': None},
              states={'h': hidden_state},
              out_state='h')
    """

    def __init__(self, inputs, states, out_state, name=None):
        self._helper = LayerHelper('state_cell', name=name)
        self._cur_states = {}
        self._state_names = []
        for state_name, state in states.items():
            if not isinstance(state, InitState):
                raise ValueError('state must be an InitState object.')
            # InitState objects are replaced with real state variables
            # lazily in _switch_decoder()
            self._cur_states[state_name] = state
            self._state_names.append(state_name)
        self._inputs = inputs  # inputs is place holder here
        self._cur_decoder_obj = None
        self._in_decoder = False
        self._states_holder = {}
        self._switched_decoder = False
        self._state_updater = None
        self._out_state = out_state
        if self._out_state not in self._cur_states:
            raise ValueError('out_state must be one state in states')

    def _enter_decoder(self, decoder_obj):
        if self._in_decoder == True or self._cur_decoder_obj is not None:
            raise ValueError('StateCell has already entered a decoder.')
        self._in_decoder = True
        self._cur_decoder_obj = decoder_obj
        self._switched_decoder = False

    def _leave_decoder(self, decoder_obj):
        if not self._in_decoder:
            raise ValueError('StateCell not in decoder, '
                             'invalid leaving operation.')

        if self._cur_decoder_obj != decoder_obj:
            raise ValueError('Inconsistent decoder object in StateCell.')

        self._in_decoder = False
        self._cur_decoder_obj = None
        self._switched_decoder = False

    def _switch_decoder(self):  # lazy switch
        if not self._in_decoder:
            raise ValueError('StateCell must be enter a decoder.')

        if self._switched_decoder:
            raise ValueError('StateCell already done switching.')

        for state_name in self._state_names:
            if state_name not in self._states_holder:
                state = self._cur_states[state_name]

                if not isinstance(state, InitState):
                    raise ValueError('Current type of state is %s, should be '
                                     'an InitState object.' % type(state))

                self._states_holder[state_name] = {}

                # pick the state backend matching the decoder mode
                if self._cur_decoder_obj.type == _DecoderType.TRAINING:
                    self._states_holder[state_name][id(self._cur_decoder_obj)] \
                        = _MemoryState(state_name,
                                       self._cur_decoder_obj.dynamic_rnn,
                                       state)
                elif self._cur_decoder_obj.type == _DecoderType.BEAM_SEARCH:
                    self._states_holder[state_name][id(self._cur_decoder_obj)] \
                        = _ArrayState(state_name,
                                      self._cur_decoder_obj._parent_block(),
                                      state)
                else:
                    raise ValueError('Unknown decoder type, only support '
                                     '[TRAINING, BEAM_SEARCH]')

            # Read back, since current state should be LoDTensor
            self._cur_states[state_name] = \
                self._states_holder[state_name][
                    id(self._cur_decoder_obj)].get_state()

        self._switched_decoder = True

    def get_state(self, state_name):
        """
        The getter of state object. Find the state variable by its name.

        Args:
            state_name (str): A string of the state's name.

        Returns:
            The associated state object.
        """
        if self._in_decoder and not self._switched_decoder:
            self._switch_decoder()

        if state_name not in self._cur_states:
            raise ValueError(
                'Unknown state %s. Please make sure _switch_decoder() '
                'invoked.' % state_name)

        return self._cur_states[state_name]

    def get_input(self, input_name):
        """
        The getter of input variable. Find the input variable by its name.

        Args:
            input_name (str): The string of the input's name.

        Returns:
            The associated input variable.
        """
        if input_name not in self._inputs or self._inputs[input_name] is None:
            raise ValueError('Invalid input %s.' % input_name)
        return self._inputs[input_name]

    def set_state(self, state_name, state_value):
        """
        The setter of the state variable. Change the variable of the given
        `state_name`.

        Args:
            state_name (str): The name of the state to change.
            state_value (Var): The variable of the new state.
        """
        self._cur_states[state_name] = state_value

    def state_updater(self, updater):
        """
        Set up the updater to update the hidden state every RNN step. The
        behavior of updater could be customized by users. The updater should be
        a function that takes a `StateCell` object as input and update the
        hidden state within it. The hidden state could be accessed through
        `get_state` method.

        Args:
            updater (func): the updater to update the state cell.
        """
        self._state_updater = updater

        def _decorator(state_cell):
            if state_cell == self:
                raise TypeError('Updater should only accept a StateCell object '
                                'as argument.')
            updater(state_cell)

        return _decorator

    def compute_state(self, inputs):
        """
        Provide the step input of RNN cell, and compute the new hidden state
        with updater and give step input.

        Args:
            inputs (dict): A feed dict, {name(str): Variable}. name should be
            the names of step inputs for this RNN cell, and Variable should be
            the associated variables.

        Examples:
        .. code-block:: python
          state_cell.compute_state(inputs={'x': current_word})
        """
        if self._in_decoder and not self._switched_decoder:
            self._switch_decoder()

        for input_name, input_value in inputs.items():
            if input_name not in self._inputs:
                raise ValueError('Unknown input %s. '
                                 'Please make sure %s in input '
                                 'place holder.' % (input_name, input_name))
            self._inputs[input_name] = input_value
        self._state_updater(self)

    def update_states(self):
        """
        Update and record state information after each RNN step.
        """
        if self._in_decoder and not self._switched_decoder:
            # BUG FIX: was `self._switched_decoder()` - calling the boolean
            # flag (TypeError) instead of the lazy-switch method.
            self._switch_decoder()

        for state_name, decoder_state in self._states_holder.items():
            if id(self._cur_decoder_obj) not in decoder_state:
                raise ValueError('Unknown decoder object, please make sure '
                                 'switch_decoder been invoked.')
            decoder_state[id(self._cur_decoder_obj)].update_state(
                self._cur_states[state_name])

    def out_state(self):
        """
        Get the output state variable. This must be called after update_states.

        Returns:
            The output variable of the RNN cell.
        """
        return self._cur_states[self._out_state]
class TrainingDecoder(object):
    """
    An RNN decoder usable only at training time, driven by a `StateCell`.
    The computation performed at every time step is declared inside the
    decoder's :meth:`block` context manager.

    Args:
        state_cell (StateCell): manages the decoder's step inputs and
            hidden states.
        name (str): optional name for the decoder. Default None.

    Returns:
        TrainingDecoder: the constructed decoder object.

    Examples:
        .. code-block:: python

            decoder = TrainingDecoder(state_cell)
            with decoder.block():
                current_word = decoder.step_input(trg_embedding)
                decoder.state_cell.compute_state(inputs={'x': current_word})
                current_score = layers.fc(input=decoder.state_cell.get_state('h'),
                                          size=32,
                                          act='softmax')
                decoder.state_cell.update_states()
                decoder.output(current_score)
    """
    # Lifecycle phases: block() may run exactly once, and decoder output
    # may only be read after the block has closed.
    BEFORE_DECODER = 0
    IN_DECODER = 1
    AFTER_DECODER = 2

    def __init__(self, state_cell, name=None):
        self._helper = LayerHelper('training_decoder', name=name)
        self._status = TrainingDecoder.BEFORE_DECODER
        self._dynamic_rnn = layers.DynamicRNN()
        self._type = _DecoderType.TRAINING
        self._state_cell = state_cell
        # Register this decoder as the state cell's active owner.
        self._state_cell._enter_decoder(self)

    @contextlib.contextmanager
    def block(self):
        """
        Context manager in which one RNN time step's behavior is defined.
        """
        if self._status != TrainingDecoder.BEFORE_DECODER:
            raise ValueError('decoder.block() can only be invoked once')
        self._status = TrainingDecoder.IN_DECODER
        with self._dynamic_rnn.block():
            yield
        self._status = TrainingDecoder.AFTER_DECODER
        self._state_cell._leave_decoder(self)

    @property
    def state_cell(self):
        # Only meaningful while building the per-step computation.
        self._assert_in_decoder_block('state_cell')
        return self._state_cell

    @property
    def dynamic_rnn(self):
        return self._dynamic_rnn

    @property
    def type(self):
        return self._type

    def step_input(self, x):
        """
        Mark `x` as a per-step input of the RNN cell (e.g. one target word
        per step in machine translation).

        Args:
            x (Variable): the variable to scatter over time steps.

        Returns:
            Variable: the current step's slice of `x`.

        Examples:
            .. code-block:: python

                current_word = decoder.step_input(trg_embedding)
        """
        self._assert_in_decoder_block('step_input')
        return self._dynamic_rnn.step_input(x)

    def static_input(self, x):
        """
        Mark `x` as a static input: visible as a whole inside the decode
        loop instead of being scattered into time steps.

        Args:
            x (Variable): the variable to expose unchanged every step.

        Returns:
            Variable: the static input usable inside the block.

        Examples:
            .. code-block:: python

                encoder_vec = decoder.static_input(encoded_vector)
        """
        self._assert_in_decoder_block('static_input')
        return self._dynamic_rnn.static_input(x)

    def __call__(self, *args, **kwargs):
        """
        Fetch the RNN output; valid only after block() has been closed.

        Returns:
            Variable: the requested output of the RNN cell.
        """
        if self._status != TrainingDecoder.AFTER_DECODER:
            raise ValueError('Output of training decoder can only be visited '
                             'outside the block.')
        return self._dynamic_rnn(*args, **kwargs)

    def output(self, *outputs):
        """
        Declare the RNN cell's output variables.

        Args:
            *outputs (Variables): variables treated as the cell's output.

        Examples:
            .. code-block:: python

                out = fluid.layers.fc(input=h,
                                      size=32,
                                      bias_attr=True,
                                      act='softmax')
                decoder.output(out)
        """
        self._assert_in_decoder_block('output')
        self._dynamic_rnn.output(*outputs)

    def _assert_in_decoder_block(self, method):
        # Guard helper: most APIs are only legal while inside block().
        if self._status != TrainingDecoder.IN_DECODER:
            raise ValueError('%s should be invoked inside block of '
                             'TrainingDecoder object.' % method)
class BeamSearchDecoder(object):
    """
    A beam search decoder that can be used for inference. The decoder should be
    initialized with a `StateCell` object. The decode process can be defined
    within its block.

    Args:
        state_cell (StateCell): A StateCell object that handles the input and
            state variables.
        init_ids (Variable): The init beam search token ids.
        init_scores (Variable): The associated score of each id.
        target_dict_dim (int): Size of dictionary.
        word_dim (int): Word embedding dimension.
        input_var_dict (dict): A feeding dict to feed the required input
            variables to the state cell. It will be used by state_cell 's
            compute method. Default None (treated as an empty dict).
        topk_size (int): The topk size used for beam search. Default 50.
        max_len (int): The maximum allowed length of the generated sentence.
            Default 100.
        beam_size (int): The beam width of beam search decode. Default 1.
        end_id (int): The id of end token within beam search.
        name (str): The name of this decoder. Default None.

    Returns:
        BeamSearchDecoder: A initialized BeamSearchDecoder object.

    Examples:
        .. code-block:: python

            decoder = BeamSearchDecoder(
                state_cell=state_cell,
                init_ids=init_ids,
                init_scores=init_scores,
                target_dict_dim=target_dict_dim,
                word_dim=word_dim,
                init_var_dict={},
                topk_size=topk_size,
                sparse_emb=IS_SPARSE,
                max_len=max_length,
                beam_size=beam_size,
                end_id=1,
                name=None
            )
            decoder.decode()
            translation_ids, translation_scores = decoder()
    """
    # Lifecycle phases: block() may run exactly once; results can only be
    # fetched after the block has closed.
    BEFORE_BEAM_SEARCH_DECODER = 0
    IN_BEAM_SEARCH_DECODER = 1
    AFTER_BEAM_SEARCH_DECODER = 2

    def __init__(self,
                 state_cell,
                 init_ids,
                 init_scores,
                 target_dict_dim,
                 word_dim,
                 input_var_dict=None,
                 topk_size=50,
                 sparse_emb=True,
                 max_len=100,
                 beam_size=1,
                 end_id=1,
                 name=None):
        # BUG FIX: the original used a shared mutable dict ({}) as the
        # default for input_var_dict; use None and normalize it here.
        if input_var_dict is None:
            input_var_dict = {}
        self._helper = LayerHelper('beam_search_decoder', name=name)
        # Step counter drives both the while-loop condition and the
        # per-step array reads/writes.
        self._counter = layers.zeros(shape=[1], dtype='int64')
        self._counter.stop_gradient = True
        self._type = _DecoderType.BEAM_SEARCH
        self._max_len = layers.fill_constant(
            shape=[1], dtype='int64', value=max_len)
        self._cond = layers.less_than(
            x=self._counter,
            y=layers.fill_constant(
                shape=[1], dtype='int64', value=max_len))
        self._while_op = layers.While(self._cond)
        self._state_cell = state_cell
        self._state_cell._enter_decoder(self)
        self._status = BeamSearchDecoder.BEFORE_BEAM_SEARCH_DECODER
        self._zero_idx = layers.fill_constant(
            shape=[1], value=0, dtype='int64', force_cpu=True)
        self._array_dict = {}
        self._array_link = []
        self._ids_array = None
        self._scores_array = None
        self._beam_size = beam_size
        self._end_id = end_id
        self._init_ids = init_ids
        self._init_scores = init_scores
        self._target_dict_dim = target_dict_dim
        self._topk_size = topk_size
        self._sparse_emb = sparse_emb
        self._word_dim = word_dim
        self._input_var_dict = input_var_dict

    @contextlib.contextmanager
    def block(self):
        """
        Define the behavior of the decoder for each RNN time step.
        """
        if self._status != BeamSearchDecoder.BEFORE_BEAM_SEARCH_DECODER:
            raise ValueError('block() can only be invoke once.')

        self._status = BeamSearchDecoder.IN_BEAM_SEARCH_DECODER
        with self._while_op.block():
            yield
            with layers.Switch() as switch:
                with switch.case(self._cond):
                    # Advance the step counter and flush every linked
                    # (value, array) pair registered by update_array().
                    layers.increment(x=self._counter, value=1.0, in_place=True)

                    for value, array in self._array_link:
                        layers.array_write(
                            x=value, i=self._counter, array=array)

                    # Refresh the loop condition for the next iteration.
                    layers.less_than(
                        x=self._counter, y=self._max_len, cond=self._cond)

        self._status = BeamSearchDecoder.AFTER_BEAM_SEARCH_DECODER
        self._state_cell._leave_decoder(self)

    @property
    def type(self):
        return self._type

    def early_stop(self):
        """
        Stop the generation process in advance. Could be used as "break".
        """
        # Force the while-loop condition to false.
        layers.fill_constant(
            shape=[1], value=0, dtype='bool', force_cpu=True, out=self._cond)

    def decode(self):
        """
        Set up the computation within the decoder. Then you could call the
        decoder to get the result of beam search decode. If you want to define
        a more specific decoder, you could override this function.

        Examples:
            .. code-block:: python

                decoder.decode()
                translation_ids, translation_scores = decoder()
        """
        with self.block():
            prev_ids = self.read_array(init=self._init_ids, is_ids=True)
            prev_scores = self.read_array(
                init=self._init_scores, is_scores=True)
            prev_ids_embedding = layers.embedding(
                input=prev_ids,
                size=[self._target_dict_dim, self._word_dim],
                dtype='float32',
                is_sparse=self._sparse_emb)

            feed_dict = {}
            update_dict = {}

            for init_var_name, init_var in self._input_var_dict.items():
                if init_var_name not in self.state_cell._inputs:
                    raise ValueError('Variable ' + init_var_name +
                                     ' not found in StateCell!\n')

                read_var = self.read_array(init=init_var)
                update_dict[init_var_name] = read_var
                feed_var_expanded = layers.sequence_expand(read_var,
                                                           prev_scores)
                feed_dict[init_var_name] = feed_var_expanded

            # Expand every hidden state to the width of the current beam.
            for state_str in self._state_cell._state_names:
                prev_state = self.state_cell.get_state(state_str)
                prev_state_expanded = layers.sequence_expand(prev_state,
                                                             prev_scores)
                self.state_cell.set_state(state_str, prev_state_expanded)

            # Inputs not supplied explicitly default to the previous-step
            # token embeddings.
            for input_name in self._state_cell._inputs:
                if input_name not in feed_dict:
                    feed_dict[input_name] = prev_ids_embedding

            self.state_cell.compute_state(inputs=feed_dict)
            current_state = self.state_cell.out_state()
            current_state_with_lod = layers.lod_reset(
                x=current_state, y=prev_scores)
            scores = layers.fc(input=current_state_with_lod,
                               size=self._target_dict_dim,
                               act='softmax')
            topk_scores, topk_indices = layers.topk(scores, k=self._topk_size)
            # Accumulate log-probabilities along each beam.
            accu_scores = layers.elementwise_add(
                x=layers.log(x=topk_scores),
                y=layers.reshape(
                    prev_scores, shape=[-1]),
                axis=0)
            selected_ids, selected_scores = layers.beam_search(
                prev_ids,
                prev_scores,
                topk_indices,
                accu_scores,
                self._beam_size,
                # BUG FIX: honor the end token this decoder was constructed
                # with instead of the hard-coded literal 1 (which silently
                # ignored a non-default end_id).
                end_id=self._end_id,
                level=0)

            with layers.Switch() as switch:
                with switch.case(layers.is_empty(selected_ids)):
                    # All beams finished: break out of the while loop.
                    self.early_stop()
                with switch.default():
                    self.state_cell.update_states()
                    self.update_array(prev_ids, selected_ids)
                    self.update_array(prev_scores, selected_scores)

                    for update_name, var_to_update in update_dict.items():
                        self.update_array(var_to_update, feed_dict[update_name])

    def read_array(self, init, is_ids=False, is_scores=False):
        """
        Read an array to get the decoded ids and scores generated by previous
        RNN step. At the first step of RNN, the init variable mut be used to
        initialize the array.

        Args:
            init (Variable): The initial variable for first step usage. init
                must be provided.
            is_ids (bool): Specify whether the variable is an id.
            is_scores (bool): Specify whether the variable is a score.

        Returns:
            The associated variable generated during previous RNN steps.

        Examples:
            .. code-block:: python

                prev_ids = decoder.read_array(init=init_ids, is_ids=True)
                prev_scores = decoder.read_array(init=init_scores, is_scores=True)
        """
        self._assert_in_decoder_block('read_array')

        if is_ids and is_scores:
            raise ValueError('Shouldn\'t mark current array be ids array and'
                             'scores array at the same time.')

        if not isinstance(init, Variable):
            raise TypeError('The input argument `init` must be a Variable.')

        # The array lives in the parent block so it persists across loop
        # iterations; slot 0 is seeded with `init`.
        parent_block = self._parent_block()
        array = parent_block.create_var(
            name=unique_name.generate('beam_search_decoder_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=init.dtype)
        parent_block.append_op(
            type='write_to_array',
            inputs={'X': init,
                    'I': self._zero_idx},
            outputs={'Out': array})

        if is_ids:
            self._ids_array = array
        elif is_scores:
            self._scores_array = array

        read_value = layers.array_read(array=array, i=self._counter)
        self._array_dict[read_value.name] = array
        return read_value

    def update_array(self, array, value):
        """
        Store the value generated in current step in an array for each RNN step.
        This array could be accessed by read_array method.

        Args:
            array (Variable): The array to append the new variable to.
            value (Variable): The newly generated value to be stored.
        """
        self._assert_in_decoder_block('update_array')

        if not isinstance(array, Variable):
            raise TypeError(
                'The input argument `array` of  must be a Variable.')
        if not isinstance(value, Variable):
            raise TypeError('The input argument `value` of  must be a Variable.')

        array = self._array_dict.get(array.name, None)
        if array is None:
            raise ValueError('Please invoke read_array before update_array.')

        # Deferred: the actual array_write happens at the end of block().
        self._array_link.append((value, array))

    def __call__(self):
        """
        Run the decode process and return the final decode result.

        Returns:
            A tuple of decoded (id, score) pairs. id is a Variable that holds
            the generated tokens, and score is a Variable with the same shape
            as id, holds the score for each generated token.
        """
        if self._status != BeamSearchDecoder.AFTER_BEAM_SEARCH_DECODER:
            raise ValueError('Output of BeamSearchDecoder object can '
                             'only be visited outside the block.')
        return layers.beam_search_decode(
            ids=self._ids_array,
            scores=self._scores_array,
            beam_size=self._beam_size,
            end_id=self._end_id)

    @property
    def state_cell(self):
        self._assert_in_decoder_block('state_cell')
        return self._state_cell

    def _parent_block(self):
        """
        Getter of parent block.

        Returns:
            The parent block of decoder.
        """
        program = self._helper.main_program
        parent_block_idx = program.current_block().parent_idx
        if parent_block_idx < 0:
            raise ValueError('Invalid block with index %d.' % parent_block_idx)
        parent_block = program.block(parent_block_idx)
        return parent_block

    def _assert_in_decoder_block(self, method):
        # Guard helper: most APIs are only legal while inside block().
        if self._status != BeamSearchDecoder.IN_BEAM_SEARCH_DECODER:
            raise ValueError('%s should be invoked inside block of '
                             'BeamSearchDecoder object.' % method)
| |
#!/usr/bin/env python
"""
Script which updates README.md with a list of all the available packs.
"""
import os
import copy
import glob
import json
import argparse
import yaml
# Repo-relative paths, resolved from this script's own location.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKS_DIR = os.path.join(CURRENT_DIR, '../packs')
README_PATH = os.path.join(CURRENT_DIR, '../README.md')
# Parser to apply for each supported metadata file extension.
PARSER_FUNCS = {
    '.json': json.loads,
    '.yml': yaml.safe_load,
    '.yaml': yaml.safe_load
}
# URL templates for repo links and pack icons rendered into the README.
BASE_REPO_URL = 'https://github.com/StackStorm/st2contrib'
BASE_PACKS_URL = 'https://github.com/StackStorm/st2contrib/tree/master/packs'
PACK_ICON_URL = 'https://raw.githubusercontent.com/StackStorm/st2contrib/master/packs/%(name)s/icon.png'
# Fallback (generic st2) icon for packs that ship no icon.png.
NO_PACK_ICON_URL = 'https://raw.githubusercontent.com/StackStorm/st2contrib/master/packs/st2/icon.png'
def get_pack_list():
    """Return the names of all pack directories, sorted alphabetically."""
    return sorted(os.listdir(PACKS_DIR))
def get_pack_metadata(pack):
    """Load pack.yaml metadata for *pack* and attach an ``icon_url`` key."""
    metadata_path = os.path.join(PACKS_DIR, pack, 'pack.yaml')
    with open(metadata_path, 'r') as fp:
        metadata = yaml.safe_load(fp.read())
    # Prefer the pack's own icon; otherwise fall back to the generic one.
    icon_path = os.path.join(PACKS_DIR, pack, 'icon.png')
    if os.path.exists(icon_path):
        metadata['icon_url'] = PACK_ICON_URL % {'name': pack}
    else:
        metadata['icon_url'] = NO_PACK_ICON_URL
    return metadata
def get_pack_resources(pack):
    """Collect sensor and action metadata for *pack*.

    Returns:
        dict: {'sensors': [...], 'actions': [...]} where each item has
        'name' and 'description' keys, sorted by name.
    """
    sensors_path = os.path.join(PACKS_DIR, pack, 'sensors/')
    actions_path = os.path.join(PACKS_DIR, pack, 'actions/')

    def _metadata_files(base_path):
        # Gather every metadata file the parsers understand.
        found = []
        for pattern in ('/*.json', '/*.yaml', '/*.yml'):
            found += glob.glob(base_path + pattern)
        return found

    resources = {
        'sensors': [],
        'actions': []
    }

    for path in _metadata_files(sensors_path):
        _, file_ext = os.path.splitext(path)
        with open(path, 'r') as fp:
            content = PARSER_FUNCS[file_ext](fp.read())
        resources['sensors'].append({
            'name': content['class_name'],
            'description': content.get('description', None)
        })

    for path in _metadata_files(actions_path):
        _, file_ext = os.path.splitext(path)
        with open(path, 'r') as fp:
            content = PARSER_FUNCS[file_ext](fp.read())
        # Metadata without a name (e.g. shared config files) is skipped.
        if 'name' not in content:
            continue
        resources['actions'].append({
            'name': content['name'],
            'description': content.get('description', None)
        })

    resources['sensors'].sort(key=lambda item: item['name'])
    resources['actions'].sort(key=lambda item: item['name'])
    return resources
def generate_pack_list_table(packs):
    """Render the top-level pack list as a Markdown table.

    Args:
        packs: sequence of (pack_name, metadata_dict) tuples.

    Returns:
        str: the Markdown table, header rows included.
    """
    lines = []
    lines.append('Icon | Name | Description | Keywords | Author | Latest Version | Available Resources')
    lines.append('---- | ---- | ----------- | -------- | ------ | -------------- | -------------------')
    for pack_name, metadata in packs:
        # Deep-copy so the extra template keys don't leak into the
        # caller's metadata dict.
        values = copy.deepcopy(metadata)
        values['base_packs_url'] = BASE_PACKS_URL
        values['base_repo_url'] = BASE_REPO_URL
        values['keywords'] = ', '.join(metadata.get('keywords', []))
        # BUG FIX: the icon cell was a corrupted Markdown image link
        # ('[s)](...)'); restore the ![icon](%(icon_url)s) image syntax.
        line = '[](%(base_packs_url)s/%(name)s) | [%(name)s](%(base_packs_url)s/%(name)s) | %(description)s | %(keywords)s | [%(author)s](mailto:%(email)s) | %(version)s | [click](%(base_repo_url)s#%(name)s-pack)' % (values)
        lines.append(line)
    result = '\n'.join(lines)
    return result
def generate_pack_resources_tables(packs):
    """Render a per-pack resources section for every pack that has any."""
    sections = []
    for pack_name, metadata in packs:
        resources = get_pack_resources(pack=pack_name)
        section = generate_pack_resources_table(pack_name=pack_name,
                                                metadata=metadata,
                                                resources=resources)
        # Packs with no sensors/actions produce no section at all.
        if section:
            sections.append(section)
    return '\n\n'.join(sections)
def generate_pack_resources_table(pack_name, metadata, resources):
lines = []
if not resources['sensors'] and not resources['actions']:
return None
lines.append('###  %s pack' % (pack_name, metadata['icon_url'], pack_name))
lines.append('')
if resources['sensors']:
lines.append('#### Sensors')
lines.append('')
lines.append('Name | Description')
lines.append('---- | -----------')
for sensor in resources['sensors']:
lines.append('%s | %s' % (sensor['name'], sensor['description']))
if resources['actions']:
lines.append('')
if resources['actions']:
lines.append('#### Actions')
lines.append('')
lines.append('Name | Description')
lines.append('---- | -----------')
for action in resources['actions']:
lines.append('%s | %s' % (action['name'], action['description']))
result = '\n'.join(lines)
return result
def get_updated_readme(table):
    """Splice *table* into README.md between the "Available Packs" header
    and the license section, returning the updated document text."""
    with open(README_PATH, 'r') as fp:
        current_readme = fp.read()
    head = current_readme.split('## Available Packs\n\n')[0]
    tail = current_readme.split('## License, and Contributors Agreement')[1]
    middle = ('## Available Packs\n\n' + table + '\n\n'
              '## License, and Contributors Agreement')
    return head + middle + tail
def main(dry_run):
    """Regenerate the README pack tables.

    Args:
        dry_run (bool): when true, print the result instead of writing
            README.md.
    """
    packs_with_metadata = []
    for pack in get_pack_list():
        try:
            metadata = get_pack_metadata(pack=pack)
        except IOError:
            # Not a pack directory (no pack.yaml) -- skip it.
            continue
        packs_with_metadata.append((pack, metadata))
    table = '%s\n%s' % (generate_pack_list_table(packs=packs_with_metadata),
                        generate_pack_resources_tables(packs=packs_with_metadata))
    updated_readme = get_updated_readme(table=table)
    if dry_run:
        print(updated_readme)
    else:
        with open(README_PATH, 'w') as fp:
            fp.write(updated_readme)
# Command-line entry point: update README.md in place, or print the
# result to stdout when --dry-run is given.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--dry-run', help='Print the new readme to stdout',
                        action='store_true', default=False)
    args = parser.parse_args()
    main(dry_run=args.dry_run)
| |
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE.txt file distributed with
# this work for additional information regarding copyright ownership. The ASF
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Agile Web Grid. HTTP-based profile and product servers.
'''
__docformat__ = 'restructuredtext'
from oodt.query import Query
from xml.dom.minidom import parseString, getDOMImplementation
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import cgi, os, shutil, stat
# The two categories of query handlers a grid server can host.
_validKinds = ('profile', 'product')
# Shared empty DOM document used when serializing profiles to XML.
_doc = getDOMImplementation().createDocument(None, None, None) # qname, nsuri, doctype
class WebGridRequestHandler(BaseHTTPRequestHandler):
'''HTTP request handler for Web-Grid requests. This request handler accepts GET
or POST requests and directs them to profile or product handlers. Additionally,
requests to install new handlers, to list currently installed handlers, and to
remove handlers by ID are supported.
'''
def do_POST(self):
'''Handle a POST.
'''
try:
length = int(self.headers['content-length'])
kind = self.headers['content-type']
if kind.endswith('www-form-urlencoded'):
if length > 0:
params = cgi.parse_qs(self.rfile.read(length), True, True) # Keep blanks, strict parse
else:
params = {}
self.__execute(self.path, params)
else:
raise ValueError('Unknown encoding "%s"' % kind)
except Exception, e:
self.send_error(500, str(e))
def do_GET(self):
'''Handle a GET.
'''
try:
index = self.path.find('?')
if index >= 0:
params = cgi.parse_qs(self.path[index+1:], True, True) # Keep blanks, strict parse
path = self.path[0:index]
else:
params, path = {}, self.path
self.__execute(path, params)
except Exception, e:
self.send_error(500, str(e))
def __execute(self, path, params):
'''Execute an HTTP request.
'''
components = path.split('/')
if len(components) == 3:
context, command = components[1], components[2]
if context != self.server.serverID:
raise ValueError('Unknown server ID "%s"' % context)
func = getattr(self, command)
if callable(func):
func(params)
return
raise KeyError('Unknown command')
def echo(self, params):
'''Debugging method that echoes back the request parameters.
'''
u = unicode(params)
self.send_response(200)
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Content-length', str(len(u)))
self.end_headers()
self.wfile.write(u)
def sendEmptyResponse(self):
'''Send an empty response to the HTTP client.
'''
self.send_response(200)
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Content-length', '0')
self.end_headers()
def install(self, params):
'''Install a new handler. This will overwrite existing handlers with the
same ID.
'''
handlers = self.server.getHandlers(params['kind'][0])
globs = dict(globals())
del globs['__name__']
# TODO: use rexec or otherwise limit the code than can be uploaded.
exec params['code'][0] in globs, globs
handlers[params['id'][0]] = globs['handler']
self.sendEmptyResponse()
def remove(self, params):
'''Remove an existing handler.
'''
handlers = self.server.getHandlers(params['kind'][0])
del handlers[params['id'][0]]
self.sendEmptyResponse()
def list(self, params):
'''List installed handlers.
'''
handlers = {}
for kind in _validKinds:
handlers[kind] = self.server.getHandlers(kind).keys()
handlers = unicode(handlers)
self.send_response(200)
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Content-length', str(len(handlers)))
self.end_headers()
self.wfile.write(handlers)
def __createQuery(self, params):
'''Create a Query from the request parameters. This method prefers the
xmlq parameter and parses it as an XML document and into a Query object.
However, if it's not provided, or fails to parse, it'll use the q parameter,
which is expected to be just a query expression.
'''
try:
doc = parseString(params['xmlq'][0])
return Query(node=doc.documentElement)
except KeyError:
return Query(params['q'][0])
def sendProduct(self, match):
'''Send a matching product.
'''
self.send_response(200)
self.send_header('Content-type', match.contentType)
self.send_header('Content-length', str(match.length))
self.end_headers()
shutil.copyfileobj(match.data, self.wfile)
self.log_request(200, match.length)
def prod(self, params):
'''Handle a product query.
'''
query = self.__createQuery(params)
for handler in self.server.getHandlers('product'):
matches = handler.query(query)
if len(matches) > 0:
self.sendProduct(matches[0])
self.send_error(404, 'No matching products')
def prof(self, params):
'''Handle a profile query.
'''
query = self.__createQuery(params)
tmp = os.tmpfile()
tmp.writelines((u'<?xml version="1.0" encoding="UTF-8"?>\n',
u'<!DOCTYPE profiles PUBLIC "-//JPL//DTD Profile 1.1//EN"\n',
u' "http://oodt.jpl.nasa.gov/grid-profile/dtd/prof.dtd">\n',
u'<profiles>\n'))
for handler in self.server.getHandlers('profile').itervalues():
for profile in handler.query(query):
node = profile.toXML(_doc)
tmp.write(node.toxml())
tmp.write(u'</profiles>')
tmp.flush()
tmp.seek(0L)
self.send_response(200)
self.send_header('Content-type', 'text/xml;charset=utf-8')
size = os.fstat(tmp.fileno())[stat.ST_SIZE]
self.send_header('Content-length', str(size))
self.end_headers()
shutil.copyfileobj(tmp, self.wfile)
self.log_request(200, size)
class WebGridServer(HTTPServer):
    '''Web grid HTTP server. Dispatches incoming HTTP requests to a
    WebGridRequestHandler, and owns the server ID plus the registries of
    installed profile and product handlers.
    '''
    def __init__(self, addr, serverID):
        '''Save the server ID and start with an empty handler registry for
        each valid kind.
        '''
        HTTPServer.__init__(self, addr, WebGridRequestHandler)
        self.serverID = serverID
        self.__handlers = dict((kind, {}) for kind in _validKinds)
    def getHandlers(self, kind):
        '''Get the map of handlers for the given kind, which is either "product" or "profile".
        '''
        try:
            return self.__handlers[kind]
        except KeyError:
            raise ValueError('Invalid handler kind "%s"' % kind)
def _main():
    '''Run the web grid server on port 7576, using the first command-line
    argument as the server ID (default "oodt").
    '''
    import sys
    serverID = sys.argv[1] if len(sys.argv) > 1 else 'oodt'
    listenAddr = ('', 7576)
    httpd = WebGridServer(listenAddr, serverID)
    httpd.serve_forever()
# Start the server only when executed as a script.
if __name__ == '__main__':
    _main()
| |
#!/usr/bin/env python
import os
import sys
import time
import shlex
import struct
import platform
import subprocess
import pyglet
import cocos
from cocos.director import director
from cocos.audio.pygame.mixer import Sound
from cocos.audio.pygame import mixer
import colorama
colorama.init()
# support running from locations outside the bin directory assuming symbolic links aren't used
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import panzee.nmfe
class _Getch:
    """Read a single character from standard input without echoing it.

    Taken from http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user"""
    def __init__(self):
        # Prefer the Windows implementation; fall back to the Unix one
        # when msvcrt cannot be imported.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()
class _GetchUnix:
    """POSIX single-keypress reader using raw terminal mode."""
    def __init__(self):
        # Import eagerly so construction fails fast on unsupported systems.
        import tty, sys

    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        saved_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            key = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read is interrupted.
            termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
        return key
class _GetchWindows:
    """Windows single-keypress reader backed by msvcrt."""
    def __init__(self):
        # Import eagerly so construction fails fast off Windows.
        import msvcrt

    def __call__(self):
        import msvcrt
        return msvcrt.getch()
# Module-level singleton: call getch() anywhere to read one keypress.
getch = _Getch()
# taken from https://gist.github.com/jtriley/1108174
def get_terminal_size():
    """Return the console size as a (width, height) tuple.

    Works on linux, os x, windows and cygwin (windows); falls back to
    (80, 25) when no probe succeeds.

    originally retrieved from:
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    """
    current_os = platform.system()
    size = None
    if current_os == 'Windows':
        size = _get_terminal_size_windows()
        if size is None:
            # needed for window's python in cygwin's xterm!
            size = _get_terminal_size_tput()
    if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
        size = _get_terminal_size_linux()
    if size is None:
        size = (80, 25)      # default value
    return size
def _get_terminal_size_windows():
    """Query the Win32 console API for (width, height); None on failure."""
    try:
        from ctypes import windll, create_string_buffer
        # Std handles: stdin is -10, stdout is -11, stderr is -12.
        handle = windll.kernel32.GetStdHandle(-12)
        info = create_string_buffer(22)
        ok = windll.kernel32.GetConsoleScreenBufferInfo(handle, info)
        if not ok:
            return None
        fields = struct.unpack("hhhhHhhhhhh", info.raw)
        # Window rectangle is fields 5..8: left, top, right, bottom.
        left, top, right, bottom = fields[5], fields[6], fields[7], fields[8]
        return right - left + 1, bottom - top + 1
    except:
        pass
def _get_terminal_size_tput():
    """Fall back to the ``tput`` utility to query the terminal size.

    Returns (cols, rows), or None when tput is unavailable or fails.
    """
    # get terminal width
    # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    try:
        # BUG FIX: check_call() returns the process exit status (0), not
        # its output, so the original always produced (0, 0). Use
        # check_output() to capture what tput actually prints.
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
    except:
        pass
def _get_terminal_size_linux():
    """Return (cols, rows) for a POSIX terminal, or None when unknown."""
    def _winsize(fd):
        # TIOCGWINSZ yields (rows, cols) packed as two shorts.
        try:
            import fcntl
            import termios
            return struct.unpack('hh',
                                 fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except:
            return None
    size = _winsize(0) or _winsize(1) or _winsize(2)
    if not size:
        # None of the standard fds is a terminal; try the controlling tty.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            size = _winsize(fd)
            os.close(fd)
        except:
            pass
    if not size:
        # Last resort: the LINES/COLUMNS environment variables.
        try:
            size = (os.environ['LINES'], os.environ['COLUMNS'])
        except:
            return None
    # size is (rows, cols); callers expect (width, height).
    return int(size[1]), int(size[0])
def clear_screen():
    """Clear the terminal window on both Windows and POSIX systems."""
    # BUG FIX: sys.platform is never "nt" (that value belongs to
    # os.name), so the original ran "clear" on Windows too. Check
    # os.name instead.
    if os.name == "nt":
        _ = os.system("cls")
    else:
        _ = os.system("clear")
class ConsoleView(object):
    """Text-mode renderer/driver for the visual-novel runtime.

    Implements a small state machine: IDLE executes runtime commands,
    DIALOGUE types text out one character at a time, DRAW_CHOICE prints
    a numbered menu, and WAIT_INPUT blocks on a keypress.
    """
    # View states; transitions happen in mainloop()/update().
    DIALOGUE, DRAW_CHOICE, WAIT_INPUT, IDLE = range(4)
    ACTOR_COLOR = colorama.Fore.BLUE
    DIALOGUE_COLOR = colorama.Fore.WHITE
    # Seconds between characters of the typewriter effect.
    DIALOGUE_DELAY = 0.05
    CHOICE_COLOR = colorama.Fore.RED
    HELP_COLOR = colorama.Fore.GREEN
    HELP_STR = "d = next, s = exit, 1/2/3/etc. used for choice selection"
    def __init__(self):
        self.state = ConsoleView.IDLE
        self._dialogue_buffer = ""  # becomes a list of chars in display_dialogue()
        self._need_refresh = True  # redraw the static chrome on next update()
        self._speaker = None
        self._speaker_drawn = False  # speaker name is printed once per line
        self._choices = []
    def display_background(self, _, __):
        # No-op: the console view has no graphics.
        pass
    def clear_background(self):
        # No-op: the console view has no graphics.
        pass
    def play_audio(self, _):
        # No-op: the console view has no audio.
        pass
    def stop_audio(self):
        # No-op: the console view has no audio.
        pass
    def get_speaker(self):
        """Return the current speaker name (or None)."""
        return self._speaker
    def set_speaker(self, speaker):
        """Set the name printed above the next dialogue line."""
        self._speaker = speaker
    def display_dialogue(self, dialogue):
        """Queue *dialogue* for character-by-character display."""
        self.state = ConsoleView.DIALOGUE
        self._dialogue_buffer = list(dialogue)
    def display_choices(self, choices):
        """Queue a list of choice strings for display."""
        self._choices = choices
        self.state = ConsoleView.DRAW_CHOICE
    def get_selected_choice(self):
        """Block for a keypress and return it as a 1-based choice number.

        NOTE(review): int(getch()) raises ValueError on a non-digit key --
        presumably the runtime tolerates/repeats this; confirm with caller.
        """
        choice = int(getch())
        self._need_refresh = True
        self.state = ConsoleView.IDLE
        return choice
    def wait(self, duration):
        """Pause for *duration* seconds."""
        time.sleep(duration)
    def update(self):
        """Render one frame according to the current state."""
        if self._need_refresh is True:
            self._draw_interface()
            self._need_refresh = False
        if self.state is ConsoleView.DIALOGUE:
            self._draw_speaker()
            self._draw_dialogue()
        elif self.state is ConsoleView.DRAW_CHOICE:
            self._draw_choices()
    def restore_context(self, commands):
        """Reset audio/visual state and replay *commands* (e.g. after a load)."""
        self.clear_background()
        self.stop_audio()
        for command in commands:
            command.execute()
    def mainloop(self, runtime):
        """Drive *runtime* until it is exhausted, handling user input.

        'd' advances past a finished dialogue line; 's' exits the program.
        """
        while True:
            if runtime.can_step() is False and self.state is ConsoleView.IDLE:
                break
            if self.state is ConsoleView.IDLE:
                command = runtime.step()
                command.execute()
            elif self.state is ConsoleView.WAIT_INPUT:
                # Block until the user advances or quits.
                while True:
                    char = getch()
                    if char == "d":
                        self.state = ConsoleView.IDLE
                        break
                    elif char == "s":
                        clear_screen()
                        sys.exit(0)
            self.update()
        clear_screen()
        print "End of file reached. Press any key to exit."
        getch()
    def _draw_interface(self):
        # Repaint the static chrome: help line plus blank filler so the
        # dialogue appears near the bottom of the terminal.
        clear_screen()
        print ConsoleView.HELP_COLOR + ConsoleView.HELP_STR + colorama.Style.RESET_ALL
        _, height = get_terminal_size()
        print "\n" * (height - 6)
    def _draw_speaker(self):
        # Print the speaker name once per dialogue line.
        if self._speaker and self._speaker_drawn is False:
            print ConsoleView.ACTOR_COLOR + self._speaker + colorama.Style.RESET_ALL
            self._speaker_drawn = True
    def _draw_dialogue(self):
        # Emit one character per call (typewriter effect); when the buffer
        # empties, wait for the user to advance.
        next_char = self._dialogue_buffer.pop(0)
        sys.stdout.write(ConsoleView.DIALOGUE_COLOR + next_char + colorama.Style.RESET_ALL)
        sys.stdout.flush()
        time.sleep(ConsoleView.DIALOGUE_DELAY)
        if len(self._dialogue_buffer) < 1:
            self.state = ConsoleView.WAIT_INPUT
            self._need_refresh = True
            self._speaker_drawn = False
            return
    def _draw_choices(self):
        # Print the numbered choice menu, then return to IDLE so the
        # runtime can ask for the selection.
        for index, choice in enumerate(self._choices):
            print "%s%d. %s%s" % (ConsoleView.CHOICE_COLOR, index + 1, choice, colorama.Style.RESET_ALL)
        self.state = ConsoleView.IDLE
class CocosUILayer(cocos.layer.ColorLayer):
    """Semi-transparent grey bar along the bottom of the window.

    Hosts the speaker label and dialogue box, and forwards the
    advance-dialogue key to a pluggable ``advance_request`` hook.
    """
    is_event_handler = True
    UI_COLOR = (200, 200, 200)
    UI_OPACITY = 200
    UI_HEIGHT = 150
    def __init__(self):
        super(CocosUILayer, self).__init__(0, 0, 0, 0, CocosView.WIDTH, CocosUILayer.UI_HEIGHT)
        self.color = CocosUILayer.UI_COLOR
        self.opacity = CocosUILayer.UI_OPACITY
    def advance_request(self):
        # Default hook does nothing; CocosView rebinds this at startup.
        pass
    def on_key_release(self, key, _):
        # Keys >= 255 are non-printable modifiers/specials: ignore them.
        if key >= 255:
            return
        if chr(key) == CocosView.ADVANCE_NEXT_KEY:
            self.advance_request()
class CocosView(object):
    """cocos2d/pyglet front end for the NMFe runtime.

    Renders backgrounds, avatars, a dialogue box with a typewriter
    effect, and choice menus.  Sprite additions/removals are deferred
    through display/delete queues and flushed on the render thread.
    """
    # View states: typing dialogue, waiting on a menu choice, timed
    # wait, waiting for the advance key, or idle (ready for a command).
    DIALOGUE, GET_CHOICE, WAIT, WAIT_INPUT, IDLE = range(5)
    WIDTH = 800
    HEIGHT = 600
    TITLE = "NMFe Standalone Player"
    ACTOR_FONT_SIZE = 20
    DIALOGUE_FONT_SIZE = 16
    ADVANCE_NEXT_KEY = "n"
    FONT = "Sans Serif"
    DIALOGUE_DELAY = 0.035  # seconds between dialogue characters
    def __init__(self):
        self._actor_label = None        # cocos Label: speaker name
        self._dialogue_box = None       # cocos Label: dialogue text
        self._scene = None
        self._audio = None              # current Sound, or None
        self._background = None         # current background sprite, or None
        self._ui_layer = None
        self._image_layer = None        # layer holding background + avatars
        self._menu = None
        self._choice = None             # last selected choice (1-based)
        self._delete_queue = []         # sprites to kill on next flush
        self._display_queue = []        # (sprite, z) pairs to add on next flush
        self._dialogue_dirty = False    # dialogue box needs clearing
        self._dialogue_buffer = []      # characters still to be typed out
        self._avatars = {}              # speaker name -> avatar sprite
        self._speaker = None
        self._state = CocosView.IDLE
        self._start_time = -1           # wall-clock start of a timed wait
        self._duration = -1             # length of the timed wait, seconds
        self._update_speaker = False    # speaker label needs refreshing
    def display_dialogue(self, dialogue):
        """Queue `dialogue` for character-by-character display."""
        self._state = CocosView.DIALOGUE
        self._dialogue_dirty = True
        self._dialogue_buffer = list(dialogue)
    def set_speaker(self, speaker):
        self._speaker = speaker
        self._update_speaker = True
    def get_speaker(self):
        return self._speaker
    def display_background(self, background_path, transition):
        """Swap in the image at `background_path` as the backdrop.

        NOTE(review): the `transition` argument is accepted but unused here.
        """
        if self._background is not None:
            self._delete_queue.append(self._background)
        image = pyglet.image.load(background_path)
        self._background = cocos.sprite.Sprite(image)
        self._background.position = (CocosView.WIDTH / 2, CocosView.HEIGHT / 2)
        # z = -1 keeps the background behind all avatars.
        self._display_queue.append((self._background, -1))
    def clear_background(self):
        self._delete_queue.append(self._background)
        self._background = None
    def play_audio(self, audio_path):
        """Loop the audio file at `audio_path`, replacing any current track."""
        if self._audio is not None:
            self.stop_audio()
        self._audio = Sound(audio_path)
        self._audio.play(-1)  # -1 = loop indefinitely
    def stop_audio(self):
        if self._audio is not None:
            self._audio.stop()
            self._audio = None
    def display_avatar(self, avatar_path):
        """Show `avatar_path` as the current speaker's avatar.

        If the speaker already had an avatar its horizontal slot
        (left/center/right) is preserved for the replacement sprite.
        NOTE(review): the final `if already_displaying` append looks
        suspect — a brand-new avatar (else branch) is never queued for
        display, while a replacement may be queued twice (once via
        set_avatar_position, once here).  Confirm intended behaviour.
        """
        old_position = None
        already_displaying = False
        if self._avatars.has_key(self.get_speaker()):
            already_displaying = True
            current_avatar = self._avatars[self.get_speaker()]
            old_position = current_avatar.position
            self.remove_avatar(self.get_speaker())
        image = pyglet.image.load(avatar_path)
        sprite = cocos.sprite.Sprite(image)
        self._avatars[self.get_speaker()] = sprite
        if old_position:
            # Re-derive the named slot from the old x coordinate.
            old_x = old_position[0]
            if old_x < CocosView.WIDTH / 2:
                self.set_avatar_position(self.get_speaker(), "left")
            elif old_x > CocosView.WIDTH / 2:
                self.set_avatar_position(self.get_speaker(), "right")
            else:
                self.set_avatar_position(self.get_speaker(), "center")
        else:
            sprite.position = (CocosView.WIDTH / 2, sprite.height / 2)
        if already_displaying:
            self._display_queue.append((sprite, 0))
    def remove_avatar(self, avatar):
        """Queue the named speaker's avatar sprite for deletion."""
        self._delete_queue.append(self._avatars[avatar])
        del self._avatars[avatar]
    def set_avatar_position(self, avatar, position):
        """Move `avatar`'s sprite to the "left"/"center"/"right" slot."""
        sprite = self._avatars[avatar]
        if position == "left":
            sprite.position = (sprite.width / 2, sprite.height / 2)
        elif position == "center":
            sprite.position = (CocosView.WIDTH / 2, sprite.height / 2)
        elif position == "right":
            sprite.position = (CocosView.WIDTH - sprite.width / 2, sprite.height / 2)
        if sprite not in self._image_layer:
            self._display_queue.append((sprite, 0))
    def wait(self, duration):
        """Begin a non-blocking timed wait of `duration` seconds."""
        self._check_delete_queue()
        self._duration = duration
        self._start_time = time.time()
        self._state = CocosView.WAIT
    def restore_context(self, commands):
        """Reset visuals/audio and replay `commands` (e.g. after a load)."""
        self._check_delete_queue()
        self.clear_background()
        self.stop_audio()
        for command in commands:
            command.execute()
    def display_choices(self, choices):
        """Show `choices` (list of strings) as an on-screen menu."""
        self._state = CocosView.GET_CHOICE
        self._refresh_menu()
        choice_items = []
        for index, text in enumerate(choices):
            # counting in NMF starts at one, not zero, so we have to add one
            # to make up the difference
            callback = lambda: self._set_choice(index + 1)
            choice_items.append(cocos.menu.MenuItem(text, callback))
        self._menu.create_menu(choice_items)
        self._show_menu()
    def get_selected_choice(self):
        return self._choice
    def mainloop(self, runtime):
        """Initialise cocos/pyglet and run the scene until the runtime ends."""
        mixer.init()
        director.init(width=CocosView.WIDTH, height=CocosView.HEIGHT, caption=CocosView.TITLE)
        self._runtime = runtime
        self._init_interface()
        # Per-frame state update plus a throttled dialogue renderer.
        self._scene.schedule(self._update)
        self._scene.schedule_interval(self._render_dialogue, CocosView.DIALOGUE_DELAY)
        director.run(self._scene)
    def _update(self, _):
        # Scheduled every frame: step the runtime when idle, exit when
        # exhausted, and expire timed waits.
        if self._runtime.can_step() is False and self._state is CocosView.IDLE:
            pyglet.app.exit()
        elif self._state is CocosView.IDLE:
            command = self._runtime.step()
            command.execute()
        elif self._state is CocosView.WAIT:
            current_time = time.time()
            if current_time - self._start_time >= self._duration:
                self._state = CocosView.IDLE
    def _render_dialogue(self, _):
        # Scheduled at DIALOGUE_DELAY: emits one character per tick and
        # keeps the speaker label in sync.
        if self._dialogue_dirty:
            self._dialogue_dirty = False
            self._dialogue_box.element.text = ""
        if self._state is CocosView.DIALOGUE:
            self._check_delete_queue()
            self._check_display_queue()
            if len(self._dialogue_buffer) == 0:
                self._state = CocosView.WAIT_INPUT
            else:
                self._dialogue_box.element.text += self._dialogue_buffer.pop(0)
        if self._update_speaker:
            if self._speaker is None:
                self._actor_label.element.text = ""
            else:
                self._actor_label.element.text = self._speaker
            self._update_speaker = False
    def _advance_request(self):
        # Advance key: first press reveals all remaining dialogue at
        # once; the next press returns control to the runtime.
        if self._state is not CocosView.GET_CHOICE:
            if len(self._dialogue_buffer) > 0:
                self._dialogue_box.element.text += ''.join(self._dialogue_buffer)
                self._dialogue_buffer = []
            else:
                self._state = CocosView.IDLE
    def _check_delete_queue(self):
        # Kill every queued sprite (removes it from its layer).
        while self._delete_queue:
            sprite = self._delete_queue.pop(0)
            sprite.kill()
    def _check_display_queue(self):
        # Add every queued (sprite, z) pair to the image layer.
        while self._display_queue:
            sprite, z_value = self._display_queue.pop(0)
            self._image_layer.add(sprite, z=z_value)
    def _refresh_menu(self):
        # Discard any stale menu and build a fresh, styled one.
        if self._menu is not None:
            self._menu.kill()
        self._menu = cocos.menu.Menu()
        self._menu.font_item["font_size"] = 16
        self._menu.font_item_selected["font_size"] = 24
    def _hide_menu(self):
        if self._menu in self._scene:
            self._menu.kill()
    def _show_menu(self):
        self._scene.add(self._menu)
    def _set_choice(self, choice_index):
        # Menu-item callback: record the 1-based choice and resume.
        self._choice = choice_index
        self._hide_menu()
        self._state = CocosView.IDLE
    def _init_interface(self):
        # Build the scene graph: image layer underneath, UI bar on top
        # with the speaker label and the word-wrapped dialogue box.
        self._scene = cocos.scene.Scene()
        self._image_layer = cocos.layer.Layer()
        self._scene.add(self._image_layer)
        self._ui_layer = CocosUILayer()
        self._ui_layer.advance_request = self._advance_request
        self._actor_label = cocos.text.Label(font_name=CocosView.FONT,
                                             font_size=CocosView.ACTOR_FONT_SIZE,
                                             anchor_x="left",
                                             color=(100, 100, 150, 255))
        self._actor_label.position = (20, 120)
        self._ui_layer.add(self._actor_label)
        self._dialogue_box = cocos.text.Label(font_name=CocosView.FONT,
                                              font_size=CocosView.DIALOGUE_FONT_SIZE,
                                              anchor_x="left",
                                              color=(50, 50, 50, 255),
                                              width=CocosView.WIDTH - 100,
                                              multiline=True)
        self._dialogue_box.position = (20, 80)
        self._ui_layer.add(self._dialogue_box)
        self._scene.add(self._ui_layer)
def main():
    """Entry point: parse a scene file from argv and run it in a view.

    The NO_COCOS environment variable (any non-empty value) forces the
    plain-terminal view; otherwise the cocos2d view is used.
    """
    if os.getenv("NO_COCOS"):
        view = ConsoleView()
    else:
        view = CocosView()
    runtime = panzee.nmfe.Runtime(view)
    if len(sys.argv) < 2:
        print "Usage: nmfe-player scene"
        sys.exit(1)
    try:
        runtime.read(sys.argv[1])
    except panzee.nmfe.ParseException as e:
        print "Parse error:", str(e)
        sys.exit(1)
    view.mainloop(runtime)
# Standard script entry point guard.
if __name__ == "__main__":
    main()
| |
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
class TestFromHoudiniPolygonsConverter( IECoreHoudini.TestCase ) :
def createBox( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
return box
def createTorus( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
torus.parm( "rows" ).set( 10 )
torus.parm( "cols" ).set( 10 )
return torus
def createPoints( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
points = geo.createNode( "scatter" )
points.parm( "npts" ).set( 5000 )
facet.setInput( 0, box )
points.setInput( 0, facet )
return points
    # creates a converter
    def testCreateConverter( self ) :
        """A box SOP should convert via FromHoudiniPolygonsConverter; returns the converter for reuse."""
        box = self.createBox()
        converter = IECoreHoudini.FromHoudiniPolygonsConverter( box )
        self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        return converter
    # creates a converter
    def testFactory( self ) :
        """The generic factory should pick the polygons converter for a box SOP."""
        box = self.createBox()
        converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box )
        self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        # explicitly requesting a MeshPrimitive result gives the same converter
        converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.MeshPrimitive )
        self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        # an impossible result type yields no converter at all
        converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter )
        self.assertEqual( converter, None )
        self.failUnless( IECore.TypeId.MeshPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() )
        # dummy converters can be created from a type or a list of types
        converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECore.TypeId.MeshPrimitive )
        self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECore.TypeId.MeshPrimitive ] )
        self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
    # performs geometry conversion
    def testDoConversion( self ) :
        """convert() on a box should produce a MeshPrimitive."""
        converter = self.testCreateConverter()
        result = converter.convert()
        self.assert_( result.isInstanceOf( IECore.TypeId.MeshPrimitive ) )
    def testConvertFromHOMGeo( self ) :
        """Converters can also be created directly from a HOM Geometry object."""
        geo = self.createBox().geometry()
        converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo )
        self.failUnless( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        result = converter.convert()
        self.failUnless( result.isInstanceOf( IECore.TypeId.MeshPrimitive ) )
        # an explicit result type is honoured too
        converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECore.TypeId.MeshPrimitive )
        self.failUnless( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        ## \todo: make sure we catch that bad_cast crash
    # convert a mesh
    def testConvertMesh( self ) :
        """A 10x10 torus converts to a 100-quad mesh with the expected bounds and topology."""
        torus = self.createTorus()
        converter = IECoreHoudini.FromHoudiniPolygonsConverter( torus )
        result = converter.convert()
        self.assertEqual( result.typeId(), IECore.MeshPrimitive.staticTypeId() )
        # Houdini's default torus has outer radius 1.5 in x
        bbox = result.bound()
        self.assertEqual( bbox.min.x, -1.5 )
        self.assertEqual( bbox.max.x, 1.5 )
        # 10 rows x 10 cols -> 100 vertices and 100 quads
        self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 100 )
        self.assertEqual( result.numFaces(), 100 )
        self.assertEqual( len( result.verticesPerFace ), 100 )
        for i in range( len( result.verticesPerFace ) ) :
            self.assertEqual( result.verticesPerFace[i], 4 )
        # 100 quads -> 400 face-vertices, all indexing valid points
        self.assertEqual( len( result.vertexIds ), 400 )
        for i in range( len( result.vertexIds ) ) :
            self.assert_( result.vertexIds[i] >= 0 )
            self.assert_( result.vertexIds[i] < 100 )
    # test prim/vertex attributes
    def testConvertPrimVertAttributes( self ) :
        """Point, vertex, primitive and detail attributes of all sizes/types
        should convert to primitive variables with the matching Cortex data
        type and interpolation (strings become indexed Constant data)."""
        torus = self.createTorus()
        geo = torus.parent()
        # add vertex normals
        facet = geo.createNode( "facet", node_name = "add_point_normals" )
        facet.parm("postnml").set(True)
        facet.setInput( 0, torus )
        # add a primitive colour attributes
        primcol = geo.createNode( "primitive", node_name = "prim_colour" )
        primcol.parm("doclr").set(1)
        primcol.parm("diffr").setExpression("rand($PR)")
        primcol.parm("diffg").setExpression("rand($PR+1)")
        primcol.parm("diffb").setExpression("rand($PR+2)")
        primcol.setInput( 0, facet )
        # add a load of different vertex attributes
        # (attribcreate "class" 3 == vertex attribute; "type" 1 == int,
        # 2 == vector, 3 == string)
        vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
        vert_f1.parm("name").set("vert_f1")
        vert_f1.parm("class").set(3)
        vert_f1.parm("value1").setExpression("$VTX*0.1")
        vert_f1.setInput( 0, primcol )
        vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
        vert_f2.parm("name").set("vert_f2")
        vert_f2.parm("class").set(3)
        vert_f2.parm("size").set(2)
        vert_f2.parm("value1").setExpression("$VTX*0.1")
        vert_f2.parm("value2").setExpression("$VTX*0.1")
        vert_f2.setInput( 0, vert_f1 )
        vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
        vert_f3.parm("name").set("vert_f3")
        vert_f3.parm("class").set(3)
        vert_f3.parm("size").set(3)
        vert_f3.parm("value1").setExpression("$VTX*0.1")
        vert_f3.parm("value2").setExpression("$VTX*0.1")
        vert_f3.parm("value3").setExpression("$VTX*0.1")
        vert_f3.setInput( 0, vert_f2 )
        vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
        vert_i1.parm("name").set("vert_i1")
        vert_i1.parm("class").set(3)
        vert_i1.parm("type").set(1)
        vert_i1.parm("value1").setExpression("$VTX*0.1")
        vert_i1.setInput( 0, vert_f3 )
        vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
        vert_i2.parm("name").set("vert_i2")
        vert_i2.parm("class").set(3)
        vert_i2.parm("type").set(1)
        vert_i2.parm("size").set(2)
        vert_i2.parm("value1").setExpression("$VTX*0.1")
        vert_i2.parm("value2").setExpression("$VTX*0.1")
        vert_i2.setInput( 0, vert_i1 )
        vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
        vert_i3.parm("name").set("vert_i3")
        vert_i3.parm("class").set(3)
        vert_i3.parm("type").set(1)
        vert_i3.parm("size").set(3)
        vert_i3.parm("value1").setExpression("$VTX*0.1")
        vert_i3.parm("value2").setExpression("$VTX*0.1")
        vert_i3.parm("value3").setExpression("$VTX*0.1")
        vert_i3.setInput( 0, vert_i2 )
        vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
        vert_v3f.parm("name").set("vert_v3f")
        vert_v3f.parm("class").set(3)
        vert_v3f.parm("type").set(2)
        vert_v3f.parm("value1").setExpression("$VTX*0.1")
        vert_v3f.parm("value2").setExpression("$VTX*0.1")
        vert_v3f.parm("value3").setExpression("$VTX*0.1")
        vert_v3f.setInput( 0, vert_i3 )
        vertString = geo.createNode( "attribcreate", node_name = "vertString", exact_type_name=True )
        vertString.parm("name").set("vertString")
        vertString.parm("class").set(3)
        vertString.parm("type").set(3)
        vertString.parm("string").set("string $VTX!")
        vertString.setInput( 0, vert_v3f )
        detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
        detail_i3.parm("name").set("detail_i3")
        detail_i3.parm("class").set(0)
        detail_i3.parm("type").set(1)
        detail_i3.parm("size").set(3)
        detail_i3.parm("value1").set(123)
        detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
        detail_i3.parm("value3").set(789)
        detail_i3.setInput( 0, vertString )
        out = geo.createNode( "null", node_name="OUT" )
        out.setInput( 0, detail_i3 )
        # convert it all
        converter = IECoreHoudini.FromHoudiniPolygonsConverter( out )
        self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
        result = converter.convert()
        self.assert_( result.isInstanceOf( IECore.TypeId.MeshPrimitive ) )
        # topology checks, same as testConvertMesh
        bbox = result.bound()
        self.assertEqual( bbox.min.x, -1.5 )
        self.assertEqual( bbox.max.x, 1.5 )
        self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 100 )
        self.assertEqual( result.numFaces(), 100 )
        self.assertEqual( len( result.verticesPerFace ), 100 )
        for i in range( len( result.verticesPerFace ) ) :
            self.assertEqual( result.verticesPerFace[i], 4 )
        self.assertEqual( len( result.vertexIds ), 400 )
        for i in range( len( result.vertexIds ) ) :
            self.assert_( result.vertexIds[i] >= 0 )
            self.assert_( result.vertexIds[i] < 100 )
        # test point attributes
        self.assert_( "P" in result )
        self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
        self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
        self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
        self.assert_( "N" in result )
        self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
        self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
        self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
        # test detail attributes
        # (the float value2 should have been truncated to an int)
        self.assert_( "detail_i3" in result )
        self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
        self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( result['detail_i3'].data.value.x, 123 )
        self.assertEqual( result['detail_i3'].data.value.y, 456 )
        self.assertEqual( result['detail_i3'].data.value.z, 789 )
        # test primitive attributes
        self.assert_( "Cs" in result )
        self.assertEqual( result["Cs"].data.typeId(), IECore.TypeId.Color3fVectorData )
        self.assertEqual( result["Cs"].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform )
        self.assertEqual( result["Cs"].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
        for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) :
            for j in range( 0, 3 ) :
                self.assert_( result["Cs"].data[i][j] >= 0.0 )
                self.assert_( result["Cs"].data[i][j] <= 1.0 )
        # test vertex attributes
        attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f", "vertStringIndices" ]
        for a in attrs :
            self.assert_( a in result )
            self.assertEqual( result[a].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
            self.assertEqual( result[a].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.FaceVarying ) )
        self.assertEqual( result["vert_f1"].data.typeId(), IECore.FloatVectorData.staticTypeId() )
        self.assertEqual( result["vert_f2"].data.typeId(), IECore.V2fVectorData.staticTypeId() )
        self.assertEqual( result["vert_f3"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
        # $VTX runs 0-3 per quad, so $VTX*0.1 stays in [0, 0.4)
        for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.FaceVarying ) ) :
            for j in range( 0, 3 ) :
                self.assert_( result["vert_f3"].data[i][j] >= 0.0 )
                self.assert_( result["vert_f3"].data[i][j] < 0.4 )
        self.assertEqual( result["vert_i1"].data.typeId(), IECore.IntVectorData.staticTypeId() )
        self.assertEqual( result["vert_i2"].data.typeId(), IECore.V2iVectorData.staticTypeId() )
        self.assertEqual( result["vert_i3"].data.typeId(), IECore.V3iVectorData.staticTypeId() )
        # $VTX*0.1 truncates to 0 for every int vertex attribute
        for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.FaceVarying ) ) :
            for j in range( 0, 3 ) :
                self.assertEqual( result["vert_i3"].data[i][j], 0 )
        self.assertEqual( result["vert_v3f"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
        # string attributes arrive as an indexed pair: the unique values
        # ("string 0!".."string 3!") plus per-face-vertex indices
        self.assertEqual( result["vertString"].data.typeId(), IECore.TypeId.StringVectorData )
        self.assertEqual( result["vertString"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( result["vertString"].data.size(), 4 )
        self.assertEqual( result["vertStringIndices"].data.typeId(), IECore.TypeId.IntVectorData )
        for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.FaceVarying ) ) :
            index = result["vertStringIndices"].data[ result.vertexIds[i] ]
            self.assertEqual( result["vertString"].data[ index ], "string %d!" % index )
        self.assert_( result.arePrimitiveVariablesValid() )
    def testConvertNull( self ) :
        """A SOP with no geometry converts to an empty (default) MeshPrimitive."""
        obj = hou.node("/obj")
        geo = obj.createNode("geo", run_init_scripts=False)
        null = geo.createNode( "null" )
        m = IECoreHoudini.FromHoudiniPolygonsConverter( null ).convert()
        self.failUnless( isinstance( m, IECore.MeshPrimitive ) )
        self.assertEqual( m, IECore.MeshPrimitive() )
    # convert some points
    def testConvertPoints( self ) :
        """Point-only geometry has no polygons, so the result is an empty mesh."""
        points = self.createPoints()
        m = IECoreHoudini.FromHoudiniPolygonsConverter( points ).convert()
        self.failUnless( isinstance( m, IECore.MeshPrimitive ) )
        self.assertEqual( m, IECore.MeshPrimitive() )
    # simple attribute conversion
    def testSetupAttributes( self ) :
        """Attach an attribcreate SOP (a single float point attribute named
        "test_attribute") to a torus, verify it converts, and return the
        SOP so other tests can reconfigure it."""
        torus = self.createTorus()
        geo = torus.parent()
        attr = geo.createNode( "attribcreate", exact_type_name=True )
        attr.setInput( 0, torus )
        attr.parm("name").set( "test_attribute" )
        attr.parm("type").set(0) # float
        attr.parm("size").set(1) # 1 element
        attr.parm("value1").set(123.456)
        attr.parm("value2").set(654.321)
        converter = IECoreHoudini.FromHoudiniPolygonsConverter( attr )
        result = converter.convert()
        self.assert_( "test_attribute" in result.keys() )
        # torus has 100 points, so the point attribute has 100 entries
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assert_( result.arePrimitiveVariablesValid() )
        return attr
    # testing point attributes and types
    def testPointAttributes( self ) :
        """Point attributes of every type/size (float, int, 2/3-tuples,
        string) should map to the corresponding VectorData type with
        Vertex interpolation (strings become indexed Constant data)."""
        attr = self.testSetupAttributes()
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        attr.parm("value1").set(123.456)
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatVectorData )
        self.assert_( result["test_attribute"].data[0] > 123.0 )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(1) # integer
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntVectorData )
        self.assertEqual( result["test_attribute"].data[0], 123 )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(0) # float
        attr.parm("size").set(2) # 2 elementS
        attr.parm("value2").set(456.789)
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fVectorData )
        self.assertEqual( result["test_attribute"].data[0], IECore.V2f( 123.456, 456.789 ) )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(1) # int
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iVectorData )
        self.assertEqual( result["test_attribute"].data[0], IECore.V2i( 123, 456 ) )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(0) # float
        attr.parm("size").set(3) # 3 elements
        attr.parm("value3").set(999.999)
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fVectorData )
        self.assertEqual( result["test_attribute"].data[0],IECore.V3f( 123.456, 456.789, 999.999 ) )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(1) # int
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iVectorData )
        self.assertEqual( result["test_attribute"].data[0], IECore.V3i( 123, 456, 999 ) )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set( 3 ) # string
        attr.parm( "string" ).set( "string $PT!" )
        # NOTE(review): this suddenly uses FromHoudiniPointsConverter in a
        # Polygons test — possibly a copy/paste from the points test file;
        # confirm whether FromHoudiniPolygonsConverter was intended.
        result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringVectorData )
        self.assertEqual( result["test_attribute"].data[10], "string 10!" )
        self.assertEqual( result["test_attribute"].data.size(), 100 )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( result["test_attributeIndices"].data.typeId(), IECore.TypeId.IntVectorData )
        self.assertEqual( result["test_attributeIndices"].data[10], 10 )
        self.assertEqual( result["test_attributeIndices"].data.size(), 100 )
        self.assertEqual( result["test_attributeIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assert_( result.arePrimitiveVariablesValid() )
    # testing detail attributes and types
    def testDetailAttributes( self ) :
        """Detail attributes of every type/size should map to the
        corresponding simple (non-vector) Data type with Constant
        interpolation."""
        attr = self.testSetupAttributes()
        attr.parm("class").set(0) # detail attribute
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        attr.parm("value1").set(123.456)
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatData )
        self.assert_( result["test_attribute"].data > IECore.FloatData( 123.0 ) )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(1) # integer
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntData )
        self.assertEqual( result["test_attribute"].data, IECore.IntData( 123 ) )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(0) # float
        attr.parm("size").set(2) # 2 elementS
        attr.parm("value2").set(456.789)
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fData )
        self.assertEqual( result["test_attribute"].data.value, IECore.V2f( 123.456, 456.789 ) )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(1) # int
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iData )
        self.assertEqual( result["test_attribute"].data.value, IECore.V2i( 123, 456 ) )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(0) # float
        attr.parm("size").set(3) # 3 elements
        attr.parm("value3").set(999.999)
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fData )
        self.assertEqual( result["test_attribute"].data.value, IECore.V3f( 123.456, 456.789, 999.999 ) )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set(1) # int
        result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iData )
        self.assertEqual( result["test_attribute"].data.value, IECore.V3i( 123, 456, 999 ) )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
        attr.parm("type").set( 3 ) # string
        attr.parm( "string" ).set( "string!" )
        # NOTE(review): uses FromHoudiniPointsConverter in a Polygons test —
        # possibly a copy/paste artefact; confirm intended converter.
        result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
        self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringData )
        self.assertEqual( result["test_attribute"].data.value, "string!" )
        self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assert_( result.arePrimitiveVariablesValid() )
    # testing that float[4] doesn't work!
    def testFloat4attr( self ) : # we can't deal with float 4's right now
        """A 4-element float attribute has no Cortex equivalent and must be skipped."""
        attr = self.testSetupAttributes()
        attr.parm("name").set( "test_attribute" )
        attr.parm("size").set(4) # 4 elements per point-attribute
        converter = IECoreHoudini.FromHoudiniPolygonsConverter( attr )
        result = converter.convert()
        self.assert_( "test_attribute" not in result.keys() ) # invalid due to being float[4]
        self.assert_( result.arePrimitiveVariablesValid() )
# testing conversion of animating geometry
def testAnimatingGeometry( self ) :
	"""Converting the same network at two frames must yield different meshes."""
	geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
	torus = geo.createNode( "torus" )
	facet = geo.createNode( "facet" )
	facet.parm( "postnml" ).set( True )
	mountain = geo.createNode( "mountain" )
	# drive the displacement from the (fractional) frame number
	mountain.parm( "offset1" ).setExpression( "$FF" )
	facet.setInput( 0, torus )
	mountain.setInput( 0, facet )
	hou.setFrame( 1 )
	meshFrame1 = IECoreHoudini.FromHoudiniPolygonsConverter( mountain ).convert()
	hou.setFrame( 2 )
	meshFrame2 = IECoreHoudini.FromHoudiniPolygonsConverter( mountain ).convert()
	# point positions and therefore the meshes themselves must differ
	self.assertNotEqual( meshFrame1["P"].data, meshFrame2["P"].data )
	self.assertNotEqual( meshFrame1, meshFrame2 )
# testing we can handle an object being deleted
def testObjectWasDeleted( self ) :
	"""An existing converter survives deletion of its source node."""
	geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
	torus = geo.createNode( "torus" )
	converter = IECoreHoudini.FromHoudiniPolygonsConverter( torus )
	resultBefore = converter.convert()
	torus.destroy()
	# the converter still returns the last converted geometry
	self.assertEqual( converter.convert(), resultBefore )
	# but a new converter cannot be constructed from the dead node
	self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniPolygonsConverter, torus ) )
# testing we can handle an object being deleted
def testObjectWasDeletedFactory( self ) :
	"""As testObjectWasDeleted, but via the factory create() method."""
	geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
	torus = geo.createNode( "torus" )
	converter = IECoreHoudini.FromHoudiniGeometryConverter.create( torus )
	resultBefore = converter.convert()
	torus.destroy()
	# the converter still returns the last converted geometry
	self.assertEqual( converter.convert(), resultBefore )
	# but the factory refuses to build a converter for the dead node
	self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniGeometryConverter.create, torus ) )
# testing converting a Houdini particle primitive with detail and point attribs
def testParticlePrimitive( self ) :
	"""A particle primitive with detail and point attribs cannot convert as polygons.

	Deleting the primitive (keeping its points) yields an empty, valid
	MeshPrimitive instead of raising.
	"""
	obj = hou.node("/obj")
	geo = obj.createNode( "geo", run_init_scripts=False )
	popnet = geo.createNode( "popnet" )
	location = popnet.createNode( "location" )
	# detail attribute: float[3]
	detailAttr = popnet.createOutputNode( "attribcreate", exact_type_name=True )
	detailAttr.parm("name").set( "float3detail" )
	detailAttr.parm("class").set( 0 ) # detail
	detailAttr.parm("type").set( 0 ) # float
	detailAttr.parm("size").set( 3 ) # 3 elements
	detailAttr.parm("value1").set( 1 )
	detailAttr.parm("value2").set( 2 )
	detailAttr.parm("value3").set( 3 )
	# point attribute: float[3]
	pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
	pointAttr.parm("name").set( "float3point" )
	pointAttr.parm("class").set( 2 ) # point
	pointAttr.parm("type").set( 0 ) # float
	pointAttr.parm("size").set( 3 ) # 3 elements
	pointAttr.parm("value1").set( 1 )
	pointAttr.parm("value2").set( 2 )
	pointAttr.parm("value3").set( 3 )
	# advance time so the popnet has emitted particles
	hou.setFrame( 5 )
	converter = IECoreHoudini.FromHoudiniPolygonsConverter( pointAttr )
	# a particle primitive is not a polygon source, so conversion must fail
	self.assertRaises( RuntimeError, converter.convert )
	add = pointAttr.createOutputNode( "add" )
	add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
	m = IECoreHoudini.FromHoudiniPolygonsConverter( add ).convert()
	# with no primitives left we expect an empty (but valid) mesh
	self.failUnless( isinstance( m, IECore.MeshPrimitive ) )
	self.assertEqual( m, IECore.MeshPrimitive() )
# testing winding order
def testWindingOrder( self ) :
	"""The converted winding order must give an upward-facing polygon normal."""
	geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
	grid = geo.createNode( "grid" )
	grid.parm( "rows" ).set( 2 )
	grid.parm( "cols" ).set( 2 )
	mesh = IECoreHoudini.FromHoudiniPolygonsConverter( grid ).convert()
	positions = mesh["P"].data
	ids = mesh.vertexIds
	self.assertEqual( ids.size(), 4 )
	# build the single quad's vertex loop and check its normal points up (+Y)
	loop = IECore.V3fVectorData( [ positions[ ids[i] ] for i in range( 0, 4 ) ] )
	self.assert_( IECore.polygonNormal( loop ).equalWithAbsError( IECore.V3f( 0, 1, 0 ), 0.0001 ) )
# testing vertex data order
def testVertexDataOrder( self ) :
	"""Vertex attribute values arrive reversed relative to Houdini's vertex order."""
	geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
	grid = geo.createNode( "grid" )
	grid.parm( "rows" ).set( 2 )
	grid.parm( "cols" ).set( 2 )
	attr = grid.createOutputNode( "attribcreate", exact_type_name=True )
	attr.parm( "name" ).set( "vertex" )
	attr.parm( "class" ).set( 3 ) # vertex attribute
	attr.parm( "type" ).set( 0 ) # float
	# store each vertex's own index so we can observe the ordering
	attr.parm( "value1" ).setExpression( "$VTX" )
	mesh = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
	# Houdini vertices 0..3 appear in reversed order on the Cortex mesh
	self.assertEqual( mesh["vertex"].data, IECore.FloatVectorData( [ 3, 2, 1, 0 ] ) )
def testEmptyStringAttr( self ) :
	"""An empty string attribute becomes a one-entry table plus per-prim indices."""
	torus = self.createTorus()
	attr = torus.parent().createNode( "attribcreate", exact_type_name=True )
	attr.setInput( 0, torus )
	attr.parm( "name" ).set( "test_attribute" )
	attr.parm( "type" ).set( 3 ) # string
	attr.parm( "string" ).set( "" )
	result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
	self.assert_( "test_attribute" in result.keys() )
	self.assert_( "test_attributeIndices" in result.keys() )
	# one empty string entry, referenced by all 100 primitives
	self.assertEqual( result["test_attribute"].data.size(), 1 )
	self.assertEqual( result["test_attributeIndices"].data.size(), 100 )
	self.assertEqual( result["test_attribute"].data[0], "" )
	for index in range( 0, 100 ) :
		self.assertEqual( result["test_attributeIndices"].data[index], 0 )
	self.assert_( result.arePrimitiveVariablesValid() )
def testName( self ) :
	"""Name attributes select which primitives a converter extracts.

	Builds a merge of a named torus and a named box, then converts with no
	filter, with exact-name filters, and with a wildcard filter (which
	produces a group with one child per name).
	"""
	torus = self.createTorus()
	name = torus.createOutputNode( "name" )
	name.parm( "name1" ).set( "torus" )
	box = torus.parent().createNode( "box" )
	name2 = box.createOutputNode( "name" )
	name2.parm( "name1" ).set( "box" )
	merge = name.createOutputNode( "merge" )
	merge.setInput( 1, name2 )
	converter = IECoreHoudini.FromHoudiniPolygonsConverter( merge )
	result = converter.convert()
	# names are not stored on the object at all
	self.assertEqual( result.blindData(), IECore.CompoundData() )
	self.assertFalse( "name" in result )
	self.assertFalse( "nameIndices" in result )
	# both torii were converted as one mesh (100 torus prims + 6 box prims)
	self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 106 )
	self.assertTrue( result.arePrimitiveVariablesValid() )
	converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "torus" )
	self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
	result = converter.convert()
	# names are not stored on the object at all
	self.assertEqual( result.blindData(), IECore.CompoundData() )
	self.assertFalse( "name" in result )
	self.assertFalse( "nameIndices" in result )
	# only the named polygons were converted
	self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 100 )
	self.assertTrue( result.arePrimitiveVariablesValid() )
	converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "box" )
	self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
	result = converter.convert()
	# names are not stored on the object at all
	self.assertEqual( result.blindData(), IECore.CompoundData() )
	self.assertFalse( "name" in result )
	self.assertFalse( "nameIndices" in result )
	# only the named polygons were converted
	self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 6 )
	self.assertTrue( result.arePrimitiveVariablesValid() )
	# the name filter will convert both, but keep them separate
	converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "*" )
	self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniGroupConverter ) ) )
	result = converter.convert()
	# children are ordered by name: box first, then torus
	numPrims = [ 6, 100 ]
	names = [ "box", "torus" ]
	self.assertEqual( result.blindData(), IECore.CompoundData() )
	for i in range( 0, len(result.children()) ) :
		child = result.children()[i]
		self.assertFalse( "name" in child )
		self.assertFalse( "nameIndices" in child )
		# each child carries its name in blindData instead of a primvar
		self.assertEqual( child.blindData(), IECore.CompoundData( { "name" : names[i] } ) )
		self.assertEqual( child.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), numPrims[i] )
		self.assertTrue( child.arePrimitiveVariablesValid() )
def testAttributeFilter( self ) :
	"""The attributeFilter parameter controls which primvars are converted.

	Note that filtering operates on the Houdini source attribute names
	(e.g. "uv", "Cd"), not on the converted Cortex names ("s"/"t", "Cs"),
	and "P" is always converted regardless of the filter.
	"""
	torus = self.createTorus()
	# add vertex normals
	facet = torus.createOutputNode( "facet", node_name = "add_point_normals" )
	facet.parm("postnml").set(True)
	# add a primitive colour attributes
	primcol = facet.createOutputNode( "primitive", node_name = "prim_colour" )
	primcol.parm("doclr").set(1)
	primcol.parm("diffr").setExpression("rand($PR)")
	primcol.parm("diffg").setExpression("rand($PR+1)")
	primcol.parm("diffb").setExpression("rand($PR+2)")
	detail = primcol.createOutputNode( "attribcreate", node_name = "detail", exact_type_name=True )
	detail.parm("name").set("detailAttr")
	detail.parm("class").set(0)
	detail.parm("type").set(1)
	detail.parm("size").set(3)
	detail.parm("value1").set(123)
	detail.parm("value2").set(456.789) # can we catch it out with a float?
	detail.parm("value3").set(789)
	uvunwrap = detail.createOutputNode( "uvunwrap" )
	converter = IECoreHoudini.FromHoudiniPolygonsConverter( uvunwrap )
	# no filter: everything converts
	self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "N", "P", "detailAttr", "s", "t", "varmap" ] )
	converter.parameters()["attributeFilter"].setTypedValue( "P" )
	self.assertEqual( sorted(converter.convert().keys()), [ "P" ] )
	converter.parameters()["attributeFilter"].setTypedValue( "* ^N ^varmap" )
	self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "detailAttr", "s", "t" ] )
	# P must be converted
	converter.parameters()["attributeFilter"].setTypedValue( "* ^P" )
	self.assertTrue( "P" in converter.convert().keys() )
	# have to filter the source attr uv and not s, t
	converter.parameters()["attributeFilter"].setTypedValue( "s t Cs" )
	self.assertEqual( sorted(converter.convert().keys()), [ "P" ] )
	converter.parameters()["attributeFilter"].setTypedValue( "s Cd" )
	self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P" ] )
	converter.parameters()["attributeFilter"].setTypedValue( "uv Cd" )
	self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "s", "t" ] )
def testStandardAttributeConversion( self ) :
	"""Standard Houdini attributes are renamed to Cortex conventions by default.

	With conversion on: Cd->Cs, uv->s/t (t flipped), pscale->width, rest->Pref.
	With convertStandardAttributes off, the raw Houdini names are preserved.
	"""
	torus = self.createTorus()
	color = torus.createOutputNode( "color" )
	color.parm( "colortype" ).set( 2 )
	rest = color.createOutputNode( "rest" )
	scale = rest.createOutputNode( "attribcreate" )
	scale.parm( "name1" ).set( "pscale" )
	scale.parm( "value1v1" ).setExpression( "$PT" )
	uvunwrap = scale.createOutputNode( "uvunwrap" )
	converter = IECoreHoudini.FromHoudiniPolygonsConverter( uvunwrap )
	result = converter.convert()
	self.assertEqual( result.keys(), [ "Cs", "P", "Pref", "s", "t", "varmap", "width" ] )
	self.assertTrue( result.arePrimitiveVariablesValid() )
	self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
	self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
	sData = result["s"].data
	tData = result["t"].data
	geo = uvunwrap.geometry()
	uvs = geo.findVertexAttrib( "uv" )
	i = 0
	# compare converted s/t against the raw Houdini uvs, accounting for
	# reversed vertex winding and the flipped t coordinate
	for prim in geo.prims() :
		verts = list(prim.vertices())
		verts.reverse()
		for vert in verts :
			uvValues = vert.attribValue( uvs )
			self.assertAlmostEqual( sData[i], uvValues[0] )
			self.assertAlmostEqual( tData[i], 1 - uvValues[1] )
			i += 1
	converter["convertStandardAttributes"].setTypedValue( False )
	result = converter.convert()
	# raw Houdini names are kept when conversion is disabled
	self.assertEqual( result.keys(), [ "Cd", "P", "pscale", "rest", "uv", "varmap" ] )
	self.assertTrue( result.arePrimitiveVariablesValid() )
	self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
	self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
	uvData = result["uv"].data
	geo = uvunwrap.geometry()
	uvs = geo.findVertexAttrib( "uv" )
	i = 0
	for prim in geo.prims() :
		verts = list(prim.vertices())
		verts.reverse()
		for vert in verts :
			uvValues = vert.attribValue( uvs )
			# uv is passed through unmodified (no t flip)
			self.assertAlmostEqual( uvData[i][0], uvValues[0] )
			self.assertAlmostEqual( uvData[i][1], uvValues[1] )
			i += 1
def testInterpolation( self ) :
	"""The ieMeshInterpolation prim attribute selects the mesh interpolation.

	"subdiv" maps to catmullClark (and drops normals); "poly" or no
	attribute maps to linear. The attribute itself is consumed, never
	converted to a primvar.
	"""
	torus = self.createTorus()
	normals = torus.createOutputNode( "facet" )
	normals.parm( "postnml" ).set( True )
	result = IECoreHoudini.FromHoudiniPolygonsConverter( normals ).convert()
	self.assertTrue( "ieMeshInterpolation" not in result.keys() )
	self.assertEqual( result.interpolation, "linear" )
	self.assertTrue( "N" in result.keys() )
	attr = normals.createOutputNode( "attribcreate", node_name = "interpolation", exact_type_name=True )
	attr.parm( "name" ).set( "ieMeshInterpolation" )
	attr.parm( "class" ).set( 1 ) # prim
	attr.parm( "type" ).set( 3 ) # string
	attr.parm( "string") .set( "subdiv" )
	result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
	self.assertTrue( "ieMeshInterpolation" not in result.keys() )
	# subdivision surfaces don't carry explicit normals
	self.assertEqual( result.interpolation, "catmullClark" )
	self.assertTrue( "N" not in result.keys() )
	attr.parm( "string") .set( "poly" )
	result = IECoreHoudini.FromHoudiniPolygonsConverter( attr ).convert()
	self.assertTrue( "ieMeshInterpolation" not in result.keys() )
	self.assertEqual( result.interpolation, "linear" )
	self.assertTrue( "N" in result.keys() )
def testRename( self ) :
	"""Renaming geometry twice still yields a single polygons conversion."""
	torus = self.createTorus()
	firstName = torus.createOutputNode( "name" )
	firstName.parm( "name1" ).set( "foo" )
	secondName = firstName.createOutputNode( "name" )
	secondName.parm( "name1" ).set( "bar" )
	converter = IECoreHoudini.FromHoudiniGeometryConverter.create( secondName )
	# a single renamed shape converts as polygons, not as a group
	self.assertTrue( isinstance( converter, IECoreHoudini.FromHoudiniPolygonsConverter ) )
	self.assertTrue( isinstance( converter.convert(), IECore.MeshPrimitive ) )
if __name__ == "__main__":
	# run all converter tests when this file is executed directly
	unittest.main()
| |
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import subprocess
from netaddr import IPAddress, IPNetwork
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.docker_host import DockerHost, CLUSTER_STORE_DOCKER_OPTIONS
from tests.st.utils.constants import DEFAULT_IPV4_POOL_CIDR
from tests.st.utils.route_reflector import RouteReflectorCluster
from tests.st.utils.utils import check_bird_status, retry_until_success
from time import sleep
from .peer import create_bgp_peer
"""
Test calico IPIP behaviour.
"""
class TestIPIP(TestBase):
    """System tests for calico IP-in-IP (tunl0) routing behaviour."""

    def tearDown(self):
        # Make sure a stale tunl0 address never leaks into the next test.
        self.remove_tunl_ip()

    @parameterized.expand([
        ('bird',),
        ('gobgp',),
    ])
    def test_ipip(self, backend):
        """
        Test IPIP routing with the different IPIP modes.

        This test modifies the working IPIP mode of the pool and monitors the
        traffic flow to ensure it either is or is not going over the IPIP
        tunnel as expected.
        """
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host2:
            # Before starting the node, create the default IP pool using the
            # v1.0.2 calicoctl. For calicoctl v1.1.0+, a new IPIP mode field
            # is introduced - by testing with an older pool version validates
            # the IPAM BIRD templates function correctly without the mode field.
            self.pool_action(host1, "create", DEFAULT_IPV4_POOL_CIDR, False,
                             calicoctl_version="v1.0.2")

            # Autodetect the IP addresses - this should ensure the subnet is
            # correctly configured.
            host1.start_calico_node("--ip=autodetect --backend={0}".format(backend))
            host2.start_calico_node("--ip=autodetect --backend={0}".format(backend))

            # Create a network and a workload on each host.
            network1 = host1.create_network("subnet1")
            workload_host1 = host1.create_workload("workload1",
                                                   network=network1)
            workload_host2 = host2.create_workload("workload2",
                                                   network=network1)

            # Allow network to converge.
            self.assert_true(
                workload_host1.check_can_ping(workload_host2.ip, retries=10))

            # Check connectivity in both directions
            self.assert_ip_connectivity(workload_list=[workload_host1,
                                                       workload_host2],
                                        ip_pass_list=[workload_host1.ip,
                                                      workload_host2.ip])

            # Note in the following we are making a number of configuration
            # changes and testing whether or not IPIP is being used.
            # The order of tests is deliberately chosen to flip between IPIP
            # and no IPIP because it is easier to look for a change of state
            # than to look for state remaining the same.

            # Turn on IPIP with a v1.0.2 calicoctl and check that the
            # IPIP tunnel is being used.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, True,
                             calicoctl_version="v1.0.2")
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     True)

            # Turn off IPIP using the latest version of calicoctl and check
            # that the IPIP tunnel is not being used. We'll use the latest
            # version of calicoctl for the remaining tests.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, False)
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     False)

            # Turn on IPIP, default mode (which is always use IPIP), and check
            # IPIP tunnel is being used.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, True)
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     True)

            # Turn off IPIP and check IPIP tunnel is not being used.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, False)
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     False)

            # Turn on IPIP mode "always", and check IPIP tunnel is being used.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, True,
                             ipip_mode="always")
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     True)

            # Turn on IPIP mode "cross-subnet", since both hosts will be on
            # the same subnet, IPIP should not be used.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, True,
                             ipip_mode="cross-subnet")
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     False)

            # Set the BGP subnet on both node resources to be a /32. This will
            # fool Calico into thinking they are on different subnets. IPIP
            # routing should be used.
            self.pool_action(host1, "replace", DEFAULT_IPV4_POOL_CIDR, True,
                             ipip_mode="cross-subnet")
            self.modify_subnet(host1, 32)
            self.modify_subnet(host2, 32)
            self.assert_ipip_routing(host1, workload_host1, workload_host2,
                                     True)

    def test_ipip_addr_assigned(self):
        """Check tunl0 address assignment tracks IP pool lifecycle events."""
        with DockerHost('host', dind=False, start_calico=False) as host:
            # Set up first pool before Node is started, to ensure we get
            # a tunl IP on boot.
            ipv4_pool = IPNetwork("10.0.1.0/24")
            self.pool_action(host, "create", ipv4_pool, True)
            host.start_calico_node()
            self.assert_tunl_ip(host, ipv4_pool, expect=True)

            # Disable the IP Pool, and make sure the tunl IP is not from this
            # IP pool anymore.
            self.pool_action(host, "apply", ipv4_pool, True, disabled=True)
            self.assert_tunl_ip(host, ipv4_pool, expect=False)

            # Re-enable the IP pool and make sure the tunl IP is assigned from
            # that IP pool again.
            self.pool_action(host, "apply", ipv4_pool, True)
            self.assert_tunl_ip(host, ipv4_pool, expect=True)

            # Test that removing pool removes the tunl IP.
            self.pool_action(host, "delete", ipv4_pool, True)
            self.assert_tunl_ip(host, ipv4_pool, expect=False)

            # Test that re-adding the pool triggers the confd watch and we
            # get an IP.
            self.pool_action(host, "create", ipv4_pool, True)
            self.assert_tunl_ip(host, ipv4_pool, expect=True)

            # Test that by adding another pool, then deleting the first,
            # we remove the original IP, and allocate a new one from the
            # new pool.
            new_ipv4_pool = IPNetwork("192.168.0.0/16")
            self.pool_action(host, "create", new_ipv4_pool, True)
            self.pool_action(host, "delete", ipv4_pool, True)
            self.assert_tunl_ip(host, new_ipv4_pool)

    def pool_action(self, host, action, cidr, ipip, disabled=False, ipip_mode="", calicoctl_version=None):
        """
        Perform an ipPool action.

        :param host: DockerHost to run calicoctl on.
        :param action: calicoctl verb ("create", "apply", "replace", "delete").
        :param cidr: pool CIDR (string or IPNetwork).
        :param ipip: whether IPIP is enabled for the pool.
        :param disabled: whether the pool is disabled for IPAM.
        :param ipip_mode: optional mode string (calicoctl v1.1.0+ only).
        :param calicoctl_version: optional calicoctl version to invoke.
        """
        testdata = {
            'apiVersion': 'v1',
            'kind': 'ipPool',
            'metadata': {
                'cidr': str(cidr)
            },
            'spec': {
                'ipip': {
                    'enabled': ipip
                },
                'disabled': disabled
            }
        }

        # Only add the mode field if a value is specified. Note that
        # the mode field will not be valid on pre-v1.1.0 versions of
        # calicoctl.
        if ipip_mode:
            testdata['spec']['ipip']['mode'] = ipip_mode

        host.writefile("testfile.yaml", testdata)
        host.calicoctl("%s -f testfile.yaml" % action, version=calicoctl_version)

    def assert_tunl_ip(self, host, ip_network, expect=True):
        """
        Helper function to make assertions on whether or not the tunl
        interface on the Host has been assigned an IP or not. This function
        will retry 7 times, ensuring that our 5 second confd watch will
        trigger.

        :param host: DockerHost object
        :param ip_network: IPNetwork object which describes the ip-range we do
               (or do not) expect to see an IP from on the tunl interface.
        :param expect: Whether or not we are expecting to see an IP from
               IPNetwork on the tunl interface.
        :return:
        """
        retries = 7
        for retry in range(retries + 1):
            try:
                output = host.execute("ip addr show tunl0")
                match = re.search(r'inet ([\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3})', output)
                if match:
                    ip_address = IPAddress(match.group(1))
                    if expect:
                        self.assertIn(ip_address, ip_network)
                    else:
                        self.assertNotIn(ip_address, ip_network)
                else:
                    self.assertFalse(expect, "No IP address assigned to tunl interface.")
            except Exception as e:
                # Keep retrying until the confd watch has had time to fire;
                # re-raise the last failure once retries are exhausted.
                if retry < retries:
                    sleep(1)
                else:
                    raise e
            else:
                return

    def remove_tunl_ip(self):
        """
        Remove the host tunl IP address if assigned.
        """
        # NOTE(review): on Python 3, check_output returns bytes, which would
        # break the str regex below - this suite appears to target Python 2.
        try:
            output = subprocess.check_output(["ip", "addr", "show", "tunl0"])
        except subprocess.CalledProcessError:
            # No tunl0 device - nothing to clean up.
            return

        match = re.search(r'inet ([\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3})', output)
        if not match:
            return

        ipnet = str(IPNetwork(match.group(1)))

        try:
            output = subprocess.check_output(["ip", "addr", "del", ipnet, "dev", "tunl0"])
        except subprocess.CalledProcessError:
            return

    def modify_subnet(self, host, prefixlen):
        """
        Update the calico node resource to use the specified prefix length.

        Returns the current mask size.
        """
        node = json.loads(host.calicoctl(
            "get node %s --output=json" % host.get_hostname()))
        assert len(node) == 1

        # Get the current network and prefix len
        ipnet = IPNetwork(node[0]["spec"]["bgp"]["ipv4Address"])
        current_prefix_len = ipnet.prefixlen

        # Update the prefix length
        ipnet.prefixlen = prefixlen
        node[0]["spec"]["bgp"]["ipv4Address"] = str(ipnet)

        # Write the data back again.
        host.writejson("new_data", node)
        host.calicoctl("apply -f new_data")
        return current_prefix_len

    def assert_ipip_routing(self, host1, workload_host1, workload_host2, expect_ipip):
        """
        Test whether IPIP is being used as expected on host1 when pinging
        workload_host2 from workload_host1.
        """
        def check():
            orig_tx = self.get_tunl_tx(host1)
            workload_host1.execute("ping -c 2 -W 1 %s" % workload_host2.ip)
            if expect_ipip:
                # Both echo requests should have left via the tunnel.
                assert self.get_tunl_tx(host1) == orig_tx + 2
            else:
                assert self.get_tunl_tx(host1) == orig_tx
        retry_until_success(check, retries=10)

    def get_tunl_tx(self, host):
        """
        Get the tunl TX count.
        """
        try:
            output = host.execute("ifconfig tunl0")
        except subprocess.CalledProcessError:
            return

        # BUGFIX: assert_ipip_routing compares *transmitted* packet deltas,
        # but the original pattern captured the RX counter. Match the
        # net-tools "TX packets:<n> ..." line instead.
        match = re.search(r'TX packets:(\d+) ',
                          output)
        return int(match.group(1))

    @parameterized.expand([
        (False,),
        (True,),
        (False, 'gobgp',),
        (True, 'gobgp',),
    ])
    def test_gce(self, with_ipip, backend='bird'):
        """Test with and without IP-in-IP routing on simulated GCE instances.

        In this test we simulate GCE instance routing, where there is a router
        between the instances, and each instance has a /32 address that
        appears not to be directly connected to any subnet. With that setup,
        connectivity between workloads on different hosts _should_ require
        IP-in-IP to be enabled. We test that we do get connectivity _with_
        IP-in-IP, that we don't get connectivity _without_ IP-in-IP, and that
        the situation updates dynamically if we toggle IP-in-IP with workloads
        already existing.

        Note that this test targets the BGP implementation, to check that it
        does IP-in-IP routing correctly, and handles the underlying GCE
        routing, and switches dynamically between IP-in-IP and normal routing
        as directed by calicoctl. (In the BIRD case, these are all points for
        which we've patched the upstream BIRD code.) But naturally it also
        involves calicoctl and confd, so it isn't _only_ about the BGP code.
        """
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        simulate_gce_routing=True,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        simulate_gce_routing=True,
                        start_calico=False) as host2:
            self._test_gce_int(with_ipip, backend, host1, host2, False)

    @parameterized.expand([
        (False,),
        (True,),
    ])
    def test_gce_rr(self, with_ipip):
        """As test_gce except with a route reflector instead of mesh config."""
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        simulate_gce_routing=True,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        simulate_gce_routing=True,
                        start_calico=False) as host2, \
             RouteReflectorCluster(1, 1) as rrc:
            self._test_gce_int(with_ipip, 'bird', host1, host2, rrc)

    def _test_gce_int(self, with_ipip, backend, host1, host2, rrc):
        """Shared body for the GCE simulations (mesh or route reflector)."""
        host1.start_calico_node("--backend={0}".format(backend))
        host2.start_calico_node("--backend={0}".format(backend))

        # Before creating any workloads, set the initial IP-in-IP state.
        host1.set_ipip_enabled(with_ipip)

        if rrc:
            # Set the default AS number - as this is used by the RR mesh,
            # and turn off the node-to-node mesh (do this from any host).
            host1.calicoctl("config set asNumber 64513")
            host1.calicoctl("config set nodeToNodeMesh off")
            # Peer from each host to the route reflector.
            for host in [host1, host2]:
                for rr in rrc.get_redundancy_group():
                    create_bgp_peer(host, "node", rr.ip, 64513)

        # Create a network and a workload on each host.
        network1 = host1.create_network("subnet1")
        workload_host1 = host1.create_workload("workload1",
                                               network=network1)
        workload_host2 = host2.create_workload("workload2",
                                               network=network1)

        for _ in [1, 2]:
            # Check we do or don't have connectivity between the workloads,
            # according to the IP-in-IP setting.
            if with_ipip:
                # Allow network to converge.
                self.assert_true(
                    workload_host1.check_can_ping(workload_host2.ip, retries=10))

                # Check connectivity in both directions
                self.assert_ip_connectivity(workload_list=[workload_host1,
                                                           workload_host2],
                                            ip_pass_list=[workload_host1.ip,
                                                          workload_host2.ip])

                # Check that we are using IP-in-IP for some routes.
                assert "tunl0" in host1.execute("ip r")
                assert "tunl0" in host2.execute("ip r")

                # Check that routes are not flapping: the following shell
                # script checks that there is no output for 10s from 'ip
                # monitor', on either host. The "-le 1" is to allow for
                # something (either 'timeout' or 'ip monitor', not sure)
                # saying 'Terminated' when the 10s are up. (Note that all
                # commands here are Busybox variants; I tried 'grep -v' to
                # eliminate the Terminated line, but for some reason it
                # didn't work.)
                for host in [host1, host2]:
                    host.execute("changes=`timeout -t 10 ip -t monitor 2>&1`; " +
                                 "echo \"$changes\"; " +
                                 "test `echo \"$changes\" | wc -l` -le 1")
            else:
                # Expect non-connectivity between workloads on different
                # hosts.
                self.assert_false(
                    workload_host1.check_can_ping(workload_host2.ip, retries=10))

            if not rrc:
                # Check the BGP status on each host.
                check_bird_status(host1, [("node-to-node mesh", host2.ip, "Established")])
                check_bird_status(host2, [("node-to-node mesh", host1.ip, "Established")])

            # Flip the IP-in-IP state for the next iteration.
            with_ipip = not with_ipip
            host1.set_ipip_enabled(with_ipip)


TestIPIP.batchnumber = 4  # Add batch label to these tests for parallel running
| |
from typing import Optional, Tuple
import gdsfactory as gf
from gdsfactory.add_padding import get_padding_points
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.cross_section import strip
from gdsfactory.port import Port
from gdsfactory.types import CrossSectionFactory, Layer
@cell
def taper(
    length: float = 10.0,
    width1: float = 0.5,
    width2: Optional[float] = None,
    port: Optional[Port] = None,
    with_cladding_box: bool = True,
    cross_section: CrossSectionFactory = strip,
    **kwargs
) -> Component:
    """Linear taper.

    Deprecated, use gf.components.taper_cross_section instead

    Args:
        length: taper length.
        width1: width of the west port.
        width2: width of the east port (defaults to width1).
        port: can taper from a port instead of defining width1.
        with_cladding_box: to avoid DRC acute angle errors in cladding.
        cross_section: cross_section factory.
        kwargs: cross_section settings.
    """
    x = cross_section(**kwargs)
    layer = x.info["layer"]

    # take the start width from the supplied port when width1 was not given
    if isinstance(port, gf.Port) and width1 is None:
        width1 = port.width
    if width2 is None:
        width2 = width1

    # per-end cross sections carrying the correct port widths
    kwargs.update(width=width1)
    x1 = cross_section(**kwargs)
    kwargs.update(width=width2)
    x2 = cross_section(**kwargs)

    half_west = width1 / 2
    half_east = width2 / 2

    c = gf.Component()
    # trapezoid running west (width1) to east (width2)
    c.add_polygon(
        ([0, length, length, 0], [half_west, half_east, -half_east, -half_west]),
        layer=layer,
    )
    c.add_port(
        name="o1",
        midpoint=[0, 0],
        width=width1,
        orientation=180,
        layer=layer,
        cross_section=x1,
    )
    c.add_port(
        name="o2",
        midpoint=[length, 0],
        width=width2,
        orientation=0,
        layer=layer,
        cross_section=x2,
    )

    if with_cladding_box and x.info["layers_cladding"]:
        layers_cladding = x.info["layers_cladding"]
        cladding_offset = x.info["cladding_offset"]
        points = get_padding_points(
            component=c,
            default=0,
            bottom=cladding_offset,
            top=cladding_offset,
        )
        for cladding_layer in layers_cladding or []:
            c.add_polygon(points, layer=cladding_layer)

    c.info["length"] = float(length)
    c.info["width1"] = float(width1)
    c.info["width2"] = float(width2)
    return c
@gf.cell
def taper_strip_to_ridge(
    length: float = 10.0,
    width1: float = 0.5,
    width2: float = 0.5,
    w_slab1: float = 0.15,
    w_slab2: float = 6.0,
    layer_wg: Layer = gf.LAYER.WG,
    layer_slab: Layer = gf.LAYER.SLAB90,
    layers_cladding: Optional[Tuple[Layer, ...]] = None,
    cladding_offset: float = 3.0,
    cross_section: CrossSectionFactory = strip,
) -> Component:
    r"""Linear taper from strip to rib.

    Deprecated, use gf.components.taper_cross_section instead

    Args:
        length: taper length.
        width1: strip width at the input.
        width2: strip width at the output.
        w_slab1: slab width at the input.
        w_slab2: slab width at the output.
        layer_wg: waveguide layer.
        layer_slab: slab layer.
        layers_cladding: optional cladding layers.
        cladding_offset: padding for the cladding polygons.
        cross_section: for input waveguide.

    .. code::

                      __________________________
                     /           |
             _______/____________|______________
                   /             |
         width1   |w_slab1       | w_slab2  width2
             ______\_____________|______________
                    \            |
                     \__________________________

    """
    # input waveguide cross section starts at width1
    cross_section = gf.partial(cross_section, width=width1)

    # waveguide-layer taper plus the (wider) slab-layer taper
    taper_wg = taper(
        length=length,
        width1=width1,
        width2=width2,
        layer=layer_wg,
        cross_section=cross_section,
    )
    taper_slab = taper(
        length=length,
        width1=w_slab1,
        width2=w_slab2,
        layer=layer_slab,
    )

    c = gf.Component()
    for sub_taper in (taper_wg, taper_slab):
        ref = sub_taper.ref()
        c.add(ref)
        c.absorb(ref)

    c.info["length"] = float(length)
    # strip side exposes o1, slab side exposes o2
    c.add_port(name="o1", port=taper_wg.ports["o1"])
    c.add_port(name="o2", port=taper_slab.ports["o2"])

    if layers_cladding:
        points = get_padding_points(
            component=c,
            default=0,
            bottom=cladding_offset,
            top=cladding_offset,
        )
        for layer in layers_cladding:
            c.add_polygon(points, layer=layer)

    return c
@gf.cell
def taper_strip_to_ridge_trenches(
    length: float = 10.0,
    width: float = 0.5,
    slab_offset: float = 3.0,
    trench_width: float = 2.0,
    trench_layer: Layer = gf.LAYER.SLAB90,
    layer_wg: Layer = gf.LAYER.WG,
    trench_offset: float = 0.1,
):
    """Defines taper using trenches to define the etch.

    Args:
        length: taper length.
        width: waveguide width.
        slab_offset: extra half-width gained over the taper length.
        trench_width: width of each trench.
        trench_layer: layer for the trenches.
        layer_wg: waveguide layer.
        trench_offset: trench overlap after the waveguide edge.
    """
    c = gf.Component()

    # waveguide body widens from y_start to y_end over the taper length
    y_start = width / 2 + trench_width - trench_offset
    y_end = width / 2 + trench_width - trench_offset + slab_offset
    c.add_polygon(
        ([0, length, length, 0], [y_start, y_end, -y_end, -y_start]),
        layer=layer_wg,
    )

    # symmetric trenches starting at the waveguide edge
    inner_start = width / 2
    inner_end = width / 2
    outer_start = width / 2 + trench_width
    outer_end = width / 2 + trench_width + slab_offset
    c.add_polygon(
        ([0, length, length, 0], [inner_start, inner_end, outer_end, outer_start]),
        layer=trench_layer,
    )
    c.add_polygon(
        ([0, length, length, 0], [-inner_start, -inner_end, -outer_end, -outer_start]),
        layer=trench_layer,
    )

    c.add_port(name="o1", midpoint=[0, 0], width=width, orientation=180, layer=layer_wg)
    c.add_port(
        name="o2", midpoint=[length, 0], width=width, orientation=0, layer=layer_wg
    )
    return c
# strip -> slab150 variant of the strip-to-ridge taper
taper_strip_to_slab150 = gf.partial(taper_strip_to_ridge, layer_slab=gf.LAYER.SLAB150)

# taper StripCband to NitrideCband
taper_sc_nc = gf.partial(
    taper_strip_to_ridge,
    layer_wg=gf.LAYER.WG,
    layer_slab=gf.LAYER.WGN,
    length=20.0,
    width1=0.5,
    width2=0.15,
    w_slab1=0.15,
    w_slab2=1.0,
)
if __name__ == "__main__":
    # visual smoke test: build the strip-to-nitride taper and display it
    c = taper_sc_nc()
    c.show()
| |
import datetime
import decimal
import json
import traceback
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import models
from django.utils import timezone
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
import stripe
from jsonfield.fields import JSONField
from .managers import CustomerManager, ChargeManager, TransferManager
from .settings import (
DEFAULT_PLAN,
INVOICE_FROM_EMAIL,
PAYMENTS_PLANS,
plan_from_stripe_id,
SEND_EMAIL_RECEIPTS,
TRIAL_PERIOD_FOR_USER_CALLBACK,
PLAN_QUANTITY_CALLBACK
)
from .signals import (
cancelled,
card_changed,
subscription_made,
webhook_processing_error,
WEBHOOK_SIGNALS,
)
from .utils import convert_tstamp
# Configure the global stripe client from Django settings.
stripe.api_key = settings.STRIPE_SECRET_KEY
# Pin the Stripe API version; default to the version this app was written against.
stripe.api_version = getattr(settings, "STRIPE_API_VERSION", "2012-11-07")
class StripeObject(models.Model):
    """Abstract base for models mirroring a Stripe API object."""
    # Stripe's identifier for the mirrored object (e.g. "ch_...", "ev_...").
    stripe_id = models.CharField(max_length=255, unique=True)
    created_at = models.DateTimeField(default=timezone.now)
    class Meta: # pylint: disable=E0012,C1001
        abstract = True
class EventProcessingException(models.Model):
    """Records an exception raised while processing a webhook Event, for debugging."""
    event = models.ForeignKey("Event", null=True)
    # Raw payload (e.g. the Stripe error's http_body) associated with the failure.
    data = models.TextField()
    message = models.CharField(max_length=500)
    traceback = models.TextField()
    created_at = models.DateTimeField(default=timezone.now)
    @classmethod
    def log(cls, data, exception, event):
        """Persist *exception* (with the currently active traceback) against *event*."""
        cls.objects.create(
            event=event,
            data=data or "",
            message=str(exception),
            traceback=traceback.format_exc()
        )
    def __unicode__(self):
        return u"<%s, pk=%s, Event=%s>" % (self.message, self.pk, self.event)
class Event(StripeObject):
    """A webhook event received from Stripe.

    The raw payload is stored in ``webhook_message``; :meth:`validate`
    re-fetches the event from Stripe into ``validated_message`` and sets
    ``valid``; :meth:`process` then dispatches to the relevant model handler.
    """
    kind = models.CharField(max_length=250)
    livemode = models.BooleanField()
    customer = models.ForeignKey("Customer", null=True)
    webhook_message = JSONField()
    validated_message = JSONField(null=True)
    valid = models.NullBooleanField(null=True)
    processed = models.BooleanField(default=False)
    stripe_connect = models.ForeignKey('ConnectUser', null=True)

    @property
    def message(self):
        """The validated (trusted) event payload."""
        return self.validated_message

    def __unicode__(self):
        return "%s - %s" % (self.kind, self.stripe_id)

    def link_customer(self):
        """Attach the local Customer referenced by this event, if one exists."""
        cus_id = None
        customer_crud_events = [
            "customer.created",
            "customer.updated",
            "customer.deleted"
        ]
        # For customer CRUD events the customer id is the object id itself;
        # otherwise it is carried in the object's "customer" field.
        if self.kind in customer_crud_events:
            cus_id = self.message["data"]["object"]["id"]
        else:
            cus_id = self.message["data"]["object"].get("customer", None)
        if cus_id is not None:
            try:
                self.customer = Customer.objects.get(stripe_id=cus_id)
                self.save()
            except Customer.DoesNotExist:
                pass

    def link_stripe_connect(self):
        """Attach the ConnectUser referenced by the payload's ``user_id``, if any."""
        connect_id = self.message["data"]["object"].get("user_id", None)
        if connect_id is not None:
            try:
                self.stripe_connect = ConnectUser.objects.get(account_id=connect_id)
                self.save()
            except ConnectUser.DoesNotExist:
                pass

    def validate(self):
        """Re-fetch the event from Stripe and mark whether the webhook data matches."""
        evt = stripe.Event.retrieve(self.stripe_id)
        self.validated_message = json.loads(
            json.dumps(
                evt.to_dict(),
                sort_keys=True,
                cls=stripe.StripeObjectEncoder
            )
        )
        if self.webhook_message["data"] == self.validated_message["data"]:
            self.valid = True
        else:
            self.valid = False
        self.save()

    def process(self):
        """Dispatch a validated, unprocessed event to its handler.

        Known event kinds include:
        "account.updated",
        "account.application.deauthorized",
        "charge.succeeded",
        "charge.failed",
        "charge.refunded",
        "charge.dispute.created",
        "charge.dispute.updated",
        "charge.dispute.closed",
        "customer.created",
        "customer.updated",
        "customer.deleted",
        "customer.subscription.created",
        "customer.subscription.updated",
        "customer.subscription.deleted",
        "customer.subscription.trial_will_end",
        "customer.discount.created",
        "customer.discount.updated",
        "customer.discount.deleted",
        "invoice.created",
        "invoice.updated",
        "invoice.payment_succeeded",
        "invoice.payment_failed",
        "invoiceitem.created",
        "invoiceitem.updated",
        "invoiceitem.deleted",
        "plan.created",
        "plan.updated",
        "plan.deleted",
        "coupon.created",
        "coupon.updated",
        "coupon.deleted",
        "transfer.created",
        "transfer.updated",
        "transfer.failed",
        "ping"
        """
        if self.valid and not self.processed:
            try:
                if not self.kind.startswith("plan.") and \
                        not self.kind.startswith("transfer."):
                    self.link_customer()
                if not self.stripe_connect:
                    self.link_stripe_connect()
                if self.kind.startswith("invoice."):
                    Invoice.handle_event(self)
                elif self.kind.startswith("charge."):
                    if not self.customer:
                        self.link_customer()
                    # Guard against an unresolved customer (matches the
                    # subscription branch); previously this raised
                    # AttributeError past the StripeError handler.
                    if self.customer:
                        self.customer.record_charge(
                            self.message["data"]["object"]["id"]
                        )
                elif self.kind.startswith("transfer."):
                    Transfer.process_transfer(
                        self,
                        self.message["data"]["object"]
                    )
                elif self.kind.startswith("customer.subscription."):
                    if not self.customer:
                        self.link_customer()
                    if self.customer:
                        self.customer.sync_current_subscription()
                elif self.kind == "customer.deleted":
                    if not self.customer:
                        self.link_customer()
                    if self.customer:
                        self.customer.purge()
                self.send_signal()
                self.processed = True
                self.save()
            except stripe.StripeError as e:
                EventProcessingException.log(
                    data=e.http_body,
                    exception=e,
                    event=self
                )
                webhook_processing_error.send(
                    sender=Event,
                    data=e.http_body,
                    exception=e
                )

    def send_signal(self):
        """Fire the registered webhook signal for this event kind, if any."""
        signal = WEBHOOK_SIGNALS.get(self.kind)
        if signal:
            return signal.send(sender=Event, event=self)
class Transfer(StripeObject):
    """Local mirror of a Stripe transfer (payout), populated from webhook events."""
    # pylint: disable=C0301
    event = models.ForeignKey(Event, related_name="transfers")
    amount = models.DecimalField(decimal_places=2, max_digits=9)
    status = models.CharField(max_length=25)
    date = models.DateTimeField()
    description = models.TextField(null=True, blank=True)
    adjustment_count = models.IntegerField(null=True)
    adjustment_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    adjustment_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    charge_count = models.IntegerField(null=True)
    charge_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    charge_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    collected_fee_count = models.IntegerField(null=True)
    collected_fee_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    net = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    refund_count = models.IntegerField(null=True)
    refund_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    refund_gross = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    validation_count = models.IntegerField(null=True)
    validation_fees = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    stripe_connect = models.ForeignKey('ConnectUser', null=True)
    objects = TransferManager()

    def update_status(self):
        """Refresh ``status`` from the live Stripe API and save."""
        self.status = stripe.Transfer.retrieve(self.stripe_id).status
        self.save()

    @classmethod
    def process_transfer(cls, event, transfer):
        """Create or update the local Transfer row for a ``transfer.*`` event.

        :param event: the Event that carried this payload
        :param transfer: the ``data.object`` dict from the webhook (amounts in cents)
        """
        defaults = {
            "amount": transfer["amount"] / decimal.Decimal("100"),
            "status": transfer["status"],
            "date": convert_tstamp(transfer, "date"),
            "description": transfer.get("description", "")
        }
        summary = transfer.get("summary")
        if summary:
            defaults.update({
                "adjustment_count": summary.get("adjustment_count"),
                "adjustment_fees": summary.get("adjustment_fees"),
                "adjustment_gross": summary.get("adjustment_gross"),
                "charge_count": summary.get("charge_count"),
                "charge_fees": summary.get("charge_fees"),
                "charge_gross": summary.get("charge_gross"),
                "collected_fee_count": summary.get("collected_fee_count"),
                "collected_fee_gross": summary.get("collected_fee_gross"),
                "refund_count": summary.get("refund_count"),
                "refund_fees": summary.get("refund_fees"),
                "refund_gross": summary.get("refund_gross"),
                "validation_count": summary.get("validation_count"),
                "validation_fees": summary.get("validation_fees"),
                "net": summary.get("net")
            })
        # Monetary summary fields arrive in cents; convert to dollars.
        # Skip None so an absent summary field no longer raises TypeError.
        for field in defaults:
            if field == "net" or field.endswith("fees") or field.endswith("gross"):
                if defaults[field] is not None:
                    defaults[field] = defaults[field] / decimal.Decimal("100")
        if event.kind == "transfer.paid":
            defaults.update({"event": event})
            obj, created = Transfer.objects.get_or_create(
                stripe_id=transfer["id"],
                defaults=defaults
            )
        else:
            obj, created = Transfer.objects.get_or_create(
                stripe_id=transfer["id"],
                event=event,
                defaults=defaults
            )
        if event.stripe_connect:
            obj.stripe_connect = event.stripe_connect
            # BUG FIX: persist immediately -- on the `created` path below there
            # was no save(), so the stripe_connect link was silently lost.
            obj.save()
        if created and summary:
            for fee in summary.get("charge_fee_details", []):
                obj.charge_fee_details.create(
                    amount=fee["amount"] / decimal.Decimal("100"),
                    application=fee.get("application", ""),
                    description=fee.get("description", ""),
                    kind=fee["type"]
                )
        else:
            obj.status = transfer["status"]
            obj.save()
        if event.kind == "transfer.updated":
            obj.update_status()
class TransferChargeFee(models.Model):
    """One entry from a transfer summary's ``charge_fee_details`` list."""
    transfer = models.ForeignKey(Transfer, related_name="charge_fee_details")
    # Fee amount in dollars (converted from Stripe's cents before saving).
    amount = models.DecimalField(decimal_places=2, max_digits=7)
    application = models.TextField(null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    # Stripe's fee "type" field.
    kind = models.CharField(max_length=150)
    created_at = models.DateTimeField(default=timezone.now)
class Customer(StripeObject):
    """Links a local user to a Stripe customer and wraps billing operations
    against it (cards, subscriptions, invoices, charges).
    """
    user = models.OneToOneField(
        getattr(settings, "AUTH_USER_MODEL", "auth.User"),
        null=True
    )
    card_fingerprint = models.CharField(max_length=200, blank=True)
    card_last_4 = models.CharField(max_length=4, blank=True)
    card_kind = models.CharField(max_length=50, blank=True)
    # Set when the Stripe-side customer has been deleted and this row scrubbed.
    date_purged = models.DateTimeField(null=True, editable=False)
    objects = CustomerManager()

    def __unicode__(self):
        return unicode(self.user)

    @property
    def stripe_customer(self):
        """Fetch the live customer object from the Stripe API."""
        return stripe.Customer.retrieve(self.stripe_id)

    def purge(self):
        """Delete the customer on Stripe and scrub local card/user data.

        The row itself is kept (with ``date_purged`` set) so history survives.
        """
        try:
            self.stripe_customer.delete()
        except stripe.InvalidRequestError as e:
            if e.message.startswith("No such customer:"):
                # The exception was thrown because the customer was already
                # deleted on the stripe side, ignore the exception
                pass
            else:
                # The exception was raised for another reason, re-raise it
                raise
        self.user = None
        self.card_fingerprint = ""
        self.card_last_4 = ""
        self.card_kind = ""
        self.date_purged = timezone.now()
        self.save()

    def delete(self, using=None):
        # Deliberate soft-delete: purge() scrubs the record rather than
        # removing the row (actually deleting it would require raw SQL).
        self.purge()

    def can_charge(self):
        """True when card details are on file and the customer isn't purged."""
        return self.card_fingerprint and \
            self.card_last_4 and \
            self.card_kind and \
            self.date_purged is None

    def has_active_subscription(self):
        """True when a current subscription exists and is valid."""
        try:
            return self.current_subscription.is_valid()
        except CurrentSubscription.DoesNotExist:
            return False

    def cancel(self, at_period_end=True):
        """Cancel the Stripe subscription (immediately or at period end)."""
        try:
            current = self.current_subscription
        except CurrentSubscription.DoesNotExist:
            return
        sub = self.stripe_customer.cancel_subscription(
            at_period_end=at_period_end
        )
        current.status = sub.status
        current.cancel_at_period_end = sub.cancel_at_period_end
        current.current_period_end = convert_tstamp(sub, "current_period_end")
        current.save()
        cancelled.send(sender=self, stripe_response=sub)

    @classmethod
    def create(cls, user, card=None, plan=None, charge_immediately=True):
        """Create the Stripe customer for *user* plus the local Customer row.

        NOTE(review): a plan passed without a card is ignored in favour of
        DEFAULT_PLAN -- confirm that asymmetry is intentional.
        """
        if card and plan:
            plan = PAYMENTS_PLANS[plan]["stripe_plan_id"]
        elif DEFAULT_PLAN:
            plan = PAYMENTS_PLANS[DEFAULT_PLAN]["stripe_plan_id"]
        else:
            plan = None
        trial_end = None
        if TRIAL_PERIOD_FOR_USER_CALLBACK and plan:
            trial_days = TRIAL_PERIOD_FOR_USER_CALLBACK(user)
            trial_end = datetime.datetime.utcnow() + datetime.timedelta(
                days=trial_days
            )
        stripe_customer = stripe.Customer.create(
            email=user.email,
            card=card,
            plan=plan or DEFAULT_PLAN,
            trial_end=trial_end
        )
        if stripe_customer.active_card:
            cus = cls.objects.create(
                user=user,
                stripe_id=stripe_customer.id,
                card_fingerprint=stripe_customer.active_card.fingerprint,
                card_last_4=stripe_customer.active_card.last4,
                card_kind=stripe_customer.active_card.type
            )
        else:
            cus = cls.objects.create(
                user=user,
                stripe_id=stripe_customer.id,
            )
        if plan:
            if stripe_customer.subscription:
                cus.sync_current_subscription(cu=stripe_customer)
            if charge_immediately:
                cus.send_invoice()
        return cus

    def update_card(self, token):
        """Replace the customer's card on Stripe and mirror it locally."""
        cu = self.stripe_customer
        cu.card = token
        cu.save()
        self.save_card(cu)

    def save_card(self, cu=None):
        """Copy active-card details from Stripe onto this row and signal the change."""
        cu = cu or self.stripe_customer
        active_card = cu.active_card
        self.card_fingerprint = active_card.fingerprint
        self.card_last_4 = active_card.last4
        self.card_kind = active_card.type
        self.save()
        card_changed.send(sender=self, stripe_response=cu)

    def retry_unpaid_invoices(self):
        """Re-attempt payment for every open, unpaid invoice."""
        self.sync_invoices()
        for inv in self.invoices.filter(paid=False, closed=False):
            try:
                inv.retry()  # Always retry unpaid invoices
            except stripe.InvalidRequestError as error:
                if error.message != "Invoice is already paid":
                    raise error

    def send_invoice(self):
        """Create (and pay, if due) an invoice for pending items.

        :return: True on success, False when there was nothing to invoice.
        """
        try:
            invoice = stripe.Invoice.create(customer=self.stripe_id)
            if invoice.amount_due > 0:
                invoice.pay()
            return True
        except stripe.InvalidRequestError:
            return False  # There was nothing to invoice

    def sync(self, cu=None):
        """Mirror the Stripe customer's card state onto this row."""
        cu = cu or self.stripe_customer
        updated = False
        if hasattr(cu, "active_card") and cu.active_card:
            # Test to make sure the card has changed, otherwise do not update it
            # (i.e. refrain from sending any signals)
            if (self.card_last_4 != cu.active_card.last4 or
                    self.card_fingerprint != cu.active_card.fingerprint or
                    self.card_kind != cu.active_card.type):
                updated = True
                self.card_last_4 = cu.active_card.last4
                self.card_fingerprint = cu.active_card.fingerprint
                self.card_kind = cu.active_card.type
        else:
            updated = True
            self.card_fingerprint = ""
            self.card_last_4 = ""
            self.card_kind = ""
        if updated:
            self.save()
            card_changed.send(sender=self, stripe_response=cu)

    def sync_invoices(self, cu=None):
        """Pull all invoices from Stripe into local Invoice rows (no receipts)."""
        cu = cu or self.stripe_customer
        for invoice in cu.invoices().data:
            Invoice.sync_from_stripe_data(invoice, send_receipt=False)

    def sync_charges(self, cu=None):
        """Pull all charges from Stripe into local Charge rows."""
        cu = cu or self.stripe_customer
        for charge in cu.charges().data:
            self.record_charge(charge.id)

    def sync_current_subscription(self, cu=None):
        """Mirror the Stripe subscription into CurrentSubscription (or delete it)."""
        cu = cu or self.stripe_customer
        sub = getattr(cu, "subscription", None)
        if sub is None:
            try:
                self.current_subscription.delete()
            except CurrentSubscription.DoesNotExist:
                pass
        else:
            try:
                sub_obj = self.current_subscription
                sub_obj.plan = plan_from_stripe_id(sub.plan.id)
                sub_obj.current_period_start = convert_tstamp(
                    sub.current_period_start
                )
                sub_obj.current_period_end = convert_tstamp(
                    sub.current_period_end
                )
                sub_obj.amount = (sub.plan.amount / decimal.Decimal("100"))
                sub_obj.status = sub.status
                sub_obj.cancel_at_period_end = sub.cancel_at_period_end
                sub_obj.start = convert_tstamp(sub.start)
                sub_obj.quantity = sub.quantity
                sub_obj.save()
            except CurrentSubscription.DoesNotExist:
                sub_obj = CurrentSubscription.objects.create(
                    customer=self,
                    plan=plan_from_stripe_id(sub.plan.id),
                    current_period_start=convert_tstamp(
                        sub.current_period_start
                    ),
                    current_period_end=convert_tstamp(
                        sub.current_period_end
                    ),
                    amount=(sub.plan.amount / decimal.Decimal("100")),
                    status=sub.status,
                    cancel_at_period_end=sub.cancel_at_period_end,
                    start=convert_tstamp(sub.start),
                    quantity=sub.quantity
                )
            if sub.trial_start and sub.trial_end:
                sub_obj.trial_start = convert_tstamp(sub.trial_start)
                sub_obj.trial_end = convert_tstamp(sub.trial_end)
                sub_obj.save()
            return sub_obj

    def update_plan_quantity(self, quantity, charge_immediately=False):
        """Re-subscribe to the current plan with a new quantity."""
        self.subscribe(
            plan=plan_from_stripe_id(
                self.stripe_customer.subscription.plan.id
            ),
            quantity=quantity,
            charge_immediately=charge_immediately
        )

    def subscribe(self, plan, quantity=None, trial_days=None,
                  charge_immediately=True, token=None, coupon=None):
        """Create/update the Stripe subscription and sync it locally.

        :param plan: key into PAYMENTS_PLANS (not a raw Stripe plan id)
        :return: the raw Stripe subscription response
        """
        if quantity is None:
            if PLAN_QUANTITY_CALLBACK is not None:
                quantity = PLAN_QUANTITY_CALLBACK(self)
            else:
                quantity = 1
        cu = self.stripe_customer
        subscription_params = {}
        if trial_days:
            subscription_params["trial_end"] = \
                datetime.datetime.utcnow() + datetime.timedelta(days=trial_days)
        if token:
            subscription_params["card"] = token
        subscription_params["plan"] = PAYMENTS_PLANS[plan]["stripe_plan_id"]
        subscription_params["quantity"] = quantity
        subscription_params["coupon"] = coupon
        resp = cu.update_subscription(**subscription_params)
        if token:
            # Refetch the stripe customer so we have the updated card info
            cu = self.stripe_customer
            self.save_card(cu)
        self.sync_current_subscription(cu)
        if charge_immediately:
            self.send_invoice()
        subscription_made.send(sender=self, plan=plan, stripe_response=resp)
        return resp

    def charge(self, amount, currency="usd", description=None,
               send_receipt=True, application_fee=None,
               stripe_connect_user=None):
        """
        This method expects `amount` and 'application_fee' to be a Decimal type representing a
        dollar amount. It will be converted to cents so any decimals beyond
        two will be ignored.
        """
        if not isinstance(amount, decimal.Decimal) or (application_fee is not None and not isinstance(application_fee, decimal.Decimal)):
            raise ValueError(
                "You must supply a decimal value representing dollars for amount and for application_fee (if supplied)."
            )
        charge_args = {
            'amount': int(amount * 100),
            'currency': currency,
            'description': description,
        }
        if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
            # Charging through Connect: mint a token for this customer under the
            # connected account's key and charge with that key.
            charge_args['card'] = stripe.Token.create(customer=self.stripe_id, api_key=stripe_connect_user.stripe_access_token)
            charge_args['api_key'] = stripe_connect_user.stripe_access_token
        else:
            charge_args['customer'] = self.stripe_id
        if application_fee:
            charge_args['application_fee'] = int(application_fee * 100)
        resp = stripe.Charge.create(**charge_args)
        obj = self.record_charge(resp["id"], stripe_connect_user)
        if send_receipt:
            obj.send_receipt()
        return obj

    def record_charge(self, charge_id, stripe_connect_user=None):
        """Fetch *charge_id* from Stripe and sync it into a local Charge row."""
        if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
            data = stripe.Charge.retrieve(charge_id, api_key=stripe_connect_user.stripe_access_token)
        else:
            data = stripe.Charge.retrieve(charge_id)
        return Charge.sync_from_stripe_data(data)
class ConnectUser(models.Model):
    """
    A user in your system who you may be routing payments to through "Stripe Connect"
    """
    user = models.OneToOneField(
        getattr(settings, "AUTH_USER_MODEL", "auth.User"),
        null=True
    )
    # when a webhook is received for an action related to a ConnectUser, a 'user_id' will be provided
    # This is the same as an account id
    account_id = models.CharField(max_length=100)
    stripe_access_token = models.CharField(max_length=100)
    stripe_publishable_key = models.CharField(max_length=100)
    @staticmethod
    def account_id_lookup(stripe_access_token):
        """Return the Stripe account id for *stripe_access_token* (or None)."""
        data = stripe.Account.retrieve(api_key=stripe_access_token)
        return data.get('id', None)
    def __unicode__(self):
        return unicode(self.user)
class CurrentSubscription(models.Model):
    """The customer's single current Stripe subscription, mirrored locally."""
    customer = models.OneToOneField(
        Customer,
        related_name="current_subscription",
        null=True
    )
    # Key into PAYMENTS_PLANS (not the raw Stripe plan id).
    plan = models.CharField(max_length=100)
    quantity = models.IntegerField()
    start = models.DateTimeField()
    # trialing, active, past_due, canceled, or unpaid
    status = models.CharField(max_length=25)
    cancel_at_period_end = models.BooleanField(default=False)
    canceled_at = models.DateTimeField(blank=True, null=True)
    current_period_end = models.DateTimeField(blank=True, null=True)
    current_period_start = models.DateTimeField(blank=True, null=True)
    ended_at = models.DateTimeField(blank=True, null=True)
    trial_end = models.DateTimeField(blank=True, null=True)
    trial_start = models.DateTimeField(blank=True, null=True)
    # Per-unit plan amount in dollars.
    amount = models.DecimalField(decimal_places=2, max_digits=7)
    created_at = models.DateTimeField(default=timezone.now)
    @property
    def total_amount(self):
        # Per-unit amount times subscribed quantity.
        return self.amount * self.quantity
    def plan_display(self):
        return PAYMENTS_PLANS[self.plan]["name"]
    def status_display(self):
        return self.status.replace("_", " ").title()
    def is_period_current(self):
        # NOTE(review): raises TypeError if current_period_end is NULL -- confirm
        # callers only invoke this on synced subscriptions.
        return self.current_period_end > timezone.now()
    def is_status_current(self):
        return self.status in ["trialing", "active"]
    def is_valid(self):
        """True while the subscription entitles the customer to service."""
        if not self.is_status_current():
            return False
        if self.cancel_at_period_end and not self.is_period_current():
            return False
        return True
    def delete(self, using=None):  # pylint: disable=E1002
        """
        Set values to None while deleting the object so that any lingering
        references will not show previous values (such as when an Event
        signal is triggered after a subscription has been deleted)
        """
        super(CurrentSubscription, self).delete(using=using)
        self.plan = None
        self.status = None
        self.quantity = 0
        self.amount = 0
class Invoice(models.Model):
    """Local mirror of a Stripe invoice, kept in sync via webhooks/API calls."""
    stripe_id = models.CharField(max_length=255)
    customer = models.ForeignKey(Customer, related_name="invoices")
    attempted = models.NullBooleanField()
    attempts = models.PositiveIntegerField(null=True)
    closed = models.BooleanField(default=False)
    paid = models.BooleanField(default=False)
    period_end = models.DateTimeField()
    period_start = models.DateTimeField()
    # Dollar amounts (converted from Stripe's cents).
    subtotal = models.DecimalField(decimal_places=2, max_digits=7)
    total = models.DecimalField(decimal_places=2, max_digits=7)
    date = models.DateTimeField()
    # Stripe id of the charge that paid this invoice ("" when unpaid).
    charge = models.CharField(max_length=50, blank=True)
    created_at = models.DateTimeField(default=timezone.now)
    stripe_connect = models.ForeignKey(ConnectUser, null=True)
    class Meta: # pylint: disable=E0012,C1001
        ordering = ["-date"]
    def retry(self):
        """Re-attempt payment of an open invoice; True if an attempt was made."""
        if not self.paid and not self.closed:
            inv = stripe.Invoice.retrieve(self.stripe_id)
            inv.pay()
            return True
        return False
    def status(self):
        # Human-readable payment status.
        if self.paid:
            return "Paid"
        return "Open"
    @classmethod
    def sync_from_stripe_data(cls, stripe_invoice, send_receipt=True, stripe_connect=None):
        """Create or update the local Invoice (plus its line items) from a Stripe
        invoice payload.  Cent amounts are converted to dollars; the paying
        charge is synced too and a receipt optionally emailed.
        """
        c = Customer.objects.get(stripe_id=stripe_invoice["customer"])
        period_end = convert_tstamp(stripe_invoice, "period_end")
        period_start = convert_tstamp(stripe_invoice, "period_start")
        date = convert_tstamp(stripe_invoice, "date")
        invoice, created = cls.objects.get_or_create(
            stripe_id=stripe_invoice["id"],
            defaults=dict(
                customer=c,
                attempted=stripe_invoice["attempted"],
                attempts=stripe_invoice["attempt_count"],
                closed=stripe_invoice["closed"],
                paid=stripe_invoice["paid"],
                period_end=period_end,
                period_start=period_start,
                subtotal=stripe_invoice["subtotal"] / decimal.Decimal("100"),
                total=stripe_invoice["total"] / decimal.Decimal("100"),
                date=date,
                charge=stripe_invoice.get("charge") or "",
                stripe_connect=stripe_connect
            )
        )
        if not created:
            # pylint: disable=C0301
            # Existing row: refresh every mutable field from the payload.
            invoice.attempted = stripe_invoice["attempted"]
            invoice.attempts = stripe_invoice["attempt_count"]
            invoice.closed = stripe_invoice["closed"]
            invoice.paid = stripe_invoice["paid"]
            invoice.period_end = period_end
            invoice.period_start = period_start
            invoice.subtotal = stripe_invoice["subtotal"] / decimal.Decimal("100")
            invoice.total = stripe_invoice["total"] / decimal.Decimal("100")
            invoice.date = date
            invoice.charge = stripe_invoice.get("charge") or ""
            invoice.stripe_connect = stripe_connect
            invoice.save()
        # Upsert each invoice line item by its Stripe id.
        for item in stripe_invoice["lines"].get("data", []):
            period_end = convert_tstamp(item["period"], "end")
            period_start = convert_tstamp(item["period"], "start")
            if item.get("plan"):
                plan = plan_from_stripe_id(item["plan"]["id"])
            else:
                plan = ""
            inv_item, inv_item_created = invoice.items.get_or_create(
                stripe_id=item["id"],
                defaults=dict(
                    amount=(item["amount"] / decimal.Decimal("100")),
                    currency=item["currency"],
                    proration=item["proration"],
                    description=item.get("description") or "",
                    line_type=item["type"],
                    plan=plan,
                    period_start=period_start,
                    period_end=period_end,
                    quantity=item.get("quantity")
                )
            )
            if not inv_item_created:
                inv_item.amount = (item["amount"] / decimal.Decimal("100"))
                inv_item.currency = item["currency"]
                inv_item.proration = item["proration"]
                inv_item.description = item.get("description") or ""
                inv_item.line_type = item["type"]
                inv_item.plan = plan
                inv_item.period_start = period_start
                inv_item.period_end = period_end
                inv_item.quantity = item.get("quantity")
                inv_item.save()
        # Link the paying charge (if any) back to this invoice.
        if stripe_invoice.get("charge"):
            obj = c.record_charge(stripe_invoice["charge"])
            obj.invoice = invoice
            obj.save()
            if send_receipt:
                obj.send_receipt()
        return invoice
    @classmethod
    def handle_event(cls, event, send_receipt=SEND_EMAIL_RECEIPTS):
        """Sync the invoice referenced by a payment success/failure webhook event."""
        valid_events = ["invoice.payment_failed", "invoice.payment_succeeded"]
        if event.kind in valid_events:
            invoice_data = event.message["data"]["object"]
            stripe_invoice = stripe.Invoice.retrieve(invoice_data["id"])
            cls.sync_from_stripe_data(stripe_invoice, send_receipt=send_receipt, stripe_connect=event.stripe_connect)
class InvoiceItem(models.Model):
    """One line item of an Invoice, mirrored from Stripe."""
    stripe_id = models.CharField(max_length=255)
    created_at = models.DateTimeField(default=timezone.now)
    invoice = models.ForeignKey(Invoice, related_name="items")
    # Dollar amount (converted from Stripe's cents).
    amount = models.DecimalField(decimal_places=2, max_digits=7)
    currency = models.CharField(max_length=10)
    period_start = models.DateTimeField()
    period_end = models.DateTimeField()
    proration = models.BooleanField(default=False)
    # Stripe's line item "type" field.
    line_type = models.CharField(max_length=50)
    description = models.CharField(max_length=200, blank=True)
    # Key into PAYMENTS_PLANS ("" when the item isn't plan-based).
    plan = models.CharField(max_length=100, blank=True)
    quantity = models.IntegerField(null=True)
    def plan_display(self):
        return PAYMENTS_PLANS[self.plan]["name"]
class Charge(StripeObject):
    """Local mirror of a Stripe charge."""
    customer = models.ForeignKey(Customer, related_name="charges", null=True)
    invoice = models.ForeignKey(Invoice, null=True, related_name="charges")
    card_last_4 = models.CharField(max_length=4, blank=True)
    card_kind = models.CharField(max_length=50, blank=True)
    amount = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    amount_refunded = models.DecimalField(
        decimal_places=2,
        max_digits=7,
        null=True
    )
    description = models.TextField(blank=True)
    paid = models.NullBooleanField(null=True)
    disputed = models.NullBooleanField(null=True)
    refunded = models.NullBooleanField(null=True)
    fee = models.DecimalField(decimal_places=2, max_digits=7, null=True)
    receipt_sent = models.BooleanField(default=False)
    charge_created = models.DateTimeField(null=True, blank=True)
    stripe_connect = models.ForeignKey(ConnectUser, null=True)
    objects = ChargeManager()

    def calculate_refund_amount(self, amount=None):
        """Return the refund amount in cents, capped at what remains unrefunded."""
        eligible_to_refund = self.amount - (self.amount_refunded or 0)
        if amount:
            amount_to_refund = min(eligible_to_refund, amount)
        else:
            amount_to_refund = eligible_to_refund
        return int(amount_to_refund * 100)

    def refund(self, amount=None):
        """Refund (part of) the charge on Stripe and re-sync the local row."""
        # pylint: disable=E1121
        charge_obj = stripe.Charge.retrieve(
            self.stripe_id
        ).refund(
            amount=self.calculate_refund_amount(amount=amount)
        )
        Charge.sync_from_stripe_data(charge_obj)

    @classmethod
    def sync_from_stripe_data(cls, data):
        """Create or update the local Charge row from a Stripe charge payload."""
        obj, _ = Charge.objects.get_or_create(
            stripe_id=data["id"]
        )
        customer_id = data.get("customer", None)
        customer = Customer.objects.get(stripe_id=customer_id) if customer_id else None
        obj.customer = customer
        invoice_id = data.get("invoice", None)
        if Invoice.objects.filter(stripe_id=invoice_id).exists():
            # Look the invoice up directly: obj.customer can be None (e.g. a
            # Connect charge), which previously made this raise AttributeError.
            obj.invoice = Invoice.objects.get(stripe_id=invoice_id)
        obj.card_last_4 = data["card"]["last4"]
        obj.card_kind = data["card"]["type"]
        obj.amount = (data["amount"] / decimal.Decimal("100"))
        obj.paid = data["paid"]
        obj.refunded = data["refunded"]
        obj.fee = (data["fee"] / decimal.Decimal("100"))
        obj.disputed = data["dispute"] is not None
        obj.charge_created = convert_tstamp(data, "created")
        if data.get("description"):
            obj.description = data["description"]
        if data.get("amount_refunded"):
            # pylint: disable=C0301
            obj.amount_refunded = (data["amount_refunded"] / decimal.Decimal("100"))
        if data["refunded"]:
            # A fully refunded charge reports the whole amount as refunded.
            obj.amount_refunded = (data["amount"] / decimal.Decimal("100"))
        user_id = data.get("user_id", None)
        if user_id and ConnectUser.objects.filter(account_id=user_id).exists():
            obj.stripe_connect = ConnectUser.objects.get(account_id=user_id)
        obj.save()
        return obj

    def send_receipt(self):
        """Email a receipt for this charge once (guarded by ``receipt_sent``)."""
        if not self.receipt_sent and self.customer:
            site = Site.objects.get_current()
            protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
            ctx = {
                "charge": self,
                "site": site,
                "protocol": protocol,
            }
            subject = render_to_string("payments/email/subject.txt", ctx)
            subject = subject.strip()
            message = render_to_string("payments/email/body.txt", ctx)
            num_sent = EmailMessage(
                subject,
                message,
                to=[self.customer.user.email],
                from_email=INVOICE_FROM_EMAIL
            ).send()
            self.receipt_sent = num_sent > 0
            self.save()

    @classmethod
    def create(cls, card, amount, currency="usd", description=None, application_fee=None, stripe_connect_user=None):
        """
        Charge *card* directly (no stored customer).

        `amount` and `application_fee` must be Decimal dollar amounts; they are
        converted to cents, so any decimals beyond two places are ignored.
        """
        if not isinstance(amount, decimal.Decimal) or (application_fee is not None and not isinstance(application_fee, decimal.Decimal)):
            raise ValueError(
                "You must supply a decimal value representing dollars for amount and for application_fee (if supplied)."
            )
        charge_args = {
            'amount': int(amount * 100),
            'currency': currency,
            'description': description,
            'card': card,
        }
        if stripe_connect_user and isinstance(stripe_connect_user, ConnectUser):
            charge_args['api_key'] = stripe_connect_user.stripe_access_token
        elif stripe_connect_user:
            # Caller may pass a raw access-token string instead of a ConnectUser.
            charge_args['api_key'] = stripe_connect_user
        if application_fee:
            charge_args['application_fee'] = int(application_fee * 100)
        resp = stripe.Charge.create(**charge_args)
        return Charge.sync_from_stripe_data(resp)
| |
from __future__ import print_function
from keras.models import Sequential
from keras.layers import LSTM, Dense, GRU, Dropout, SimpleRNN
from keras.models import load_model
from keras.callbacks import *
import numpy as np
import csv
import random as r
from util import elapsed
import logging
import json
import os
# Ref: https://stackoverflow.com/a/40164869
from datetime import datetime
# Timestamp used to give each training run's log file a unique name.
now = datetime.now()
# Run identifiers: bump these when starting a new experiment so logs don't collide.
gru_id = '023'
lstm_id = '013'
# Which recurrent architecture this run trains: 'GRU' or 'LSTM'.
rnnkind = 'GRU'
# If Windows
# NOTE(review): hard-coded absolute per-user paths below only work on the
# original author's machines; consider moving them to config/env vars.
if os.name == 'nt':
    logging.basicConfig(filename='/Users/Mathias/Documents/GitHub/MotionClassifier/logs/classifier_18.log',
                        level=logging.DEBUG)
else:
    if rnnkind == 'GRU':
        logging.basicConfig(filename='/home/mathias/PycharmProjects/MotionClassifier/logs/gru/%s_%s.log' % (gru_id, now.strftime("%Y%m%d-%H%M%S")), level=logging.DEBUG)
    elif rnnkind == 'LSTM':
        logging.basicConfig(filename='/home/mathias/PycharmProjects/MotionClassifier/logs/lstm/%s_%s.log' % (lstm_id, now.strftime("%Y%m%d-%H%M%S")), level=logging.DEBUG)
# Per-architecture TensorBoard run directory and CSV training-log path.
# NOTE(review): if rnnkind is neither 'GRU' nor 'LSTM', run_path/log_path are
# never defined and the csv_logger line below raises NameError.
if rnnkind == 'GRU':
    run_path = 'runs/gru_' + gru_id
    log_path = 'gru/training%s.log' % gru_id
elif rnnkind == 'LSTM':
    run_path = 'runs/lstm_' + lstm_id
    log_path = 'lstm/training%s.log' % lstm_id
# tb = TensorBoard(log_dir='logs/', histogram_freq=0, write_graph=True,
#                  write_images=False, embeddings_freq=0, embeddings_layer_names=None,
#                  embeddings_metadata=None)
# Shrink the learning rate when validation loss plateaus.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=5, min_lr=0.001)
# kind = 'gru/training%s.log' % gru_id
# kind = 'lstm/training%s.log' % lstm_id
# csv_logger = CSVLogger('logs/%s/training04.log' % kind)
# csv_logger = CSVLogger('logs/%s' % kind)
# Append per-epoch metrics to the architecture-specific CSV log.
csv_logger = CSVLogger('logs/%s' % log_path)
def get_the_fing_data(filepath, timesteps=10, features=8, is_test=False):
    """Parse a collated CSV of per-frame features into (data, target) arrays.

    Rows are accumulated into windows; a row whose leading values are all
    "0.0" acts as a window separator.  Only windows of exactly
    (timesteps, features) shape are kept.

    :param filepath: path to the CSV file
    :param timesteps: frames per window
    :param features: feature values per frame
    :param is_test: if True, each per-frame target also carries the
        [filename, frame-number] columns for later analysis
    :return: (data, target) numpy arrays
    """
    log_msg = "[GET DATA] "
    data = []
    target = []
    with open(filepath, 'r') as csvfile:
        reader = csv.reader(csvfile)
        tmp_data = []
        tmp_target = []
        # reader = csv.DictReader(csvfile, fieldnames=['D%s'% x for x in range(8)]+['Class'])
        # train_data = [tr for tr in reader if tr[:-1] != ['0.0','0.0','0.0','0.0','0.0','0.0','0.0','0.0']]
        # count = 0
        for tr in reader:
            # tr[:-n] where n depends on how the collated raw data is saved;
            # .. new data format ends with <filename>, <frame number>, <class> so n = 3
            # .. otherwise, n = 1 (no filename or frame number)
            # NOTE(review): the separator test uses tr[:-1] while the feature
            # slice below uses tr[:-3] -- assumes the new 3-trailer format;
            # confirm against how the CSV was collated.
            if tr[:-1] == ['0.0']*(len(tr)-1):
                # Has to have the correct shape (10,8) (timesteps, features)
                if np.array(tmp_data).shape == (timesteps, features):
                    data.append(list(tmp_data))
                    target.append(list(tmp_target))
                tmp_data = []
                tmp_target = []
            else:
                tmp_data.append([float(i) for i in tr[:-3]])
                #tmp_target.append([float(tr[-1])])
                # print("Else: {0}".format(tr))
                # One-hot encode the binary class label in the last column;
                # unrecognized labels fall through as [0.0, 0.0].
                tx = [0.0, 0.0]
                # One-hot version
                if tr[-1] == "0.0":
                    tx = [1.0, 0.0]
                elif tr[-1] == "1.0":
                    tx = [0.0, 1.0]
                # ONLY FOR TEST DATA
                if is_test:
                    tx = [tx, tr[-3:-1]]  # append filename and frame number for analysis
                tmp_target.append(tx)
            # count += 1
            # if count > 5:
            #     break
            #
            # print(tmp_data)
            # print(tmp_target)
    fdata = np.array(data)
    ftarget = np.array(target)
    msg = "file: {0}, data: {1} - target: {2}".format(filepath, str(fdata.shape), str(ftarget.shape))
    logging.info(log_msg + msg)
    return fdata, ftarget
def shuffle(data, targets):
    """Jointly shuffle *data* and *targets* along the first axis.

    BUG FIX: the previous pop-by-mask loop stopped once a single element
    remained and the "add whatever is left over" lines were commented out,
    so the last element was silently dropped (and the loop was O(n^2)).
    This version keeps every element and preserves row/target pairing.

    :param data: array-like of samples
    :param targets: array-like of matching targets (same length)
    :return: (shuffled_data, shuffled_targets) numpy arrays
    """
    data = np.asarray(data)
    targets = np.asarray(targets)
    # One random permutation applied to both arrays keeps pairs aligned.
    order = np.random.permutation(len(data))
    return data[order], targets[order]
@elapsed
def retrain_model(i, x_train, y_train, x_val, y_val, epochs, model=None, model_source=None, batch_size=50):
    """
    Retrain a saved model
    :param i: run index, used to separate the TensorBoard log directories
    :param x_train: training data
    :param y_train: training targets
    :param x_val: validation data
    :param y_val: validation targets
    :param epochs: number of runs
    :param model: a model to retrain
    :param model_source: path to saved model e.g. 'rnn_results/motion_derivative_dataset_model0.h5'
    :param batch_size: number of data points to process in one epoch
    :return model0: the retrained model
    :raises ValueError: if neither ``model`` nor ``model_source`` is given
    """
    log_msg = "[RETRAIN] "
    if not model and not model_source:
        logging.error(log_msg + "You gotta give either a model or a path to a saved model!")
        # BUG FIX: previously execution continued with model0=None and crashed
        # later with an opaque AttributeError on model0.fit(); fail fast here.
        raise ValueError("retrain_model requires either 'model' or 'model_source'")
    model0 = model
    if model_source:
        model0 = load_model(model_source)
    x_retrain, y_retrain = shuffle(x_train, y_train)
    x_reval, y_reval = shuffle(x_val, y_val)
    # Trim both sets to a whole number of batches (required by stateful RNNs).
    train_size = len(x_train) - (len(x_train) % batch_size)
    x_retrain = np.array(x_retrain)[:train_size]
    y_retrain = np.array(y_retrain)[:train_size]
    val_size = len(x_val) - (len(x_val) % batch_size)
    x_reval = np.array(x_reval)[:val_size]
    y_reval = np.array(y_reval)[:val_size]
    # Pick a TensorBoard subdirectory matching the global RNN flavour.
    if rnnkind == 'LSTM':
        retrain_path = 'retrain_lstm'
    elif rnnkind == 'GRU':
        retrain_path = 'retrain_gru'
    else:
        # BUG FIX: an unrecognised rnnkind previously caused a NameError on
        # retrain_path below; fall back to a generic log directory instead.
        retrain_path = 'retrain_other'
    tbx = TensorBoard(log_dir='logs/{0}/{1}_{2}/'.format(run_path, retrain_path, i), histogram_freq=0, write_graph=True,
                      write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                      embeddings_metadata=None)
    model0.fit(x_retrain, y_retrain,
               batch_size=batch_size, epochs=epochs, shuffle=True,
               validation_data=(x_reval, y_reval), callbacks=[tbx, reduce_lr, csv_logger])
    # NOTE(review): model0.summary() prints and returns None, so this logs
    # "None"; kept as-is to preserve existing log output.
    logging.info(model0.summary())
    return model0
def do_the_thing(train_data, train_target, validation_data, validation_target, data_dim, timesteps,
                 num_classes, batch_size, hidden_size, epochs, hidden_layers, kind='GRU'):
    """Build and train a stacked recurrent classifier (GRU or LSTM).

    :param train_data: training sequences, shape (n, timesteps, data_dim)
    :param train_target: one-hot training targets
    :param validation_data: validation sequences
    :param validation_target: one-hot validation targets
    :param data_dim: feature vector size per timestep
    :param timesteps: sequence length
    :param num_classes: size of the softmax output layer
    :param batch_size: batch size (baked into batch_input_shape)
    :param hidden_size: units per recurrent layer
    :param epochs: number of training epochs
    :param hidden_layers: number of additional stacked recurrent layers
    :param kind: 'GRU' or 'LSTM'; any other value builds no recurrent layers
    :return: the trained Keras model
    """
    log_msg = "[DO THE THING] "
    # Shuffle once up front; fit() below also shuffles per-epoch.
    inputs, targets = shuffle(train_data, train_target)
    validate_data, validate_target = shuffle(validation_data, validation_target)
    # learning_rate = 5e-1
    # seq_length = 10
    # data_dim = 8
    # timesteps = seq_length
    # num_classes = 1
    # batch_size = 50
    # hidden_size = 512
    # epochs = 20
    # Expected input batch shape: (batch_size, timesteps, data_dim)
    # Note that we have to provide the full batch_input_shape since the network is stateful.
    # the sample of index i in batch k is the follow-up for the sample i in batch k-1.
    modelx = Sequential()
    if kind == 'LSTM':
        modelx.add(LSTM(hidden_size, return_sequences=True,  # stateful=True,
                        batch_input_shape=(batch_size, timesteps, data_dim)))
        for i in range(hidden_layers):
            modelx.add(LSTM(hidden_size, return_sequences=True, stateful=True))
        # modelx.add(LSTM(hidden_size, return_sequences=True, stateful=True))
    elif kind == 'GRU':
        modelx.add(GRU(hidden_size, return_sequences=True,  # stateful=True,
                       batch_input_shape=(batch_size, timesteps, data_dim)))
        for i in range(hidden_layers):
            modelx.add(GRU(hidden_size, return_sequences=True, stateful=True))
        # modelx.add(GRU(hidden_size, return_sequences=True, stateful=True, ))
    modelx.add(Dropout(0.2))
    # model3.add(GRU(hidden_size, return_sequences=True, stateful=True))
    # model3.add(GRU(hidden_size, return_sequences=True, stateful=True))
    # model3.add(LSTM(v_size, return_sequences=True, stateful=True, activation='softmax'))
    # Pair sigmoid with mse
    # modelx.add(Dense(num_classes, activation='sigmoid'))
    # modelx.compile(loss='mean_squared_error',
    #                optimizer='rmsprop', metrics=['accuracy'])
    # Use for num_classes > 1
    modelx.add(Dense(num_classes, activation='softmax'))
    modelx.compile(loss='categorical_crossentropy',
                   optimizer='rmsprop',
                   metrics=['accuracy'])
    # Trim both sets to a whole number of batches, as required by the fixed
    # batch_input_shape above.
    # shape: (#batch*x, sequence_length, input_vector_size)
    train_size = len(inputs) - (len(inputs) % batch_size)
    x_train = np.array(inputs)[:train_size]
    y_train = np.array(targets)[:train_size]
    val_size = len(validate_data) - (len(validate_data) % batch_size)
    x_val = np.array(validate_data)[:val_size]
    y_val = np.array(validate_target)[:val_size]
    # # Generate dummy validation data
    # x_val = np.random.random((batch_size * 3, timesteps, data_dim))
    # y_val = np.random.random((batch_size * 3, num_classes))
    logging.info(log_msg + str(x_train.shape))
    logging.info(log_msg + str(y_train.shape))
    tb = TensorBoard(log_dir='logs/%s/' % run_path, histogram_freq=0, write_graph=True,
                     write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                     embeddings_metadata=None)
    modelx.fit(x_train, y_train,
               batch_size=batch_size, epochs=epochs, shuffle=True,
               validation_data=(x_val, y_val), callbacks=[tb, reduce_lr, csv_logger])
    modelx.summary()
    # NOTE(review): to_json() already returns a JSON string, so json.dumps()
    # double-encodes it here — confirm whether that is intentional.
    logging.info(json.dumps(modelx.to_json()))
    return modelx
def save_model(model, path):
    """Persist *model* to *path* via its ``save`` method, logging the outcome.

    Any exception raised while saving is logged and swallowed (best-effort
    persistence); the caller is not interrupted.

    :param model: object exposing ``save(path)`` (e.g. a Keras model)
    :param path: destination file path
    """
    log_msg = "[SAVE_MODEL] "
    logging.info(log_msg + "Saving model to %s ..." % path)
    try:
        model.save(path)
        logging.info(log_msg + "Model saved.")
    except Exception as e:
        # BUG FIX: Exception.message does not exist on Python 3, so this
        # handler itself raised AttributeError; format the exception directly.
        logging.error(log_msg + "Something went wrong: %s" % e)
@elapsed
def test_the_thing(model, test_source=None, batch_size=50, timesteps=10, features=8, save=True):
    """
    Test a trained model on one batch of labelled test sequences.
    :param model: trained model exposing ``predict_on_batch``
    :param test_source: optional path to a test CSV; a local default is used otherwise
    :param batch_size: batch of data points (not sequence length)
    :param timesteps: sequence length expected by the model
    :param features: feature vector size per timestep
    :param save: unused; kept for interface compatibility
    :return: accuracy over the evaluated batch (0.0 when nothing was evaluated)
    """
    log_msg = "[TEST] "
    testpath = '/home/mathias/Projects/motion_data/testx7.csv'
    if test_source:
        testpath = test_source
    # BUG FIX: the test file was logged before the test_source override, so
    # the log always showed the hard-coded default path.
    logging.info(log_msg + "Test file: %s" % testpath)
    test_data, test_target = get_the_fing_data(testpath, timesteps=timesteps, features=features, is_test=True)
    # Evaluate exactly one batch; predict_on_batch expects batch_size rows.
    # (Removed the unused 'test_size' computation left over from trimming to
    # whole batches elsewhere.)
    x_test = np.array(test_data)[:batch_size]
    y_test = np.array(test_target)[:batch_size]
    predictions = model.predict_on_batch(x_test)
    correct = 0
    wrong = 0
    for pred, yk in zip(predictions, y_test):
        # With is_test=True each target row is [one_hot_pair, [filename, frame]]
        # (see get_the_fing_data); split values from provenance labels.
        yb = [f[0] for f in yk]
        ylabel = [f[1] for f in yk]
        # BUG FIX: np.float was deprecated and then removed from NumPy; the
        # builtin float is the documented replacement.
        yak = np.array(yb, dtype=float)  # Only take the values; the rest are labels
        d, p, y = prediction_diff(yak, pred)
        show = "p vs y: %s - %s --> (abs) %s" % (p, y, d)
        if d < 0.3:
            correct += 1
        else:
            wrong += 1
            show += ' <!>'  # Mark wrong predictions
        file_start = ylabel[0][0]
        frame_start = ylabel[0][1]
        file_end = ylabel[-1][0]
        frame_end = ylabel[-1][1]
        show += " data: {0} ({1}) - {2} ({3})".format(file_start, frame_start, file_end, frame_end)  # show the
        print(show)
        logging.info(log_msg + show)
    total = correct + wrong
    # Guard against an empty batch instead of raising ZeroDivisionError.
    acc = correct * 1.0 / total if total else 0.0
    logging.info(log_msg + "Correct: %s vs. Wrong: %s" % (correct, wrong))
    logging.info(log_msg + "Accuracy: {:.04f} ({:.02f}%)".format(acc, acc * 100))
    return acc
def prediction_diff(target, prediction):
    """Summarise how far a prediction is from its target.

    Both arguments are (timesteps, classes) arrays. Each is collapsed to its
    per-class mean over the time axis, and the mean absolute difference
    between the two summaries is returned alongside them.

    :return: tuple (mean abs difference, mean prediction, mean target)
    """
    mean_pred = np.asarray(prediction).mean(axis=0)
    mean_target = np.asarray(target).mean(axis=0)
    gap = np.abs(mean_pred - mean_target).mean()
    return gap, mean_pred, mean_target
| |
import MySQLdb
import config
import warnings
import os.path as op
import contextlib
# MySQL option files (credentials/host) living next to this module:
# one for the citationhunt tool databases, one for the wiki replica.
ch_my_cnf = op.join(op.dirname(op.realpath(__file__)), 'ch.my.cnf')
wp_my_cnf = op.join(op.dirname(op.realpath(__file__)), 'wp.my.cnf')
class RetryingConnection(object):
    '''
    Wraps a MySQLdb connection, handling retries as needed.
    '''

    def __init__(self, connect):
        # connect: zero-argument factory returning a fresh MySQLdb connection;
        # used for the initial connect and for every reconnect.
        self._connect = connect
        self._do_connect()

    def _do_connect(self):
        # (Re)establish the wrapped connection and enable the client library's
        # automatic-reconnect behaviour.
        self.conn = self._connect()
        self.conn.ping(True)  # set the reconnect flag

    def execute_with_retry(self, operations, *args, **kwds):
        """Run operations(cursor, *args, **kwds) in a transaction, retrying.

        On MySQLdb.OperationalError the connection is re-established and the
        operations are retried, up to 5 attempts; the last failure is
        re-raised. The ``with self.conn`` block commits on success and rolls
        back on error.
        """
        max_retries = 5
        for retry in range(max_retries):
            try:
                with self.conn as cursor:
                    return operations(cursor, *args, **kwds)
            except MySQLdb.OperationalError:
                if retry == max_retries - 1:
                    raise
                else:
                    self._do_connect()
            else:
                # NOTE(review): dead code — the try block always returns on
                # success, so this else clause can never run.
                break

    def execute_with_retry_s(self, sql, *args):
        # Convenience wrapper: execute a single parameterized statement with
        # retry; returns all rows, or None when no rows were produced.
        def operations(cursor, sql, *args):
            cursor.execute(sql, args)
            if cursor.rowcount > 0:
                return cursor.fetchall()
            return None
        return self.execute_with_retry(operations, sql, *args)

    # https://stackoverflow.com/questions/4146095/ (sigh)
    # Forward the context-manager protocol and all other attribute access to
    # the wrapped connection so this object is a drop-in replacement for it.
    def __enter__(self):
        return self.conn.__enter__()

    def __exit__(self, *args):
        return self.conn.__exit__(*args)

    def __getattr__(self, name):
        return getattr(self.conn, name)
@contextlib.contextmanager
def ignore_warnings():
    """Context manager that temporarily suppresses MySQLdb warnings.

    BUG FIX: the filter is now removed in a ``finally`` block, so an exception
    raised inside the managed block no longer leaves the 'ignore' filter
    permanently installed for the rest of the process.
    """
    warnings.filterwarnings('ignore', category = MySQLdb.Warning)
    try:
        yield
    finally:
        warnings.resetwarnings()
def _connect(config_file):
    # All connections use utf8mb4 so the full Unicode range round-trips;
    # host/credentials come from the given MySQL option file.
    return MySQLdb.connect(charset = 'utf8mb4', read_default_file = config_file)
def _make_tools_labs_dbname(db, database, lang_code):
cursor = db.cursor()
cursor.execute("SELECT SUBSTRING_INDEX(USER(), '@', 1)")
user = cursor.fetchone()[0]
return '%s__%s_%s' % (user, database, lang_code)
def _ensure_database(db, database, lang_code):
    # Create (if missing) and select the per-user database for (database,
    # lang_code). The database name is interpolated directly into the SQL
    # because identifiers cannot be bound parameters; the name is built from
    # USER() and our own constants, not from external input.
    with db as cursor:
        dbname = _make_tools_labs_dbname(db, database, lang_code)
        with ignore_warnings():
            # Relax sql_mode so e.g. implicit truncation doesn't error out.
            cursor.execute('SET SESSION sql_mode = ""')
            cursor.execute(
                'CREATE DATABASE IF NOT EXISTS %s CHARACTER SET utf8mb4' % dbname)
            cursor.execute('USE %s' % dbname)
def init_db(lang_code):
    """Open a retrying connection to the per-language citationhunt database,
    creating and selecting it on (re)connect."""
    def connect_and_initialize():
        connection = _connect(ch_my_cnf)
        _ensure_database(connection, 'citationhunt', lang_code)
        return connection
    return RetryingConnection(connect_and_initialize)
def init_scratch_db():
    # Retrying connection to the per-language 'scratch' staging database,
    # used to build a fresh dataset before swapping it into 'citationhunt'.
    cfg = config.get_localized_config()
    def connect_and_initialize():
        db = _connect(ch_my_cnf)
        _ensure_database(db, 'scratch', cfg.lang_code)
        return db
    return RetryingConnection(connect_and_initialize)
def init_stats_db():
    # Retrying connection to the global stats database; creates the schema
    # (requests/fixed tables plus per-language views) on every (re)connect,
    # which is idempotent thanks to IF NOT EXISTS / CREATE OR REPLACE.
    def connect_and_initialize():
        db = _connect(ch_my_cnf)
        _ensure_database(db, 'stats', 'global')
        with db as cursor, ignore_warnings():
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS requests (
                ts DATETIME, lang_code VARCHAR(4), snippet_id VARCHAR(128),
                category_id VARCHAR(128), url VARCHAR(768), prefetch BOOLEAN,
                status_code INTEGER, referrer VARCHAR(128))
                ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
            ''')
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS fixed (
                clicked_ts DATETIME, snippet_id VARCHAR(128),
                lang_code VARCHAR(4))
                ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
            ''')
            # Create per-language views for convenience. The view name is
            # concatenated (identifiers can't be bound); lang_code comes from
            # our own config, and the WHERE value is properly parameterized.
            for lang_code in config.lang_code_to_config:
                cursor.execute('''
                    CREATE OR REPLACE VIEW requests_''' + lang_code +
                    ''' AS SELECT * FROM requests WHERE lang_code = %s
                ''', (lang_code,))
                cursor.execute('''
                    CREATE OR REPLACE VIEW fixed_''' + lang_code +
                    ''' AS SELECT * FROM fixed WHERE lang_code = %s
                ''', (lang_code,))
        return db
    return RetryingConnection(connect_and_initialize)
def init_wp_replica_db():
    # Retrying connection to the Wikipedia replica for the configured
    # language's database (read-only mirror; uses the wp option file).
    cfg = config.get_localized_config()
    def connect_and_initialize():
        db = _connect(wp_my_cnf)
        with db as cursor:
            cursor.execute('USE ' + cfg.database)
        return db
    return RetryingConnection(connect_and_initialize)
def init_projectindex_db():
    """Open a retrying connection to the WP X project-index database
    (hard-coded Tools Labs database name)."""
    def connect_and_initialize():
        connection = _connect(ch_my_cnf)
        with connection as cursor:
            cursor.execute('USE s52475__wpx_p')
        return connection
    return RetryingConnection(connect_and_initialize)
def reset_scratch_db():
    # Drop and recreate the per-language scratch database from scratch, then
    # recreate its tables. Destructive by design: used before a full rebuild.
    cfg = config.get_localized_config()
    db = init_db(cfg.lang_code)
    with db as cursor:
        dbname = _make_tools_labs_dbname(db, 'scratch', cfg.lang_code)
        with ignore_warnings():
            # Identifiers can't be bound parameters; dbname is internal.
            cursor.execute('DROP DATABASE IF EXISTS ' + dbname)
            cursor.execute('CREATE DATABASE %s CHARACTER SET utf8mb4' % dbname)
            cursor.execute('USE ' + dbname)
    create_tables(db)
    return db
def install_scratch_db():
    # Promote the freshly-built scratch database to be the live citationhunt
    # database by atomically renaming every table across the two schemas,
    # then dropping the (now old) scratch database.
    cfg = config.get_localized_config()
    db = init_db(cfg.lang_code)
    # ensure citationhunt is populated with tables
    create_tables(db)
    chname = _make_tools_labs_dbname(db, 'citationhunt', cfg.lang_code)
    scname = _make_tools_labs_dbname(db, 'scratch', cfg.lang_code)
    with db as cursor:
        # generate a sql query that will atomically swap tables in
        # 'citationhunt' and 'scratch'. Modified from:
        # http://blog.shlomoid.com/2010/02/emulating-missing-rename-database.html
        # The schema names are interpolated (identifiers can't be bound);
        # both come from _make_tools_labs_dbname, not external input.
        cursor.execute('''
            SELECT CONCAT('RENAME TABLE ',
            GROUP_CONCAT('%s.', table_name,
            ' TO ', table_schema, '.old_', table_name, ', ',
            table_schema, '.', table_name, ' TO ', '%s.', table_name),';')
            FROM information_schema.TABLES WHERE table_schema = '%s'
            GROUP BY table_schema;
        ''' % (chname, chname, scname))
        # The query above yields one RENAME TABLE statement covering every
        # table in scratch; executing it performs the swap atomically.
        rename_stmt = cursor.fetchone()[0]
        cursor.execute(rename_stmt)
        cursor.execute('DROP DATABASE ' + scname)
def create_tables(db):
    # Idempotently create the citationhunt schema in the currently-selected
    # database: categories, articles, their join table, snippets, and the
    # snippet navigation links. All tables are InnoDB/utf8mb4 with cascading
    # deletes so removing an article/category cleans up dependents.
    cfg = config.get_localized_config()
    with db as cursor, ignore_warnings():
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS categories (id VARCHAR(128) PRIMARY KEY,
            title VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
        ''')
        # Sentinel row for snippets that have no category assigned yet.
        cursor.execute('''
            INSERT IGNORE INTO categories VALUES("unassigned", "unassigned")
        ''')
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS articles (page_id INT(8) UNSIGNED
            PRIMARY KEY, url VARCHAR(512), title VARCHAR(512))
            ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
        ''')
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS articles_categories (
            article_id INT(8) UNSIGNED, category_id VARCHAR(128),
            FOREIGN KEY(article_id) REFERENCES articles(page_id)
            ON DELETE CASCADE,
            FOREIGN KEY(category_id) REFERENCES categories(id)
            ON DELETE CASCADE) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
        ''')
        # Snippet text column is sized from config (doubled for headroom);
        # this value is bound as a parameter of the DDL statement.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS snippets (id VARCHAR(128) PRIMARY KEY,
            snippet VARCHAR(%s), section VARCHAR(768), article_id INT(8)
            UNSIGNED, FOREIGN KEY(article_id) REFERENCES articles(page_id)
            ON DELETE CASCADE) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
        ''', (cfg.snippet_max_size * 2,))
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS snippets_links (prev VARCHAR(128),
            next VARCHAR(128), cat_id VARCHAR(128),
            FOREIGN KEY(prev) REFERENCES snippets(id) ON DELETE CASCADE,
            FOREIGN KEY(next) REFERENCES snippets(id) ON DELETE CASCADE,
            FOREIGN KEY(cat_id) REFERENCES categories(id) ON DELETE CASCADE)
            ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
        ''')
| |
<<<<<<< HEAD
<<<<<<< HEAD
# FIXME: unresolved Git merge conflict — the markers above and the matching
# "=======" separator further down duplicate this entire plistlib test module.
# The conflict must be resolved (keep one copy) before this file can run.
# Copyright (C) 2003-2013 Python Software Foundation
import unittest
import plistlib
import os
import datetime
import codecs
import binascii
import collections
import struct
from test import support
from io import BytesIO
ALL_FORMATS=(plistlib.FMT_XML, plistlib.FMT_BINARY)
# The testdata is generated using Mac/Tools/plistlib_generate_testdata.py
# (which using PyObjC to control the Cocoa classes for generating plists)
TESTDATA={
plistlib.FMT_XML: binascii.a2b_base64(b'''
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NU
WVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VO
IiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4w
LmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+YUJp
Z0ludDwva2V5PgoJPGludGVnZXI+OTIyMzM3MjAzNjg1NDc3NTc2NDwvaW50
ZWdlcj4KCTxrZXk+YUJpZ0ludDI8L2tleT4KCTxpbnRlZ2VyPjkyMjMzNzIw
MzY4NTQ3NzU4NTI8L2ludGVnZXI+Cgk8a2V5PmFEYXRlPC9rZXk+Cgk8ZGF0
ZT4yMDA0LTEwLTI2VDEwOjMzOjMzWjwvZGF0ZT4KCTxrZXk+YURpY3Q8L2tl
eT4KCTxkaWN0PgoJCTxrZXk+YUZhbHNlVmFsdWU8L2tleT4KCQk8ZmFsc2Uv
PgoJCTxrZXk+YVRydWVWYWx1ZTwva2V5PgoJCTx0cnVlLz4KCQk8a2V5PmFV
bmljb2RlVmFsdWU8L2tleT4KCQk8c3RyaW5nPk3DpHNzaWcsIE1hw588L3N0
cmluZz4KCQk8a2V5PmFub3RoZXJTdHJpbmc8L2tleT4KCQk8c3RyaW5nPiZs
dDtoZWxsbyAmYW1wOyAnaGknIHRoZXJlISZndDs8L3N0cmluZz4KCQk8a2V5
PmRlZXBlckRpY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5hPC9rZXk+CgkJ
CTxpbnRlZ2VyPjE3PC9pbnRlZ2VyPgoJCQk8a2V5PmI8L2tleT4KCQkJPHJl
YWw+MzIuNTwvcmVhbD4KCQkJPGtleT5jPC9rZXk+CgkJCTxhcnJheT4KCQkJ
CTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8aW50ZWdlcj4yPC9pbnRlZ2Vy
PgoJCQkJPHN0cmluZz50ZXh0PC9zdHJpbmc+CgkJCTwvYXJyYXk+CgkJPC9k
aWN0PgoJPC9kaWN0PgoJPGtleT5hRmxvYXQ8L2tleT4KCTxyZWFsPjAuNTwv
cmVhbD4KCTxrZXk+YUxpc3Q8L2tleT4KCTxhcnJheT4KCQk8c3RyaW5nPkE8
L3N0cmluZz4KCQk8c3RyaW5nPkI8L3N0cmluZz4KCQk8aW50ZWdlcj4xMjwv
aW50ZWdlcj4KCQk8cmVhbD4zMi41PC9yZWFsPgoJCTxhcnJheT4KCQkJPGlu
dGVnZXI+MTwvaW50ZWdlcj4KCQkJPGludGVnZXI+MjwvaW50ZWdlcj4KCQkJ
PGludGVnZXI+MzwvaW50ZWdlcj4KCQk8L2FycmF5PgoJPC9hcnJheT4KCTxr
ZXk+YU5lZ2F0aXZlQmlnSW50PC9rZXk+Cgk8aW50ZWdlcj4tODAwMDAwMDAw
MDA8L2ludGVnZXI+Cgk8a2V5PmFOZWdhdGl2ZUludDwva2V5PgoJPGludGVn
ZXI+LTU8L2ludGVnZXI+Cgk8a2V5PmFTdHJpbmc8L2tleT4KCTxzdHJpbmc+
RG9vZGFoPC9zdHJpbmc+Cgk8a2V5PmFuRW1wdHlEaWN0PC9rZXk+Cgk8ZGlj
dC8+Cgk8a2V5PmFuRW1wdHlMaXN0PC9rZXk+Cgk8YXJyYXkvPgoJPGtleT5h
bkludDwva2V5PgoJPGludGVnZXI+NzI4PC9pbnRlZ2VyPgoJPGtleT5uZXN0
ZWREYXRhPC9rZXk+Cgk8YXJyYXk+CgkJPGRhdGE+CgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5k
VzVyCgkJUGdBQkFnTThiRzkwY3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJ
RFBHeHZkSE1nYjJZZ1ltbHVZWEo1CgkJSUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004Ykc5MGN5QnZaaUJpCgkJYVc1
aGNua2daM1Z1YXo0QUFRSURQR3h2ZEhNZ2IyWWdZbWx1WVhKNUlHZDFibXMr
QUFFQ0F6eHNiM1J6CgkJSUc5bUlHSnBibUZ5ZVNCbmRXNXJQZ0FCQWdNOGJH
OTBjeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlECgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09CgkJPC9kYXRhPgoJPC9hcnJheT4K
CTxrZXk+c29tZURhdGE8L2tleT4KCTxkYXRhPgoJUEdKcGJtRnllU0JuZFc1
clBnPT0KCTwvZGF0YT4KCTxrZXk+c29tZU1vcmVEYXRhPC9rZXk+Cgk8ZGF0
YT4KCVBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004CgliRzkwY3lCdlppQmlhVzVo
Y25rZ1ozVnVhejRBQVFJRFBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytB
QUVDQXp4cwoJYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVyUGdBQkFnTThiRzkw
Y3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJRFBHeHYKCWRITWdiMllnWW1s
dVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVy
UGdBQkFnTThiRzkwCgljeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlEUEd4
dmRITWdiMllnWW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09Cgk8L2RhdGE+Cgk8
a2V5PsOFYmVucmFhPC9rZXk+Cgk8c3RyaW5nPlRoYXQgd2FzIGEgdW5pY29k
ZSBrZXkuPC9zdHJpbmc+CjwvZGljdD4KPC9wbGlzdD4K'''),
plistlib.FMT_BINARY: binascii.a2b_base64(b'''
YnBsaXN0MDDfEBABAgMEBQYHCAkKCwwNDg8QERITFCgpLzAxMjM0NTc2OFdh
QmlnSW50WGFCaWdJbnQyVWFEYXRlVWFEaWN0VmFGbG9hdFVhTGlzdF8QD2FO
ZWdhdGl2ZUJpZ0ludFxhTmVnYXRpdmVJbnRXYVN0cmluZ1thbkVtcHR5RGlj
dFthbkVtcHR5TGlzdFVhbkludFpuZXN0ZWREYXRhWHNvbWVEYXRhXHNvbWVN
b3JlRGF0YWcAxQBiAGUAbgByAGEAYRN/////////1BQAAAAAAAAAAIAAAAAA
AAAsM0GcuX30AAAA1RUWFxgZGhscHR5bYUZhbHNlVmFsdWVaYVRydWVWYWx1
ZV1hVW5pY29kZVZhbHVlXWFub3RoZXJTdHJpbmdaZGVlcGVyRGljdAgJawBN
AOQAcwBzAGkAZwAsACAATQBhAN9fEBU8aGVsbG8gJiAnaGknIHRoZXJlIT7T
HyAhIiMkUWFRYlFjEBEjQEBAAAAAAACjJSYnEAEQAlR0ZXh0Iz/gAAAAAAAA
pSorLCMtUUFRQhAMoyUmLhADE////+1foOAAE//////////7VkRvb2RhaNCg
EQLYoTZPEPo8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmlu
YXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBv
ZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4A
AQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBn
dW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDTTxiaW5hcnkgZ3Vu
az5fEBdUaGF0IHdhcyBhIHVuaWNvZGUga2V5LgAIACsAMwA8AEIASABPAFUA
ZwB0AHwAiACUAJoApQCuALsAygDTAOQA7QD4AQQBDwEdASsBNgE3ATgBTwFn
AW4BcAFyAXQBdgF/AYMBhQGHAYwBlQGbAZ0BnwGhAaUBpwGwAbkBwAHBAcIB
xQHHAsQC0gAAAAAAAAIBAAAAAAAAADkAAAAAAAAAAAAAAAAAAALs'''),
}
class TestPlistlib(unittest.TestCase):
    """Round-trip and validation tests for plistlib's XML and binary formats,
    checked against Apple-generated reference data (TESTDATA)."""

    def tearDown(self):
        try:
            os.unlink(support.TESTFN)
        except OSError:
            # BUG FIX: this was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt; only a missing or undeletable
            # file is expected here.
            pass

    def _create(self, fmt=None):
        # Build a plist exercising every supported value type (strings,
        # ints of many widths, floats, bools, dates, nested containers,
        # binary data, unicode keys).
        pl = dict(
            aString="Doodah",
            aList=["A", "B", 12, 32.5, [1, 2, 3]],
            aFloat = 0.5,
            anInt = 728,
            aBigInt = 2 ** 63 - 44,
            aBigInt2 = 2 ** 63 + 44,
            aNegativeInt = -5,
            aNegativeBigInt = -80000000000,
            aDict=dict(
                anotherString="<hello & 'hi' there!>",
                aUnicodeValue='M\xe4ssig, Ma\xdf',
                aTrueValue=True,
                aFalseValue=False,
                deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
            ),
            someData = b"<binary gunk>",
            someMoreData = b"<lots of binary gunk>\0\1\2\3" * 10,
            nestedData = [b"<lots of binary gunk>\0\1\2\3" * 10],
            aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
            anEmptyDict = dict(),
            anEmptyList = list()
        )
        pl['\xc5benraa'] = "That was a unicode key."
        return pl

    def test_create(self):
        pl = self._create()
        self.assertEqual(pl["aString"], "Doodah")
        self.assertEqual(pl["aDict"]["aFalseValue"], False)

    def test_io(self):
        pl = self._create()
        with open(support.TESTFN, 'wb') as fp:
            plistlib.dump(pl, fp)
        with open(support.TESTFN, 'rb') as fp:
            pl2 = plistlib.load(fp)
        self.assertEqual(dict(pl), dict(pl2))
        # dump/load require file objects, not path strings.
        self.assertRaises(AttributeError, plistlib.dump, pl, 'filename')
        self.assertRaises(AttributeError, plistlib.load, 'filename')

    def test_invalid_type(self):
        pl = [ object() ]
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)

    def test_int(self):
        for pl in [0, 2**8-1, 2**8, 2**16-1, 2**16, 2**32-1, 2**32,
                   2**63-1, 2**64-1, 1, -2**63]:
            for fmt in ALL_FORMATS:
                with self.subTest(pl=pl, fmt=fmt):
                    data = plistlib.dumps(pl, fmt=fmt)
                    pl2 = plistlib.loads(data)
                    self.assertIsInstance(pl2, int)
                    self.assertEqual(pl, pl2)
                    data2 = plistlib.dumps(pl2, fmt=fmt)
                    self.assertEqual(data, data2)
        # Integers outside the representable range must fail to serialize.
        for fmt in ALL_FORMATS:
            for pl in (2 ** 64 + 1, 2 ** 127-1, -2**64, -2 ** 127):
                with self.subTest(pl=pl, fmt=fmt):
                    self.assertRaises(OverflowError, plistlib.dumps,
                                      pl, fmt=fmt)

    def test_bytes(self):
        pl = self._create()
        data = plistlib.dumps(pl)
        pl2 = plistlib.loads(data)
        self.assertNotIsInstance(pl, plistlib._InternalDict)
        self.assertEqual(dict(pl), dict(pl2))
        data2 = plistlib.dumps(pl2)
        self.assertEqual(data, data2)

    def test_indentation_array(self):
        data = [[[[[[[[{'test': b'aaaaaa'}]]]]]]]]
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)

    def test_indentation_dict(self):
        data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': b'aaaaaa'}}}}}}}}}
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)

    def test_indentation_dict_mix(self):
        data = {'1': {'2': [{'3': [[[[[{'test': b'aaaaaa'}]]]]]}]}}
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)

    def test_appleformatting(self):
        # Loading Apple's reference output and re-dumping it must be
        # byte-identical, with and without builtin-type conversion.
        for use_builtin_types in (True, False):
            for fmt in ALL_FORMATS:
                with self.subTest(fmt=fmt, use_builtin_types=use_builtin_types):
                    pl = plistlib.loads(TESTDATA[fmt],
                                        use_builtin_types=use_builtin_types)
                    data = plistlib.dumps(pl, fmt=fmt)
                    self.assertEqual(data, TESTDATA[fmt],
                                     "generated data was not identical to Apple's output")

    def test_appleformattingfromliteral(self):
        self.maxDiff = None
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                pl = self._create(fmt=fmt)
                pl2 = plistlib.loads(TESTDATA[fmt], fmt=fmt)
                self.assertEqual(dict(pl), dict(pl2),
                                 "generated data was not identical to Apple's output")
                pl2 = plistlib.loads(TESTDATA[fmt])
                self.assertEqual(dict(pl), dict(pl2),
                                 "generated data was not identical to Apple's output")

    def test_bytesio(self):
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                b = BytesIO()
                pl = self._create(fmt=fmt)
                plistlib.dump(pl, b, fmt=fmt)
                pl2 = plistlib.load(BytesIO(b.getvalue()), fmt=fmt)
                self.assertEqual(dict(pl), dict(pl2))
                pl2 = plistlib.load(BytesIO(b.getvalue()))
                self.assertEqual(dict(pl), dict(pl2))

    def test_keysort_bytesio(self):
        pl = collections.OrderedDict()
        pl['b'] = 1
        pl['a'] = 2
        pl['c'] = 3
        for fmt in ALL_FORMATS:
            for sort_keys in (False, True):
                with self.subTest(fmt=fmt, sort_keys=sort_keys):
                    b = BytesIO()
                    plistlib.dump(pl, b, fmt=fmt, sort_keys=sort_keys)
                    pl2 = plistlib.load(BytesIO(b.getvalue()),
                                        dict_type=collections.OrderedDict)
                    self.assertEqual(dict(pl), dict(pl2))
                    if sort_keys:
                        self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
                    else:
                        self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])

    def test_keysort(self):
        pl = collections.OrderedDict()
        pl['b'] = 1
        pl['a'] = 2
        pl['c'] = 3
        for fmt in ALL_FORMATS:
            for sort_keys in (False, True):
                with self.subTest(fmt=fmt, sort_keys=sort_keys):
                    data = plistlib.dumps(pl, fmt=fmt, sort_keys=sort_keys)
                    pl2 = plistlib.loads(data, dict_type=collections.OrderedDict)
                    self.assertEqual(dict(pl), dict(pl2))
                    if sort_keys:
                        self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
                    else:
                        self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])

    def test_keys_no_string(self):
        pl = { 42: 'aNumber' }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
                b = BytesIO()
                self.assertRaises(TypeError, plistlib.dump, pl, b, fmt=fmt)

    def test_skipkeys(self):
        pl = {
            42: 'aNumber',
            'snake': 'aWord',
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(
                    pl, fmt=fmt, skipkeys=True, sort_keys=False)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {'snake': 'aWord'})
                fp = BytesIO()
                plistlib.dump(
                    pl, fp, fmt=fmt, skipkeys=True, sort_keys=False)
                # (Removed a redundant unused "data = fp.getvalue()" local.)
                pl2 = plistlib.loads(fp.getvalue())
                self.assertEqual(pl2, {'snake': 'aWord'})

    def test_tuple_members(self):
        pl = {
            'first': (1, 2),
            'second': (1, 2),
            'third': (3, 4),
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': [1, 2],
                    'second': [1, 2],
                    'third': [3, 4],
                })
                # Equal tuples must not be merged into one shared list.
                self.assertIsNot(pl2['first'], pl2['second'])

    def test_list_members(self):
        pl = {
            'first': [1, 2],
            'second': [1, 2],
            'third': [3, 4],
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': [1, 2],
                    'second': [1, 2],
                    'third': [3, 4],
                })
                self.assertIsNot(pl2['first'], pl2['second'])

    def test_dict_members(self):
        pl = {
            'first': {'a': 1},
            'second': {'a': 1},
            'third': {'b': 2 },
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': {'a': 1},
                    'second': {'a': 1},
                    'third': {'b': 2 },
                })
                self.assertIsNot(pl2['first'], pl2['second'])

    def test_controlcharacters(self):
        for i in range(128):
            c = chr(i)
            testString = "string containing %s" % c
            if i >= 32 or c in "\r\n\t":
                # \r, \n and \t are the only legal control chars in XML
                plistlib.dumps(testString, fmt=plistlib.FMT_XML)
            else:
                self.assertRaises(ValueError,
                                  plistlib.dumps,
                                  testString)

    def test_nondictroot(self):
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                test1 = "abc"
                test2 = [1, 2, 3, "abc"]
                result1 = plistlib.loads(plistlib.dumps(test1, fmt=fmt))
                result2 = plistlib.loads(plistlib.dumps(test2, fmt=fmt))
                self.assertEqual(test1, result1)
                self.assertEqual(test2, result2)

    def test_invalidarray(self):
        for i in ["<key>key inside an array</key>",
                  "<key>key inside an array2</key><real>3</real>",
                  "<true/><key>key inside an array3</key>"]:
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><array>%s</array></plist>"%i).encode())

    def test_invaliddict(self):
        for i in ["<key><true/>k</key><string>compound key</string>",
                  "<key>single key</key>",
                  "<string>missing key</string>",
                  "<key>k1</key><string>v1</string><real>5.3</real>"
                  "<key>k1</key><key>k2</key><string>double key</string>"]:
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><dict>%s</dict></plist>"%i).encode())
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><array><dict>%s</dict></array></plist>"%i).encode())

    def test_invalidinteger(self):
        self.assertRaises(ValueError, plistlib.loads,
                          b"<plist><integer>not integer</integer></plist>")

    def test_invalidreal(self):
        # BUG FIX: this previously duplicated test_invalidinteger's payload
        # (<integer> tags), so <real> parsing was never actually exercised.
        self.assertRaises(ValueError, plistlib.loads,
                          b"<plist><real>not real</real></plist>")

    def test_xml_encodings(self):
        base = TESTDATA[plistlib.FMT_XML]
        for xml_encoding, encoding, bom in [
                (b'utf-8', 'utf-8', codecs.BOM_UTF8),
                (b'utf-16', 'utf-16-le', codecs.BOM_UTF16_LE),
                (b'utf-16', 'utf-16-be', codecs.BOM_UTF16_BE),
                # Expat does not support UTF-32
                #(b'utf-32', 'utf-32-le', codecs.BOM_UTF32_LE),
                #(b'utf-32', 'utf-32-be', codecs.BOM_UTF32_BE),
                ]:
            pl = self._create(fmt=plistlib.FMT_XML)
            with self.subTest(encoding=encoding):
                data = base.replace(b'UTF-8', xml_encoding)
                data = bom + data.decode('utf-8').encode(encoding)
                pl2 = plistlib.loads(data)
                self.assertEqual(dict(pl), dict(pl2))

    def test_nonstandard_refs_size(self):
        # Issue #21538: Refs and offsets are 24-bit integers
        data = (b'bplist00'
                b'\xd1\x00\x00\x01\x00\x00\x02QaQb'
                b'\x00\x00\x08\x00\x00\x0f\x00\x00\x11'
                b'\x00\x00\x00\x00\x00\x00'
                b'\x03\x03'
                b'\x00\x00\x00\x00\x00\x00\x00\x03'
                b'\x00\x00\x00\x00\x00\x00\x00\x00'
                b'\x00\x00\x00\x00\x00\x00\x00\x13')
        self.assertEqual(plistlib.loads(data), {'a': 'b'})
class TestPlistlibDeprecated(unittest.TestCase):
    """Tests for the legacy readPlist/writePlist API.

    NOTE(review): these APIs (and plistlib.Data/_InternalDict) were
    deprecated and later removed from the standard library in Python 3.9 —
    this test class only runs on older interpreters; confirm the target
    Python version.
    """

    def test_io_deprecated(self):
        pl_in = {
            'key': 42,
            'sub': {
                'key': 9,
                'alt': 'value',
                'data': b'buffer',
            }
        }
        # The legacy API returns _InternalDict and wraps bytes in Data.
        pl_out = plistlib._InternalDict({
            'key': 42,
            'sub': plistlib._InternalDict({
                'key': 9,
                'alt': 'value',
                'data': plistlib.Data(b'buffer'),
            })
        })
        self.addCleanup(support.unlink, support.TESTFN)
        # Path-based round trip must emit DeprecationWarning both ways.
        with self.assertWarns(DeprecationWarning):
            plistlib.writePlist(pl_in, support.TESTFN)
        with self.assertWarns(DeprecationWarning):
            pl2 = plistlib.readPlist(support.TESTFN)
        self.assertEqual(pl_out, pl2)
        os.unlink(support.TESTFN)
        # Same again with open file objects instead of a path.
        with open(support.TESTFN, 'wb') as fp:
            with self.assertWarns(DeprecationWarning):
                plistlib.writePlist(pl_in, fp)
        with open(support.TESTFN, 'rb') as fp:
            with self.assertWarns(DeprecationWarning):
                pl2 = plistlib.readPlist(fp)
        self.assertEqual(pl_out, pl2)

    def test_bytes_deprecated(self):
        pl = {
            'key': 42,
            'sub': {
                'key': 9,
                'alt': 'value',
                'data': b'buffer',
            }
        }
        with self.assertWarns(DeprecationWarning):
            data = plistlib.writePlistToBytes(pl)
        with self.assertWarns(DeprecationWarning):
            pl2 = plistlib.readPlistFromBytes(data)
        self.assertIsInstance(pl2, plistlib._InternalDict)
        self.assertEqual(pl2, plistlib._InternalDict(
            key=42,
            sub=plistlib._InternalDict(
                key=9,
                alt='value',
                data=plistlib.Data(b'buffer'),
            )
        ))
        with self.assertWarns(DeprecationWarning):
            data2 = plistlib.writePlistToBytes(pl2)
        self.assertEqual(data, data2)

    def test_dataobject_deprecated(self):
        # Data objects round-trip to raw bytes with builtin types enabled,
        # and back to Data with use_builtin_types=False / the legacy reader.
        in_data = { 'key': plistlib.Data(b'hello') }
        out_data = { 'key': b'hello' }
        buf = plistlib.dumps(in_data)
        cur = plistlib.loads(buf)
        self.assertEqual(cur, out_data)
        self.assertNotEqual(cur, in_data)
        cur = plistlib.loads(buf, use_builtin_types=False)
        self.assertNotEqual(cur, out_data)
        self.assertEqual(cur, in_data)
        with self.assertWarns(DeprecationWarning):
            cur = plistlib.readPlistFromBytes(buf)
        self.assertNotEqual(cur, out_data)
        self.assertEqual(cur, in_data)
def test_main():
    # Old-style CPython test entry point used by regrtest.
    support.run_unittest(TestPlistlib, TestPlistlibDeprecated)


if __name__ == '__main__':
    test_main()
=======
# Copyright (C) 2003-2013 Python Software Foundation
import unittest
import plistlib
import os
import datetime
import codecs
import binascii
import collections
import struct
from test import support
from io import BytesIO
ALL_FORMATS=(plistlib.FMT_XML, plistlib.FMT_BINARY)
# The testdata is generated using Mac/Tools/plistlib_generate_testdata.py
# (which using PyObjC to control the Cocoa classes for generating plists)
TESTDATA={
plistlib.FMT_XML: binascii.a2b_base64(b'''
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NU
WVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VO
IiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4w
LmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+YUJp
Z0ludDwva2V5PgoJPGludGVnZXI+OTIyMzM3MjAzNjg1NDc3NTc2NDwvaW50
ZWdlcj4KCTxrZXk+YUJpZ0ludDI8L2tleT4KCTxpbnRlZ2VyPjkyMjMzNzIw
MzY4NTQ3NzU4NTI8L2ludGVnZXI+Cgk8a2V5PmFEYXRlPC9rZXk+Cgk8ZGF0
ZT4yMDA0LTEwLTI2VDEwOjMzOjMzWjwvZGF0ZT4KCTxrZXk+YURpY3Q8L2tl
eT4KCTxkaWN0PgoJCTxrZXk+YUZhbHNlVmFsdWU8L2tleT4KCQk8ZmFsc2Uv
PgoJCTxrZXk+YVRydWVWYWx1ZTwva2V5PgoJCTx0cnVlLz4KCQk8a2V5PmFV
bmljb2RlVmFsdWU8L2tleT4KCQk8c3RyaW5nPk3DpHNzaWcsIE1hw588L3N0
cmluZz4KCQk8a2V5PmFub3RoZXJTdHJpbmc8L2tleT4KCQk8c3RyaW5nPiZs
dDtoZWxsbyAmYW1wOyAnaGknIHRoZXJlISZndDs8L3N0cmluZz4KCQk8a2V5
PmRlZXBlckRpY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5hPC9rZXk+CgkJ
CTxpbnRlZ2VyPjE3PC9pbnRlZ2VyPgoJCQk8a2V5PmI8L2tleT4KCQkJPHJl
YWw+MzIuNTwvcmVhbD4KCQkJPGtleT5jPC9rZXk+CgkJCTxhcnJheT4KCQkJ
CTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8aW50ZWdlcj4yPC9pbnRlZ2Vy
PgoJCQkJPHN0cmluZz50ZXh0PC9zdHJpbmc+CgkJCTwvYXJyYXk+CgkJPC9k
aWN0PgoJPC9kaWN0PgoJPGtleT5hRmxvYXQ8L2tleT4KCTxyZWFsPjAuNTwv
cmVhbD4KCTxrZXk+YUxpc3Q8L2tleT4KCTxhcnJheT4KCQk8c3RyaW5nPkE8
L3N0cmluZz4KCQk8c3RyaW5nPkI8L3N0cmluZz4KCQk8aW50ZWdlcj4xMjwv
aW50ZWdlcj4KCQk8cmVhbD4zMi41PC9yZWFsPgoJCTxhcnJheT4KCQkJPGlu
dGVnZXI+MTwvaW50ZWdlcj4KCQkJPGludGVnZXI+MjwvaW50ZWdlcj4KCQkJ
PGludGVnZXI+MzwvaW50ZWdlcj4KCQk8L2FycmF5PgoJPC9hcnJheT4KCTxr
ZXk+YU5lZ2F0aXZlQmlnSW50PC9rZXk+Cgk8aW50ZWdlcj4tODAwMDAwMDAw
MDA8L2ludGVnZXI+Cgk8a2V5PmFOZWdhdGl2ZUludDwva2V5PgoJPGludGVn
ZXI+LTU8L2ludGVnZXI+Cgk8a2V5PmFTdHJpbmc8L2tleT4KCTxzdHJpbmc+
RG9vZGFoPC9zdHJpbmc+Cgk8a2V5PmFuRW1wdHlEaWN0PC9rZXk+Cgk8ZGlj
dC8+Cgk8a2V5PmFuRW1wdHlMaXN0PC9rZXk+Cgk8YXJyYXkvPgoJPGtleT5h
bkludDwva2V5PgoJPGludGVnZXI+NzI4PC9pbnRlZ2VyPgoJPGtleT5uZXN0
ZWREYXRhPC9rZXk+Cgk8YXJyYXk+CgkJPGRhdGE+CgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5k
VzVyCgkJUGdBQkFnTThiRzkwY3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJ
RFBHeHZkSE1nYjJZZ1ltbHVZWEo1CgkJSUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004Ykc5MGN5QnZaaUJpCgkJYVc1
aGNua2daM1Z1YXo0QUFRSURQR3h2ZEhNZ2IyWWdZbWx1WVhKNUlHZDFibXMr
QUFFQ0F6eHNiM1J6CgkJSUc5bUlHSnBibUZ5ZVNCbmRXNXJQZ0FCQWdNOGJH
OTBjeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlECgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09CgkJPC9kYXRhPgoJPC9hcnJheT4K
CTxrZXk+c29tZURhdGE8L2tleT4KCTxkYXRhPgoJUEdKcGJtRnllU0JuZFc1
clBnPT0KCTwvZGF0YT4KCTxrZXk+c29tZU1vcmVEYXRhPC9rZXk+Cgk8ZGF0
YT4KCVBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004CgliRzkwY3lCdlppQmlhVzVo
Y25rZ1ozVnVhejRBQVFJRFBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytB
QUVDQXp4cwoJYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVyUGdBQkFnTThiRzkw
Y3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJRFBHeHYKCWRITWdiMllnWW1s
dVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVy
UGdBQkFnTThiRzkwCgljeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlEUEd4
dmRITWdiMllnWW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09Cgk8L2RhdGE+Cgk8
a2V5PsOFYmVucmFhPC9rZXk+Cgk8c3RyaW5nPlRoYXQgd2FzIGEgdW5pY29k
ZSBrZXkuPC9zdHJpbmc+CjwvZGljdD4KPC9wbGlzdD4K'''),
plistlib.FMT_BINARY: binascii.a2b_base64(b'''
YnBsaXN0MDDfEBABAgMEBQYHCAkKCwwNDg8QERITFCgpLzAxMjM0NTc2OFdh
QmlnSW50WGFCaWdJbnQyVWFEYXRlVWFEaWN0VmFGbG9hdFVhTGlzdF8QD2FO
ZWdhdGl2ZUJpZ0ludFxhTmVnYXRpdmVJbnRXYVN0cmluZ1thbkVtcHR5RGlj
dFthbkVtcHR5TGlzdFVhbkludFpuZXN0ZWREYXRhWHNvbWVEYXRhXHNvbWVN
b3JlRGF0YWcAxQBiAGUAbgByAGEAYRN/////////1BQAAAAAAAAAAIAAAAAA
AAAsM0GcuX30AAAA1RUWFxgZGhscHR5bYUZhbHNlVmFsdWVaYVRydWVWYWx1
ZV1hVW5pY29kZVZhbHVlXWFub3RoZXJTdHJpbmdaZGVlcGVyRGljdAgJawBN
AOQAcwBzAGkAZwAsACAATQBhAN9fEBU8aGVsbG8gJiAnaGknIHRoZXJlIT7T
HyAhIiMkUWFRYlFjEBEjQEBAAAAAAACjJSYnEAEQAlR0ZXh0Iz/gAAAAAAAA
pSorLCMtUUFRQhAMoyUmLhADE////+1foOAAE//////////7VkRvb2RhaNCg
EQLYoTZPEPo8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmlu
YXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBv
ZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4A
AQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBn
dW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDTTxiaW5hcnkgZ3Vu
az5fEBdUaGF0IHdhcyBhIHVuaWNvZGUga2V5LgAIACsAMwA8AEIASABPAFUA
ZwB0AHwAiACUAJoApQCuALsAygDTAOQA7QD4AQQBDwEdASsBNgE3ATgBTwFn
AW4BcAFyAXQBdgF/AYMBhQGHAYwBlQGbAZ0BnwGhAaUBpwGwAbkBwAHBAcIB
xQHHAsQC0gAAAAAAAAIBAAAAAAAAADkAAAAAAAAAAAAAAAAAAALs'''),
}
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def _create(self, fmt=None):
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.5, [1, 2, 3]],
aFloat = 0.5,
anInt = 728,
aBigInt = 2 ** 63 - 44,
aBigInt2 = 2 ** 63 + 44,
aNegativeInt = -5,
aNegativeBigInt = -80000000000,
aDict=dict(
anotherString="<hello & 'hi' there!>",
aUnicodeValue='M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
),
someData = b"<binary gunk>",
someMoreData = b"<lots of binary gunk>\0\1\2\3" * 10,
nestedData = [b"<lots of binary gunk>\0\1\2\3" * 10],
aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
anEmptyDict = dict(),
anEmptyList = list()
)
pl['\xc5benraa'] = "That was a unicode key."
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
with open(support.TESTFN, 'wb') as fp:
plistlib.dump(pl, fp)
with open(support.TESTFN, 'rb') as fp:
pl2 = plistlib.load(fp)
self.assertEqual(dict(pl), dict(pl2))
self.assertRaises(AttributeError, plistlib.dump, pl, 'filename')
self.assertRaises(AttributeError, plistlib.load, 'filename')
def test_invalid_type(self):
pl = [ object() ]
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
def test_int(self):
for pl in [0, 2**8-1, 2**8, 2**16-1, 2**16, 2**32-1, 2**32,
2**63-1, 2**64-1, 1, -2**63]:
for fmt in ALL_FORMATS:
with self.subTest(pl=pl, fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertIsInstance(pl2, int)
self.assertEqual(pl, pl2)
data2 = plistlib.dumps(pl2, fmt=fmt)
self.assertEqual(data, data2)
for fmt in ALL_FORMATS:
for pl in (2 ** 64 + 1, 2 ** 127-1, -2**64, -2 ** 127):
with self.subTest(pl=pl, fmt=fmt):
self.assertRaises(OverflowError, plistlib.dumps,
pl, fmt=fmt)
def test_bytes(self):
pl = self._create()
data = plistlib.dumps(pl)
pl2 = plistlib.loads(data)
self.assertNotIsInstance(pl, plistlib._InternalDict)
self.assertEqual(dict(pl), dict(pl2))
data2 = plistlib.dumps(pl2)
self.assertEqual(data, data2)
def test_indentation_array(self):
data = [[[[[[[[{'test': b'aaaaaa'}]]]]]]]]
self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
def test_indentation_dict(self):
data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': b'aaaaaa'}}}}}}}}}
self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
def test_indentation_dict_mix(self):
data = {'1': {'2': [{'3': [[[[[{'test': b'aaaaaa'}]]]]]}]}}
self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
def test_appleformatting(self):
for use_builtin_types in (True, False):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt, use_builtin_types=use_builtin_types):
pl = plistlib.loads(TESTDATA[fmt],
use_builtin_types=use_builtin_types)
data = plistlib.dumps(pl, fmt=fmt)
self.assertEqual(data, TESTDATA[fmt],
"generated data was not identical to Apple's output")
def test_appleformattingfromliteral(self):
self.maxDiff = None
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
pl = self._create(fmt=fmt)
pl2 = plistlib.loads(TESTDATA[fmt], fmt=fmt)
self.assertEqual(dict(pl), dict(pl2),
"generated data was not identical to Apple's output")
pl2 = plistlib.loads(TESTDATA[fmt])
self.assertEqual(dict(pl), dict(pl2),
"generated data was not identical to Apple's output")
def test_bytesio(self):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
b = BytesIO()
pl = self._create(fmt=fmt)
plistlib.dump(pl, b, fmt=fmt)
pl2 = plistlib.load(BytesIO(b.getvalue()), fmt=fmt)
self.assertEqual(dict(pl), dict(pl2))
pl2 = plistlib.load(BytesIO(b.getvalue()))
self.assertEqual(dict(pl), dict(pl2))
def test_keysort_bytesio(self):
pl = collections.OrderedDict()
pl['b'] = 1
pl['a'] = 2
pl['c'] = 3
for fmt in ALL_FORMATS:
for sort_keys in (False, True):
with self.subTest(fmt=fmt, sort_keys=sort_keys):
b = BytesIO()
plistlib.dump(pl, b, fmt=fmt, sort_keys=sort_keys)
pl2 = plistlib.load(BytesIO(b.getvalue()),
dict_type=collections.OrderedDict)
self.assertEqual(dict(pl), dict(pl2))
if sort_keys:
self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
else:
self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])
def test_keysort(self):
pl = collections.OrderedDict()
pl['b'] = 1
pl['a'] = 2
pl['c'] = 3
for fmt in ALL_FORMATS:
for sort_keys in (False, True):
with self.subTest(fmt=fmt, sort_keys=sort_keys):
data = plistlib.dumps(pl, fmt=fmt, sort_keys=sort_keys)
pl2 = plistlib.loads(data, dict_type=collections.OrderedDict)
self.assertEqual(dict(pl), dict(pl2))
if sort_keys:
self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
else:
self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])
def test_keys_no_string(self):
pl = { 42: 'aNumber' }
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
b = BytesIO()
self.assertRaises(TypeError, plistlib.dump, pl, b, fmt=fmt)
def test_skipkeys(self):
pl = {
42: 'aNumber',
'snake': 'aWord',
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(
pl, fmt=fmt, skipkeys=True, sort_keys=False)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {'snake': 'aWord'})
fp = BytesIO()
plistlib.dump(
pl, fp, fmt=fmt, skipkeys=True, sort_keys=False)
data = fp.getvalue()
pl2 = plistlib.loads(fp.getvalue())
self.assertEqual(pl2, {'snake': 'aWord'})
def test_tuple_members(self):
pl = {
'first': (1, 2),
'second': (1, 2),
'third': (3, 4),
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {
'first': [1, 2],
'second': [1, 2],
'third': [3, 4],
})
self.assertIsNot(pl2['first'], pl2['second'])
def test_list_members(self):
pl = {
'first': [1, 2],
'second': [1, 2],
'third': [3, 4],
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {
'first': [1, 2],
'second': [1, 2],
'third': [3, 4],
})
self.assertIsNot(pl2['first'], pl2['second'])
def test_dict_members(self):
pl = {
'first': {'a': 1},
'second': {'a': 1},
'third': {'b': 2 },
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {
'first': {'a': 1},
'second': {'a': 1},
'third': {'b': 2 },
})
self.assertIsNot(pl2['first'], pl2['second'])
def test_controlcharacters(self):
for i in range(128):
c = chr(i)
testString = "string containing %s" % c
if i >= 32 or c in "\r\n\t":
# \r, \n and \t are the only legal control chars in XML
plistlib.dumps(testString, fmt=plistlib.FMT_XML)
else:
self.assertRaises(ValueError,
plistlib.dumps,
testString)
def test_nondictroot(self):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
test1 = "abc"
test2 = [1, 2, 3, "abc"]
result1 = plistlib.loads(plistlib.dumps(test1, fmt=fmt))
result2 = plistlib.loads(plistlib.dumps(test2, fmt=fmt))
self.assertEqual(test1, result1)
self.assertEqual(test2, result2)
def test_invalidarray(self):
for i in ["<key>key inside an array</key>",
"<key>key inside an array2</key><real>3</real>",
"<true/><key>key inside an array3</key>"]:
self.assertRaises(ValueError, plistlib.loads,
("<plist><array>%s</array></plist>"%i).encode())
def test_invaliddict(self):
for i in ["<key><true/>k</key><string>compound key</string>",
"<key>single key</key>",
"<string>missing key</string>",
"<key>k1</key><string>v1</string><real>5.3</real>"
"<key>k1</key><key>k2</key><string>double key</string>"]:
self.assertRaises(ValueError, plistlib.loads,
("<plist><dict>%s</dict></plist>"%i).encode())
self.assertRaises(ValueError, plistlib.loads,
("<plist><array><dict>%s</dict></array></plist>"%i).encode())
def test_invalidinteger(self):
self.assertRaises(ValueError, plistlib.loads,
b"<plist><integer>not integer</integer></plist>")
def test_invalidreal(self):
self.assertRaises(ValueError, plistlib.loads,
b"<plist><integer>not real</integer></plist>")
def test_xml_encodings(self):
base = TESTDATA[plistlib.FMT_XML]
for xml_encoding, encoding, bom in [
(b'utf-8', 'utf-8', codecs.BOM_UTF8),
(b'utf-16', 'utf-16-le', codecs.BOM_UTF16_LE),
(b'utf-16', 'utf-16-be', codecs.BOM_UTF16_BE),
# Expat does not support UTF-32
#(b'utf-32', 'utf-32-le', codecs.BOM_UTF32_LE),
#(b'utf-32', 'utf-32-be', codecs.BOM_UTF32_BE),
]:
pl = self._create(fmt=plistlib.FMT_XML)
with self.subTest(encoding=encoding):
data = base.replace(b'UTF-8', xml_encoding)
data = bom + data.decode('utf-8').encode(encoding)
pl2 = plistlib.loads(data)
self.assertEqual(dict(pl), dict(pl2))
def test_nonstandard_refs_size(self):
# Issue #21538: Refs and offsets are 24-bit integers
data = (b'bplist00'
b'\xd1\x00\x00\x01\x00\x00\x02QaQb'
b'\x00\x00\x08\x00\x00\x0f\x00\x00\x11'
b'\x00\x00\x00\x00\x00\x00'
b'\x03\x03'
b'\x00\x00\x00\x00\x00\x00\x00\x03'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13')
self.assertEqual(plistlib.loads(data), {'a': 'b'})
class TestPlistlibDeprecated(unittest.TestCase):
def test_io_deprecated(self):
pl_in = {
'key': 42,
'sub': {
'key': 9,
'alt': 'value',
'data': b'buffer',
}
}
pl_out = plistlib._InternalDict({
'key': 42,
'sub': plistlib._InternalDict({
'key': 9,
'alt': 'value',
'data': plistlib.Data(b'buffer'),
})
})
self.addCleanup(support.unlink, support.TESTFN)
with self.assertWarns(DeprecationWarning):
plistlib.writePlist(pl_in, support.TESTFN)
with self.assertWarns(DeprecationWarning):
pl2 = plistlib.readPlist(support.TESTFN)
self.assertEqual(pl_out, pl2)
os.unlink(support.TESTFN)
with open(support.TESTFN, 'wb') as fp:
with self.assertWarns(DeprecationWarning):
plistlib.writePlist(pl_in, fp)
with open(support.TESTFN, 'rb') as fp:
with self.assertWarns(DeprecationWarning):
pl2 = plistlib.readPlist(fp)
self.assertEqual(pl_out, pl2)
def test_bytes_deprecated(self):
pl = {
'key': 42,
'sub': {
'key': 9,
'alt': 'value',
'data': b'buffer',
}
}
with self.assertWarns(DeprecationWarning):
data = plistlib.writePlistToBytes(pl)
with self.assertWarns(DeprecationWarning):
pl2 = plistlib.readPlistFromBytes(data)
self.assertIsInstance(pl2, plistlib._InternalDict)
self.assertEqual(pl2, plistlib._InternalDict(
key=42,
sub=plistlib._InternalDict(
key=9,
alt='value',
data=plistlib.Data(b'buffer'),
)
))
with self.assertWarns(DeprecationWarning):
data2 = plistlib.writePlistToBytes(pl2)
self.assertEqual(data, data2)
def test_dataobject_deprecated(self):
in_data = { 'key': plistlib.Data(b'hello') }
out_data = { 'key': b'hello' }
buf = plistlib.dumps(in_data)
cur = plistlib.loads(buf)
self.assertEqual(cur, out_data)
self.assertNotEqual(cur, in_data)
cur = plistlib.loads(buf, use_builtin_types=False)
self.assertNotEqual(cur, out_data)
self.assertEqual(cur, in_data)
with self.assertWarns(DeprecationWarning):
cur = plistlib.readPlistFromBytes(buf)
self.assertNotEqual(cur, out_data)
self.assertEqual(cur, in_data)
def test_main():
support.run_unittest(TestPlistlib, TestPlistlibDeprecated)
if __name__ == '__main__':
test_main()
# NOTE(review): unresolved git merge-conflict markers removed here. The test
# module above is a byte-identical duplicate of the copy that follows and
# should be deleted when the conflict is properly resolved.
# Copyright (C) 2003-2013 Python Software Foundation
import unittest
import plistlib
import os
import datetime
import codecs
import binascii
import collections
import struct
from test import support
from io import BytesIO
ALL_FORMATS=(plistlib.FMT_XML, plistlib.FMT_BINARY)
# The testdata is generated using Mac/Tools/plistlib_generate_testdata.py
# (which using PyObjC to control the Cocoa classes for generating plists)
TESTDATA={
plistlib.FMT_XML: binascii.a2b_base64(b'''
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NU
WVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VO
IiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4w
LmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+YUJp
Z0ludDwva2V5PgoJPGludGVnZXI+OTIyMzM3MjAzNjg1NDc3NTc2NDwvaW50
ZWdlcj4KCTxrZXk+YUJpZ0ludDI8L2tleT4KCTxpbnRlZ2VyPjkyMjMzNzIw
MzY4NTQ3NzU4NTI8L2ludGVnZXI+Cgk8a2V5PmFEYXRlPC9rZXk+Cgk8ZGF0
ZT4yMDA0LTEwLTI2VDEwOjMzOjMzWjwvZGF0ZT4KCTxrZXk+YURpY3Q8L2tl
eT4KCTxkaWN0PgoJCTxrZXk+YUZhbHNlVmFsdWU8L2tleT4KCQk8ZmFsc2Uv
PgoJCTxrZXk+YVRydWVWYWx1ZTwva2V5PgoJCTx0cnVlLz4KCQk8a2V5PmFV
bmljb2RlVmFsdWU8L2tleT4KCQk8c3RyaW5nPk3DpHNzaWcsIE1hw588L3N0
cmluZz4KCQk8a2V5PmFub3RoZXJTdHJpbmc8L2tleT4KCQk8c3RyaW5nPiZs
dDtoZWxsbyAmYW1wOyAnaGknIHRoZXJlISZndDs8L3N0cmluZz4KCQk8a2V5
PmRlZXBlckRpY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5hPC9rZXk+CgkJ
CTxpbnRlZ2VyPjE3PC9pbnRlZ2VyPgoJCQk8a2V5PmI8L2tleT4KCQkJPHJl
YWw+MzIuNTwvcmVhbD4KCQkJPGtleT5jPC9rZXk+CgkJCTxhcnJheT4KCQkJ
CTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8aW50ZWdlcj4yPC9pbnRlZ2Vy
PgoJCQkJPHN0cmluZz50ZXh0PC9zdHJpbmc+CgkJCTwvYXJyYXk+CgkJPC9k
aWN0PgoJPC9kaWN0PgoJPGtleT5hRmxvYXQ8L2tleT4KCTxyZWFsPjAuNTwv
cmVhbD4KCTxrZXk+YUxpc3Q8L2tleT4KCTxhcnJheT4KCQk8c3RyaW5nPkE8
L3N0cmluZz4KCQk8c3RyaW5nPkI8L3N0cmluZz4KCQk8aW50ZWdlcj4xMjwv
aW50ZWdlcj4KCQk8cmVhbD4zMi41PC9yZWFsPgoJCTxhcnJheT4KCQkJPGlu
dGVnZXI+MTwvaW50ZWdlcj4KCQkJPGludGVnZXI+MjwvaW50ZWdlcj4KCQkJ
PGludGVnZXI+MzwvaW50ZWdlcj4KCQk8L2FycmF5PgoJPC9hcnJheT4KCTxr
ZXk+YU5lZ2F0aXZlQmlnSW50PC9rZXk+Cgk8aW50ZWdlcj4tODAwMDAwMDAw
MDA8L2ludGVnZXI+Cgk8a2V5PmFOZWdhdGl2ZUludDwva2V5PgoJPGludGVn
ZXI+LTU8L2ludGVnZXI+Cgk8a2V5PmFTdHJpbmc8L2tleT4KCTxzdHJpbmc+
RG9vZGFoPC9zdHJpbmc+Cgk8a2V5PmFuRW1wdHlEaWN0PC9rZXk+Cgk8ZGlj
dC8+Cgk8a2V5PmFuRW1wdHlMaXN0PC9rZXk+Cgk8YXJyYXkvPgoJPGtleT5h
bkludDwva2V5PgoJPGludGVnZXI+NzI4PC9pbnRlZ2VyPgoJPGtleT5uZXN0
ZWREYXRhPC9rZXk+Cgk8YXJyYXk+CgkJPGRhdGE+CgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5k
VzVyCgkJUGdBQkFnTThiRzkwY3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJ
RFBHeHZkSE1nYjJZZ1ltbHVZWEo1CgkJSUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004Ykc5MGN5QnZaaUJpCgkJYVc1
aGNua2daM1Z1YXo0QUFRSURQR3h2ZEhNZ2IyWWdZbWx1WVhKNUlHZDFibXMr
QUFFQ0F6eHNiM1J6CgkJSUc5bUlHSnBibUZ5ZVNCbmRXNXJQZ0FCQWdNOGJH
OTBjeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlECgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09CgkJPC9kYXRhPgoJPC9hcnJheT4K
CTxrZXk+c29tZURhdGE8L2tleT4KCTxkYXRhPgoJUEdKcGJtRnllU0JuZFc1
clBnPT0KCTwvZGF0YT4KCTxrZXk+c29tZU1vcmVEYXRhPC9rZXk+Cgk8ZGF0
YT4KCVBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004CgliRzkwY3lCdlppQmlhVzVo
Y25rZ1ozVnVhejRBQVFJRFBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytB
QUVDQXp4cwoJYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVyUGdBQkFnTThiRzkw
Y3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJRFBHeHYKCWRITWdiMllnWW1s
dVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVy
UGdBQkFnTThiRzkwCgljeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlEUEd4
dmRITWdiMllnWW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09Cgk8L2RhdGE+Cgk8
a2V5PsOFYmVucmFhPC9rZXk+Cgk8c3RyaW5nPlRoYXQgd2FzIGEgdW5pY29k
ZSBrZXkuPC9zdHJpbmc+CjwvZGljdD4KPC9wbGlzdD4K'''),
plistlib.FMT_BINARY: binascii.a2b_base64(b'''
YnBsaXN0MDDfEBABAgMEBQYHCAkKCwwNDg8QERITFCgpLzAxMjM0NTc2OFdh
QmlnSW50WGFCaWdJbnQyVWFEYXRlVWFEaWN0VmFGbG9hdFVhTGlzdF8QD2FO
ZWdhdGl2ZUJpZ0ludFxhTmVnYXRpdmVJbnRXYVN0cmluZ1thbkVtcHR5RGlj
dFthbkVtcHR5TGlzdFVhbkludFpuZXN0ZWREYXRhWHNvbWVEYXRhXHNvbWVN
b3JlRGF0YWcAxQBiAGUAbgByAGEAYRN/////////1BQAAAAAAAAAAIAAAAAA
AAAsM0GcuX30AAAA1RUWFxgZGhscHR5bYUZhbHNlVmFsdWVaYVRydWVWYWx1
ZV1hVW5pY29kZVZhbHVlXWFub3RoZXJTdHJpbmdaZGVlcGVyRGljdAgJawBN
AOQAcwBzAGkAZwAsACAATQBhAN9fEBU8aGVsbG8gJiAnaGknIHRoZXJlIT7T
HyAhIiMkUWFRYlFjEBEjQEBAAAAAAACjJSYnEAEQAlR0ZXh0Iz/gAAAAAAAA
pSorLCMtUUFRQhAMoyUmLhADE////+1foOAAE//////////7VkRvb2RhaNCg
EQLYoTZPEPo8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmlu
YXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBv
ZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4A
AQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBn
dW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDTTxiaW5hcnkgZ3Vu
az5fEBdUaGF0IHdhcyBhIHVuaWNvZGUga2V5LgAIACsAMwA8AEIASABPAFUA
ZwB0AHwAiACUAJoApQCuALsAygDTAOQA7QD4AQQBDwEdASsBNgE3ATgBTwFn
AW4BcAFyAXQBdgF/AYMBhQGHAYwBlQGbAZ0BnwGhAaUBpwGwAbkBwAHBAcIB
xQHHAsQC0gAAAAAAAAIBAAAAAAAAADkAAAAAAAAAAAAAAAAAAALs'''),
}
class TestPlistlib(unittest.TestCase):
    """Round-trip, formatting and error-handling tests for plistlib.

    Uses the module-level TESTDATA dict (reference plists generated with
    Apple's Cocoa classes) and ALL_FORMATS (XML and binary formats).
    """

    def tearDown(self):
        # Remove the scratch file a test may have left behind.
        try:
            os.unlink(support.TESTFN)
        except OSError:
            # Only swallow "file is missing"-style errors; the previous bare
            # except also hid real bugs (and even KeyboardInterrupt).
            pass

    def _create(self, fmt=None):
        """Return a plist dict exercising every supported value type."""
        pl = dict(
            aString="Doodah",
            aList=["A", "B", 12, 32.5, [1, 2, 3]],
            aFloat = 0.5,
            anInt = 728,
            aBigInt = 2 ** 63 - 44,
            aBigInt2 = 2 ** 63 + 44,
            aNegativeInt = -5,
            aNegativeBigInt = -80000000000,
            aDict=dict(
                anotherString="<hello & 'hi' there!>",
                aUnicodeValue='M\xe4ssig, Ma\xdf',
                aTrueValue=True,
                aFalseValue=False,
                deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
            ),
            someData = b"<binary gunk>",
            someMoreData = b"<lots of binary gunk>\0\1\2\3" * 10,
            nestedData = [b"<lots of binary gunk>\0\1\2\3" * 10],
            aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
            anEmptyDict = dict(),
            anEmptyList = list()
        )
        pl['\xc5benraa'] = "That was a unicode key."
        return pl

    def test_create(self):
        pl = self._create()
        self.assertEqual(pl["aString"], "Doodah")
        self.assertEqual(pl["aDict"]["aFalseValue"], False)

    def test_io(self):
        pl = self._create()
        with open(support.TESTFN, 'wb') as fp:
            plistlib.dump(pl, fp)
        with open(support.TESTFN, 'rb') as fp:
            pl2 = plistlib.load(fp)
        self.assertEqual(dict(pl), dict(pl2))

        # dump()/load() expect file objects, not path strings.
        self.assertRaises(AttributeError, plistlib.dump, pl, 'filename')
        self.assertRaises(AttributeError, plistlib.load, 'filename')

    def test_invalid_type(self):
        pl = [ object() ]
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)

    def test_int(self):
        for pl in [0, 2**8-1, 2**8, 2**16-1, 2**16, 2**32-1, 2**32,
                   2**63-1, 2**64-1, 1, -2**63]:
            for fmt in ALL_FORMATS:
                with self.subTest(pl=pl, fmt=fmt):
                    data = plistlib.dumps(pl, fmt=fmt)
                    pl2 = plistlib.loads(data)
                    self.assertIsInstance(pl2, int)
                    self.assertEqual(pl, pl2)
                    data2 = plistlib.dumps(pl2, fmt=fmt)
                    self.assertEqual(data, data2)

        # Integers outside the representable range must be rejected.
        for fmt in ALL_FORMATS:
            for pl in (2 ** 64 + 1, 2 ** 127-1, -2**64, -2 ** 127):
                with self.subTest(pl=pl, fmt=fmt):
                    self.assertRaises(OverflowError, plistlib.dumps,
                                      pl, fmt=fmt)

    def test_bytes(self):
        pl = self._create()
        data = plistlib.dumps(pl)
        pl2 = plistlib.loads(data)
        self.assertNotIsInstance(pl, plistlib._InternalDict)
        self.assertEqual(dict(pl), dict(pl2))
        data2 = plistlib.dumps(pl2)
        self.assertEqual(data, data2)

    def test_indentation_array(self):
        data = [[[[[[[[{'test': b'aaaaaa'}]]]]]]]]
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)

    def test_indentation_dict(self):
        data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': b'aaaaaa'}}}}}}}}}
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)

    def test_indentation_dict_mix(self):
        data = {'1': {'2': [{'3': [[[[[{'test': b'aaaaaa'}]]]]]}]}}
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)

    def test_appleformatting(self):
        # Re-serializing Apple's reference plists must reproduce them exactly.
        for use_builtin_types in (True, False):
            for fmt in ALL_FORMATS:
                with self.subTest(fmt=fmt, use_builtin_types=use_builtin_types):
                    pl = plistlib.loads(TESTDATA[fmt],
                                        use_builtin_types=use_builtin_types)
                    data = plistlib.dumps(pl, fmt=fmt)
                    self.assertEqual(data, TESTDATA[fmt],
                                     "generated data was not identical to Apple's output")

    def test_appleformattingfromliteral(self):
        self.maxDiff = None
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                pl = self._create(fmt=fmt)
                pl2 = plistlib.loads(TESTDATA[fmt], fmt=fmt)
                self.assertEqual(dict(pl), dict(pl2),
                                 "generated data was not identical to Apple's output")

                # fmt=None must auto-detect the format.
                pl2 = plistlib.loads(TESTDATA[fmt])
                self.assertEqual(dict(pl), dict(pl2),
                                 "generated data was not identical to Apple's output")

    def test_bytesio(self):
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                b = BytesIO()
                pl = self._create(fmt=fmt)
                plistlib.dump(pl, b, fmt=fmt)
                pl2 = plistlib.load(BytesIO(b.getvalue()), fmt=fmt)
                self.assertEqual(dict(pl), dict(pl2))
                pl2 = plistlib.load(BytesIO(b.getvalue()))
                self.assertEqual(dict(pl), dict(pl2))

    def test_keysort_bytesio(self):
        pl = collections.OrderedDict()
        pl['b'] = 1
        pl['a'] = 2
        pl['c'] = 3

        for fmt in ALL_FORMATS:
            for sort_keys in (False, True):
                with self.subTest(fmt=fmt, sort_keys=sort_keys):
                    b = BytesIO()
                    plistlib.dump(pl, b, fmt=fmt, sort_keys=sort_keys)
                    pl2 = plistlib.load(BytesIO(b.getvalue()),
                                        dict_type=collections.OrderedDict)
                    self.assertEqual(dict(pl), dict(pl2))
                    if sort_keys:
                        self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
                    else:
                        self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])

    def test_keysort(self):
        pl = collections.OrderedDict()
        pl['b'] = 1
        pl['a'] = 2
        pl['c'] = 3

        for fmt in ALL_FORMATS:
            for sort_keys in (False, True):
                with self.subTest(fmt=fmt, sort_keys=sort_keys):
                    data = plistlib.dumps(pl, fmt=fmt, sort_keys=sort_keys)
                    pl2 = plistlib.loads(data, dict_type=collections.OrderedDict)
                    self.assertEqual(dict(pl), dict(pl2))
                    if sort_keys:
                        self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
                    else:
                        self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])

    def test_keys_no_string(self):
        # Plist dictionary keys must be strings.
        pl = { 42: 'aNumber' }

        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)

                b = BytesIO()
                self.assertRaises(TypeError, plistlib.dump, pl, b, fmt=fmt)

    def test_skipkeys(self):
        pl = {
            42: 'aNumber',
            'snake': 'aWord',
        }

        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(
                    pl, fmt=fmt, skipkeys=True, sort_keys=False)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {'snake': 'aWord'})

                fp = BytesIO()
                plistlib.dump(
                    pl, fp, fmt=fmt, skipkeys=True, sort_keys=False)
                data = fp.getvalue()
                # Reuse the captured buffer instead of calling getvalue()
                # a second time.
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {'snake': 'aWord'})

    def test_tuple_members(self):
        pl = {
            'first': (1, 2),
            'second': (1, 2),
            'third': (3, 4),
        }

        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                # Tuples come back as lists, and equal values must not be
                # collapsed into one shared object.
                self.assertEqual(pl2, {
                    'first': [1, 2],
                    'second': [1, 2],
                    'third': [3, 4],
                })
                self.assertIsNot(pl2['first'], pl2['second'])

    def test_list_members(self):
        pl = {
            'first': [1, 2],
            'second': [1, 2],
            'third': [3, 4],
        }

        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': [1, 2],
                    'second': [1, 2],
                    'third': [3, 4],
                })
                self.assertIsNot(pl2['first'], pl2['second'])

    def test_dict_members(self):
        pl = {
            'first': {'a': 1},
            'second': {'a': 1},
            'third': {'b': 2 },
        }

        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': {'a': 1},
                    'second': {'a': 1},
                    'third': {'b': 2 },
                })
                self.assertIsNot(pl2['first'], pl2['second'])

    def test_controlcharacters(self):
        for i in range(128):
            c = chr(i)
            testString = "string containing %s" % c
            if i >= 32 or c in "\r\n\t":
                # \r, \n and \t are the only legal control chars in XML
                plistlib.dumps(testString, fmt=plistlib.FMT_XML)
            else:
                self.assertRaises(ValueError,
                                  plistlib.dumps,
                                  testString)

    def test_nondictroot(self):
        # Any plist type, not only dict, may be the document root.
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                test1 = "abc"
                test2 = [1, 2, 3, "abc"]
                result1 = plistlib.loads(plistlib.dumps(test1, fmt=fmt))
                result2 = plistlib.loads(plistlib.dumps(test2, fmt=fmt))
                self.assertEqual(test1, result1)
                self.assertEqual(test2, result2)

    def test_invalidarray(self):
        for i in ["<key>key inside an array</key>",
                  "<key>key inside an array2</key><real>3</real>",
                  "<true/><key>key inside an array3</key>"]:
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><array>%s</array></plist>"%i).encode())

    def test_invaliddict(self):
        # NOTE(review): the last two literals concatenate (no comma) into a
        # single test case — looks intentional, but TODO confirm.
        for i in ["<key><true/>k</key><string>compound key</string>",
                  "<key>single key</key>",
                  "<string>missing key</string>",
                  "<key>k1</key><string>v1</string><real>5.3</real>"
                  "<key>k1</key><key>k2</key><string>double key</string>"]:
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><dict>%s</dict></plist>"%i).encode())
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><array><dict>%s</dict></array></plist>"%i).encode())

    def test_invalidinteger(self):
        self.assertRaises(ValueError, plistlib.loads,
                          b"<plist><integer>not integer</integer></plist>")

    def test_invalidreal(self):
        # Fixed copy-paste from test_invalidinteger: this must exercise the
        # <real> element, not <integer>.
        self.assertRaises(ValueError, plistlib.loads,
                          b"<plist><real>not real</real></plist>")

    def test_xml_encodings(self):
        base = TESTDATA[plistlib.FMT_XML]

        for xml_encoding, encoding, bom in [
                (b'utf-8', 'utf-8', codecs.BOM_UTF8),
                (b'utf-16', 'utf-16-le', codecs.BOM_UTF16_LE),
                (b'utf-16', 'utf-16-be', codecs.BOM_UTF16_BE),
                # Expat does not support UTF-32
                #(b'utf-32', 'utf-32-le', codecs.BOM_UTF32_LE),
                #(b'utf-32', 'utf-32-be', codecs.BOM_UTF32_BE),
                ]:
            pl = self._create(fmt=plistlib.FMT_XML)
            with self.subTest(encoding=encoding):
                data = base.replace(b'UTF-8', xml_encoding)
                data = bom + data.decode('utf-8').encode(encoding)
                pl2 = plistlib.loads(data)
                self.assertEqual(dict(pl), dict(pl2))

    def test_nonstandard_refs_size(self):
        # Issue #21538: Refs and offsets are 24-bit integers
        data = (b'bplist00'
                b'\xd1\x00\x00\x01\x00\x00\x02QaQb'
                b'\x00\x00\x08\x00\x00\x0f\x00\x00\x11'
                b'\x00\x00\x00\x00\x00\x00'
                b'\x03\x03'
                b'\x00\x00\x00\x00\x00\x00\x00\x03'
                b'\x00\x00\x00\x00\x00\x00\x00\x00'
                b'\x00\x00\x00\x00\x00\x00\x00\x13')
        self.assertEqual(plistlib.loads(data), {'a': 'b'})
class TestPlistlibDeprecated(unittest.TestCase):
    """Tests for the deprecated pre-3.4 plistlib API (readPlist/writePlist,
    Data, _InternalDict); every deprecated call must warn.

    NOTE(review): these APIs were removed from plistlib in Python 3.9 —
    this class only runs on older interpreters.
    """

    def test_io_deprecated(self):
        """readPlist/writePlist round-trip via a path and via a file object."""
        # Plain-dict input; the deprecated reader returns _InternalDict with
        # binary data wrapped in plistlib.Data.
        pl_in = {
            'key': 42,
            'sub': {
                'key': 9,
                'alt': 'value',
                'data': b'buffer',
            }
        }
        pl_out = plistlib._InternalDict({
            'key': 42,
            'sub': plistlib._InternalDict({
                'key': 9,
                'alt': 'value',
                'data': plistlib.Data(b'buffer'),
            })
        })

        self.addCleanup(support.unlink, support.TESTFN)
        # Path-based round trip: each deprecated call must warn.
        with self.assertWarns(DeprecationWarning):
            plistlib.writePlist(pl_in, support.TESTFN)
        with self.assertWarns(DeprecationWarning):
            pl2 = plistlib.readPlist(support.TESTFN)

        self.assertEqual(pl_out, pl2)

        os.unlink(support.TESTFN)

        # File-object-based round trip.
        with open(support.TESTFN, 'wb') as fp:
            with self.assertWarns(DeprecationWarning):
                plistlib.writePlist(pl_in, fp)
        with open(support.TESTFN, 'rb') as fp:
            with self.assertWarns(DeprecationWarning):
                pl2 = plistlib.readPlist(fp)

        self.assertEqual(pl_out, pl2)

    def test_bytes_deprecated(self):
        """writePlistToBytes/readPlistFromBytes round-trip with warnings."""
        pl = {
            'key': 42,
            'sub': {
                'key': 9,
                'alt': 'value',
                'data': b'buffer',
            }
        }
        with self.assertWarns(DeprecationWarning):
            data = plistlib.writePlistToBytes(pl)
        with self.assertWarns(DeprecationWarning):
            pl2 = plistlib.readPlistFromBytes(data)

        self.assertIsInstance(pl2, plistlib._InternalDict)
        self.assertEqual(pl2, plistlib._InternalDict(
            key=42,
            sub=plistlib._InternalDict(
                key=9,
                alt='value',
                data=plistlib.Data(b'buffer'),
            )
        ))

        # Serializing the deprecated types must reproduce the same bytes.
        with self.assertWarns(DeprecationWarning):
            data2 = plistlib.writePlistToBytes(pl2)
        self.assertEqual(data, data2)

    def test_dataobject_deprecated(self):
        """plistlib.Data wrapping: use_builtin_types controls bytes vs Data."""
        in_data = { 'key': plistlib.Data(b'hello') }
        out_data = { 'key': b'hello' }
        buf = plistlib.dumps(in_data)

        # Default: binary data is returned as plain bytes.
        cur = plistlib.loads(buf)
        self.assertEqual(cur, out_data)
        self.assertNotEqual(cur, in_data)

        # use_builtin_types=False keeps the legacy Data wrapper.
        cur = plistlib.loads(buf, use_builtin_types=False)
        self.assertNotEqual(cur, out_data)
        self.assertEqual(cur, in_data)

        # The deprecated reader behaves like use_builtin_types=False.
        with self.assertWarns(DeprecationWarning):
            cur = plistlib.readPlistFromBytes(buf)
        self.assertNotEqual(cur, out_data)
        self.assertEqual(cur, in_data)
def test_main():
    """Entry point for regrtest: run every plistlib test case."""
    test_cases = (TestPlistlib, TestPlistlibDeprecated)
    support.run_unittest(*test_cases)


if __name__ == '__main__':
    test_main()
# NOTE(review): trailing merge-conflict marker and stray "| |" separator
# removed; the Django model code below belongs to a different module and
# should live in its own file.
import time
from typing import Any, Dict, List, Optional
from django.db import models
from . import logging
from .autoupdate import AutoupdateElement, inform_changed_data, inform_elements
from .rest_api import model_serializer_classes
from .utils import convert_camel_case_to_pseudo_snake_case, get_element_id
logger = logging.getLogger(__name__)
class MinMaxIntegerField(models.IntegerField):
    """
    IntegerField with options to set a min- and a max-value.

    The bounds are not enforced at the database level; they are forwarded
    to the generated form field for validation.
    """

    def __init__(
        self,
        min_value: Optional[int] = None,  # PEP 484: default None needs Optional
        max_value: Optional[int] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        # Remember the bounds so formfield() can pass them on.
        self.min_value, self.max_value = min_value, max_value
        super().__init__(*args, **kwargs)

    def formfield(self, **kwargs: Any) -> Any:
        """Return a form field limited to [min_value, max_value]."""
        # Explicit kwargs from the caller override the stored bounds.
        defaults = {"min_value": self.min_value, "max_value": self.max_value}
        defaults.update(kwargs)
        return super().formfield(**defaults)
class RESTModelMixin:
    """
    Mixin for Django models which are used in our REST API.

    Supplies the collection string and element id used by the autoupdate
    system, and wraps save()/delete() so that every change is propagated
    to it (unless skip_autoupdate is passed).
    """

    def get_root_rest_element(self) -> models.Model:
        """
        Returns the root rest instance.

        Uses self as default.  Models that are embedded in another REST
        element override this to return their container.
        """
        return self

    @classmethod
    def get_collection_string(cls) -> str:
        """
        Returns the string representing the name of the collection. Returns
        None if this is not a so called root rest instance.

        The format is "<app_label>/<object_name>", both converted to
        pseudo snake case.
        """
        # TODO Check if this is a root rest element class and return None if not.
        app_label = cls._meta.app_label  # type: ignore
        object_name = cls._meta.object_name  # type: ignore
        return "/".join(
            (
                convert_camel_case_to_pseudo_snake_case(app_label),
                convert_camel_case_to_pseudo_snake_case(object_name),
            )
        )

    def get_rest_pk(self) -> int:
        """
        Returns the primary key used in the REST API. By default this is
        the database pk.
        """
        return self.pk  # type: ignore

    def get_element_id(self) -> str:
        # Combines collection string and REST pk into the autoupdate
        # element id.
        return get_element_id(self.get_collection_string(), self.get_rest_pk())

    def save(
        self,
        skip_autoupdate: bool = False,
        disable_history: bool = False,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """
        Calls Django's save() method and afterwards hits the autoupdate system.

        If skip_autoupdate is set to True, then the autoupdate system is not
        informed about the model changed. This also means, that the model cache
        is not updated. You have to do this manually by calling
        inform_changed_data().
        """
        # We don't know how to fix this circular import
        from .autoupdate import inform_changed_data

        return_value = super().save(*args, **kwargs)  # type: ignore
        if not skip_autoupdate:
            # Report the root element: a change of an embedded element is a
            # change of its root.
            inform_changed_data(
                self.get_root_rest_element(),
                disable_history=disable_history,
            )
        return return_value

    def delete(self, skip_autoupdate: bool = False, *args: Any, **kwargs: Any) -> Any:
        """
        Calls Django's delete() method and afterwards hits the autoupdate system.

        If skip_autoupdate is set to True, then the autoupdate system is not
        informed about the model changed. This also means, that the model cache
        is not updated. You have to do this manually by calling
        inform_deleted_data().
        """
        # We don't know how to fix this circular import
        from .autoupdate import inform_changed_data, inform_deleted_data

        # Capture the pk before deleting: Django clears self.pk on delete.
        instance_pk = self.pk  # type: ignore
        return_value = super().delete(*args, **kwargs)  # type: ignore
        if not skip_autoupdate:
            if self != self.get_root_rest_element():
                # The deletion of a included element is a change of the root element.
                inform_changed_data(self.get_root_rest_element())
            else:
                inform_deleted_data([(self.get_collection_string(), instance_pk)])
        return return_value

    @classmethod
    def get_elements(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """
        Returns all elements as full_data.

        If ids is given, only those elements are loaded; progress logging
        is enabled only for full-collection loads.
        """
        do_logging = not bool(ids)

        if do_logging:
            logger.info(f"Loading {cls.get_collection_string()}")

        # Get the query to receive all data from the database.
        try:
            query = cls.objects.get_prefetched_queryset(ids=ids)  # type: ignore
        except AttributeError:
            # If the model des not have to method get_prefetched_queryset(), then use
            # the default queryset from django.
            query = cls.objects  # type: ignore
            if ids:
                query = query.filter(pk__in=ids)

        # Build a dict from the instance id to the full_data
        instances = query.all()
        full_data = []

        # For logging the progress
        last_time = time.time()
        instances_length = len(instances)  # this evaluates the query
        for i, instance in enumerate(instances):
            # Append full data from this instance
            full_data.append(instance.get_full_data())
            if do_logging:
                # log progress every 5 seconds
                current_time = time.time()
                if current_time > last_time + 5:
                    last_time = current_time
                    logger.info(f"    {i+1}/{instances_length}...")
        return full_data

    def get_full_data(self) -> Dict[str, Any]:
        """
        Returns the full_data of the instance.

        Serialization is done with the serializer registered for this model
        in model_serializer_classes.
        """
        try:
            serializer_class = model_serializer_classes[type(self)]
        except KeyError:
            # Because of the order of imports, it can happen, that the serializer
            # for a model is not imported yet. Try to guess the name of the
            # module and import it.
            module_name = type(self).__module__.rsplit(".", 1)[0] + ".serializers"
            __import__(module_name)
            serializer_class = model_serializer_classes[type(self)]
        return serializer_class(self).data
def SET_NULL_AND_AUTOUPDATE(
    collector: Any, field: Any, sub_objs: Any, using: Any
) -> None:
    """
    Like models.SET_NULL but also informs the autoupdate system about the
    instances whose reference is being set to null.
    """
    instances = []
    for sub_obj in sub_objs:
        # Mirror what SET_NULL is about to do, so the autoupdate payload
        # carries the nulled state of each referencing instance.
        setattr(sub_obj, field.name, None)
        instances.append(sub_obj)
    inform_changed_data(instances)
    # Delegate the actual database handling to Django's SET_NULL.
    models.SET_NULL(collector, field, sub_objs, using)
def CASCADE_AND_AUTOUPDATE(
    collector: Any, field: Any, sub_objs: Any, using: Any
) -> None:
    """
    Like models.CASCADE but also informs the autoupdate system about the
    root rest element of each instance that is deleted by the cascade.
    """
    elements = []
    for sub_obj in sub_objs:
        # Deleting an included element counts as a change of its root element,
        # so collect the root rest element for the autoupdate notification.
        root_rest_element = sub_obj.get_root_rest_element()
        elements.append(
            AutoupdateElement(
                collection_string=root_rest_element.get_collection_string(),
                id=root_rest_element.get_rest_pk(),
            )
        )
    inform_elements(elements)
    # Delegate the actual database handling to Django's CASCADE.
    models.CASCADE(collector, field, sub_objs, using)
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
class User:
    """
    A user object is created at the beginning of every request with details of the user.
    The global user object is `frappe.user`
    """
    def __init__(self, name=''):
        # User defaults, loaded lazily by get_defaults().
        self.defaults = None
        self.name = name or frappe.session.get('user')
        self.roles = []

        # Permission buckets, filled lazily by build_permissions().
        self.all_read = []
        self.can_create = []
        self.can_read = []
        self.can_write = []
        self.can_cancel = []
        self.can_delete = []
        self.can_search = []
        self.can_get_report = []
        self.can_import = []
        self.can_export = []
        self.can_print = []
        self.can_email = []
        self.can_set_user_permissions = []
        self.allow_modules = []
        self.in_create = []

    def get_roles(self):
        """get list of roles (cached on the instance after first call)"""
        if not self.roles:
            self.roles = get_roles(self.name)
        return self.roles

    def build_doctype_map(self):
        """build map of special doctype properties"""
        self.doctype_map = {}
        for r in frappe.db.sql("""select name, in_create, issingle, istable,
            read_only, module from tabDocType""", as_dict=1):
            self.doctype_map[r['name']] = r

    def build_perm_map(self):
        """build map of permissions at level 0"""
        self.perm_map = {}
        roles = self.get_roles()
        # Merge all DocPerm rows for the user's roles: for each doctype a
        # right is granted if any of the user's roles grants it (first truthy
        # value wins per right).
        for r in frappe.db.sql("""select * from tabDocPerm where docstatus=0
            and ifnull(permlevel,0)=0
            and role in ({roles})""".format(roles=", ".join(["%s"]*len(roles))), tuple(roles), as_dict=1):
            dt = r['parent']

            if not dt in self.perm_map:
                self.perm_map[dt] = {}

            for k in frappe.permissions.rights:
                if not self.perm_map[dt].get(k):
                    self.perm_map[dt][k] = r.get(k)

    def build_permissions(self):
        """build lists of what the user can read / write / create
        quirks:
            read_only => Not in Search
            in_create => Not in create
        """
        self.build_doctype_map()
        self.build_perm_map()

        for dt in self.doctype_map:
            dtp = self.doctype_map[dt]
            p = self.perm_map.get(dt, {})

            # Child tables (istable) are excluded from the create/write/read
            # buckets; they are accessed through their parent documents.
            if not dtp.get('istable'):
                if p.get('create') and not dtp.get('issingle'):
                    if dtp.get('in_create'):
                        self.in_create.append(dt)
                    else:
                        self.can_create.append(dt)
                elif p.get('write'):
                    self.can_write.append(dt)
                elif p.get('read'):
                    if dtp.get('read_only'):
                        # read-only doctypes are readable but kept out of search
                        self.all_read.append(dt)
                    else:
                        self.can_read.append(dt)

            if p.get('cancel'):
                self.can_cancel.append(dt)

            if p.get('delete'):
                self.can_delete.append(dt)

            if (p.get('read') or p.get('write') or p.get('create')):
                if p.get('report'):
                    self.can_get_report.append(dt)
                for key in ("import", "export", "print", "email", "set_user_permissions"):
                    if p.get(key):
                        getattr(self, "can_" + key).append(dt)
                if not dtp.get('istable'):
                    if not dtp.get('issingle') and not dtp.get('read_only'):
                        self.can_search.append(dt)
                if not dtp.get('module') in self.allow_modules:
                    self.allow_modules.append(dtp.get('module'))

        # Creatable implies writable, writable implies readable.
        self.can_write += self.can_create
        self.can_write += self.in_create
        self.can_read += self.can_write
        self.all_read += self.can_read

    def get_defaults(self):
        import frappe.defaults
        self.defaults = frappe.defaults.get_defaults(self.name)
        return self.defaults

    # update recent documents
    def update_recent(self, dt, dn):
        # Maintain a most-recently-used list of [doctype, name] pairs in the
        # cache, newest first, capped at 20 entries.
        rdl = frappe.cache().get_value("recent:" + self.name) or []
        new_rd = [dt, dn]

        # clear if exists
        for i in range(len(rdl)):
            rd = rdl[i]
            if rd==new_rd:
                del rdl[i]
                break

        if len(rdl) > 19:
            rdl = rdl[:19]

        rdl = [new_rd] + rdl
        r = frappe.cache().set_value("recent:" + self.name, rdl)

    def _get(self, key):
        # Generic accessor that makes sure permissions are built first.
        if not self.can_read:
            self.build_permissions()
        return getattr(self, key)

    def get_can_read(self):
        """return list of doctypes that the user can read"""
        if not self.can_read:
            self.build_permissions()
        return self.can_read

    def load_user(self):
        # Assemble the full user context dict (profile, roles, defaults and
        # de-duplicated permission lists) as sent to the client.
        d = frappe.db.sql("""select email, first_name, last_name, time_zone,
            email_signature, background_image, background_style, user_type, language
            from tabUser where name = %s""", (self.name,), as_dict=1)[0]

        if not self.can_read:
            self.build_permissions()

        d.name = self.name
        d.recent = json.dumps(frappe.cache().get_value("recent:" + self.name) or [])
        d['roles'] = self.get_roles()
        d['defaults'] = self.get_defaults()

        # De-duplicate each permission list before returning.
        for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
            "can_get_report", "allow_modules", "all_read", "can_search",
            "in_create", "can_export", "can_import", "can_print", "can_email",
            "can_set_user_permissions"):
            d[key] = list(set(getattr(self, key)))

        return d
def get_user_fullname(user):
    """Return the full name ("First Last") for *user*, or '' if not found.

    The legacy ``cond and a or b`` idiom is replaced with an explicit
    conditional: an empty result set or an empty/NULL concatenated name
    both map to '' without relying on truthiness chaining.
    """
    fullname = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
    if fullname and fullname[0][0]:
        return fullname[0][0]
    return ''
def get_system_managers(only_name=False):
    """returns all system manager's user details

    only_name: when True return just the user names; otherwise return
    RFC 2822 formatted "Full Name <name>" strings.
    """
    import email.utils
    from frappe.core.doctype.user.user import STANDARD_USERS
    # Enabled, non-standard users that hold the "System Manager" role.
    system_managers = frappe.db.sql("""select distinct name,
        concat_ws(" ", if(first_name="", null, first_name), if(last_name="", null, last_name))
        as fullname from tabUser p
        where docstatus < 2 and enabled = 1
        and name not in ({})
        and exists (select * from tabUserRole ur
        where ur.parent = p.name and ur.role="System Manager")""".format(", ".join(["%s"]*len(STANDARD_USERS))),
        STANDARD_USERS, as_dict=True)

    if only_name:
        return [p.name for p in system_managers]
    else:
        return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
    """Assign *role* to *user* (persisted through the User document).

    The previous version bound the result to an unused local variable;
    the call is made for its side effect only.
    """
    frappe.get_doc("User", user).add_roles(role)
def add_system_manager(email, first_name=None, last_name=None):
    """Create an enabled System User for *email* and grant it every
    non-standard role (everything except Administrator, Guest and All)."""
    # create the user record
    user = frappe.new_doc("User")
    user.update({
        "name": email,
        "email": email,
        "enabled": 1,
        "first_name": first_name or email,
        "last_name": last_name,
        "user_type": "System User"
    })
    user.insert()

    # grant every role except the built-in ones
    grantable_roles = frappe.db.sql_list("""select name from `tabRole`
        where name not in ("Administrator", "Guest", "All")""")
    user.add_roles(*grantable_roles)
def get_roles(username=None, with_standard=True):
    """get list of roles for *username* (or the session user).

    with_standard: when False, the built-in All/Guest/Administrator roles
    are stripped from the result.
    """
    if not username:
        username = frappe.session.user

    # Guest never has anything beyond the Guest role.
    if username=='Guest':
        return ['Guest']

    roles = frappe.cache().get_value("roles:" + username)
    if not roles:
        roles = [r[0] for r in frappe.db.sql("""select role from tabUserRole
            where parent=%s and role!='All'""", (username,))] + ['All']
        frappe.cache().set_value("roles:" + username, roles)

    # filter standard roles if required; a list comprehension (instead of
    # filter()) keeps the return type a list on both Python 2 and 3 —
    # filter() returns a lazy iterator on Python 3, which would break
    # callers expecting a list.
    if not with_standard:
        roles = [r for r in roles if r not in ['All', 'Guest', 'Administrator']]

    return roles
| |
#!/usr/local/bin/python
from __future__ import print_function
"""A Python version of the Perforce "p4" client.
This uses the Python type P4API.P4Adapter, which is a wrapper for the
Perforce ClientApi object.
$Id: //depot/r13.1/p4-python/P4.py#2 $
#*******************************************************************************
# Copyright (c) 2007-2010, Perforce Software, Inc. All rights reserved.
# Portions Copyright (c) 1999, Mike Meyer. All rights reserved.
# Portions Copyright (c) 2004-2007, Robert Cowham. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PERFORCE SOFTWARE, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*******************************************************************************
Copyright 2007-2010 Perforce Software, Inc. All rights reserved
License:
See accompanying LICENSE.txt including for redistribution permission.
"""
import sys, os, string, datetime
import types, re
from contextlib import contextmanager
# P4Exception - some sort of error occurred
class P4Exception(Exception):
    """Exception thrown by P4 in case of Perforce errors or warnings."""

    def __init__(self, value):
        Exception.__init__(self)
        # A (message, errors, warnings) triple is unpacked into separate
        # attributes; any other value is stored verbatim.
        if isinstance(value, (list, tuple)) and len(value) > 2:
            self.value, self.errors, self.warnings = value[0], value[1], value[2]
        else:
            self.value = value

    def __str__(self):
        return str(self.value)
class Spec(dict):
    """Subclass of dict, representing the fields of a spec definition.

    Attributes can be accessed either with the conventional dict format,
    spec['attribute'] or with shorthand spec._attribute.

    Instances of this class will prevent any unknown keys.
    """
    def __init__(self, fieldmap=None):
        # Store the fieldmap directly in __dict__ (under the name-mangled
        # key) to avoid triggering __setattr__, which would treat it as a
        # spec field.
        self.__dict__['_Spec__fields'] = fieldmap

    def permitted_fields(self):
        # Mapping of lowercase shorthand names -> canonical field names
        # (or None when any key is allowed).
        return self.__fields

    def __setitem__(self, key, value):
        if not isinstance(value, str) and not isinstance(value, list):
            raise P4Exception("Illegal value of type %s, must be string or list" % value.__class__)
        if key in self or self.__fields == None:
            dict.__setitem__(self, key, value)
        elif str(key).lower() in self.__fields:
            # Map a shorthand/lowercase key to its canonical field name.
            dict.__setitem__(self, self.__fields[key.lower()], value)
        else:
            raise P4Exception("Illegal field '%s'" % str(key))

    def __getattr__(self, attr):
        key = str(attr).lower()
        # Only underscore-prefixed names (spec._field) are treated as field
        # access; anything else is a genuine missing attribute.
        if key[0] != '_':
            raise AttributeError(attr)
        key = key[1:]
        if key in self:
            return self[key]
        elif key in self.__fields:
            return self[self.__fields[key]]
        # NOTE(review): falls through returning None for unknown fields —
        # confirm this is intentional rather than raising AttributeError.

    def __setattr__(self, attr, value):
        if attr == 'comment':
            # 'comment' is instance state (preserved form comments), not a
            # spec field.
            self.__dict__[attr] = value
        else:
            key = str(attr).lower()
            if key[0] != '_':
                raise AttributeError(attr)
            key = key[1:]
            self[key] = value
#
# P4Integration objects hold details about the integrations that have
# been performed on a particular revision. Used primarily with the
# P4Revision class
#
class Integration:
    """Record of a single integration (how/file/srev/erev) on a revision."""

    def __init__(self, how, file, srev, erev):
        self.how, self.file = how, file
        self.srev, self.erev = srev, erev

    def __repr__(self):
        return "Integration (how = %s file = %s srev = %s erev = %s)" % (
            self.how, self.file, self.srev, self.erev)
#
# Each P4Revision object holds details about a particular revision
# of a file. It may also contain the history of any integrations
# to/from the file
#
class Revision:
    """Details about one revision of a depot file, including the history of
    any integrations to/from the file."""

    def __init__(self, depotFile):
        self.depotFile = depotFile
        self.integrations = []
        # The remaining attributes are filled in later from filelog output.
        self.rev = None
        self.change = None
        self.action = None
        self.type = None
        self.time = None
        self.user = None
        self.client = None
        self.desc = None
        self.digest = None
        self.fileSize = None

    def integration(self, how, file, srev, erev):
        """Create, store and return a new Integration record."""
        record = Integration(how, file, srev, erev)
        self.integrations.append(record)
        return record

    def each_integration(self):
        """Generator over the stored integration records."""
        for record in self.integrations:
            yield record

    def __repr__(self):
        return "Revision (depotFile = %s rev = %s change = %s action = %s type = %s time = %s user = %s client = %s)" % (
            self.depotFile, self.rev, self.change, self.action,
            self.type, self.time, self.user, self.client)
#
# Each DepotFile entry contains details about one depot file.
#
class DepotFile:
    """Container for one depot file and all of its revisions."""

    def __init__(self, name):
        self.depotFile = name
        self.revisions = []

    def new_revision(self):
        """Create, store and return a fresh Revision for this file."""
        revision = Revision(self.depotFile)
        self.revisions.append(revision)
        return revision

    def each_revision(self):
        """Generator over the stored revisions."""
        for revision in self.revisions:
            yield revision

    def __repr__(self):
        return "DepotFile (depotFile = %s, %s revisions)" % (
            self.depotFile, len(self.revisions))
#
# Resolver class used in p4.run_resolve()
#
# The default simply checks that p4.input is set to sensible value
# This class is meant to be subclassed for a custom resolver and
# Resolver.resolve() overriden
#
class Resolver:
    """Default resolver used by p4.run_resolve().

    Subclass and override resolve() (and/or actionResolve()) to implement a
    custom merge policy.
    """

    def __init__(self):
        pass

    def resolve(self, mergeInfo):
        # A hint of "e" means the merge produced conflicts: skip ("s")
        # instead of accepting a conflicted result.
        if mergeInfo.merge_hint != "e":
            return mergeInfo.merge_hint
        print("Standard resolver encountered merge conflict, skipping resolve")
        return "s"

    def actionResolve(self, mergeInfo):
        return mergeInfo.merge_hint
#
# OutputHandler base class
#
# Extend this class if you want to use the handler interface
#
class OutputHandler:
    """Base class for the output handler interface.

    Extend this class to intercept command output; every callback returns
    one of the codes below (the defaults all return REPORT).
    """
    # Return codes for the callbacks (per the P4 handler protocol):
    REPORT = 0   # let P4 process the output as usual
    HANDLED = 1  # output was consumed by the handler
    CANCEL = 2   # output consumed; cancel the running command

    def __init__(self):
        pass

    def outputText(self, s):
        return OutputHandler.REPORT

    def outputBinary(self, b):
        return OutputHandler.REPORT

    def outputStat(self, h):
        return OutputHandler.REPORT

    def outputInfo(self, i):
        return OutputHandler.REPORT

    def outputMessage(self, e):
        return OutputHandler.REPORT
class ReportHandler( OutputHandler ):
    """Debugging handler: prints each callback's payload with a type prefix
    and marks it as handled (kept out of the command result)."""

    def __init__(self):
        OutputHandler.__init__(self)

    def outputText(self, s):
        print( "text: ", s)
        return OutputHandler.HANDLED

    def outputBinary(self, b):
        print( "binary: ", b)
        return OutputHandler.HANDLED

    def outputStat(self, h):
        print( "stat:", h)
        return OutputHandler.HANDLED

    def outputInfo(self, i):
        print( "info: ", i)
        return OutputHandler.HANDLED

    def outputMessage(self, e):
        print( "error:", e)
        return OutputHandler.HANDLED
class Progress:
    """Base class for progress-indicator callbacks.

    Subclass and override the callbacks to render progress; the defaults
    just record the values on the instance.
    """
    # operation types passed to init()
    TYPE_SENDFILE = 1
    TYPE_RECEIVEFILE = 2

    # units passed to setDescription()
    UNIT_PERCENT = 1
    UNIT_FILES = 2
    UNIT_KBYTES = 3
    UNIT_MBYTES = 4

    def __init__(self):
        pass

    def init(self, type):
        self.type = type

    def setDescription( self, description, units ):
        self.description = description
        self.units = units

    def setTotal( self, total ):
        self.total = total

    def update( self, position ):
        self.position = position

    def done( self, fail ):
        pass
class TextProgress(Progress):
    """Progress implementation that reports every callback on stdout."""

    TYPES = [ "Unknown", "Submit", "Sync" ]
    UNITS = [ "Unknown", "Percent", "Files", "KBytes", "MBytes" ]

    def __init__(self):
        Progress.__init__(self)

    def init(self, type):
        Progress.init(self, type)
        print( "Progress.init with '%s'" % self.TYPES[type] )

    def setDescription(self, description, units):
        Progress.setDescription(self, description, units)
        print( "Progress.setDescription with '%s' and units '%s'" % (description, self.UNITS[units]) )

    def setTotal( self, total ):
        Progress.setTotal(self, total)
        print( "Progress.setTotal with '%s' " % total )

    def update( self, position ):
        Progress.update(self, position )
        print( "Progress.update with '%s'" % position )

    def done( self, fail ):
        Progress.done(self, fail)
        # BUG FIX: the format string was "Progress.done with '%s" + '' —
        # the closing quote was missing, producing unbalanced output.
        print( "Progress.done with '%s'" % fail )
def processFilelog(h):
    """Convert one tagged "p4 filelog" dict into a DepotFile object tree.

    h: tagged-output dict with parallel per-revision arrays ("rev",
    "change", "action", "type", "time", "user", "client", "desc" and the
    optional "digest"/"fileSize"), plus optional nested integration arrays
    ("how", "file", "srev", "erev").

    Returns the populated DepotFile; raises Exception when h carries no
    "depotFile" key.
    """
    if "depotFile" not in h:
        # BUG FIX: str(h) — concatenating the dict itself raised TypeError
        # instead of the intended Exception.
        raise Exception("Not a filelog object: " + str(h))

    df = DepotFile(h["depotFile"])
    for n, rev in enumerate(h["rev"]):
        # Create a new revision of this file ready for populating
        r = df.new_revision()
        # Populate the base attributes of each revision
        r.rev = int(rev)
        r.change = int(h["change"][n])
        r.action = h["action"][n]
        r.type = h["type"][n]
        r.time = datetime.datetime.utcfromtimestamp(int(h["time"][n]))
        r.user = h["user"][n]
        r.client = h["client"][n]
        r.desc = h["desc"][n]
        if "digest" in h and n < len(h["digest"]):
            r.digest = h["digest"][n]
        if "fileSize" in h and n < len(h["fileSize"]):
            r.fileSize = h["fileSize"][n]

        # Now attach any integration records for this revision.
        if "how" not in h or n >= len(h["how"]) or h["how"][n] is None:
            continue
        for m, how in enumerate(h["how"][n]):
            file = h["file"][n][m]
            # Revision endpoints arrive as "#<rev>"; "none" maps to 0.
            srev = h["srev"][n][m].lstrip('#')
            erev = h["erev"][n][m].lstrip('#')
            srev = 0 if srev == "none" else int(srev)
            erev = 0 if erev == "none" else int(erev)
            r.integration(how, file, srev, erev)
    return df
class FilelogOutputHandler(OutputHandler):
    """Output handler that converts tagged filelog dicts into DepotFile
    objects before delegating to outputFilelog()."""

    def __init__(self):
        OutputHandler.__init__(self)

    def outputStat(self, h):
        # Tagged filelog records arrive through outputStat; convert each
        # one and hand the DepotFile to outputFilelog().
        df = processFilelog(h)
        return self.outputFilelog(df)

    def outputFilelog(self, f):
        # Override in subclasses; the default asks P4 to process it.
        return OutputHandler.REPORT
# This is where the C/C++ shared library is loaded
# It has to be in this place because the library needs to access
# the classes defined above. Accessing classes defined below this
# entry would cause an endless loop
import P4API
class P4(P4API.P4Adapter):
    """Use this class to communicate with a Perforce server

    Instances of P4 will use the environment settings (including P4CONFIG)
    to determine the connection parameters such as P4CLIENT and P4PORT.

    These attributes can also be set separately before connecting.

    To run any Perforce commands, users of this class first need to run
    the connect() method.

    It is good practice to disconnect() after the program is complete.
    """
    # Constants useful for exception_level
    # RAISE_ALL: Errors and Warnings are raised as exceptions (default)
    # RAISE_ERROR: Only Errors are raised as exceptions
    # RAISE_NONE: No exceptions are raised, instead False is returned

    RAISE_ALL = 2
    RAISE_ERROR = 1
    RAISE_ERRORS = 1
    RAISE_NONE = 0

    # Named values for generic error codes returned by
    # P4API.Message.generic

    EV_NONE = 0       # misc

    # The fault of the user

    EV_USAGE = 0x01    # request not consistent with dox
    EV_UNKNOWN = 0x02  # using unknown entity
    EV_CONTEXT = 0x03  # using entity in wrong context
    EV_ILLEGAL = 0x04  # trying to do something you can't
    EV_NOTYET = 0x05   # something must be corrected first
    EV_PROTECT = 0x06  # protections prevented operation

    # No fault at all

    EV_EMPTY = 0x11    # action returned empty results

    # not the fault of the user

    EV_FAULT = 0x21    # inexplicable program fault
    EV_CLIENT = 0x22   # client side program errors
    EV_ADMIN = 0x23    # server administrative action required
    EV_CONFIG = 0x24   # client configuration inadequate
    EV_UPGRADE = 0x25  # client or server too old to interact
    EV_COMM = 0x26     # communications error
    EV_TOOBIG = 0x27   # not even Perforce can handle this much

    # Named values for error severities returned by
    # P4API.Message.severity

    E_EMPTY = 0   # nothing yet
    E_INFO = 1    # something good happened
    E_WARN = 2    # something not good happened
    E_FAILED = 3  # user did something wrong
    E_FATAL = 4   # system broken -- nothing can continue

    # mappings for __iterate
    # list-of-specs => ( name-of-one-spec, field-name-in-list-of-specs )
    specfields = {
        'clients' : ('client', 'client'),
        'labels' : ('label', 'label'),
        'branches' : ('branch', 'branch'),
        'changes' : ('change', 'change'),
        'streams' : ('stream', 'Stream'),
        'jobs' : ('job', 'Job'),
        'users' : ('user', 'User'),
        'groups' : ('group', 'group'),
        'depots' : ('depot', 'name'),
        'servers' : ('server', 'Name')
    }

    def __init__(self, *args, **kwlist):
        P4API.P4Adapter.__init__(self, *args, **kwlist)

    def __del__(self):
        # Trace object destruction only at high debug levels.
        if self.debug > 3:
            print("P4.__del__()", file=sys.stderr)

    def __getattr__(self, name):
        # Dynamic command dispatch: attribute names of the form
        # run_*/delete_*/fetch_*/save_*/parse_*/format_*/iterate_* are
        # turned into the corresponding Perforce command invocation.
        if name.startswith("run_"):
            cmd = name[len("run_"):]
            return lambda *args, **kargs: self.run(cmd, *args, **kargs)
        elif name.startswith("delete_"):
            cmd = name[len("delete_"):]
            return lambda *args, **kargs: self.run(cmd, "-d", *args, **kargs)
        elif name.startswith("fetch_"):
            cmd = name[len("fetch_"):]
            return lambda *args, **kargs: self.run(cmd, "-o", *args, **kargs)[0]
        elif name.startswith("save_"):
            cmd = name[len("save_"):]
            return lambda *args, **kargs: self.__save(cmd, *args, **kargs)
        elif name.startswith("parse_"):
            cmd = name[len("parse_"):]
            return lambda *args, **kargs: self.__parse_spec(cmd, *args, **kargs)
        elif name.startswith("format_"):
            cmd = name[len("format_"):]
            return lambda *args, **kargs: self.__format_spec(cmd, *args, **kargs)
        elif name.startswith("iterate_"):
            cmd = name[len("iterate_"):]
            return lambda *args, **kargs: self.__iterate(cmd, *args, **kargs)
        else:
            raise AttributeError(name)

    def __save(self, cmd, *args, **kargs):
        # Feed the spec (first positional argument) to the command via
        # standard input ("-i").
        self.input = args[0]
        return self.run(cmd, "-i", args[1:], **kargs)

    def __parse_spec(self, cmd, *args, **kargs):
        # Preserve the leading '#' comment lines of the form, which the
        # underlying parse_spec drops, by stashing them on the Spec.
        form = args[0]
        comments = "\n".join( [ x for x in form.split('\n') if x.startswith('#') ] ) + "\n"
        spec = self.parse_spec(cmd, *args, **kargs)
        spec.__dict__['comment'] = comments
        return spec

    def __format_spec(self, cmd, *args, **kargs):
        # Re-attach any preserved comments in front of the formatted form.
        spec = args[0]
        form = self.format_spec(cmd, *args, **kargs)
        if 'comment' in spec.__dict__:
            form = spec.__dict__['comment'] + "\n" + form
        return form

    def __iterate(self, cmd, *args, **kargs):
        # List the specs, then fetch each full spec lazily by name.
        specs = self.run(cmd, *args, **kargs)
        if cmd in self.specfields:
            spec = self.specfields[cmd][0]
            field = self.specfields[cmd][1]
            # Return a generator (Python iterator object)
            # On iteration, this will retrieve one spec at a time
            return ( self.run(spec, '-o', x[field])[0] for x in specs )
        else:
            raise Exception('Unknown spec list command: %s', cmd)

    def __repr__(self):
        state = "disconnected"
        if self.connected():
            state = "connected"
        return "P4 [%s@%s %s] %s" % \
            (self.user, self.client, self.port, state)

    def identify(cls):
        # Identification string of the underlying P4API build.
        return P4API.identify()
    identify = classmethod(identify)

    def run(self, *args, **kargs):
        "Generic run method"
        # Keyword arguments temporarily override the matching instance
        # attributes for the duration of this command; the previous values
        # are restored afterwards.
        context = {}
        for (k,v) in list(kargs.items()):
            context[k] = getattr(self, k)
            setattr(self, k, v)
        result = P4API.P4Adapter.run(self, *self.__flatten(args))
        for (k,v) in list(context.items()):
            setattr( self, k, v)
        return result

    def run_submit(self, *args, **kargs):
        "Simplified submit - if any arguments is a dict, assume it to be the changeform"
        nargs = list(args)
        form = None
        for n, arg in enumerate(nargs):
            if isinstance( arg, dict):
                # Feed the changeform through stdin and submit with "-i".
                self.input = arg
                nargs.pop(n)
                nargs.append("-i")
                break
        return self.run("submit", *nargs, **kargs)

    def run_shelve(self, *args):
        "Simplified shelve - if any arguments is a dict, assume it to be the changeform"
        nargs = list(args)
        form = None
        for n, arg in enumerate(nargs):
            if isinstance( arg, dict):
                # Feed the changeform through stdin and shelve with "-i".
                self.input = arg
                nargs.pop(n)
                nargs.append("-i")
                break
        return self.run("shelve", *nargs)

    def delete_shelve(self, *args):
        "Simplified deletion of shelves - if no -c is passed in, add it to the args"
        nargs = list(args)
        if '-c' not in nargs:
            nargs = ['-c'] + nargs # prepend -c if it is not there
        nargs = ['-d'] + nargs
        return self.run("shelve", *nargs)

    def run_login(self, *args):
        "Simple interface to make login easier"
        self.input = self.password
        return self.run("login", *args)

    def run_password( self, oldpass, newpass ):
        "Simple interface to allow setting of the password"
        # "p4 password" prompts for old (if set), new, and confirmation.
        if( oldpass and len(oldpass) > 0 ):
            self.input = [ oldpass, newpass, newpass ]
        else:
            self.input = [ newpass, newpass ]
        return self.run( "password" )

    #
    # run_filelog: convert "p4 filelog" responses into objects with useful
    # methods
    #
    # Requires tagged output to be of any real use. If tagged output is not
    # enabled then you just get the raw data back
    #
    def run_filelog( self, *args, **kargs ):
        raw = self.run( 'filelog', args, **kargs )
        if (not self.tagged or not raw):
            # untagged mode returns simple strings, which breaks the code below
            # raw could be None if a handler is used
            return raw
        result = []
        for h in raw:
            df = None
            if isinstance( h, dict ):
                df = processFilelog( h )
            else:
                df = h
            result.append( df )
        return result

    def run_print(self, *args, **kargs):
        # Returns alternating [metadata-dict, file-content, ...] pairs,
        # accumulating content lines after each metadata dict.
        raw = self.run('print', args, **kargs)
        result = []
        if raw:
            for line in raw:
                if isinstance(line, dict):
                    result.append(line)
                    result.append("")
                else:
                    # to support encoding for Python 3, we have to do a little dance
                    # we cannot add bytes to the str "", but we expect that all lines
                    # are either str or bytes. So if we encounter bytes, we replace the content
                    try:
                        result[-1] += line
                    except TypeError:
                        if type(line) == bytes and type(result[-1]) == str and result[-1] == "":
                            result[-1] = line
                        else:
                            raise
            return result
        else:
            return []

    def run_resolve(self, *args, **kargs):
        # Resolve using (in increasing priority): a default Resolver, the
        # instance's self.resolver, or an explicit resolver= keyword.
        if self.resolver:
            myResolver = self.resolver
        else:
            myResolver = Resolver()
        if "resolver" in kargs:
            myResolver = kargs["resolver"]
        savedResolver = self.resolver
        self.resolver = myResolver
        result = self.run("resolve", args)
        self.resolver = savedResolver
        return result

    def run_tickets(self, *args):
        # Parse the on-disk ticket file into Host/User/Ticket dicts.
        fname = self.ticket_file
        with open(fname) as f:
            tickets_raw = f.readlines()
        pattern = re.compile('([^=]*)=(.*):([^:]*)\n')
        tickets = [ pattern.match(x).groups() for x in tickets_raw ]
        keys = [ "Host", "User", "Ticket" ]
        result = [ dict(zip(keys, x)) for x in tickets ]
        return result

    def __flatten(self, args):
        # Recursively flatten nested lists/tuples of arguments into one
        # tuple, as expected by the adapter's run().
        result = []
        if isinstance(args, tuple) or isinstance(args, list):
            for i in args:
                result.extend(self.__flatten(i))
        else:
            result.append(args)
        return tuple(result)

    def __enter__( self ):
        return self

    def __exit__( self, exc_type, exc_val, exc_tb ):
        # Context-manager exit: always disconnect; never suppress exceptions.
        if self.connected():
            self.disconnect()
        return False

    def connect( self ):
        # Returns self so the call can be chained / used with "with".
        P4API.P4Adapter.connect( self )
        return self

    @contextmanager
    def while_tagged( self, t ):
        # Temporarily switch tagged output on/off for the enclosed block.
        old = self.tagged
        self.tagged = t
        yield
        self.tagged = old

    @contextmanager
    def at_exception_level( self, e ):
        # Temporarily change the exception level for the enclosed block.
        old = self.exception_level
        self.exception_level = e
        yield
        self.exception_level = old

    @contextmanager
    def using_handler( self, c ):
        # Temporarily install an output handler for the enclosed block.
        old = self.handler
        self.handler = c
        yield
        self.handler = old

    @contextmanager
    def saved_context( self , **kargs):
        """Saves the context of this p4 object and restores it again at the end of the block"""
        # NOTE(review): __members__ is presumably supplied by the P4API
        # adapter and lists the context attributes — confirm against P4API.
        saved_context = {}
        for attr in self.__members__:
            saved_context[attr] = getattr(self, attr)
        for (k,v) in list(kargs.items()):
            setattr( self, k, v)
        yield
        # now restore the context again. Ignore AttributeError exception
        # Exception is expected because some attributes only have getters, no setters
        for (k,v) in list(saved_context.items()):
            try:
                setattr( self, k, v )
            except AttributeError:
                pass # expected for server_level and p4config_file
class Map(P4API.P4Map):
    """Convenience wrapper around P4API.P4Map with a populating constructor
    and helper predicates."""

    def __init__(self, *args):
        P4API.P4Map.__init__(self, *args)
        # Any constructor arguments are inserted as initial map entries.
        if len(args) > 0:
            self.insert( *args )

    # direction flags for translate()
    LEFT2RIGHT = True
    RIGHT2LEFT = False

    def __str__( self ):
        result = ""
        for a in self.as_array():
            result += a + "\n"
        return result

    def is_empty(self):
        """Returns True if this map has no entries yet, otherwise False"""
        return self.count() == 0

    def includes(self, *args):
        # A path is included if it translates successfully.
        return self.translate(*args) != None

    def reverse(self):
        # Return a new Map with the left and right sides swapped.
        return Map(P4API.P4Map.reverse(self).as_array())

    def insert(self, *args):
        """Insert an argument to the map. The argument can be:

        A String,
            Either of the form "[+-]//lhs/... //rhs/..." or "[+-]//lhs/..."
            for label style maps.
        A List:
            This is a list of strings of one of the single string formats
            described above.
        A pair of Strings:
            P4.Map.insert(lhs, rhs)
        """
        if len(args) == 1 :
            arg = args[0]
            if isinstance( arg, str ):
                P4API.P4Map.insert( self, arg )
            elif isinstance( arg, list ):
                for s in arg:
                    P4API.P4Map.insert( self, s )
        else: # expecting 2 args in this case: left, right
            left = args[0].strip()
            right = args[1].strip()
            P4API.P4Map.insert(self, left, right )
if __name__ == "__main__":
    # Minimal CLI: run the given p4 command and pretty-print the result.
    p4 = P4()
    p4.connect()
    try:
        ret = p4.run(sys.argv[1:])
        for line in ret:
            if isinstance(line, dict):
                print("-----")
                for k in list(line.keys()):
                    print(k, "=", line[k])
            else:
                print(line)
    except P4Exception:
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt and unrelated programming errors. Only Perforce
        # failures are reported via the accumulated p4.errors; everything
        # else now propagates with a proper traceback.
        for e in p4.errors:
            print(e)
| |
#!/usr/bin/env python
# ==============================================================================
#
# FILE: runAWSCommand.py
#
# USAGE: runAWSCommand.py
#
# DESCRIPTION:
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Gregg Jensen (), gjensen@devops.center
# Bob Lozano (), bob@devops.center
# ORGANIZATION: devops.center
# CREATED: 11/21/2016 15:13:37
# REVISION: ---
#
# Copyright 2014-2017 devops.center llc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import sys
import argparse
import json
from pprint import pprint
import subprocess
import re
from datetime import datetime
# ==============================================================================
"""
This script will access AWS for a given profile and check for any reserved
instances that will expire within a month from the script is run.
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
class AWSCommand:
    """Class that takes a list of ActionableWatchers and executes separate
    watchdog processes for each item in the list"""
    # NOTE(review): the docstring above looks copied from another class;
    # this class actually wraps "aws --profile <name> ec2 ..." CLI calls
    # and dispatches named sub-commands to same-named methods.

    def __init__(self, profileNameIn):
        """Creates the instance with the given profile name and command"""
        self.profileName = profileNameIn
        self.cmdToRun = ""

    def run(self, cmdToRunIn):
        # Dispatch: look up the method named after the command; fall back to
        # a lambda returning "nothing" when no such method exists.
        self.cmdToRun = cmdToRunIn
        method = getattr(self, self.cmdToRun, lambda: "nothing")
        return method()

    def getListOfInstances(self):
        # Returns the parsed JSON of "ec2 describe-instances".
        # SECURITY NOTE(review): shell=True with a string command built from
        # profileName — profileName comes from the CLI, so it is trusted
        # here, but do not feed untrusted input into it.
        awsCmd = ("aws --profile " + self.profileName +
                  " ec2 describe-instances")
        data = subprocess.check_output(awsCmd, shell=True)
        awsOutput = json.loads(data)
        return awsOutput

    def getReservedTypeAndDate(self):
        # Returns a list of (endDate, instanceType, fullEndDateTime) tuples
        # scraped from "ec2 describe-reserved-instances" text output.
        awsCmd = ("aws --profile " + self.profileName +
                  " ec2 describe-reserved-instances --query "
                  "'ReservedInstances[*].{InstanceType:InstanceType,End:End}'")
        awsOutput = subprocess.check_output(awsCmd, shell=True)
        # split the output on the comma giving a list of start and instance
        # type. Because we know that we are just going through the string
        # as it comes we know that the start and instanceType will end up
        # next to each other.
        awsOutputList = awsOutput.split('\n')
        # for item in awsOutputList:
        #     print "=>{}<=".format(item)
        reservedList = []
        i = 0
        while i < len(awsOutputList):
            if "End" in awsOutputList[i]:
                # the end line initially looks something like:
                #     "End": "2016-03-04T22:33:31.659Z"
                # quotes and all. So the regular expression below will
                # find all the strings in side each of the double quotes.
                # that returns a list. We want the second one so we pull
                # the index of 1. Then from that result we only want the
                # first 10 characters. and that gives the date we can use
                fullEndDateTime = re.findall('"(.*?)"', awsOutputList[i])[1]
                endDate = re.findall('"(.*?)"', awsOutputList[i])[1][:10]
                # print endDate

                # now we want to increase the index count to get the associated
                # instanceType for this date
                i += 1
                instanceType = re.findall('"(.*?)"', awsOutputList[i])[1]
                # print instanceType

                # and finally put the tuple in the list
                reservedList.append((endDate, instanceType, fullEndDateTime))
            i += 1
        return(reservedList)

    def checkReservedInstanceRenewal(self):
        # Cross-references running instances with reserved-instance end
        # dates and prints the ones expiring within +/- 30 days.
        reservedTypeAndDateList = self.getReservedTypeAndDate()
        # for item in reservedTypeAndDateList:
        #     print "[{}] date: {}".format(item[1], item[0])
        instanceList = self.getListOfInstances()
        returnList = []
        for key in instanceList["Reservations"]:
            for inst in key['Instances']:
                if inst["State"]["Name"] == "running":
                    # get the name
                    for tags in inst["Tags"]:
                        if tags["Key"] == "Name":
                            instanceName = tags["Value"]
                            # if there is a name then get the instanceType
                            instanceType = inst["InstanceType"]
                            # push them onto the list
                            returnList.append((instanceName, instanceType))

        # go through the reserved list and check for any dates that are going
        # to expire one month from now. And then print out the names that
        # are in that class that will/could be associated
        now = datetime.now()
        date_format = '%Y-%m-%d'
        for reservedItem in reservedTypeAndDateList:
            reservedDate = datetime.strptime(reservedItem[0], date_format)
            diff = reservedDate - now
            # print "diff: {} reservedDate: {} and now: {} ".format(
            #     diff.days, reservedDate, now)

            # now if any of those days are between 330-365 go through the
            # instance list and get the name
            if diff.days < 30 and diff.days > -30:
                if diff.days <= -2:
                    print "These appear to be past due:"
                elif diff.days >= -1 and diff.days <= 0:
                    print "These appear to be due today:."
                else:
                    print "These appear to be coming due:."
                # go through the instance list and get the name
                for instance in returnList:
                    if reservedItem[1] == instance[1]:
                        print "[{}] {} reserved instance ends: {}".format(
                            reservedItem[1], instance[0], reservedItem[2])

    def listInstances(self):
        # Pretty-print every reservation block from describe-instances.
        print "going to do listInstances"
        instanceList = self.getListOfInstances()
        for item in instanceList["Reservations"]:
            pprint(item)
def checkArgs():
    """Parse command-line arguments for this script.

    Returns:
        tuple: (profileName, command) where profileName is the value of
        --appName (the AWS profile/app to act on) and command is one of
        the supported action names.
    """
    parser = argparse.ArgumentParser(
        description='Script that provides a facility to execute various ' +
        'AWS commands and show the output')
    parser.add_argument('--appName', help='Name of the appName ' +
                        'in which to execute the AWS command',
                        required=True)
    # NOTE: the previous default='container' was dead code -- the argument
    # is required, so the default was never used, and 'container' was not
    # even a valid choice.  It has been removed.
    parser.add_argument('-c', '--command', help='The AWS command action ' +
                        'to be performed:',
                        choices=["listInstances",
                                 "checkReservedInstanceRenewal"],
                        required=True)
    args = parser.parse_args()
    return (args.appName, args.command)
def main(argv):
    """Script entry point: parse the CLI and dispatch the chosen command."""
    profile, command = checkArgs()
    AWSCommand(profile).run(command)
# Run as a script: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| |
#!/usr/bin/env python2.1
#
# A visual stimuli presentation script (showmovie.py)
# (written for fMRI study)
#
# requires: pygame
# pyOpenGL
# buffermovie.py
#
# 1-9-2009: sn
# HDD streaming movie presentation with some buffer
#
import sys
import time
import datetime
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import os
import buffermovie
# parameters for screen setting
#scsize = (1024,768)
scsize = (800,600)
fullscreen = 1 # 0 for debug, 1 for prod
### for movie
blankcolor = (140,140,140)
### for text
#blankcolor = (0,0,0)
# movie buffer size (frames)
buffersize = 300
# tfactor is a clock scale factor applied in gettime(); presumably it
# compensates for drift between the stimulus PC clock and the scanner
# sequence timing -- one commented value per scan protocol below.
###tfactor for quarter head scans
#tfactor = 0.996286
###tfactor for Thomas full head
#tfactor = 0.968788
###tfactor for quarter head scans in 3T
#tfactor = 0.999749
###tfactor for 7T, GE white coil, 1.2mm isotropic
tfactor = 0.999961
def getfixationinfo(fmode):
    """Return (size, colors, change-rate) for the fixation spot mode.

    fmode 1: small spot cycling through red/green/blue tints at 3 Hz.
    fmode 2: small white spot (color list repeats, so no visible change).
    any other value: no fixation spot -- all fields are 0.
    """
    presets = {
        1: ((4, 4), ((255, 80, 80), (80, 255, 80), (80, 80, 255)), 3),
        2: ((4, 4), ((255, 255, 255), (255, 255, 255)), 1),
    }
    fixationsize, fixationcolor, fcchange = presets.get(fmode, (0, 0, 0))
    return fixationsize, fixationcolor, fcchange
def getrect(size, color):
    """Render a solid rectangle of *size* and *color* and return the raw
    RGBA pixel string, ready for glDrawPixels()."""
    surface = pygame.Surface(size, 0, 32)
    rect = (0, 0, size[0], size[1])
    pygame.draw.rect(surface, color, rect)
    return pygame.image.tostring(surface, 'RGBA')
def draw(pos, size, data, dtype):
    # Position the raster origin -- coordinates are negated to match the
    # inverted glOrtho projection set up in movieshow() -- then blit pixels.
    glRasterPos2d(-pos[0], -pos[1])
    glDrawPixels(size[0], size[1], dtype , GL_UNSIGNED_BYTE , data)
def flipscreen():
    # Wait until all queued GL commands finish, then swap the double buffer.
    glFinish()
    pygame.display.flip()
def movieshow(movie, show_hz, fixationmode = 0):
    """Present *movie* frame-by-frame at *show_hz* frames/sec.

    Shows a blank screen (with optional fixation spot) until the scanner
    TTL trigger key ('t') is pressed, then plays the movie until its
    duration elapses or ESC is pressed.  Buffer occupancy is sampled once
    per second and written out via logwrite() at the end.
    """
    # set up a screen
    pygame.init()
    pygame.mouse.set_visible(0)
    if fullscreen:
        flags = FULLSCREEN | DOUBLEBUF | HWSURFACE | OPENGL
    else:
        flags = DOUBLEBUF | HWSURFACE | OPENGL
    screen = pygame.display.set_mode(scsize, flags)
    # NOTE(review): -50 is immediately overwritten; kept as a leftover
    # tuning knob for horizontal image placement.
    right_offset = -50
    right_offset = 0
    sccenter = (scsize[0]/2, scsize[1]/2)
    # Inverted orthographic projection; draw() negates coords to match.
    glOrtho(0,-scsize[0],0,-scsize[1],0,1)
    impos = ((scsize[0]-movie.imsize[0])/2 + right_offset, (scsize[1]-movie.imsize[1])/2)
    # fixation spot
    [fixationsize, fixationcolor, fcchange] = getfixationinfo(fixationmode)
    if fixationsize:
        fixationstr = list()
        for fc in fixationcolor:
            fixationstr.append(getrect(fixationsize, fc))
        fcnum = len(fixationcolor)
        fixationpos = (scsize[0]/2-fixationsize[0]/2 + right_offset, scsize[1]/2-fixationsize[1]/2)
    # blank screen
    blankstr = getrect(scsize, blankcolor)
    playstatus = 0
    kwait = 1
    # Trigger wait loop: blank screen + fixation until 't' (TTL) or ESC.
    while kwait:
        draw((0,0), scsize, blankstr, GL_RGBA)
        if fixationsize:
            draw(fixationpos, fixationsize, fixationstr[0], GL_RGBA)
        flipscreen()
        for event in pygame.event.get():
            if (event.type == KEYDOWN):
                # Key for the TTL
                if event.key == K_t: # or event.key == K_5:
                    kwait = 0
                if event.key == K_ESCAPE:
                    playstatus = 99
                # if event.key == K_s: # added for NS
                #     kwait = 0
        if playstatus == 99:
            break
    lasttime = gettime()
    firsttime = lasttime
    finaltime = firsttime + movie.numframes/show_hz
    # One slot per second of playback; -1 marks seconds never reached.
    bufferlog = list([-1]*int(movie.numframes/show_hz))
    bufferlogtick = 0
    thisframe = 0
    # Playback loop: the frame index is derived from wall-clock time, so
    # slow frames are skipped rather than delaying the presentation.
    while lasttime < finaltime:
        if playstatus == 99:
            break
        thisframe = int(1.0*show_hz*(lasttime-firsttime))
        if thisframe >= movie.numframes:
            thisframe = movie.numframes-1
        im, imsize = movie.getframe(thisframe)
        draw(impos, imsize, im, GL_RGBA)
        if fixationsize:
            # Cycle the fixation spot color at fcchange Hz.
            fn = int(lasttime*fcchange)%fcnum
            draw(fixationpos, fixationsize, fixationstr[fn], GL_RGBA)
        flipscreen()
        movie.fetch()
        lasttime = gettime()
        if lasttime-firsttime > bufferlogtick+1:
            # Sample how many frames are buffered ahead, once per second.
            bufferlog[bufferlogtick] = movie.frame_loaded-movie.frame_shown+1
            bufferlogtick = bufferlogtick+1
        # Drain pending key events; ESC aborts playback.
        while True:
            if playstatus==2:
                playstatus = 1
            for event in pygame.event.get():
                if (event.type == KEYDOWN):
                    if event.key == K_ESCAPE:
                        playstatus = 99
                    #if event.key == K_t:
                    #    kts.append(lasttime-firsttime)
            if playstatus == 0:
                break
            elif playstatus == 2:
                break
            elif playstatus == 99:
                break
    ted = gettime()
    td = lasttime-firsttime
    if td:
        print "%.2f sec for %d frames (%.2f frames/sec)" % ( td, thisframe+1, (thisframe+1)/td )
    print bufferlog
    logwrite(bufferlog)
    if playstatus == 99:
        print "Aborted by user."
    pygame.mouse.set_visible(1)
    pygame.display.quit()
def gettime():
    # Wall-clock time scaled by tfactor (module constant) -- presumably
    # compensating for stimulus-PC vs. scanner clock drift; see tfactor.
    return time.time()/tfactor
def logwrite(bufferlog):
    """Write one buffer-occupancy sample per line to a timestamped file
    under ./log/ (the directory must already exist)."""
    d = datetime.datetime.today()
    fname = d.strftime('%Y%m%d_%H%M%S')
    # 'with' guarantees the handle is closed even if a write raises; the
    # old code leaked the file object on error.
    with open('./log/bufferlog' + fname, 'w') as fout:
        for t in bufferlog:
            fout.write(str(t) + '\n')
def show(impath, indexfile, show_hz = 15, flip = 0, fixationmode = 1):
    """Load the image sequence listed in *indexfile* under *impath* into a
    streaming buffer and present it via movieshow().

    show_hz: presentation rate (frames/sec); flip: passed through to
    buffermovie (presumably mirrors the frames -- verify against
    buffermovie.py); fixationmode: see getfixationinfo().
    """
    #impath = '/Users/sn/fmristim/stim200812_randomcut/color/trn002/'
    #indexfile = 'valseq01.index'
    # set up a movie buffer
    print "Pre-loading images...(buffering", buffersize, "frames)"
    movie = buffermovie.buffermovie(impath, indexfile, buffersize, flip)
    imsize = movie.imsize
    print "done.\n"
    print "Size:", imsize, 'pixels, ', movie.numframes, 'frames'
    print "Path:", impath
    print "Files:", movie.filenames[0], movie.filenames[1], '...', movie.filenames[-1]
    movieshow(movie, show_hz, fixationmode)
| |
"""Value inspectors that can be passed to :func:`fudge.Fake.with_args` for more
expressive argument matching.
As a mnemonic device,
an instance of the :class:`fudge.inspector.ValueInspector` is available as "arg" :
.. doctest::
>>> import fudge
>>> from fudge.inspector import arg
>>> image = fudge.Fake("image").expects("save").with_args(arg.endswith(".jpg"))
In other words, this declares that the first argument to ``image.save()``
should end with the suffix ".jpg".
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
import warnings
from fudge.util import fmt_val, fmt_dict_vals
__all__ = ['arg', 'arg_not']
class ValueInspector(object):
    """Dispatches tests to inspect values.

    Each inspection method builds and returns a :class:`ValueTest`
    matcher.  When :attr:`invert_eq` is True (see
    :class:`NotValueInspector`) the matcher's equality check is inverted.
    """
    # When True, matchers produced by this inspector negate their __eq__.
    invert_eq = False
    def _make_value_test(self, test_class, *args, **kwargs):
        # Build the matcher; for the inverting inspector (arg_not) wrap the
        # test class so __eq__ is negated and __repr__ gains a "(NOT)" prefix.
        if not self.invert_eq:
            return test_class(*args, **kwargs)
        class ValueTestInverter(test_class):
            # 'wrapper_self' deliberately avoids shadowing the enclosing
            # method's 'self'.
            def __repr__(wrapper_self):
                return "(NOT) %s" % test_class.__repr__(wrapper_self)
            def __eq__(wrapper_self, other):
                return not test_class.__eq__(wrapper_self, other)
        return ValueTestInverter(*args, **kwargs)
    def any(self):
        """Match any value.
        This is pretty much just a placeholder for when you
        want to inspect multiple arguments but don't care about
        all of them.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> db = fudge.Fake("db")
            >>> db = db.expects("transaction").with_args(
            ...     "insert", isolation_level=arg.any())
            ...
            >>> db.transaction("insert", isolation_level="lock")
            >>> fudge.verify()
        This also passes:
        .. doctest::
            :hide:
            >>> fudge.clear_calls()
        .. doctest::
            >>> db.transaction("insert", isolation_level="autocommit")
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        The arg_not version will not match anything and is probably not very
        useful.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg_not
            >>> query = fudge.Fake('query').expects_call().with_args(
            ...     arg_not.any()
            ... )
            >>> query('asdf')
            Traceback (most recent call last):
            ...
            AssertionError: fake:query((NOT) arg.any()) was called unexpectedly with args ('asdf')
            >>> query()
            Traceback (most recent call last):
            ...
            AssertionError: fake:query((NOT) arg.any()) was called unexpectedly with args ()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(AnyValue)
    def any_value(self):
        """**DEPRECATED**: use :func:`arg.any() <fudge.inspector.ValueInspector.any>`
        """
        warnings.warn('arg.any_value() is deprecated in favor of arg.any()',
                      DeprecationWarning, 3)
        return self.any()
    def contains(self, part):
        """Ensure that a value contains some part.
        This is useful for when you only care that a substring or subelement
        exists in a value.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> addressbook = fudge.Fake().expects("import_").with_args(
            ...     arg.contains("Baba Brooks"))
            ...
            >>> addressbook.import_("Bill Brooks; Baba Brooks; Henry Brooks;")
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        Since contains() just invokes the __in__() method, checking that a list
        item is present works as expected :
        .. doctest::
            >>> colorpicker = fudge.Fake("colorpicker")
            >>> colorpicker = colorpicker.expects("select").with_args(arg.contains("red"))
            >>> colorpicker.select(["green","red","blue"])
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        arg_not.contains matches an argument not containing some element.
        .. doctest::
            >>> from fudge.inspector import arg_not
            >>> colorpicker = colorpicker.expects('select').with_args(arg_not.contains('blue'))
            >>> colorpicker.select('reddish')
            >>> colorpicker.select(['red', 'green'])
            >>> fudge.verify()
            >>> colorpicker.select('blue-green')
            Traceback (most recent call last):
            ...
            AssertionError: fake:colorpicker.select(arg.contains('red'))[0] was called unexpectedly with args ('blue-green')
            >>> colorpicker.select(['red', 'blue', 'green'])
            Traceback (most recent call last):
            ...
            AssertionError: fake:colorpicker.select((NOT) arg.contains('blue'))[1] was called unexpectedly with args (['red', 'blue', 'green'])
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(Contains, part)
    def endswith(self, part):
        """Ensure that a value ends with some part.
        This is useful for when values with dynamic parts that are hard to replicate.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> tmpfile = fudge.Fake("tempfile").expects("mkname").with_args(
            ...     arg.endswith(".tmp"))
            ...
            >>> tmpfile.mkname("7AakkkLazUUKHKJgh908JKjlkh.tmp")
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        The arg_not version works as expected, matching arguments that do not
        end with the given element.
        .. doctest::
            >>> from fudge.inspector import arg_not
            >>> query = fudge.Fake('query').expects_call().with_args(arg_not.endswith('Ringo'))
            >>> query('John, Paul, George and Steve')
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(Endswith, part)
    def has_attr(self, **attributes):
        """Ensure that an object value has at least these attributes.
        This is useful for testing that an object has specific attributes.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> db = fudge.Fake("db").expects("update").with_args(arg.has_attr(
            ...                                                     first_name="Bob",
            ...                                                     last_name="James" ))
            ...
            >>> class User:
            ...     first_name = "Bob"
            ...     last_name = "James"
            ...     job = "jazz musician" # this is ignored
            ...
            >>> db.update(User())
            >>> fudge.verify()
        In case of error, the other object's __repr__ will be invoked:
        .. doctest::
            :hide:
            >>> fudge.clear_calls()
        .. doctest::
            >>> class User:
            ...     first_name = "Bob"
            ...
            ...     def __repr__(self):
            ...         return repr(dict(first_name=self.first_name))
            ...
            >>> db.update(User())
            Traceback (most recent call last):
            ...
            AssertionError: fake:db.update(arg.has_attr(first_name='Bob', last_name='James')) was called unexpectedly with args ({'first_name': 'Bob'})
        When called as a method on arg_not, has_attr does the opposite, and
        ensures that the argument does not have the specified attributes.
        .. doctest::
            >>> from fudge.inspector import arg_not
            >>> class User:
            ...     first_name = 'Bob'
            ...     last_name = 'Dobbs'
            >>> query = fudge.Fake('query').expects_call().with_args(
            ...     arg_not.has_attr(first_name='James')
            ... )
            >>> query(User())
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(HasAttr, **attributes)
    def passes_test(self, test):
        """Check that a value passes some test.
        For custom assertions you may need to create your own callable
        to inspect and verify a value.
        .. doctest::
            >>> def is_valid(s):
            ...     if s in ('active','deleted'):
            ...         return True
            ...     else:
            ...         return False
            ...
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> system = fudge.Fake("system")
            >>> system = system.expects("set_status").with_args(arg.passes_test(is_valid))
            >>> system.set_status("active")
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_calls()
        The callable you pass takes one argument, the value, and should return
        True if it's an acceptable value or False if not.
        .. doctest::
            >>> system.set_status("sleep") # doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AssertionError: fake:system.set_status(arg.passes_test(<function is_valid at...)) was called unexpectedly with args ('sleep')
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        If it makes more sense to perform assertions in your test function then
        be sure to return True :
        >>> def is_valid(s):
        ...     assert s in ('active','deleted'), (
        ...         "Unexpected status value: %s" % s)
        ...     return True
        ...
        >>> import fudge
        >>> from fudge.inspector import arg
        >>> system = fudge.Fake("system")
        >>> system = system.expects("set_status").with_args(arg.passes_test(is_valid))
        >>> system.set_status("sleep")
        Traceback (most recent call last):
        ...
        AssertionError: Unexpected status value: sleep
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        Using the inverted version, arg_not.passes_test, asserts that the
        argument does not pass the provided test.
        .. doctest::
            >>> from fudge.inspector import arg_not
            >>> query = fudge.Fake('query').expects_call().with_args(
            ...     arg_not.passes_test(lambda x: x > 10)
            ... )
            >>> query(5)
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(PassesTest, test)
    def isinstance(self, cls):
        """Check that a value is instance of specified class.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> system = fudge.Fake("system")
            >>> system = system.expects("set_status").with_args(arg.isinstance(str))
            >>> system.set_status("active")
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_calls()
        Should return True if it's allowed class or False if not.
        .. doctest::
            >>> system.set_status(31337) # doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AssertionError: fake:system.set_status(arg.isinstance('str')) was called unexpectedly with args (31337)
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(IsInstance, cls)
    def startswith(self, part):
        """Ensure that a value starts with some part.
        This is useful for when values with dynamic parts that are hard to replicate.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg
            >>> keychain = fudge.Fake("keychain").expects("accept_key").with_args(
            ...     arg.startswith("_key"))
            ...
            >>> keychain.accept_key("_key-18657yojgaodfty98618652olkj[oollk]")
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        Using arg_not.startswith instead ensures that arguments do not start
        with that part.
        .. doctest::
            >>> from fudge.inspector import arg_not
            >>> query = fudge.Fake('query').expects_call().with_args(
            ...     arg_not.startswith('asdf')
            ... )
            >>> query('qwerty')
            >>> fudge.verify()
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return self._make_value_test(Startswith, part)
class NotValueInspector(ValueInspector):
    """Inherits all the argument methods from ValueInspector, but inverts them
    to expect the opposite. See the ValueInspector method docstrings for
    examples.
    """
    # Flips _make_value_test into wrapping mode (negated __eq__).
    invert_eq = True
    def __call__(self, thing):
        """This will match any value except the argument given.
        .. doctest::
            >>> import fudge
            >>> from fudge.inspector import arg, arg_not
            >>> query = fudge.Fake('query').expects_call().with_args(
            ...     arg.any(),
            ...     arg_not('foobar')
            ... )
            >>> query([1, 2, 3], 'asdf')
            >>> query('asdf', 'foobar')
            Traceback (most recent call last):
            ...
            AssertionError: fake:query(arg.any(), arg_not(foobar)) was called unexpectedly with args ('asdf', 'foobar')
        .. doctest::
            :hide:
            >>> fudge.clear_expectations()
        """
        return NotValue(thing)
# Module-level singletons used directly in with_args() expressions.
arg = ValueInspector()
arg_not = NotValueInspector()
class ValueTest(object):
    """Abstract base class for argument matchers.

    A concrete matcher sets :attr:`arg_method` (the name of the
    ``ValueInspector`` method that builds it) and implements ``__eq__``
    and ``_repr_argspec``.  ``str()``, ``unicode()`` and ``repr()`` all
    render the same argspec string.
    """
    arg_method = None
    __test__ = False # nose

    def __eq__(self, other):
        raise NotImplementedError()

    def __repr__(self):
        return self._repr_argspec()

    def __str__(self):
        return self._repr_argspec()

    def __unicode__(self):
        return self._repr_argspec()

    def _repr_argspec(self):
        raise NotImplementedError()

    def _make_argspec(self, spec):
        # Guard against subclasses that forgot to declare arg_method.
        if self.arg_method is None:
            raise NotImplementedError(
                "%r must have set attribute arg_method" % self.__class__)
        return "arg.%s(%s)" % (self.arg_method, spec)
class Stringlike(ValueTest):
    """Base for matchers that call a string method (named by arg_method,
    e.g. ``startswith``/``endswith``) on the inspected value, coercing
    non-string values to str first."""
    def __init__(self, part):
        self.part = part
    def _repr_argspec(self):
        return self._make_argspec(fmt_val(self.part))
    def stringlike(self, value):
        """Return *value* unchanged if it is already text, else str(value)."""
        # BUGFIX: the bare `unicode` reference raised NameError on
        # Python 3; guard it so the class works on both 2 and 3.
        try:
            text_types = (str, unicode)
        except NameError:  # Python 3: no separate unicode type
            text_types = (str,)
        if isinstance(value, text_types):
            return value
        else:
            return str(value)
    def __eq__(self, other):
        check_stringlike = getattr(self.stringlike(other), self.arg_method)
        return check_stringlike(self.part)
class Startswith(Stringlike):
    """Matcher: the value (coerced to a string) must start with ``part``."""
    arg_method = "startswith"
class Endswith(Stringlike):
    """Matcher: the value (coerced to a string) must end with ``part``."""
    arg_method = "endswith"
class HasAttr(ValueTest):
    """Matcher asserting the object exposes each named attribute with the
    expected value; extra attributes on the object are ignored."""
    arg_method = "has_attr"

    def __init__(self, **attributes):
        self.attributes = attributes

    def _repr_argspec(self):
        formatted = fmt_dict_vals(self.attributes)
        return self._make_argspec(", ".join(sorted(formatted)))

    def __eq__(self, other):
        # Every expected attribute must exist and compare equal.
        return all(
            hasattr(other, name) and getattr(other, name) == value
            for name, value in self.attributes.items()
        )
class AnyValue(ValueTest):
    """Matcher that accepts any value (built by ``arg.any()``)."""
    arg_method = "any"
    def __eq__(self, other):
        # will match anything:
        return True
    def _repr_argspec(self):
        return self._make_argspec("")
class Contains(ValueTest):
    """Matcher asserting ``part in value`` (built by ``arg.contains(part)``)."""
    arg_method = "contains"
    def __init__(self, part):
        self.part = part
    def _repr_argspec(self):
        return self._make_argspec(fmt_val(self.part))
    def __eq__(self, other):
        # The membership test already yields a bool; the previous
        # ``if ...: return True / else: return False`` ladder was redundant.
        return self.part in other
class PassesTest(ValueTest):
    """Matcher that delegates to a user-supplied predicate
    (built by ``arg.passes_test(callable)``)."""
    arg_method = "passes_test"
    def __init__(self, test):
        self.test = test
    def __eq__(self, other):
        # The predicate's truthiness decides the match; it may also raise
        # its own AssertionError for a richer failure message.
        return self.test(other)
    def _repr_argspec(self):
        return self._make_argspec(repr(self.test))
class IsInstance(ValueTest):
    """Matcher: the value must be an instance of the given class (or of
    any class in a tuple/list of classes)."""
    arg_method = "isinstance"

    def __init__(self, cls):
        self.cls = cls

    def __eq__(self, other):
        return isinstance(other, self.cls)

    def _repr_argspec(self):
        target = self.cls
        if isinstance(target, (tuple, list)):
            shown = tuple(c.__name__ for c in target)
        else:
            shown = target.__name__
        return self._make_argspec(repr(shown))
class NotValue(ValueTest):
    """Inverse-equality matcher produced by ``arg_not(thing)``: matches
    any value except ``item``."""
    def __init__(self, item):
        self.item = item
    def __eq__(self, other):
        return not self.item == other
    def _repr_argspec(self):
        return "arg_not(%s)" % self.item
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# URLs resolved once at import time and reused by every test below.
USERS_INDEX_URL = reverse('horizon:admin:users:index')
USER_CREATE_URL = reverse('horizon:admin:users:create')
USER_UPDATE_URL = reverse('horizon:admin:users:update', args=[1])
class UsersViewTests(test.BaseAdminViewTests):
    """View tests for the admin user panel (list/create/update/enable/
    disable/delete), with keystone API calls stubbed out via mox -- see
    the @test.create_stubs decorators on each test."""
    def _get_domain_id(self):
        # Mirrors the view's notion of the active keystone domain, if any.
        return self.request.session.get('domain_context', None)
    def _get_users(self, domain_id):
        # Fixture users, optionally filtered to the given domain.
        if not domain_id:
            users = self.users.list()
        else:
            users = [user for user in self.users.list()
                     if user.domain_id == domain_id]
        return users
    @test.create_stubs({api.keystone: ('user_list',)})
    def test_index(self):
        domain_id = self._get_domain_id()
        users = self._get_users(domain_id)
        api.keystone.user_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(users)
        self.mox.ReplayAll()
        res = self.client.get(USERS_INDEX_URL)
        self.assertTemplateUsed(res, 'admin/users/index.html')
        self.assertItemsEqual(res.context['table'].data, users)
        if domain_id:
            for user in res.context['table'].data:
                # NOTE(review): assertItemsEqual on two scalars looks like
                # it was meant to be assertEqual -- confirm intent.
                self.assertItemsEqual(user.domain_id, domain_id)
    def test_index_with_domain(self):
        # Re-runs test_index with a domain context set in the session.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        self.test_index()
    @test.create_stubs({api.keystone: ('user_create',
                                       'tenant_list',
                                       'add_tenant_user_role',
                                       'get_default_role',
                                       'role_list')})
    def test_create(self):
        user = self.users.get(id="1")
        domain_id = self._get_domain_id()
        role = self.roles.first()
        api.keystone.tenant_list(IgnoreArg(), user=None) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.user_create(IgnoreArg(),
                                 name=user.name,
                                 email=user.email,
                                 password=user.password,
                                 project=self.tenant.id,
                                 enabled=True,
                                 domain=domain_id).AndReturn(user)
        api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
        api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
        api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
                                          user.id, role.id)
        self.mox.ReplayAll()
        formData = {'method': 'CreateUserForm',
                    'name': user.name,
                    'email': user.email,
                    'password': user.password,
                    'project': self.tenant.id,
                    'role_id': self.roles.first().id,
                    'confirm_password': user.password}
        res = self.client.post(USER_CREATE_URL, formData)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
    def test_create_with_domain(self):
        # Re-runs test_create with a domain context set in the session.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        self.test_create()
    @test.create_stubs({api.keystone: ('tenant_list',
                                       'role_list',
                                       'get_default_role')})
    def test_create_with_password_mismatch(self):
        user = self.users.get(id="1")
        api.keystone.tenant_list(IgnoreArg(), user=None) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
        api.keystone.get_default_role(IgnoreArg()) \
            .AndReturn(self.roles.first())
        self.mox.ReplayAll()
        formData = {'method': 'CreateUserForm',
                    'name': user.name,
                    'email': user.email,
                    'password': user.password,
                    'project': self.tenant.id,
                    'role_id': self.roles.first().id,
                    'confirm_password': "doesntmatch"}
        res = self.client.post(USER_CREATE_URL, formData)
        self.assertFormError(res, "form", None, ['Passwords do not match.'])
    @test.create_stubs({api.keystone: ('tenant_list',
                                       'role_list',
                                       'get_default_role')})
    def test_create_validation_for_password_too_short(self):
        user = self.users.get(id="1")
        api.keystone.tenant_list(IgnoreArg(), user=None) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
        api.keystone.get_default_role(IgnoreArg()) \
            .AndReturn(self.roles.first())
        self.mox.ReplayAll()
        # check password min-len verification
        formData = {'method': 'CreateUserForm',
                    'name': user.name,
                    'email': user.email,
                    'password': 'four',
                    'project': self.tenant.id,
                    'role_id': self.roles.first().id,
                    'confirm_password': 'four'}
        res = self.client.post(USER_CREATE_URL, formData)
        self.assertFormError(
            res, "form", 'password',
            ['Password must be between 8 and 18 characters.'])
    @test.create_stubs({api.keystone: ('tenant_list',
                                       'role_list',
                                       'get_default_role')})
    def test_create_validation_for_password_too_long(self):
        user = self.users.get(id="1")
        api.keystone.tenant_list(IgnoreArg(), user=None) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
        api.keystone.get_default_role(IgnoreArg()) \
            .AndReturn(self.roles.first())
        self.mox.ReplayAll()
        # check password max-len verification
        formData = {'method': 'CreateUserForm',
                    'name': user.name,
                    'email': user.email,
                    'password': 'MoreThanEighteenChars',
                    'project': self.tenant.id,
                    'role_id': self.roles.first().id,
                    'confirm_password': 'MoreThanEighteenChars'}
        res = self.client.post(USER_CREATE_URL, formData)
        self.assertFormError(
            res, "form", 'password',
            ['Password must be between 8 and 18 characters.'])
    @test.create_stubs({api.keystone: ('user_get',
                                       'tenant_list',
                                       'user_update_tenant',
                                       'user_update_password',
                                       'user_update',
                                       'roles_for_user', )})
    def test_update(self):
        user = self.users.get(id="1")
        test_password = 'normalpwd'
        api.keystone.user_get(IsA(http.HttpRequest), '1',
                              admin=True).AndReturn(user)
        api.keystone.tenant_list(IgnoreArg(), user=user.id) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.user_update(IsA(http.HttpRequest),
                                 user.id,
                                 email=u'test@example.com',
                                 name=u'test_user',
                                 password=test_password,
                                 project=self.tenant.id).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateUserForm',
                    'id': user.id,
                    'name': user.name,
                    'email': user.email,
                    'password': test_password,
                    'project': self.tenant.id,
                    'confirm_password': test_password}
        res = self.client.post(USER_UPDATE_URL, formData)
        self.assertNoFormErrors(res)
    @test.create_stubs({api.keystone: ('user_get',
                                       'tenant_list',
                                       'user_update_tenant',
                                       'keystone_can_edit_user',
                                       'roles_for_user', )})
    def test_update_with_keystone_can_edit_user_false(self):
        user = self.users.get(id="1")
        api.keystone.user_get(IsA(http.HttpRequest),
                              '1',
                              admin=True).AndReturn(user)
        api.keystone.tenant_list(IgnoreArg(), user=user.id) \
            .AndReturn([self.tenants.list(), False])
        # Stubbed twice: the view queries the capability more than once.
        api.keystone.keystone_can_edit_user().AndReturn(False)
        api.keystone.keystone_can_edit_user().AndReturn(False)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateUserForm',
                    'id': user.id,
                    'name': user.name,
                    'project': self.tenant.id, }
        res = self.client.post(USER_UPDATE_URL, formData)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=1)
    @test.create_stubs({api.keystone: ('user_get', 'tenant_list')})
    def test_update_validation_for_password_too_short(self):
        user = self.users.get(id="1")
        api.keystone.user_get(IsA(http.HttpRequest), '1',
                              admin=True).AndReturn(user)
        api.keystone.tenant_list(IgnoreArg(), user=user.id) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        formData = {'method': 'UpdateUserForm',
                    'id': user.id,
                    'name': user.name,
                    'email': user.email,
                    'password': 't',
                    'project': self.tenant.id,
                    'confirm_password': 't'}
        res = self.client.post(USER_UPDATE_URL, formData)
        self.assertFormError(
            res, "form", 'password',
            ['Password must be between 8 and 18 characters.'])
    @test.create_stubs({api.keystone: ('user_get', 'tenant_list')})
    def test_update_validation_for_password_too_long(self):
        user = self.users.get(id="1")
        api.keystone.user_get(IsA(http.HttpRequest), '1',
                              admin=True).AndReturn(user)
        api.keystone.tenant_list(IgnoreArg(), user=user.id) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        formData = {'method': 'UpdateUserForm',
                    'id': user.id,
                    'name': user.name,
                    'email': user.email,
                    'password': 'ThisIsASuperLongPassword',
                    'project': self.tenant.id,
                    'confirm_password': 'ThisIsASuperLongPassword'}
        res = self.client.post(USER_UPDATE_URL, formData)
        self.assertFormError(
            res, "form", 'password',
            ['Password must be between 8 and 18 characters.'])
    @test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
    def test_enable_user(self):
        user = self.users.get(id="2")
        domain_id = self._get_domain_id()
        users = self._get_users(domain_id)
        user.enabled = False
        api.keystone.user_list(IgnoreArg(), domain=domain_id).AndReturn(users)
        api.keystone.user_update_enabled(IgnoreArg(),
                                         user.id,
                                         True).AndReturn(user)
        self.mox.ReplayAll()
        formData = {'action': 'users__toggle__%s' % user.id}
        res = self.client.post(USERS_INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
    @test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
    def test_disable_user(self):
        user = self.users.get(id="2")
        domain_id = self._get_domain_id()
        users = self._get_users(domain_id)
        self.assertTrue(user.enabled)
        api.keystone.user_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(users)
        api.keystone.user_update_enabled(IgnoreArg(),
                                         user.id,
                                         False).AndReturn(user)
        self.mox.ReplayAll()
        formData = {'action': 'users__toggle__%s' % user.id}
        res = self.client.post(USERS_INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
    @test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
    def test_enable_disable_user_exception(self):
        # A keystone error during toggle should still redirect gracefully.
        user = self.users.get(id="2")
        domain_id = self._get_domain_id()
        users = self._get_users(domain_id)
        user.enabled = False
        api.keystone.user_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(users)
        api.keystone.user_update_enabled(IgnoreArg(), user.id, True) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()
        formData = {'action': 'users__toggle__%s' % user.id}
        res = self.client.post(USERS_INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
    @test.create_stubs({api.keystone: ('user_list',)})
    def test_disabling_current_user(self):
        domain_id = self._get_domain_id()
        users = self._get_users(domain_id)
        # user_list is consulted twice (initial render + redirect follow).
        for i in range(0, 2):
            api.keystone.user_list(IgnoreArg(), domain=domain_id) \
                .AndReturn(users)
        self.mox.ReplayAll()
        formData = {'action': 'users__toggle__%s' % self.request.user.id}
        res = self.client.post(USERS_INDEX_URL, formData, follow=True)
        self.assertEqual(list(res.context['messages'])[0].message,
                         u'You cannot disable the user you are currently '
                         u'logged in as.')
    @test.create_stubs({api.keystone: ('user_list',)})
    def test_delete_user_with_improper_permissions(self):
        domain_id = self._get_domain_id()
        users = self._get_users(domain_id)
        # user_list is consulted twice (initial render + redirect follow).
        for i in range(0, 2):
            api.keystone.user_list(IgnoreArg(), domain=domain_id) \
                .AndReturn(users)
        self.mox.ReplayAll()
        formData = {'action': 'users__delete__%s' % self.request.user.id}
        res = self.client.post(USERS_INDEX_URL, formData, follow=True)
        self.assertEqual(list(res.context['messages'])[0].message,
                         u'You do not have permission to delete user: %s'
                         % self.request.user.username)
class SeleniumTests(test.SeleniumAdminTestCase):
    """Browser-driven checks that the client-side password-mismatch
    warning appears (and only appears) at the right time on the create
    and update user forms."""
    @test.create_stubs({api.keystone: ('tenant_list',
                                       'get_default_role',
                                       'role_list',
                                       'user_list')})
    def test_modal_create_user_with_passwords_not_matching(self):
        api.keystone.tenant_list(IgnoreArg(), user=None) \
            .AndReturn([self.tenants.list(), False])
        api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
        api.keystone.user_list(IgnoreArg(), domain=None) \
            .AndReturn(self.users.list())
        api.keystone.get_default_role(IgnoreArg()) \
            .AndReturn(self.roles.first())
        self.mox.ReplayAll()
        self.selenium.get("%s%s" % (self.live_server_url, USERS_INDEX_URL))
        # Open the modal menu
        self.selenium.find_element_by_id("users__action_create") \
            .send_keys("\n")
        # Wait (up to 10s) for the modal form to render before interacting.
        wait = self.ui.WebDriverWait(self.selenium, 10,
                                     ignored_exceptions=[socket_timeout])
        wait.until(lambda x: self.selenium.find_element_by_id("id_name"))
        body = self.selenium.find_element_by_tag_name("body")
        self.assertFalse("Passwords do not match" in body.text,
                         "Error message should not be visible at loading time")
        self.selenium.find_element_by_id("id_name").send_keys("Test User")
        self.selenium.find_element_by_id("id_password").send_keys("test")
        self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
        # Moving focus to the email field triggers client-side validation.
        self.selenium.find_element_by_id("id_email").send_keys("a@b.com")
        body = self.selenium.find_element_by_tag_name("body")
        self.assertTrue("Passwords do not match" in body.text,
                        "Error message not found in body")
    @test.create_stubs({api.keystone: ('tenant_list', 'user_get')})
    def test_update_user_with_passwords_not_matching(self):
        api.keystone.user_get(IsA(http.HttpRequest), '1',
                              admin=True).AndReturn(self.user)
        api.keystone.tenant_list(IgnoreArg(), user=self.user.id) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        self.selenium.get("%s%s" % (self.live_server_url, USER_UPDATE_URL))
        body = self.selenium.find_element_by_tag_name("body")
        self.assertFalse("Passwords do not match" in body.text,
                         "Error message should not be visible at loading time")
        self.selenium.find_element_by_id("id_password").send_keys("test")
        self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
        # Clearing the email field shifts focus, triggering validation.
        self.selenium.find_element_by_id("id_email").clear()
        body = self.selenium.find_element_by_tag_name("body")
        self.assertTrue("Passwords do not match" in body.text,
                        "Error message not found in body")
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO as StringIO
import random
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest.test import attr
class BasicOperationsImagesTest(base.BaseV2ImageTest):
    """
    Here we test the basic operations of images
    """

    @attr(type='gate')
    def test_register_upload_get_image_file(self):
        """
        Here we test these functionalities - Register image,
        upload the image file, get image and get image file api's
        """
        image_name = data_utils.rand_name('image')
        # Register the image; it should start out queued (no data yet).
        resp, body = self.create_image(name=image_name,
                                       container_format='bare',
                                       disk_format='raw',
                                       visibility='public')
        self.assertIn('id', body)
        image_id = body.get('id')
        self.assertIn('name', body)
        self.assertEqual(image_name, body['name'])
        self.assertIn('visibility', body)
        self.assertEqual('public', body['visibility'])
        self.assertIn('status', body)
        self.assertEqual('queued', body['status'])
        # Now try uploading an image file
        file_content = '*' * 1024
        image_file = StringIO.StringIO(file_content)
        resp, body = self.client.store_image(image_id, image_file)
        # NOTE(review): this compares resp.status as an int while other tests
        # in this module use resp['status'] == '200' — confirm the response
        # object supports both access styles.
        self.assertEqual(resp.status, 204)
        # Now try to get image details
        resp, body = self.client.get_image(image_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(image_name, body['name'])
        self.assertIn('size', body)
        # Size must match the 1024 bytes uploaded above.
        self.assertEqual(1024, body.get('size'))
        # Now try get image file
        resp, body = self.client.get_image_file(image_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(file_content, body)

    @attr(type='gate')
    def test_delete_image(self):
        # Deletes a image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        resp, body = self.client.create_image(name=image_name,
                                              container_format='bare',
                                              disk_format='raw',
                                              visibility='public')
        self.assertEqual(201, resp.status)
        image_id = body['id']
        # Delete Image
        self.client.delete_image(image_id)
        self.client.wait_for_resource_deletion(image_id)
        # Verifying deletion
        resp, images = self.client.image_list()
        self.assertEqual(resp.status, 200)
        self.assertNotIn(image_id, images)
class ListImagesTest(base.BaseV2ImageTest):
    """
    Here we test the listing of image information
    """

    @classmethod
    def setUpClass(cls):
        super(ListImagesTest, cls).setUpClass()
        # We add a few images here to test the listing functionality of
        # the images API
        cls._create_standard_image('bare', 'raw')
        cls._create_standard_image('bare', 'raw')
        cls._create_standard_image('ami', 'raw')
        # Add some more for listing
        cls._create_standard_image('ami', 'ami')
        cls._create_standard_image('ari', 'ari')
        cls._create_standard_image('aki', 'aki')

    @classmethod
    def _create_standard_image(cls, container_format, disk_format):
        """
        Create a new standard image and return the ID of the newly-registered
        image. Note that the size of the new image is a random number between
        1024 and 4096
        """
        image_file = StringIO.StringIO('*' * random.randint(1024, 4096))
        name = data_utils.rand_name('image-')
        resp, body = cls.create_image(name=name,
                                      container_format=container_format,
                                      disk_format=disk_format,
                                      visibility='public')
        image_id = body['id']
        # Upload data so the image becomes active and gets a size.
        resp, body = cls.client.store_image(image_id, data=image_file)
        return image_id

    def _list_by_param_value_and_assert(self, params):
        """
        Perform list action with given params and validates result.
        """
        resp, images_list = self.client.image_list(params=params)
        self.assertEqual(200, resp.status)
        # Validating params of fetched images
        for image in images_list:
            for key in params:
                msg = "Failed to list images by %s" % key
                self.assertEqual(params[key], image[key], msg)

    @attr(type='gate')
    def test_index_no_params(self):
        # Simple test to see all fixture images returned
        resp, images_list = self.client.image_list()
        # NOTE(review): dict-style resp['status'] with string '200' here vs
        # resp.status int in the helper above — confirm both are supported.
        self.assertEqual(resp['status'], '200')
        image_list = map(lambda x: x['id'], images_list)
        for image in self.created_images:
            self.assertIn(image, image_list)

    @attr(type='gate')
    def test_list_images_param_container_format(self):
        # Test to get all images with container_format='bare'
        params = {"container_format": "bare"}
        self._list_by_param_value_and_assert(params)

    @attr(type='gate')
    def test_list_images_param_disk_format(self):
        # Test to get all images with disk_format = raw
        params = {"disk_format": "raw"}
        self._list_by_param_value_and_assert(params)

    @attr(type='gate')
    def test_list_images_param_visibility(self):
        # Test to get all images with visibility = public
        params = {"visibility": "public"}
        self._list_by_param_value_and_assert(params)

    @attr(type='gate')
    def test_list_images_param_size(self):
        # Test to get all images by size
        image_id = self.created_images[1]
        # Get image metadata
        resp, image = self.client.get_image(image_id)
        self.assertEqual(resp['status'], '200')
        params = {"size": image['size']}
        self._list_by_param_value_and_assert(params)

    @attr(type='gate')
    def test_list_images_param_min_max_size(self):
        # Test to get all images with size between 2000 to 3000
        image_id = self.created_images[1]
        # Get image metadata
        resp, image = self.client.get_image(image_id)
        self.assertEqual(resp['status'], '200')
        size = image['size']
        # Query a +/- 500 byte window around one known image's size.
        params = {"size_min": size - 500, "size_max": size + 500}
        resp, images_list = self.client.image_list(params=params)
        self.assertEqual(resp['status'], '200')
        image_size_list = map(lambda x: x['size'], images_list)
        for image_size in image_size_list:
            self.assertTrue(image_size >= params['size_min'] and
                            image_size <= params['size_max'],
                            "Failed to get images by size_min and size_max")

    @attr(type='gate')
    def test_list_images_param_status(self):
        # Test to get all active images
        params = {"status": "active"}
        self._list_by_param_value_and_assert(params)

    @attr(type='gate')
    def test_list_images_param_limit(self):
        # Test to get images by limit
        params = {"limit": 2}
        resp, images_list = self.client.image_list(params=params)
        self.assertEqual(resp['status'], '200')
        self.assertEqual(len(images_list), params['limit'],
                         "Failed to get images by limit")
| |
from __future__ import absolute_import
import numpy as np
import unittest
import centrosome.lapjv as LAPJV
from centrosome.filter import permutations
from six.moves import range
from six.moves import zip
class TestLAPJVPYX(unittest.TestCase):
    """Unit tests for the low-level LAPJV primitives (sparse CSR-style input:
    row ids, column ids, per-row start index and count, costs c)."""

    def test_01_01_reduction_transfer(self):
        """Test the reduction transfer implementation"""
        # Each case supplies dual variables (u_in, v_in) and the duals
        # expected after reduction transfer (u_out, v_out).
        cases = [
            dict(
                i=[0, 1, 2],
                j=[0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6],
                count=[3, 3, 3],
                x=[2, 0, 1],
                y=[1, 2, 0],
                c=[5.0, 4.0, 1.0, 2.0, 6.0, 4.0, 4.0, 3.0, 7.0],
                u_in=[0.0, 0.0, 0.0],
                v_in=[1.0, 2.0, 3.0],
                u_out=[2.0, 3.0, 6.0],
                v_out=[-2.0, -4.0, 1.0],
            ),
            dict(
                i=[1, 2, 3],
                j=[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6, 9],
                count=[3, 3, 3, 3],
                x=[3, 2, 0, 1],
                y=[1, 2, 0, 3],
                c=[0.0, 0.0, 0.0, 5.0, 4.0, 1.0, 2.0, 6.0, 4.0, 4.0, 3.0, 7.0],
                u_in=[0.0, 0.0, 0.0, 0.0],
                v_in=[1.0, 2.0, 3.0, 0.0],
                u_out=[0.0, 2.0, 3.0, 6.0],
                v_out=[-2.0, -4.0, 1.0, 0.0],
            ),
        ]
        for case in cases:
            # u and v are updated in place by reduction_transfer.
            u = np.ascontiguousarray(case["u_in"], np.float64)
            v = np.ascontiguousarray(case["v_in"], np.float64)
            LAPJV.reduction_transfer(
                np.ascontiguousarray(case["i"], np.uint32),
                np.ascontiguousarray(case["j"], np.uint32),
                np.ascontiguousarray(case["idx"], np.uint32),
                np.ascontiguousarray(case["count"], np.uint32),
                np.ascontiguousarray(case["x"], np.uint32),
                u,
                v,
                np.ascontiguousarray(case["c"], np.float64),
            )
            expected_u = np.array(case["u_out"])
            expected_v = np.array(case["v_out"])
            np.testing.assert_array_almost_equal(expected_u, u)
            np.testing.assert_array_almost_equal(expected_v, v)

    def test_02_01_augmenting_row_reduction(self):
        """augmenting_row_reduction updates x, y, u, v in place."""
        cases = [
            dict(
                n=3,
                ii=[1],
                jj=[0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6],
                count=[3, 3, 3],
                x=[1, 3, 0],
                y=[2, 0, 3],
                u_in=[1.0, 2.0, 3.0],
                v_in=[1.0, 2.0, 3.0],
                c=[3.0, 6.0, 5.0, 5.0, 5.0, 7.1, 8.0, 11.0, 9.0],
                u_out=[1.0, 2.0, 3.0],
                v_out=[1.0, 1.0, 3.0],
                x_out=[2, 1, 0],
                y_out=[2, 1, 0],
            )
        ]
        for case in cases:
            u = np.ascontiguousarray(case["u_in"], np.float64)
            v = np.ascontiguousarray(case["v_in"], np.float64)
            x = np.ascontiguousarray(case["x"], np.uint32)
            y = np.ascontiguousarray(case["y"], np.uint32)
            LAPJV.augmenting_row_reduction(
                case["n"],
                np.ascontiguousarray(case["ii"], np.uint32),
                np.ascontiguousarray(case["jj"], np.uint32),
                np.ascontiguousarray(case["idx"], np.uint32),
                np.ascontiguousarray(case["count"], np.uint32),
                x,
                y,
                u,
                v,
                np.ascontiguousarray(case["c"], np.float64),
            )
            expected_u = np.array(case["u_out"])
            expected_v = np.array(case["v_out"])
            expected_x = np.array(case["x_out"])
            expected_y = np.array(case["y_out"])
            np.testing.assert_array_almost_equal(expected_u, u)
            np.testing.assert_array_almost_equal(expected_v, v)
            np.testing.assert_array_equal(expected_x, x)
            np.testing.assert_array_equal(expected_y, y)

    def test_03_01_augment(self):
        """augment completes a partial assignment for the free rows in i."""
        cases = [
            dict(
                n=3,
                i=[2],
                j=[0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6],
                count=[3, 3, 3],
                x_in=[0, 1, 3],
                x_out=[0, 1, 2],
                y_in=[0, 1, 3],
                y_out=[0, 1, 2],
                u_in=[4, 0, 2],
                v_in=[-1, 1, 1],
                u_out=[4, 0, 2],
                v_out=[-1, 1, 1],
                c=[3, 5, 7, 4, 1, 6, 2, 3, 3],
            )
        ]
        for case in cases:
            n = case["n"]
            i = np.ascontiguousarray(case["i"], np.uint32)
            j = np.ascontiguousarray(case["j"], np.uint32)
            idx = np.ascontiguousarray(case["idx"], np.uint32)
            count = np.ascontiguousarray(case["count"], np.uint32)
            x = np.ascontiguousarray(case["x_in"], np.uint32)
            y = np.ascontiguousarray(case["y_in"], np.uint32)
            u = np.ascontiguousarray(case["u_in"], np.float64)
            v = np.ascontiguousarray(case["v_in"], np.float64)
            c = np.ascontiguousarray(case["c"], np.float64)
            LAPJV.augment(n, i, j, idx, count, x, y, u, v, c)
            np.testing.assert_array_equal(x, case["x_out"])
            np.testing.assert_array_equal(y, case["y_out"])
            np.testing.assert_almost_equal(u, case["u_out"])
            np.testing.assert_almost_equal(v, case["v_out"])
class TestLAPJV(unittest.TestCase):
    """End-to-end tests of LAPJV.lapjv against brute-force / regression data."""

    def test_01_02(self):
        """Random 5x5 dense problems: lapjv must match the brute-force optimum."""
        r = np.random.RandomState()
        r.seed(11)
        for reductions in [0, 2]:
            for _ in range(100):
                c = r.randint(1, 10, (5, 5))
                i, j = np.mgrid[0:5, 0:5]
                i = i.flatten()
                j = j.flatten()
                x, y, u, v = LAPJV.lapjv(i, j, c.flatten(), True, reductions)
                # Brute-force the optimal cost over all 5! permutations.
                min_cost = np.sum(c)
                best = None  # NOTE(review): computed but never asserted against.
                for permutation in permutations([0, 1, 2, 3, 4]):
                    cost = sum([c[i, permutation[i]] for i in range(5)])
                    if cost < min_cost:
                        best = list(permutation)
                        min_cost = cost
                result_cost = sum([c[i, x[i]] for i in range(5)])
                self.assertAlmostEqual(min_cost, result_cost)

    def test_01_03(self):
        """Regression tests of matrices that crashed lapjv"""
        # Each row of d is (i, j, cost) for a sparse assignment problem.
        dd = [
            np.array(
                [
                    [0.0, 0.0, 0.0],
                    [1.0, 1.0, 5.34621029],
                    [1.0, 7.0, 55.0],
                    [2.0, 2.0, 2.09806089],
                    [2.0, 8.0, 55.0],
                    [3.0, 3.0, 4.82063029],
                    [3.0, 9.0, 55.0],
                    [4.0, 4.0, 3.99481917],
                    [4.0, 10.0, 55.0],
                    [5.0, 5.0, 3.18959054],
                    [5.0, 11.0, 55.0],
                    [6.0, 1.0, 55.0],
                    [6.0, 7.0, 0.0],
                    [6.0, 8.0, 0.0],
                    [6.0, 9.0, 0.0],
                    [6.0, 10.0, 0.0],
                    [6.0, 11.0, 0.0],
                    [7.0, 2.0, 55.0],
                    [7.0, 7.0, 0.0],
                    [7.0, 8.0, 0.0],
                    [7.0, 9.0, 0.0],
                    [7.0, 10.0, 0.0],
                    [7.0, 11.0, 0.0],
                    [8.0, 3.0, 55.0],
                    [8.0, 7.0, 0.0],
                    [8.0, 8.0, 0.0],
                    [8.0, 9.0, 0.0],
                    [8.0, 10.0, 0.0],
                    [8.0, 11.0, 0.0],
                    [9.0, 4.0, 55.0],
                    [9.0, 7.0, 0.0],
                    [9.0, 8.0, 0.0],
                    [9.0, 9.0, 0.0],
                    [9.0, 10.0, 0.0],
                    [9.0, 11.0, 0.0],
                    [10.0, 5.0, 55.0],
                    [10.0, 7.0, 0.0],
                    [10.0, 8.0, 0.0],
                    [10.0, 9.0, 0.0],
                    [10.0, 10.0, 0.0],
                    [10.0, 11.0, 0.0],
                    [11.0, 6.0, 55.0],
                    [11.0, 7.0, 0.0],
                    [11.0, 8.0, 0.0],
                    [11.0, 9.0, 0.0],
                    [11.0, 10.0, 0.0],
                    [11.0, 11.0, 0.0],
                ]
            ),
            np.array(
                [
                    [0.0, 0.0, 0.0],
                    [1.0, 1.0, 1.12227977],
                    [1.0, 6.0, 55.0],
                    [2.0, 2.0, 18.66735253],
                    [2.0, 4.0, 16.2875504],
                    [2.0, 7.0, 55.0],
                    [3.0, 5.0, 1.29944194],
                    [3.0, 8.0, 55.0],
                    [4.0, 5.0, 32.61892281],
                    [4.0, 9.0, 55.0],
                    [5.0, 1.0, 55.0],
                    [5.0, 6.0, 0.0],
                    [5.0, 7.0, 0.0],
                    [5.0, 8.0, 0.0],
                    [5.0, 9.0, 0.0],
                    [6.0, 2.0, 55.0],
                    [6.0, 6.0, 0.0],
                    [6.0, 7.0, 0.0],
                    [6.0, 8.0, 0.0],
                    [6.0, 9.0, 0.0],
                    [7.0, 3.0, 55.0],
                    [7.0, 6.0, 0.0],
                    [7.0, 7.0, 0.0],
                    [7.0, 8.0, 0.0],
                    [7.0, 9.0, 0.0],
                    [8.0, 4.0, 55.0],
                    [8.0, 6.0, 0.0],
                    [8.0, 7.0, 0.0],
                    [8.0, 8.0, 0.0],
                    [8.0, 9.0, 0.0],
                    [9.0, 5.0, 55.0],
                    [9.0, 6.0, 0.0],
                    [9.0, 7.0, 0.0],
                    [9.0, 8.0, 0.0],
                    [9.0, 9.0, 0.0],
                ]
            ),
        ]
        expected_costs = [74.5, 1000000]
        for d, ec in zip(dd, expected_costs):
            n = np.max(d[:, 0].astype(int)) + 1
            # presumably lapjv returns just (x, y) when called without the
            # extra flags used in test_01_02 — TODO confirm its signature.
            x, y = LAPJV.lapjv(d[:, 0].astype(int), d[:, 1].astype(int), d[:, 2])
            # Missing pairs get a big-M cost; the solution must stay below ec.
            c = np.ones((n, n)) * 1000000
            c[d[:, 0].astype(int), d[:, 1].astype(int)] = d[:, 2]
            self.assertTrue(np.sum(c[np.arange(n), x]) < ec)
            self.assertTrue(np.sum(c[y, np.arange(n)]) < ec)
| |
import os
import pytz
from datetime import tzinfo, timedelta, datetime
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import F, Count, ExpressionWrapper, DateField, DateTimeField
from django.db.models.signals import pre_delete, post_save, pre_save
from django.utils.translation import gettext_noop
from model_utils import Choices, FieldTracker
from scholars.courses.signals import update_course_counters, update_course_status_phase
from scholars.users.models import User
from scholars.utils.models import TimeStampable
from scholars.utils.utils import clear_folder
from scholars.utils.utils import import_presentation, send_manually_exception_email, copy_file, writable_permissions
from scholars.utils.slack import Slack
def get_image_path(instance, filename):
    """Build the MEDIA_ROOT-relative upload path for a slide image.

    Used as a Django ``upload_to`` callable, so the (ignored) ``filename``
    parameter is part of the required signature.

    :param instance: a Slide; uses ``instance.course.id`` and ``instance.position``.
    :param filename: original upload name (unused; the path is derived instead).
    :return: ``"<course_id>/images/<position zero-padded to 3>.png"``.
    """
    # Fix: dropped the redundant function-scope `import os` (the module
    # already imports os) and the unused `media_path` computation.
    return os.path.join(
        "%d" % instance.course.id, "images", "%03d.png" % instance.position)
def get_audio_path(instance, filename):
    """Build the MEDIA_ROOT-relative upload path for a slide audio clip.

    Used as a Django ``upload_to`` callable, so the (ignored) ``filename``
    parameter is part of the required signature.

    :param instance: a Slide; uses ``instance.course.id`` and ``instance.position``.
    :param filename: original upload name (unused; the path is derived instead).
    :return: ``"<course_id>/audio/<position zero-padded to 3>.mp3"``.
    """
    # Fix: dropped the redundant function-scope `import os` (the module
    # already imports os) and the unused `media_path` computation.
    return os.path.join(
        "%d" % instance.course.id, "audio", "%03d.mp3" % instance.position)
def get_video_path(id):
    """Return the MEDIA_URL path of a course's rendered video, if present.

    :param id: course primary key (parameter name kept for caller
        compatibility even though it shadows the ``id`` builtin).
    :return: ``MEDIA_URL``-joined path when the file exists on disk under
        ``MEDIA_ROOT``, otherwise ``None``.
    """
    # Fix: dropped the redundant function-scope `import os`; the module
    # already imports os at the top.
    full_name = os.path.join(
        "%d" % id, "videos", "video.mp4")
    media_path = os.path.join(settings.MEDIA_ROOT, full_name)
    if os.path.exists(media_path):
        return os.path.join(settings.MEDIA_URL, full_name)
    return None
class Category(TimeStampable):
    """Subject area a Course can be filed under."""
    # Human-readable category name; must be unique.
    name = models.CharField(max_length=2048, unique=True)
    # Short prefix used when naming a course's Slack channel
    # (see Course.make_in_progress).
    shortcode = models.CharField(max_length=8, null=True, blank=True)

    class Meta:
        verbose_name_plural = "categories"

    def __unicode__(self):
        # Python 2 string representation.
        return "%s" % self.name

    def __str__(self):
        return "%s" % self.name
class CourseQuerySet(models.QuerySet):
    """Queryset helpers for staffing/lifecycle queries on Course."""

    def in_progress(self):
        """Courses whose status is 'in progress'."""
        return self.filter(
            status=Course.STATUS.in_progress
        )

    def include_members_needed(self):
        """Annotate each course with its outstanding staffing needs.

        Adds ``<role>_needed`` (required minus signed-up, may be negative),
        ``members_count``, and ``min_date`` — created_at plus 7 days, the
        earliest moment a proposed course may be promoted.
        """
        return self.select_related().annotate(
            presentation_needed=F('num_presentation') - F('num_presentation_actual'),
            graphics_needed=F('num_graphics') - F('num_graphics_actual'),
            scripting_needed=F('num_scripting') - F('num_scripting_actual'),
            audio_needed=F('num_audio') - F('num_audio_actual'),
            dri_needed=F('num_dri') - F('num_dri_actual'),
            members_count=Count('members'),
            min_date=ExpressionWrapper(F('created_at') + timedelta(days=7), output_field=DateTimeField())
        )

    def meets_requirements_for_in_progress(self):
        """Proposed courses eligible for promotion to 'in progress'.

        Eligible = at least a week old, every role fully staffed
        (``*_needed <= 0``), and 10+ members; most-staffed first.
        """
        return self.include_members_needed().filter(
            status=Course.STATUS.proposed,
            min_date__lt=datetime.now(pytz.timezone('UTC')),
            presentation_needed__lte=0,
            graphics_needed__lte=0,
            scripting_needed__lte=0,
            audio_needed__lte=0,
            dri_needed__lte=0,
            members_count__gte=10
        ).order_by('-members_count')
class Course(TimeStampable):
    """A crowd-produced video course built from a published paper.

    Tracks the lifecycle (STATUS) and, while in progress, the production
    PHASE; staffing requirements vs. sign-ups per role; and the ids of
    external artifacts: Google Slides (``gid``), questionnaire (``qid``),
    Slack channel (``cid``/``channel``) and YouTube video (``yid``).

    Changes in this revision: ``delete()`` no longer uses a bare ``except:``
    (it now catches ``Exception`` so SystemExit/KeyboardInterrupt propagate),
    and the debug ``print`` statements use the parenthesized single-argument
    form, which behaves identically on Python 2 and also parses on Python 3.
    """
    STATUS = Choices(
        (0, 'proposed', 'Proposed'),
        (1, 'in_progress', 'In Progress'),
        (2, 'published', 'Published')
    )
    PHASE = Choices(
        (0, 'onboarding', 'Onboarding'),
        (1, 'reading', 'Reading'),
        (2, 'discussion', 'Discussion'),
        (3, 'slides', 'Slides'),
        (4, 'peer_review', 'Peer Review'),
        (5, 'audio', 'Audio'),
        (6, 'refine', 'Refinement'),
        (7, 'pending_approval', 'Pending Approval'),
    )
    LANGUAGE = Choices(
        ('xx', 'xx', gettext_noop('Unknown')),
        ('af', 'af', gettext_noop('Afrikaans')),
        ('ar', 'ar', gettext_noop('Arabic')),
        ('ast', 'ast', gettext_noop('Asturian')),
        ('az', 'az', gettext_noop('Azerbaijani')),
        ('bg', 'bg', gettext_noop('Bulgarian')),
        ('be', 'be', gettext_noop('Belarusian')),
        ('bn', 'bn', gettext_noop('Bengali')),
        ('br', 'br', gettext_noop('Breton')),
        ('bs', 'bs', gettext_noop('Bosnian')),
        ('ca', 'ca', gettext_noop('Catalan')),
        ('cs', 'cs', gettext_noop('Czech')),
        ('cy', 'cy', gettext_noop('Welsh')),
        ('da', 'da', gettext_noop('Danish')),
        ('de', 'de', gettext_noop('German')),
        ('el', 'el', gettext_noop('Greek')),
        ('en', 'en', gettext_noop('English')),
        ('eo', 'eo', gettext_noop('Esperanto')),
        ('es', 'es', gettext_noop('Spanish')),
        ('et', 'et', gettext_noop('Estonian')),
        ('eu', 'eu', gettext_noop('Basque')),
        ('fa', 'fa', gettext_noop('Persian')),
        ('fi', 'fi', gettext_noop('Finnish')),
        ('fil', 'fil', gettext_noop('Filipino')),
        ('fr', 'fr', gettext_noop('French')),
        ('fy', 'fy', gettext_noop('Frisian')),
        ('ga', 'ga', gettext_noop('Irish')),
        ('gd', 'gd', gettext_noop('Scottish Gaelic')),
        ('gl', 'gl', gettext_noop('Galician')),
        ('he', 'he', gettext_noop('Hebrew')),
        ('hi', 'hi', gettext_noop('Hindi')),
        ('hr', 'hr', gettext_noop('Croatian')),
        ('hu', 'hu', gettext_noop('Hungarian')),
        ('ia', 'ia', gettext_noop('Interlingua')),
        ('id', 'id', gettext_noop('Indonesian')),
        ('io', 'io', gettext_noop('Ido')),
        ('is', 'is', gettext_noop('Icelandic')),
        ('it', 'it', gettext_noop('Italian')),
        ('ja', 'ja', gettext_noop('Japanese')),
        ('ka', 'ka', gettext_noop('Georgian')),
        ('kk', 'kk', gettext_noop('Kazakh')),
        ('km', 'km', gettext_noop('Khmer')),
        ('kn', 'kn', gettext_noop('Kannada')),
        ('ko', 'ko', gettext_noop('Korean')),
        ('lb', 'lb', gettext_noop('Luxembourgish')),
        ('lt', 'lt', gettext_noop('Lithuanian')),
        ('lv', 'lv', gettext_noop('Latvian')),
        ('mk', 'mk', gettext_noop('Macedonian')),
        ('ml', 'ml', gettext_noop('Malayalam')),
        ('mn', 'mn', gettext_noop('Mongolian')),
        ('mr', 'mr', gettext_noop('Marathi')),
        ('my', 'my', gettext_noop('Burmese')),
        ('nb', 'nb', gettext_noop('Norwegian Bokmal')),
        ('ne', 'ne', gettext_noop('Nepali')),
        ('nl', 'nl', gettext_noop('Dutch')),
        ('nn', 'nn', gettext_noop('Norwegian Nynorsk')),
        ('or', 'or', gettext_noop('Odia')),
        ('os', 'os', gettext_noop('Ossetic')),
        ('pa', 'pa', gettext_noop('Punjabi')),
        ('pl', 'pl', gettext_noop('Polish')),
        ('pt', 'pt', gettext_noop('Portuguese')),
        ('ro', 'ro', gettext_noop('Romanian')),
        ('ru', 'ru', gettext_noop('Russian')),
        ('sk', 'sk', gettext_noop('Slovak')),
        ('sl', 'sl', gettext_noop('Slovenian')),
        ('sq', 'sq', gettext_noop('Albanian')),
        ('sr', 'sr', gettext_noop('Serbian')),
        ('sv', 'sv', gettext_noop('Swedish')),
        ('sw', 'sw', gettext_noop('Swahili')),
        ('ta', 'ta', gettext_noop('Tamil')),
        ('te', 'te', gettext_noop('Telugu')),
        ('th', 'th', gettext_noop('Thai')),
        ('tr', 'tr', gettext_noop('Turkish')),
        ('tt', 'tt', gettext_noop('Tatar')),
        ('udm', 'udm', gettext_noop('Udmurt')),
        ('uk', 'uk', gettext_noop('Ukrainian')),
        ('ur', 'ur', gettext_noop('Urdu')),
        ('vi', 'vi', gettext_noop('Vietnamese')),
        ('zh', 'zh', gettext_noop('Mandarin')),
        ('zho', 'zho', gettext_noop('Chinese')),
    )
    doi = models.CharField(max_length=256, null=True, blank=True)
    # A course may be a translation/variant of another course.
    parent = models.ForeignKey('self', null=True, blank=True, related_name="variants")
    version = models.PositiveIntegerField(default=1)
    lang = models.CharField(max_length=8, choices=LANGUAGE, default='en')
    name = models.CharField(max_length=2048)
    owner = models.ForeignKey(User, related_name='courses')
    status = models.PositiveIntegerField(choices=STATUS, default=STATUS.proposed)
    phase = models.PositiveIntegerField(choices=PHASE, default=PHASE.onboarding)
    is_featured = models.BooleanField(default=False)
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, blank=True)
    # Source-paper metadata.
    url = models.CharField(max_length=1024, null=True, blank=True)
    pdf = models.CharField(max_length=1024, null=True, blank=True)
    type = models.CharField(max_length=256, null=True, blank=True)
    publisher = models.CharField(max_length=2048, null=True, blank=True)
    published_on = models.DateField(null=True, blank=True)
    authors = models.CharField(max_length=4096, null=True, blank=True)
    pages = models.CharField(max_length=64, null=True, blank=True)
    # minimum requirements
    num_presentation = models.PositiveIntegerField(default=2, validators=[MinValueValidator(1)])
    num_graphics = models.PositiveIntegerField(default=2, validators=[MinValueValidator(0)])
    num_scripting = models.PositiveIntegerField(default=2, validators=[MinValueValidator(1)])
    num_audio = models.PositiveIntegerField(default=2, validators=[MinValueValidator(1)])
    num_dri = models.PositiveIntegerField(default=2)
    # available
    num_presentation_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
    num_graphics_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
    num_scripting_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
    num_audio_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
    num_dri_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
    # presentation
    gid = models.CharField(max_length=256, null=True, blank=True)
    # questionnaire
    qid = models.CharField(max_length=256, null=True, blank=True)
    # slack
    cid = models.CharField(max_length=256, null=True, blank=True)
    channel = models.CharField(max_length=256, null=True, blank=True)
    # youtube
    yid = models.CharField(max_length=256, null=True, blank=True)
    # timelines
    in_progress_at = models.DateTimeField(null=True, blank=True)
    # phase timelines
    reading_at = models.DateTimeField(null=True, blank=True)
    discussion_at = models.DateTimeField(null=True, blank=True)
    slides_at = models.DateTimeField(null=True, blank=True)
    peer_review_at = models.DateTimeField(null=True, blank=True)
    audio_at = models.DateTimeField(null=True, blank=True)
    refine_at = models.DateTimeField(null=True, blank=True)
    pending_approval_at = models.DateTimeField(null=True, blank=True)
    approved_at = models.DateTimeField(null=True, blank=True)
    # Lets the pre_save signal detect status/phase transitions.
    tracker = FieldTracker(fields=['status', 'phase'])
    objects = models.Manager()
    requirements = CourseQuerySet.as_manager()

    class Meta:
        unique_together = ('name', 'lang', 'version')

    def __unicode__(self):
        return "%s [%d slides][%s]" % (self.name, self.slides.count(), self.lang)

    def __str__(self):
        return "%s [%d slides][%s]" % (self.name, self.slides.count(), self.lang)

    @property
    def phase_display(self):
        """Human-readable phase label."""
        return Course.PHASE[self.phase]

    @property
    def status_display(self):
        """Human-readable status label."""
        return Course.STATUS[self.status]

    @property
    def youtube_display(self):
        """YouTube short URL, or '' when no video id is recorded."""
        if self.yid == '' or self.yid is None:
            return ''
        return 'https://youtu.be/%s' % self.yid

    @property
    def category_display(self):
        """Category name, defaulting to 'General' when unset."""
        if self.category is not None:
            return self.category.name
        return 'General'

    @property
    def lang_display(self):
        """Language display name from the LANGUAGE choices."""
        if self.lang is not None:
            return Course.LANGUAGE[self.lang]
        return 'Unknown'

    @property
    def num_presentation_required(self):
        """Outstanding presentation headcount, clamped at zero."""
        required = self.num_presentation - self.num_presentation_actual
        if required >= 0:
            return required
        return 0

    @property
    def num_graphics_required(self):
        """Outstanding graphics headcount, clamped at zero."""
        required = self.num_graphics - self.num_graphics_actual
        if required >= 0:
            return required
        return 0

    @property
    def num_scripting_required(self):
        """Outstanding scripting headcount, clamped at zero."""
        required = self.num_scripting - self.num_scripting_actual
        if required >= 0:
            return required
        return 0

    @property
    def num_audio_required(self):
        """Outstanding audio headcount, clamped at zero."""
        required = self.num_audio - self.num_audio_actual
        if required >= 0:
            return required
        return 0

    @property
    def num_dri_required(self):
        """Outstanding DRI headcount, clamped at zero."""
        required = self.num_dri - self.num_dri_actual
        if required >= 0:
            return required
        return 0

    @property
    def total_members(self):
        return self.members.count()

    def get_video_url(self):
        """Admin-list HTML link to the rendered video, or None if absent."""
        video_url = get_video_path(self.id)
        if video_url is not None:
            return '<a href="%s" target="_blank">Video</a>' % video_url
        # u'<video width="320" height="240" controls><source src="%s" type="video/mp4">Your browser does not support the video tag.</video>' % video_url
        return None
    get_video_url.short_description = 'Video'
    get_video_url.allow_tags = True

    @property
    def video_url(self):
        """MEDIA_URL path of the rendered video, or None."""
        video_url = None
        if self.id is not None:
            video_url = get_video_path(self.id)
        return video_url

    def total_slides(self):
        if self.slides is not None:
            return self.slides.count()
        return 0
    total_slides.short_description = 'Total Slides'

    def pending_slides(self):
        """Number of slides awaiting approval (for the admin list)."""
        if self.slides is not None:
            return self.slides.filter(status=Slide.STATUS.pending_approval).count()
        return 0
    pending_slides.short_description = 'Pending Approval'

    def delete(self, using=None, keep_parents=False):
        """Delete the course, best-effort removing its media folder first."""
        if self.pk is not None and len(str(self.pk)) > 0:
            folder = os.path.join(settings.MEDIA_ROOT, '%d' % self.pk)
            try:
                clear_folder(folder)
            except Exception:
                # Best-effort cleanup: a missing or locked media folder must
                # not block deletion of the database record.
                pass
        super(Course, self).delete()

    def make_in_progress(self):
        """Provision external artifacts when the course moves to in-progress.

        Copies the questionnaire and presentation templates on Google Drive
        (recording ``qid``/``gid``) and creates a Slack channel named
        ``<category shortcode><id>`` (recording ``cid``/``channel``).
        Does not save the instance; the caller is expected to persist it.
        """
        try:
            if self.id is not None:
                # create questionnaire
                if self.qid is None:
                    response = copy_file(self.id, settings.QUESTIONNAIRE_TEMPLATE, self.name)
                    writable_permissions(response['id'])
                    self.qid = response['id']
                    # self.save()
                # copy_file(self.id, settings.WORKFLOW_TEMPLATE, self.name)
                # create presentation
                if self.gid is None:
                    response = copy_file(self.id, settings.PRESENTATION_TEMPLATE, self.name)
                    writable_permissions(response['id'])
                    self.gid = response['id']
                    # self.save()
                try:
                    # create slack channel
                    if self.cid is None:
                        slack = Slack()
                        result = slack.create_channel('%s%d' % (self.category.shortcode.lower(), self.id))
                        print(result)
                        if 'ok' in result and result['ok']:
                            self.channel = result['channel']['name']
                            self.cid = result['channel']['id']
                except Exception as es:
                    # Slack failures are non-fatal; the Drive artifacts stand.
                    print("slack")
                    print(es)
        except Exception as e:
            # todo call sentry
            print("error while changing status to progress")
            print(e)
# Record status/phase transition timestamps whenever a Course is saved.
pre_save.connect(update_course_status_phase, sender=Course)
class CourseMember(TimeStampable):
    """A user's sign-up for a course, with the roles they volunteer for."""
    EXPERTISE = Choices(
        (1, 'novice', 'Novice'),
        (2, 'primary', 'Primary'),
        (3, 'medium', 'Medium'),
        (4, 'advanced', 'Advanced'),
        (5, 'expert', 'Expert'),
    )
    # One (value, label) pair per IANA timezone name.
    TIMEZONES = [(str(i), str(i)) for i in pytz.all_timezones]
    course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='members')
    member = models.ForeignKey(User, related_name='course_contributions')
    expertise = models.PositiveIntegerField(choices=EXPERTISE, default=EXPERTISE.novice)
    timezone = models.CharField(max_length=128, choices=TIMEZONES, blank=True, null=True)
    time_commitment = models.PositiveIntegerField(default=0)  # hours per week
    # Roles the member volunteers for (a member may offer several).
    presentation = models.BooleanField(default=False)
    graphics = models.BooleanField(default=False)
    scripting = models.BooleanField(default=False)
    audio = models.BooleanField(default=False)
    dri = models.BooleanField(default=False)
    been_dri = models.BooleanField(default=False)
    # field for actual selection
    is_dri = models.BooleanField(default=False)

    class Meta:
        unique_together = ('course', 'member')

    def __str__(self):
        return '%s - %s'% (self.course.name, self.member.username)
# Keep the course's per-role *_actual counters in sync when a member leaves.
pre_delete.connect(update_course_counters, sender=CourseMember)
class Slide(TimeStampable):
    """A single slide of a course: image, narration audio and speaker notes."""
    STATUS = Choices(
        (0, 'draft', 'Draft'),
        (1, 'in_progress', 'In Progress'),
        (2, 'pending_approval', 'Pending Approval'),
        (3, 'published', 'Published')
    )
    # Google Slides id of the source slide.
    gid = models.CharField(max_length=256, null=True, blank=True)
    version = models.PositiveIntegerField(default=1)
    # Media paths are derived from course id + position (see get_*_path).
    image = models.ImageField(upload_to=get_image_path, null=True, blank=True)
    audio = models.FileField(upload_to=get_audio_path, null=True, blank=True)
    notes = models.TextField(max_length=5000, null=True, blank=True)
    # Ordering within the course (see Meta.ordering).
    position = models.PositiveIntegerField(default=0)
    course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='slides')
    status = models.PositiveIntegerField(choices=STATUS, default=STATUS.draft)
    assigned_to = models.ForeignKey(User, related_name='slides', null=True, blank=True)

    class Meta:
        ordering = ['position']

    def __str__(self):
        return '%s %d' % (self.course.name, self.position)

    @property
    def status_text(self):
        """Human-readable status label."""
        return self.STATUS[self.status]

    # @property
    def image_url(self):
        # HTML thumbnail for the admin change list (allow_tags).
        return u'<img src="%s" style="max-width:250px;max-height:250px" />' % self.image.url
    image_url.short_description = 'Image'
    image_url.allow_tags = True

    # @property
    def audio_url(self):
        # HTML audio player for the admin change list (allow_tags).
        return u'<audio controls src="%s" style="max-width:200px;" />' % self.audio.url
    audio_url.short_description = 'Audio'
    audio_url.allow_tags = True
class SlideReview(TimeStampable):
    """Feedback on a slide, raised during peer review or refinement."""
    STATUS = Choices(
        (1, 'proposed', 'Proposed'),
        (2, 'resolved', 'Resolved')
    )
    # Which production stage the feedback was raised in.
    STAGE = Choices(
        (1, 'peer_review', 'Peer Review'),
        (2, 'refine', 'Refine')
    )
    slide = models.ForeignKey(Slide, on_delete=models.CASCADE, related_name='reviews')
    feedback = models.TextField(max_length=5000, null=True, blank=True)
    status = models.PositiveIntegerField(choices=STATUS, default=STATUS.proposed)
    stage = models.PositiveIntegerField(choices=STAGE, default=STAGE.peer_review)
    user = models.ForeignKey(User, related_name='reviews', null=True, blank=True)

    def __str__(self):
        return '%s %s' % (self.slide.course.name, self.STAGE[self.stage])
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import TaskTestBase
from pants.contrib.go import register
from pants.contrib.go.subsystems.fetchers import Fetcher, Fetchers
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_buildgen import GoBuildgen, GoTargetGenerator
class FakeFetcher(Fetcher):
    """Fetcher stub: resolves every import path to one fake root and fails
    the test if buildgen ever attempts a real fetch."""

    def root(self, import_path):
        return 'pantsbuild.org/fake'

    def fetch(self, import_path, dest, rev=None):
        raise AssertionError('No fetches should be executed during go.buildgen')
class GoBuildgenTest(TaskTestBase):
    @classmethod
    def task_type(cls):
        # Task under test for this TaskTestBase subclass.
        return GoBuildgen
    @property
    def alias_groups(self):
        # Needed for test_stitch_deps_remote_existing_rev_respected which re-loads a synthetic target
        # from a generated BUILD file on disk that needs access to Go target aliases
        return register.build_file_aliases()
def test_noop_no_targets(self):
context = self.context()
task = self.create_task(context)
task.execute()
self.assertEqual([], context.targets())
def test_noop_no_applicable_targets(self):
context = self.context(target_roots=[self.make_target(':a', Target)])
expected = context.targets()
task = self.create_task(context)
task.execute()
self.assertEqual(expected, context.targets())
def test_no_local_roots_failure(self):
context = self.context(target_roots=[self.make_target('src/go/fred', GoBinary)])
task = self.create_task(context)
with self.assertRaises(task.NoLocalRootsError):
task.execute()
def test_multiple_local_roots_failure(self):
self.create_dir('src/go')
self.create_dir('src/main/go')
context = self.context(target_roots=[self.make_target('src/go/fred', GoBinary)])
task = self.create_task(context)
with self.assertRaises(task.InvalidLocalRootsError):
task.execute()
def test_unrooted_failure(self):
self.create_dir('src/go')
context = self.context(target_roots=[self.make_target('src2/go/fred', GoBinary)])
task = self.create_task(context)
with self.assertRaises(task.UnrootedLocalSourceError):
task.execute()
def test_multiple_remote_roots_failure(self):
self.create_dir('3rdparty/go')
self.create_dir('src/go')
self.create_dir('src/go_remote')
context = self.context(target_roots=[self.make_target('src/go/fred', GoLibrary)])
task = self.create_task(context)
with self.assertRaises(task.InvalidRemoteRootsError):
task.execute()
def test_existing_targets_wrong_type(self):
self.create_file(relpath='src/go/fred/foo.go', contents=dedent("""
package main
import "fmt"
func main() {
fmt.Printf("Hello World!")
}
"""))
context = self.context(target_roots=[self.make_target('src/go/fred', GoLibrary)])
task = self.create_task(context)
with self.assertRaises(task.GenerationError) as exc:
task.execute()
self.assertEqual(GoTargetGenerator.WrongLocalSourceTargetTypeError, type(exc.exception.cause))
def test_noop_applicable_targets_simple(self):
self.create_file(relpath='src/go/fred/foo.go', contents=dedent("""
package main
import "fmt"
func main() {
fmt.Printf("Hello World!")
}
"""))
context = self.context(target_roots=[self.make_target('src/go/fred', GoBinary)])
expected = context.targets()
task = self.create_task(context)
task.execute()
self.assertEqual(expected, context.targets())
def test_noop_applicable_targets_complete_graph(self):
self.create_file(relpath='src/go/jane/bar.go', contents=dedent("""
package jane
var PublicConstant = 42
"""))
jane = self.make_target('src/go/jane', GoLibrary)
self.create_file(relpath='src/go/fred/foo.go', contents=dedent("""
package main
import (
"fmt"
"jane"
)
func main() {
fmt.Printf("Hello %s!", jane.PublicConstant)
}
"""))
fred = self.make_target('src/go/fred', GoBinary, dependencies=[jane])
context = self.context(target_roots=[fred])
expected = context.targets()
task = self.create_task(context)
task.execute()
self.assertEqual(expected, context.targets())
def stitch_deps_local(self, materialize):
self.set_options(materialize=materialize)
if materialize:
# We need physical directories on disk for `--materialize` since it does scans.
self.create_dir('src/go')
self.create_file(relpath='src/go/jane/bar.go', contents=dedent("""
package jane
var PublicConstant = 42
"""))
self.create_file(relpath='src/go/fred/foo.go', contents=dedent("""
package main
import (
"fmt"
"jane"
)
func main() {
fmt.Printf("Hello %s!", jane.PublicConstant)
}
"""))
if materialize:
# We need physical BUILD files on disk for `--materialize` since it does scans.
self.add_to_build_file('src/go/fred', 'go_binary()')
fred = self.target('src/go/fred')
target_roots = None
else:
fred = self.make_target('src/go/fred', GoBinary)
target_roots = [fred]
context = self.context(target_roots=target_roots)
pre_execute_files = self.buildroot_files()
task = self.create_task(context)
task.execute()
jane = self.target('src/go/jane')
self.assertIsNotNone(jane)
self.assertEqual([jane], fred.dependencies)
self.assertEqual({jane, fred}, set(self.build_graph.targets()))
return pre_execute_files
def test_stitch_deps(self):
pre_execute_files = self.stitch_deps_local(materialize=False)
self.assertEqual(pre_execute_files, self.buildroot_files())
def test_stitch_deps_generate_builds(self):
pre_execute_files = self.stitch_deps_local(materialize=True)
self.assertEqual({'src/go/jane/BUILD'}, self.buildroot_files() - pre_execute_files)
def test_stitch_deps_generate_builds_custom_extension(self):
self.set_options(extension='.gen')
pre_execute_files = self.stitch_deps_local(materialize=True)
# NB: The src/go/fred/BUILD file on disk was deleted and replaced with src/go/fred/BUILD.gen.
self.assertEqual({'src/go/fred/BUILD.gen', 'src/go/jane/BUILD.gen'},
self.buildroot_files() - pre_execute_files)
def stitch_deps_remote(self, remote=True, materialize=False, fail_floating=False):
self.set_options(remote=remote, materialize=materialize, fail_floating=fail_floating)
self.set_options_for_scope(Fetchers.options_scope,
mapping={r'pantsbuild.org/.*':
'{}.{}'.format(FakeFetcher.__module__,
FakeFetcher.__name__)})
if materialize:
# We need physical directories on disk for `--materialize` since it does scans.
self.create_dir('3rdparty/go')
self.create_dir('src/go')
self.create_file(relpath='src/go/jane/bar.go', contents=dedent("""
package jane
import "pantsbuild.org/fake/prod"
var PublicConstant = prod.DoesNotExistButWeShouldNotCareWhenCheckingDepsAndNotInstalling
"""))
self.create_file(relpath='src/go/fred/foo.go', contents=dedent("""
package main
import (
"fmt"
"jane"
)
func main() {
fmt.Printf("Hello %s!", jane.PublicConstant)
}
"""))
if materialize:
# We need physical BUILD files on disk for `--materialize` since it does a scan.
self.add_to_build_file('src/go/fred', 'go_binary()')
fred = self.target('src/go/fred')
target_roots = None
else:
fred = self.make_target('src/go/fred', GoBinary)
target_roots = [fred]
context = self.context(target_roots=target_roots)
pre_execute_files = self.buildroot_files()
task = self.create_task(context)
task.execute()
jane = self.target('src/go/jane')
self.assertIsNotNone(jane)
self.assertEqual([jane], fred.dependencies)
prod = self.target('3rdparty/go/pantsbuild.org/fake:prod')
self.assertIsNotNone(prod)
self.assertEqual([prod], jane.dependencies)
self.assertEqual({prod, jane, fred}, set(self.build_graph.targets()))
return pre_execute_files
def test_stitch_deps_remote(self):
self.create_dir('3rdparty/go')
pre_execute_files = self.stitch_deps_remote(materialize=False)
self.assertEqual(pre_execute_files, self.buildroot_files())
def test_stitch_deps_remote_unused(self):
# An unused remote lib
self.add_to_build_file('3rdparty/go/github.com/user/repo', 'go_remote_library()')
pre_execute_files = self.stitch_deps_remote(materialize=False)
# Check the unused remote lib was not deleted since we can't know if it was actually unused or
# a transitive dep of a used remote_lib.
self.assertIn('3rdparty/go/github.com/user/repo/BUILD', self.buildroot_files())
self.assertEqual(pre_execute_files, self.buildroot_files())
def test_stitch_deps_remote_existing_rev_respected(self):
self.make_target('3rdparty/go/pantsbuild.org/fake:prod',
GoRemoteLibrary,
pkg='prod',
rev='v1.2.3')
pre_execute_files = self.stitch_deps_remote(materialize=True)
self.build_graph.reset() # Force targets to be loaded off disk
self.assertEqual('v1.2.3', self.target('3rdparty/go/pantsbuild.org/fake:prod').rev)
self.assertEqual({'src/go/jane/BUILD', '3rdparty/go/pantsbuild.org/fake/BUILD'},
self.buildroot_files() - pre_execute_files)
def test_stitch_deps_remote_generate_builds(self):
pre_execute_files = self.stitch_deps_remote(materialize=True)
self.assertEqual({'src/go/jane/BUILD', '3rdparty/go/pantsbuild.org/fake/BUILD'},
self.buildroot_files() - pre_execute_files)
def test_stitch_deps_remote_disabled_fails(self):
self.create_dir('3rdparty/go')
with self.assertRaises(GoBuildgen.GenerationError) as exc:
self.stitch_deps_remote(remote=False)
self.assertEqual(GoTargetGenerator.NewRemoteEncounteredButRemotesNotAllowedError,
type(exc.exception.cause))
def test_fail_floating(self):
with self.assertRaises(GoBuildgen.FloatingRemoteError):
self.stitch_deps_remote(remote=True, materialize=True, fail_floating=True)
def test_issues_2395(self):
# Previously, when a remote was indirectly discovered via a scan of locals (no target roots
# presented on the CLI), the remote would be queried for from the build graph under the
# erroneous assumption it had been injected. This would result in a graph miss (BUILD file was
# there on disk, but never loaded via injection) and lead to creation of a new synthetic remote
# target with no rev. The end result was lossy go remote library rev values when using the
# newer, encouraged, target-less invocation of GoBuildgen.
self.set_options(remote=True, materialize=True, fail_floating=True)
self.set_options_for_scope(Fetchers.options_scope,
mapping={r'pantsbuild.org/.*':
'{}.{}'.format(FakeFetcher.__module__,
FakeFetcher.__name__)})
self.add_to_build_file(relpath='3rdparty/go/pantsbuild.org/fake',
target='go_remote_library(rev="v4.5.6")')
self.create_file(relpath='src/go/jane/bar.go', contents=dedent("""
package jane
import "pantsbuild.org/fake"
var PublicConstant = fake.DoesNotExistButWeShouldNotCareWhenCheckingDepsAndNotInstalling
"""))
self.add_to_build_file(relpath='src/go/jane', target='go_library()')
context = self.context(target_roots=[])
pre_execute_files = self.buildroot_files()
task = self.create_task(context)
task.execute()
self.build_graph.reset() # Force targets to be loaded off disk
self.assertEqual('v4.5.6', self.target('3rdparty/go/pantsbuild.org/fake').rev)
self.assertEqual(pre_execute_files, self.buildroot_files())
def test_issues_2616(self):
self.set_options(remote=False)
self.create_file(relpath='src/go/jane/bar.go', contents=dedent("""
package jane
var PublicConstant = 42
"""))
self.create_file(relpath='src/go/fred/foo.go', contents=dedent("""
package main
/*
#include <stdlib.h>
*/
import "C" // C was erroneously categorized as a remote lib in issue 2616.
import (
"fmt"
"jane"
)
func main() {
fmt.Printf("Hello %s!", jane.PublicConstant)
fmt.Printf("Random from C: %d", int(C.random()))
}
"""))
fred = self.make_target('src/go/fred', GoBinary)
context = self.context(target_roots=[fred])
task = self.create_task(context)
task.execute()
jane = self.target('src/go/jane')
self.assertIsNotNone(jane)
self.assertEqual([jane], fred.dependencies)
self.assertEqual({jane, fred}, set(self.build_graph.targets()))
| |
"""Module containing the base class for all pyleus components and a wrapper
class around Storm configurations.
"""
from __future__ import absolute_import
import argparse
from collections import deque
import logging
import logging.config
import os
import sys
import traceback
try:
import simplejson as json
_ = json # pyflakes
except ImportError:
import json
from pyleus.storm import DEFAULT_STREAM
from pyleus.storm import LOG_TRACE
from pyleus.storm import LOG_DEBUG
from pyleus.storm import LOG_INFO
from pyleus.storm import LOG_WARN
from pyleus.storm import LOG_ERROR
from pyleus.storm import StormTuple
from pyleus.storm.serializers.msgpack_serializer import MsgpackSerializer
from pyleus.storm.serializers.json_serializer import JSONSerializer
# Please keep in sync with the java TopologyBuilder
DESCRIBE_OPT = "--describe"
COMPONENT_OPTIONS_OPT = "--options"
PYLEUS_CONFIG_OPT = "--pyleus-config"
DEFAULT_LOGGING_CONFIG_PATH = "pyleus_logging.conf"
JSON_SERIALIZER = "json"
MSGPACK_SERIALIZER = "msgpack"
SERIALIZERS = {
JSON_SERIALIZER: JSONSerializer,
MSGPACK_SERIALIZER: MsgpackSerializer,
}
log = logging.getLogger(__name__)
def _is_namedtuple(obj):
return (type(obj) is type and
issubclass(obj, tuple) and
hasattr(obj, "_fields"))
def _serialize(obj):
    """Given a list, a tuple or a namedtuple, return it as a list. In case of
    None, simply return None.
    """
    if obj is None:
        return None
    if _is_namedtuple(obj):
        # A namedtuple *class*: its declared field names stand in for values.
        return list(obj._fields)
    # A plain list or tuple.
    return list(obj)
def _expand_output_fields(obj):
    """Expand all allowed notations for defining OUTPUT_FIELDS into the
    extended, per-stream one.
    """
    if isinstance(obj, dict):
        # Multiple-streams notation: normalize each stream's fields in place.
        for stream, fields in obj.items():
            obj[stream] = _serialize(fields)
        return obj
    # Single-stream notation: wrap under the default stream.
    return {DEFAULT_STREAM: _serialize(obj)}
class StormConfig(dict):
    """A Storm configuration ``dict`` with some convenience properties.

    You can access the Storm configuration dictionary within a component
    through ``self.conf``.
    """

    def __init__(self, conf):
        # dict.__init__ copies the mapping, equivalent to update() on empty.
        super(StormConfig, self).__init__(conf)

    @property
    def tick_tuple_freq(self):
        """Helper property to access the value of tick tuple frequency stored
        in Storm configuration.

        :return: tick tuple frequency for the component
        :rtype: ``float`` or ``None``

        .. note::
           Bolts not specifying tick tuple frequency default to ``None``,
           while spouts are not supposed to use tick tuples at all.
        """
        return self.get("topology.tick.tuple.freq.secs")
class Component(object):
    """Base class for all pyleus components.

    Subclasses (bolts and spouts) talk to Storm over the input/output streams
    using a pluggable serializer and are driven by :meth:`run`.
    """

    COMPONENT_TYPE = None  # One of "bolt", "spout"

    #: ``list`` or ``dict`` of output fields for the component.
    #:
    #: .. note:: Specify in subclass.
    #:
    #: .. seealso:: :ref:`groupings`
    OUTPUT_FIELDS = None

    #: ``list`` of user-defined options for the component.
    #:
    #: .. note:: Specify in subclass.
    OPTIONS = None

    # Populated in Component.run()

    #: ``dict`` containing options passed to component in the yaml definition
    #: file.
    options = None

    #: :class:`~.StormConfig` containing the Storm configuration for the
    #: component.
    conf = None

    #: ``dict`` containing the Storm context for the component.
    context = None

    #: ``dict`` containing the pyleus configuration passed on the command
    #: line (e.g. ``logging_config_path``, ``serializer``).
    pyleus_config = None

    def __init__(self, input_stream=None, output_stream=None):
        """The Storm component will parse the command line in order
        to figure out if it has been queried for a description or for
        actually running.

        :param input_stream: stream Storm talks to us on; default ``sys.stdin``
        :param output_stream: stream we answer Storm on; default ``sys.stdout``
        """
        super(Component, self).__init__()
        if input_stream is None:
            input_stream = sys.stdin
        if output_stream is None:
            output_stream = sys.stdout
        self._input_stream = input_stream
        self._output_stream = output_stream
        # Storm interleaves command and taskid messages on the same stream;
        # each kind is buffered here while we are waiting for the other one.
        self._pending_commands = deque()
        self._pending_taskids = deque()
        self._serializer = None

    def describe(self):
        """Print to stdout a JSON description of the component.

        The java TopologyBuilder will use the JSON description for topology
        configuration and validation.
        """
        print(json.dumps({
            "component_type": self.COMPONENT_TYPE,
            "output_fields": _expand_output_fields(self.OUTPUT_FIELDS),
            "options": _serialize(self.OPTIONS)}))

    def initialize_logging(self):
        """Load logging configuration file from command line configuration (if
        provided) and initialize logging for the component.
        """
        logging_config_path = self.pyleus_config.get('logging_config_path')
        if logging_config_path:
            logging.config.fileConfig(logging_config_path)
        elif os.path.isfile(DEFAULT_LOGGING_CONFIG_PATH):
            # Fall back to the default config file if present in the cwd.
            logging.config.fileConfig(DEFAULT_LOGGING_CONFIG_PATH)

    def initialize_serializer(self):
        """Load serializer type from command line configuration and instantiate
        the associated
        :class:`~pyleus.storm.serializers.serializer.Serializer`.

        :raises ValueError: if the configured serializer is not one of
            ``SERIALIZERS``.
        """
        serializer = self.pyleus_config.get('serializer')
        if serializer in SERIALIZERS:
            self._serializer = SERIALIZERS[serializer](
                self._input_stream, self._output_stream)
        else:
            # Bug fix: the message was previously passed to ValueError as two
            # separate arguments instead of being formatted, so the serializer
            # name was never interpolated into the error text.
            raise ValueError("Unknown serializer: {0}".format(serializer))

    def setup_component(self):
        """Storm component setup before execution. It will also
        call the initialization method implemented in the subclass.
        """
        self.conf, self.context = self._init_component()
        self.initialize()

    def initialize(self):
        """Called after component has been launched, but before processing any
        tuples. You can use this method to setup your component.

        .. note:: Implement in subclass.
        """
        pass

    def run(self):
        """Entry point for the component running logic.

        Forgetting to call it as following will prevent the topology from
        running.

        :Example:

        .. code-block:: python

           if __name__ == '__main__':
               MyComponent().run()
        """
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument(DESCRIBE_OPT, action="store_true", default=False)
        parser.add_argument(COMPONENT_OPTIONS_OPT, default=None)
        parser.add_argument(PYLEUS_CONFIG_OPT, default=None)
        args = parser.parse_args()

        if args.describe:
            # Description mode: dump metadata for the java TopologyBuilder
            # and exit without running the component.
            self.describe()
            return

        self.options = json.loads(args.options) if args.options else {}
        self.pyleus_config = json.loads(args.pyleus_config) \
            if args.pyleus_config else {}

        try:
            self.initialize_logging()
            self.initialize_serializer()
            self.setup_component()
            self.run_component()
        except:  # noqa: E722 -- deliberately catch everything so any failure
            # is both logged locally and reported back to Storm.
            log.exception("Exception in {0}.run".format(self.COMPONENT_TYPE))
            self.error(traceback.format_exc())

    def run_component(self):
        """Run the main loop of the component. Implemented in Bolt and
        Spout subclasses.
        """
        raise NotImplementedError

    def _msg_is_command(self, msg):
        """Storm differentiates between commands and taskids by whether the
        message is a ``dict`` or ``list``.
        """
        return isinstance(msg, dict)

    def _msg_is_taskid(self, msg):
        """.. seealso:: :meth:`~._msg_is_command`"""
        return isinstance(msg, list)

    def read_command(self):
        """Return the next command from the input stream, whether from the
        _pending_commands queue or the stream directly if the queue is empty.

        In that case, queue any taskids which are received until the next
        command comes in.
        """
        if self._pending_commands:
            return self._pending_commands.popleft()

        msg = self._serializer.read_msg()
        while self._msg_is_taskid(msg):
            self._pending_taskids.append(msg)
            msg = self._serializer.read_msg()
        return msg

    def read_taskid(self):
        """Like :meth:`~.read_command`, but returns the next taskid and queues
        any commands received while reading the input stream.
        """
        if self._pending_taskids:
            return self._pending_taskids.popleft()

        msg = self._serializer.read_msg()
        while self._msg_is_command(msg):
            self._pending_commands.append(msg)
            msg = self._serializer.read_msg()
        return msg

    def read_tuple(self):
        """Read and parse a command into a StormTuple object."""
        cmd = self.read_command()
        return StormTuple(
            cmd['id'], cmd['comp'], cmd['stream'], cmd['task'], cmd['tuple'])

    def _create_pidfile(self, pid_dir, pid):
        """Create a file based on pid used by Storm to watch over the Python
        process.
        """
        # `with` guarantees the handle is closed even if close() would fail.
        with open(os.path.join(pid_dir, str(pid)), 'a'):
            pass

    def _init_component(self):
        """Receive the setup_info dict from the Storm task and report back with
        our pid; also touch a pidfile in the pidDir specified in setup_info.

        :return: tuple of (:class:`~.StormConfig`, context ``dict``)
        """
        setup_info = self._serializer.read_msg()
        pid = os.getpid()
        self._serializer.send_msg({'pid': pid})
        self._create_pidfile(setup_info['pidDir'], pid)
        return StormConfig(setup_info['conf']), setup_info['context']

    def send_command(self, command, opts_dict=None):
        """Merge command with options and send the message through
        :class:`~pyleus.storm.serializers.serializer.Serializer`

        :param command: name of the command (e.g. ``"log"``)
        :param opts_dict: optional ``dict`` of extra fields for the message
        """
        if opts_dict is not None:
            command_dict = dict(opts_dict)
            command_dict['command'] = command
        else:
            command_dict = dict(command=command)
        self._serializer.send_msg(command_dict)

    def log(self, msg, level=LOG_INFO):
        """Send a log message.

        :param msg: log message
        :type msg: ``str``
        :param level:
            log levels defined as constants in :mod:`pyleus.storm`.
            Allowed: ``LOG_TRACE``, ``LOG_DEBUG``, ``LOG_INFO``, ``LOG_WARN``,
            ``LOG_ERROR``. Default: ``LOG_INFO``
        :type level: ``int``
        """
        self.send_command('log', {
            'msg': msg,
            'level': level,
        })

    def log_trace(self, msg):
        """Send a log message with level LOG_TRACE.

        :param msg: log message
        :type msg: ``str``
        """
        self.log(msg, level=LOG_TRACE)

    def log_debug(self, msg):
        """Send a log message with level LOG_DEBUG.

        :param msg: log message
        :type msg: ``str``
        """
        self.log(msg, level=LOG_DEBUG)

    def log_info(self, msg):
        """Send a log message with level LOG_INFO.

        :param msg: log message
        :type msg: ``str``
        """
        self.log(msg, level=LOG_INFO)

    def log_warn(self, msg):
        """Send a log message with level LOG_WARN.

        :param msg: log message
        :type msg: ``str``
        """
        self.log(msg, level=LOG_WARN)

    def log_error(self, msg):
        """Send a log message with level LOG_ERROR.

        :param msg: log message
        :type msg: ``str``
        """
        self.log(msg, level=LOG_ERROR)

    def error(self, msg):
        """Send an error message.

        :param msg: error message
        :type msg: ``str``
        """
        self.send_command('error', {
            'msg': msg,
        })
| |
# Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import errno
import functools
import hashlib
import multiprocessing
import multiprocessing.dummy
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import weakref
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from tensorflow.python.framework import ops
from six.moves.urllib.request import urlopen
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.util.tf_export import keras_export
try:
import queue
except ImportError:
import Queue as queue
try:
  import typing
  # `typing` available (Python 3.5+): use a real isinstance check.
  is_iterator = lambda x: isinstance(x, typing.Iterator)
except ImportError:
  # Python2 uses next, and Python3 should have typing so __next__ is not needed.
  is_iterator = lambda x: hasattr(x, '__iter__') and hasattr(x, 'next')
if sys.version_info[0] == 2:
  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.
    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.
    Args:
      url: url to retrieve.
      filename: where to store the retrieved data locally.
      reporthook: a hook function that will be called once on establishment of
        the network connection and once after each block read thereafter. The
        hook will be passed three arguments; a count of blocks transferred so
        far, a block size in bytes, and the total size of the file.
      data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Stream the response in fixed-size chunks, calling `reporthook` on each.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break
    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  # Python 3: the stdlib urlretrieve (via six) handles proxies correctly.
  from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  # Tensors, arrays and the builtin containers are data, not generators.
  concrete_types = (ops.Tensor, np.ndarray,
                    str, list, tuple, dict, set, frozenset)
  if isinstance(x, concrete_types):
    return False
  return tf_inspect.isgenerator(x) or isinstance(x, Sequence) or is_iterator(x)
def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

  Args:
    file_path: path to the archive file
    path: path to extract the archive file
    archive_format: Archive format to try for extracting the file.
        Options are 'auto', 'tar', 'zip', and None.
        'tar' includes tar, tar.gz, and tar.bz files.
        The default 'auto' is ['tar', 'zip'].
        None or an empty list will return no matches found.

  Returns:
    True if a match was found and an archive extraction was completed,
    False otherwise.
  """
  if archive_format is None:
    return False
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, six.string_types):
    archive_format = [archive_format]

  file_path = path_to_string(file_path)
  path = path_to_string(path)

  for archive_type in archive_format:
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    elif archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile
    else:
      # Bug fix: unrecognized entries used to fall through and either reuse
      # the previous iteration's handlers or raise UnboundLocalError on the
      # first iteration; simply skip them instead.
      continue
    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          # Leave no partial extraction behind before re-raising.
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False
@keras_export('keras.utils.get_file')
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Example:

  ```python
  path_to_downloaded_file = tf.keras.utils.get_file(
      "flower_photos",
      "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
      untar=True)
  ```

  Args:
    fname: Name of the file. If an absolute path `/path/to/file.txt` is
        specified the file will be saved at that location.
    origin: Original URL of the file.
    untar: Deprecated in favor of `extract` argument.
        boolean, whether the file should be decompressed
    md5_hash: Deprecated in favor of `file_hash` argument.
        md5 hash of the file for verification
    file_hash: The expected hash string of the file after download.
        The sha256 and md5 hash algorithms are both supported.
    cache_subdir: Subdirectory under the Keras cache dir where the file is
        saved. If an absolute path `/path/to/folder` is
        specified the file will be saved at that location.
    hash_algorithm: Select the hash algorithm to verify the file.
        options are `'md5'`, `'sha256'`, and `'auto'`.
        The default 'auto' detects the hash algorithm in use.
    extract: True tries extracting the file as an Archive, like tar or zip.
    archive_format: Archive format to try for extracting the file.
        Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
        `'tar'` includes tar, tar.gz, and tar.bz files.
        The default `'auto'` corresponds to `['tar', 'zip']`.
        None or an empty list will return no matches found.
    cache_dir: Location to store cached files, when None it
        defaults to the default directory `~/.keras/`.

  Returns:
    Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  if md5_hash is not None and file_hash is None:
    # Legacy `md5_hash` argument: fold into the file_hash/hash_algorithm pair.
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  if not os.access(datadir_base, os.W_OK):
    # Cache dir not writable (e.g. read-only home): fall back to /tmp/.keras.
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  _makedirs_exist_ok(datadir)
  fname = path_to_string(fname)
  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)
  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True
  if download:
    print('Downloading data from', origin)
    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None
    def dl_progress(count, block_size, total_size):
      # Lazily create the progbar on the first callback (when the total
      # size is known), then advance it on every subsequent block.
      if ProgressTracker.progbar is None:
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)
    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt) as e:
      # Remove any partially-downloaded file before propagating.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None
  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath
  if extract:
    _extract_archive(fpath, datadir, archive_format)
  return fpath
def _makedirs_exist_ok(datadir):
  """Create `datadir` (and parents), tolerating its prior existence."""
  if not six.PY2:
    os.makedirs(datadir, exist_ok=True)  # pylint: disable=unexpected-keyword-arg
    return
  # Python 2 lacks the exist_ok arg: swallow only the EEXIST error instead.
  try:
    os.makedirs(datadir)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
def _resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == 'sha256':
return hashlib.sha256()
if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if isinstance(algorithm, str):
hasher = _resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Args:
    fpath: path to the file being validated
    file_hash: The expected hash string of the file.
        The sha256 and md5 hash algorithms are both supported.
    algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
        The default 'auto' detects the hash algorithm in use.
    chunk_size: Bytes to read at a time, important for large files.

  Returns:
    Whether the file is valid
  """
  hasher = _resolve_hasher(algorithm, file_hash)
  # Compare as strings so bytes/str digests from either side match; the
  # previous `if/else: return True/False` was a redundant idiom.
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class ThreadsafeIter(object):
  """Wrap an iterator with a lock and propagate exceptions to all threads."""

  def __init__(self, it):
    self.it = it
    self.lock = threading.Lock()
    # After a generator throws, all subsequent next() calls raise
    # StopIteration, so under threading a failure in one thread could look
    # like normal exhaustion to the others. Remember the first exception and
    # re-raise it to every later caller.
    self._exception = None

  def __iter__(self):
    return self

  def next(self):
    # Python 2 iterator protocol; delegates to __next__.
    return self.__next__()

  def __next__(self):
    with self.lock:
      if self._exception is not None:
        raise self._exception  # pylint: disable=raising-bad-type
      try:
        return next(self.it)
      except Exception as e:
        self._exception = e
        raise
def threadsafe_generator(f):
  """Decorator wrapping a generator function so its iterator is thread-safe."""
  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    return ThreadsafeIter(f(*args, **kwargs))
  return wrapped
@keras_export('keras.utils.Sequence')
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
  If you want to modify your dataset between epochs you may implement
  `on_epoch_end`. The method `__getitem__` should return a complete batch.

  Notes:

  `Sequence` are a safer way to do multiprocessing. This structure guarantees
  that the network will only train once on each sample per epoch which is not
  the case with generators.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
          batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
          return np.array([
              resize(imread(file_name), (200, 200))
              for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Args:
      index: position of the batch in the Sequence.

    Returns:
      A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batch in the Sequence.

    Returns:
      The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Method called once at the end of every epoch; override if needed."""
    pass

  def __iter__(self):
    """Create a generator that iterates over the Sequence."""
    for batch_index in range(len(self)):
      yield self[batch_index]
def iter_sequence_infinite(seq):
    """Iterate endlessly over `seq`, restarting from the top each time the
    sequence is exhausted.

    Args:
        seq: `Sequence` instance.

    Yields:
        Batches of data from the `Sequence`.
    """
    while True:
        for batch in seq:
            yield batch
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None

# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None  # Only created if needed.
# Idents of worker processes that have reported in via the id queue.
_WORKER_IDS = set()

# When True, get_pool_class() hands out a thread pool even if callers asked
# for multiprocessing; toggled by dont_use_multiprocessing_pool().
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
    """Decorator forcing thread pools while the wrapped callable runs.

    While `f` executes, `get_pool_class` returns a ThreadPool regardless of
    the `use_multiprocessing` argument. The previous flag value is restored
    afterwards.

    Fix: the original restored `_FORCE_THREADPOOL` only on the success path,
    so an exception in `f` left the process permanently forced onto thread
    pools. The restore now lives in a `finally` block.
    """

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        with _FORCE_THREADPOOL_LOCK:
            global _FORCE_THREADPOOL
            old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
            try:
                return f(*args, **kwargs)
            finally:
                # Restore even when `f` raises.
                _FORCE_THREADPOOL = old_force_threadpool

    return wrapped
def get_pool_class(use_multiprocessing):
    """Return the pool class to use, honoring the `_FORCE_THREADPOOL` override."""
    if use_multiprocessing and not _FORCE_THREADPOOL:
        return multiprocessing.Pool
    # Thread-backed pool: used for plain threading, or whenever
    # multiprocessing has been globally disabled.
    return multiprocessing.dummy.Pool  # ThreadPool
def get_worker_id_queue():
    """Lazily create (once) and return the queue used to track worker ids."""
    global _WORKER_ID_QUEUE
    if _WORKER_ID_QUEUE is not None:
        return _WORKER_ID_QUEUE
    _WORKER_ID_QUEUE = multiprocessing.Queue()
    return _WORKER_ID_QUEUE
def init_pool(seqs):
    """Pool initializer: publish `seqs` to workers via `_SHARED_SEQUENCES`."""
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = seqs
def get_index(uid, i):
    """Get the value from the Sequence `uid` at index `i`.

    To allow multiple Sequences to be used at the same time, we use `uid` to
    get a specific one. A single Sequence would cause the validation to
    overwrite the training Sequence.

    Args:
        uid: int, Sequence identifier
        i: index

    Returns:
        The value at index `i`.
    """
    sequence = _SHARED_SEQUENCES[uid]
    return sequence[i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
    """Base class to enqueue inputs.

    The task of an Enqueuer is to use parallelism to speed up preprocessing.
    This is done with processes or threads.

    Example:

    ```python
    enqueuer = SequenceEnqueuer(...)
    enqueuer.start()
    datas = enqueuer.get()
    for data in datas:
        # Use the inputs; training, evaluating, predicting.
        # ... stop sometime.
    enqueuer.stop()
    ```

    The `enqueuer.get()` should be an infinite stream of datas.
    """

    def __init__(self, sequence,
                 use_multiprocessing=False):
        self.sequence = sequence
        self.use_multiprocessing = use_multiprocessing

        # Allocate this enqueuer's unique id; it is the key under which the
        # sequence is published in the module-level _SHARED_SEQUENCES map.
        global _SEQUENCE_COUNTER
        if _SEQUENCE_COUNTER is None:
            try:
                _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
            except OSError:
                # In this case the OS does not allow us to use
                # multiprocessing. We resort to an int
                # for enqueuer indexing.
                _SEQUENCE_COUNTER = 0

        if isinstance(_SEQUENCE_COUNTER, int):
            # Plain-int fallback path (multiprocessing unavailable).
            self.uid = _SEQUENCE_COUNTER
            _SEQUENCE_COUNTER += 1
        else:
            # Doing Multiprocessing.Value += x is not process-safe.
            with _SEQUENCE_COUNTER.get_lock():
                self.uid = _SEQUENCE_COUNTER.value
                _SEQUENCE_COUNTER.value += 1

        self.workers = 0
        self.executor_fn = None  # Set by start(); builds the worker pool.
        self.queue = None  # Holds AsyncResult futures once started.
        self.run_thread = None  # Producer thread created by start().
        self.stop_signal = None  # threading.Event created by start().

    def is_running(self):
        # Started (stop_signal exists) and not yet asked to stop.
        return self.stop_signal is not None and not self.stop_signal.is_set()

    def start(self, workers=1, max_queue_size=10):
        """Starts the handler's workers.

        Args:
            workers: Number of workers.
            max_queue_size: queue size
                (when full, workers could block on `put()`)
        """
        if self.use_multiprocessing:
            self.executor_fn = self._get_executor_init(workers)
        else:
            # We do not need the init since it's threads.
            self.executor_fn = lambda _: get_pool_class(False)(workers)
        self.workers = workers
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _send_sequence(self):
        """Sends current Iterable to all workers."""
        # For new processes that may spawn
        _SHARED_SEQUENCES[self.uid] = self.sequence

    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        Args:
            timeout: maximum time to wait on `thread.join()`
        """
        self.stop_signal.set()
        # Drain the queue under its internal lock so the producer thread
        # unblocks from any pending put() and can observe the stop signal.
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.run_thread.join(timeout)
        _SHARED_SEQUENCES[self.uid] = None

    def __del__(self):
        if self.is_running():
            self.stop()

    @abstractmethod
    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        raise NotImplementedError

    @abstractmethod
    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of workers.

        Returns:
            Function, a Function to initialize the pool
        """
        raise NotImplementedError

    @abstractmethod
    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Returns
            Generator yielding tuples `(inputs, targets)`
                or `(inputs, targets, sample_weights)`.
        """
        raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
    """Builds a Enqueuer from a Sequence.

    Args:
        sequence: A `tf.keras.utils.data_utils.Sequence` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """

    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
        super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
        self.shuffle = shuffle

    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of workers.

        Returns:
            Function, a Function to initialize the pool
        """

        def pool_fn(seqs):
            pool = get_pool_class(True)(
                workers, initializer=init_pool_generator,
                initargs=(seqs, None, get_worker_id_queue()))
            # Track the pool so its status can be checked when a clean
            # multiprocessing state is needed.
            _DATA_POOLS.add(pool)
            return pool

        return pool_fn

    def _wait_queue(self):
        """Wait for the queue to be empty."""
        while True:
            time.sleep(0.1)
            if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
                return

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        sequence = list(range(len(self.sequence)))
        self._send_sequence()  # Share the initial sequence
        while True:
            if self.shuffle:
                random.shuffle(sequence)

            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
                for i in sequence:
                    if self.stop_signal.is_set():
                        return

                    # Blocking put() bounds the number of outstanding
                    # batches at the queue's max size.
                    self.queue.put(
                        executor.apply_async(get_index, (self.uid, i)), block=True)

                # Done with the current epoch, waiting for the final batches
                self._wait_queue()

                if self.stop_signal.is_set():
                    # We're done
                    return

            # Call the internal on epoch end.
            self.sequence.on_epoch_end()
            self._send_sequence()  # Update the pool

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        while self.is_running():
            try:
                # Futures were enqueued in index order, so results come out
                # in order too; timeout lets us re-check is_running().
                inputs = self.queue.get(block=True, timeout=5).get()
                if self.is_running():
                    self.queue.task_done()
                if inputs is not None:
                    yield inputs
            except queue.Empty:
                pass
            except Exception:  # pylint: disable=broad-except
                self.stop()
                six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None, id_queue=None):
    """Initializer function for pool workers.

    Args:
        gens: State which should be made available to worker processes.
        random_seed: An optional value with which to seed child processes.
        id_queue: A multiprocessing Queue of worker ids. This is used to
            indicate that a worker process was created by Keras and can be
            terminated using the cleanup_all_keras_forkpools utility.
    """
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = gens

    worker = multiprocessing.current_process()
    # The name isn't used for anything, but a descriptive one makes
    # orphaned worker processes easy to spot when debugging.
    worker.name = 'Keras_worker_{}'.format(worker.name)

    if random_seed is not None:
        np.random.seed(random_seed + worker.ident)

    if id_queue is not None:
        # If a worker dies during init, the pool will just create a
        # replacement, so a short put timeout is acceptable here.
        id_queue.put(worker.ident, block=True, timeout=0.1)
def next_sample(uid):
    """Gets the next value from the generator `uid`.

    To allow multiple generators to be used at the same time, we use `uid`
    to get a specific one. A single generator would cause the validation to
    overwrite the training generator.

    Args:
        uid: int, generator identifier

    Returns:
        The next value of generator `uid`.
    """
    generator = _SHARED_SEQUENCES[uid]
    return six.next(generator)
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
    """Builds a queue out of a data generator.

    The provided generator can be finite in which case the class will throw
    a `StopIteration` exception.

    Args:
        generator: a generator function which yields data
        use_multiprocessing: use multiprocessing if True, otherwise threading
        random_seed: Initial seed for workers,
            will be incremented by one for each worker.
    """

    def __init__(self, generator,
                 use_multiprocessing=False,
                 random_seed=None):
        super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
        self.random_seed = random_seed

    def _get_executor_init(self, workers):
        """Gets the Pool initializer for multiprocessing.

        Args:
            workers: Number of works.

        Returns:
            A Function to initialize the pool
        """

        def pool_fn(seqs):
            pool = get_pool_class(True)(
                workers, initializer=init_pool_generator,
                initargs=(seqs, self.random_seed, get_worker_id_queue()))
            # Track the pool so its status can be checked when a clean
            # multiprocessing state is needed.
            _DATA_POOLS.add(pool)
            return pool

        return pool_fn

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        self._send_sequence()  # Share the initial generator
        with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
            while True:
                if self.stop_signal.is_set():
                    return

                self.queue.put(
                    executor.apply_async(next_sample, (self.uid,)), block=True)

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                inputs = self.queue.get(block=True).get()
                self.queue.task_done()
                if inputs is not None:
                    yield inputs
        except StopIteration:
            # Special case for finite generators
            last_ones = []
            while self.queue.qsize() > 0:
                last_ones.append(self.queue.get(block=True))
            # Wait for them to complete
            for f in last_ones:
                f.wait()
            # Keep the good ones
            last_ones = [future.get() for future in last_ones if future.successful()]
            for inputs in last_ones:
                if inputs is not None:
                    yield inputs
        except Exception as e:  # pylint: disable=broad-except
            self.stop()
            if 'generator already executing' in str(e):
                raise RuntimeError(
                    'Your generator is NOT thread-safe. '
                    'Keras requires a thread-safe generator when '
                    '`use_multiprocessing=False, workers > 1`. ')
            six.reraise(*sys.exc_info())
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
import mock
import pytest
from datetime import datetime
from django.core.exceptions import SuspiciousOperation
from uuid import UUID
from sentry.coreapi import (
APIError,
APIUnauthorized,
Auth,
ClientApiHelper,
InvalidFingerprint,
InvalidTimestamp,
get_interface,
CspApiHelper,
APIForbidden,
)
from sentry.testutils import TestCase
class BaseAPITest(TestCase):
    """Shared fixture: one user, team, project, project key, and a
    ready-to-use API helper instance."""

    helper_cls = ClientApiHelper

    def setUp(self):
        self.user = self.create_user('coreapi@example.com')
        self.team = self.create_team(name='Foo')
        self.project = self.create_project(team=self.team)
        self.pk = self._default_project_key()
        self.helper = self.helper_cls(
            agent='Awesome Browser', ip_address='198.51.100.0')

    def _default_project_key(self):
        # Ensure the project has at least one key and return it.
        key, _created = self.project.key_set.get_or_create()
        return key
class AuthFromRequestTest(BaseAPITest):
    """Tests for ClientApiHelper.auth_from_request header/GET parsing."""

    def _request(self, meta, get=None):
        # Build a minimal mocked request carrying the given META headers.
        req = mock.Mock()
        req.META = meta
        req.GET = {} if get is None else get
        return req

    def test_valid(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'})
        assert self.helper.auth_from_request(req).public_key == 'value'

    def test_valid_missing_space(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,biz=baz'})
        assert self.helper.auth_from_request(req).public_key == 'value'

    def test_valid_ignore_case(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'SeNtRy sentry_key=value, biz=baz'})
        assert self.helper.auth_from_request(req).public_key == 'value'

    def test_invalid_header_defers_to_GET(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'foobar'},
                            {'sentry_version': '1', 'foo': 'bar'})
        assert self.helper.auth_from_request(req).version == '1'

    def test_invalid_legacy_header_defers_to_GET(self):
        req = self._request({'HTTP_AUTHORIZATION': 'foobar'},
                            {'sentry_version': '1', 'foo': 'bar'})
        assert self.helper.auth_from_request(req).version == '1'

    def test_invalid_header_bad_token(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'Sentryfoo'})
        with self.assertRaises(APIUnauthorized):
            self.helper.auth_from_request(req)

    def test_invalid_header_missing_pair(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'Sentry foo'})
        with self.assertRaises(APIUnauthorized):
            self.helper.auth_from_request(req)

    def test_invalid_malformed_value(self):
        req = self._request({'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,,biz=baz'})
        with self.assertRaises(APIUnauthorized):
            self.helper.auth_from_request(req)

    def test_multiple_auth_suspicious(self):
        # Credentials in both the header and the query string are rejected.
        req = self._request({'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'},
                            {'sentry_version': '1', 'foo': 'bar'})
        with pytest.raises(SuspiciousOperation):
            self.helper.auth_from_request(req)
class ProjectIdFromAuthTest(BaseAPITest):
    """Tests for resolving the project id from Auth credentials."""

    def test_invalid_if_missing_key(self):
        with self.assertRaises(APIUnauthorized):
            self.helper.project_id_from_auth(Auth({}))

    def test_valid_with_key(self):
        auth = Auth({'sentry_key': self.pk.public_key})
        self.assertEquals(self.helper.project_id_from_auth(auth), self.project.id)

    def test_invalid_key(self):
        with self.assertRaises(APIUnauthorized):
            self.helper.project_id_from_auth(Auth({'sentry_key': 'z'}))

    def test_invalid_secret(self):
        with self.assertRaises(APIUnauthorized):
            self.helper.project_id_from_auth(
                Auth({'sentry_key': self.pk.public_key, 'sentry_secret': 'z'}))

    def test_nonascii_key(self):
        with self.assertRaises(APIUnauthorized):
            self.helper.project_id_from_auth(Auth({'sentry_key': '\xc3\xbc'}))
class ProcessFingerprintTest(BaseAPITest):
    """Tests for ClientApiHelper._process_fingerprint."""

    def test_invalid_as_string(self):
        self.assertRaises(
            InvalidFingerprint, self.helper._process_fingerprint, {
                'fingerprint': '2012-01-01T10:30:45',
            }
        )

    def test_invalid_component(self):
        self.assertRaises(
            InvalidFingerprint, self.helper._process_fingerprint, {
                'fingerprint': ['foo', ['bar']],
            }
        )

    def test_simple(self):
        # FIX: this method was named `simple`, so unittest/pytest never
        # collected it and it silently never ran; renamed with the
        # required `test_` prefix.
        data = self.helper._process_fingerprint({
            'fingerprint': ['{{default}}', 1, 'bar', 4.5],
        })
        self.assertTrue('fingerprint' in data)
        self.assertEquals(data['fingerprint'], ['{{default}}', '1', 'bar', '4.5'])
class ProcessDataTimestampTest(BaseAPITest):
    """Tests for _process_data_timestamp parsing and validation."""

    # All valid fixtures represent 2012-01-01T10:30:45 UTC.
    _EPOCH = 1325413845.0

    def _assert_parses_to_epoch(self, raw, current):
        data = self.helper._process_data_timestamp(
            {'timestamp': raw}, current_datetime=current)
        self.assertTrue('timestamp' in data)
        self.assertEquals(data['timestamp'], self._EPOCH)

    def test_iso_timestamp(self):
        self._assert_parses_to_epoch(
            '2012-01-01T10:30:45', datetime(2012, 1, 1, 10, 30, 45))

    def test_iso_timestamp_with_ms(self):
        self._assert_parses_to_epoch(
            '2012-01-01T10:30:45.434', datetime(2012, 1, 1, 10, 30, 45, 434000))

    def test_timestamp_iso_timestamp_with_Z(self):
        self._assert_parses_to_epoch(
            '2012-01-01T10:30:45Z', datetime(2012, 1, 1, 10, 30, 45))

    def test_invalid_timestamp(self):
        with self.assertRaises(InvalidTimestamp):
            self.helper._process_data_timestamp({'timestamp': 'foo'})

    def test_invalid_numeric_timestamp(self):
        with self.assertRaises(InvalidTimestamp):
            self.helper._process_data_timestamp(
                {'timestamp': '100000000000000000000.0'})

    def test_future_timestamp(self):
        with self.assertRaises(InvalidTimestamp):
            self.helper._process_data_timestamp(
                {'timestamp': '2052-01-01T10:30:45Z'})

    def test_long_microseconds_value(self):
        # Sub-second precision is truncated.
        self._assert_parses_to_epoch(
            '2012-01-01T10:30:45.341324Z', datetime(2012, 1, 1, 10, 30, 45))
class ValidateDataTest(BaseAPITest):
    """Tests for ClientApiHelper.validate_data: normalization of incoming
    event payloads and the soft-error records it appends to data['errors']."""

    def test_missing_project_id(self):
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
        })
        assert data['project'] == self.project.id

    @mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
    def test_empty_event_id(self, uuid4):
        data = self.helper.validate_data(self.project, {
            'event_id': '',
        })
        assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'

    @mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
    def test_missing_event_id(self, uuid4):
        data = self.helper.validate_data(self.project, {})
        assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'

    @mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
    def test_invalid_event_id(self, uuid4):
        # Too long (33 chars): id is regenerated, recorded as value_too_long.
        data = self.helper.validate_data(self.project, {
            'event_id': 'a' * 33,
        })
        assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'value_too_long'
        assert data['errors'][0]['name'] == 'event_id'
        assert data['errors'][0]['value'] == 'a' * 33

        # Not a hex UUID: id is regenerated, recorded as invalid_data.
        data = self.helper.validate_data(self.project, {
            'event_id': 'xyz',
        })
        assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'event_id'
        assert data['errors'][0]['value'] == 'xyz'

    def test_invalid_event_id_raises(self):
        # A non-string event_id is a hard API error, not a soft error.
        self.assertRaises(APIError, self.helper.validate_data, self.project, {'event_id': 1})

    def test_unknown_attribute(self):
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
            'foo': 'bar',
        })
        assert 'foo' not in data
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_attribute'
        assert data['errors'][0]['name'] == 'foo'

    def test_invalid_interface_name(self):
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
            'foo.baz': 'bar',
        })
        assert 'foo.baz' not in data
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_attribute'
        assert data['errors'][0]['name'] == 'foo.baz'

    def test_invalid_interface_import_path(self):
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'sentry.interfaces.Exception2': 'bar',
            }
        )
        assert 'sentry.interfaces.Exception2' not in data
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_attribute'
        assert data['errors'][0]['name'] == 'sentry.interfaces.Exception2'

    def test_does_expand_list(self):
        # Interface aliases ('exception') expand to canonical paths.
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'exception':
                [{
                    'type': 'ValueError',
                    'value': 'hello world',
                    'module': 'foo.bar',
                }]
            }
        )
        assert 'sentry.interfaces.Exception' in data

    def test_log_level_as_string(self):
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
            'level': 'error',
        })
        assert data['level'] == 40

    def test_invalid_log_level(self):
        # Unknown level names fall back to 40 and are recorded.
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
            'level': 'foobar',
        })
        assert data['level'] == 40
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'level'
        assert data['errors'][0]['value'] == 'foobar'

    def test_tags_as_string(self):
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
            'tags': 'bar',
        })
        assert 'tags' not in data

    def test_tags_with_spaces(self):
        # Spaces in tag keys are normalized to dashes.
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'tags': {
                    'foo bar': 'baz bar'
                },
            }
        )
        assert data['tags'] == [('foo-bar', 'baz bar')]

    def test_tags_out_of_bounds(self):
        # Over-long keys (>32) and values (>200) are dropped individually.
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'tags': {
                    'f' * 33: 'value',
                    'foo': 'v' * 201,
                    'bar': 'value'
                },
            }
        )
        assert data['tags'] == [('bar', 'value')]
        assert len(data['errors']) == 2

    def test_tags_as_invalid_pair(self):
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'tags': [('foo', 'bar'), ('biz', 'baz', 'boz')],
            }
        )
        assert data['tags'] == [('foo', 'bar')]
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'tags'
        assert data['errors'][0]['value'] == ('biz', 'baz', 'boz')

    def test_reserved_tags(self):
        # Reserved keys such as 'release' cannot be set via tags.
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'tags': [('foo', 'bar'), ('release', 'abc123')],
            }
        )
        assert data['tags'] == [('foo', 'bar')]
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'tags'
        assert data['errors'][0]['value'] == ('release', 'abc123')

    def test_tag_value(self):
        # Tag values containing newlines are rejected.
        data = self.helper.validate_data(
            self.project, {
                'message': 'foo',
                'tags': [('foo', 'bar\n'), ('biz', 'baz')],
            }
        )
        assert data['tags'] == [('biz', 'baz')]
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'tags'
        assert data['errors'][0]['value'] == ('foo', 'bar\n')

    def test_extra_as_string(self):
        data = self.helper.validate_data(self.project, {
            'message': 'foo',
            'extra': 'bar',
        })
        assert 'extra' not in data

    def test_invalid_culprit_raises(self):
        self.assertRaises(APIError, self.helper.validate_data, self.project, {'culprit': 1})

    def test_release_too_long(self):
        data = self.helper.validate_data(self.project, {
            'release': 'a' * 65,
        })
        assert not data.get('release')
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'value_too_long'
        assert data['errors'][0]['name'] == 'release'
        assert data['errors'][0]['value'] == 'a' * 65

    def test_release_as_non_string(self):
        # Non-string releases are coerced to their string form.
        data = self.helper.validate_data(self.project, {
            'release': 42,
        })
        assert data.get('release') == '42'

    def test_distribution_too_long(self):
        data = self.helper.validate_data(self.project, {
            'release': 'a' * 62,
            'dist': 'b' * 65,
        })
        assert not data.get('dist')
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'value_too_long'
        assert data['errors'][0]['name'] == 'dist'
        assert data['errors'][0]['value'] == 'b' * 65

    def test_distribution_bad_char(self):
        data = self.helper.validate_data(self.project, {
            'release': 'a' * 62,
            'dist': '^%',
        })
        assert not data.get('dist')
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'dist'
        assert data['errors'][0]['value'] == '^%'

    def test_distribution_strip(self):
        data = self.helper.validate_data(self.project, {
            'release': 'a' * 62,
            'dist': ' foo ',
        })
        assert data.get('dist') == 'foo'

    def test_distribution_as_non_string(self):
        data = self.helper.validate_data(self.project, {
            'release': '42',
            'dist': 23,
        })
        assert data.get('release') == '42'
        assert data.get('dist') == '23'

    def test_distribution_no_release(self):
        # A dist without an accompanying release is discarded.
        data = self.helper.validate_data(self.project, {
            'dist': 23,
        })
        assert data.get('dist') is None

    def test_valid_platform(self):
        data = self.helper.validate_data(self.project, {
            'platform': 'python',
        })
        assert data.get('platform') == 'python'

    def test_no_platform(self):
        data = self.helper.validate_data(self.project, {})
        assert data.get('platform') == 'other'

    def test_invalid_platform(self):
        data = self.helper.validate_data(self.project, {
            'platform': 'foobar',
        })
        assert data.get('platform') == 'other'

    def test_environment_too_long(self):
        data = self.helper.validate_data(self.project, {
            'environment': 'a' * 65,
        })
        assert not data.get('environment')
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'value_too_long'
        assert data['errors'][0]['name'] == 'environment'
        assert data['errors'][0]['value'] == 'a' * 65

    def test_environment_as_non_string(self):
        data = self.helper.validate_data(self.project, {
            'environment': 42,
        })
        assert data.get('environment') == '42'

    def test_time_spent_too_large(self):
        # 2147483647 is the max signed 32-bit value; one past it is rejected.
        data = self.helper.validate_data(self.project, {
            'time_spent': 2147483647 + 1,
        })
        assert not data.get('time_spent')
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'value_too_long'
        assert data['errors'][0]['name'] == 'time_spent'
        assert data['errors'][0]['value'] == 2147483647 + 1

    def test_time_spent_invalid(self):
        data = self.helper.validate_data(self.project, {
            'time_spent': 'lol',
        })
        assert not data.get('time_spent')
        assert len(data['errors']) == 1
        assert data['errors'][0]['type'] == 'invalid_data'
        assert data['errors'][0]['name'] == 'time_spent'
        assert data['errors'][0]['value'] == 'lol'

    def test_time_spent_non_int(self):
        # Numeric strings are coerced to int.
        data = self.helper.validate_data(self.project, {
            'time_spent': '123',
        })
        assert data['time_spent'] == 123
class SafelyLoadJSONStringTest(BaseAPITest):
    """Tests for ClientApiHelper.safely_load_json_string."""

    def test_valid_payload(self):
        parsed = self.helper.safely_load_json_string('{"foo": "bar"}')
        assert parsed == {'foo': 'bar'}

    def test_invalid_json(self):
        with self.assertRaises(APIError):
            self.helper.safely_load_json_string('{')

    def test_unexpected_type(self):
        # Valid JSON that is not an object is still rejected.
        with self.assertRaises(APIError):
            self.helper.safely_load_json_string('1')
class DecodeDataTest(BaseAPITest):
    """Tests for ClientApiHelper.decode_data."""

    def test_valid_data(self):
        decoded = self.helper.decode_data('foo')
        assert decoded == u'foo'
        # Must be exactly the text type, not a subclass.
        assert type(decoded) == six.text_type

    def test_invalid_data(self):
        with self.assertRaises(APIError):
            self.helper.decode_data('\x99')
class GetInterfaceTest(TestCase):
    """Tests for the get_interface() name resolver."""

    def test_does_not_let_through_disallowed_name(self):
        # Arbitrary module paths must not be importable through the API.
        with self.assertRaises(ValueError):
            get_interface('subprocess')

    def test_allows_http(self):
        from sentry.interfaces.http import Http
        # Both the canonical path and the short alias resolve to Http.
        for name in ('sentry.interfaces.Http', 'request'):
            assert get_interface(name) is Http
class EnsureHasIpTest(BaseAPITest):
    """Tests for ensure_has_ip(): the client IP is only filled in when the
    payload has no IP or carries the '{{auto}}' placeholder."""

    def test_with_remote_addr(self):
        given = {
            'sentry.interfaces.Http': {
                'env': {
                    'REMOTE_ADDR': '192.168.0.1',
                },
            },
        }
        data = given.copy()
        self.helper.ensure_has_ip(data, '127.0.0.1')
        # An explicit REMOTE_ADDR is left untouched.
        assert given == data

    def test_with_user_ip(self):
        given = {
            'sentry.interfaces.User': {
                'ip_address': '192.168.0.1',
            },
        }
        data = given.copy()
        self.helper.ensure_has_ip(data, '127.0.0.1')
        assert given == data

    def test_with_user_auto_ip(self):
        data = {
            'sentry.interfaces.User': {
                'ip_address': '{{auto}}',
            },
        }
        self.helper.ensure_has_ip(data, '127.0.0.1')
        assert data['sentry.interfaces.User']['ip_address'] == '127.0.0.1'

    def test_without_ip_values(self):
        data = {
            'sentry.interfaces.User': {},
            'sentry.interfaces.Http': {
                'env': {},
            },
        }
        self.helper.ensure_has_ip(data, '127.0.0.1')
        assert data['sentry.interfaces.User']['ip_address'] == '127.0.0.1'

    def test_without_any_values(self):
        data = {}
        self.helper.ensure_has_ip(data, '127.0.0.1')
        assert data['sentry.interfaces.User']['ip_address'] == '127.0.0.1'

    def test_with_http_auto_ip(self):
        data = {
            'sentry.interfaces.Http': {
                'env': {
                    'REMOTE_ADDR': '{{auto}}',
                },
            },
        }
        self.helper.ensure_has_ip(data, '127.0.0.1')
        assert data['sentry.interfaces.Http']['env']['REMOTE_ADDR'] == '127.0.0.1'

    def test_with_all_auto_ip(self):
        data = {
            'sentry.interfaces.User': {
                'ip_address': '{{auto}}',
            },
            'sentry.interfaces.Http': {
                'env': {
                    'REMOTE_ADDR': '{{auto}}',
                },
            },
        }
        self.helper.ensure_has_ip(data, '127.0.0.1')
        assert data['sentry.interfaces.Http']['env']['REMOTE_ADDR'] == '127.0.0.1'
        assert data['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
class CspApiHelperTest(BaseAPITest):
helper_cls = CspApiHelper
def test_validate_basic(self):
report = {
"document-uri":
"http://45.55.25.245:8123/csp",
"referrer":
"http://example.com",
"violated-directive":
"img-src https://45.55.25.245:8123/",
"effective-directive":
"img-src",
"original-policy":
"default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
"blocked-uri":
"http://google.com",
"status-code":
200,
"_meta": {
"release": "abc123",
}
}
result = self.helper.validate_data(self.project, report)
assert result['logger'] == 'csp'
assert result['project'] == self.project.id
assert result['release'] == 'abc123'
assert result['errors'] == []
assert 'message' in result
assert 'culprit' in result
assert result['tags'] == [
('effective-directive', 'img-src'),
('blocked-uri', 'http://google.com'),
]
assert result['sentry.interfaces.User'] == {'ip_address': '198.51.100.0'}
assert result['sentry.interfaces.Http'] == {
'url': 'http://45.55.25.245:8123/csp',
'headers': {
'User-Agent': 'Awesome Browser',
'Referer': 'http://example.com'
}
}
@mock.patch('sentry.interfaces.csp.Csp.to_python', mock.Mock(side_effect=Exception))
def test_validate_raises_invalid_interface(self):
with self.assertRaises(APIForbidden):
self.helper.validate_data(self.project, {})
def test_tags_out_of_bounds(self):
report = {
"document-uri":
"http://45.55.25.245:8123/csp",
"referrer":
"http://example.com",
"violated-directive":
"img-src https://45.55.25.245:8123/",
"effective-directive":
"img-src",
"original-policy":
"default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
"blocked-uri":
"v" * 201,
"status-code":
200,
"_meta": {
"release": "abc123",
}
}
result = self.helper.validate_data(self.project, report)
assert result['tags'] == [
('effective-directive', 'img-src'),
]
assert len(result['errors']) == 1
def test_tag_value(self):
    """A blocked-uri containing a disallowed character (trailing newline)
    is excluded from tags and counted as one validation error."""
    payload = {
        "document-uri": "http://45.55.25.245:8123/csp",
        "referrer": "http://example.com",
        "violated-directive": "img-src https://45.55.25.245:8123/",
        "effective-directive": "img-src",
        "original-policy": "default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
        # newline makes the value invalid as a tag
        "blocked-uri": "http://google.com\n",
        "status-code": 200,
        "_meta": {"release": "abc123"},
    }

    validated = self.helper.validate_data(self.project, payload)

    assert validated['tags'] == [('effective-directive', 'img-src')]
    assert len(validated['errors']) == 1
def test_no_tags(self):
    """When every candidate tag is invalid (oversized effective-directive,
    newline in blocked-uri), no 'tags' key is emitted and both rejections
    are recorded as errors."""
    payload = {
        "document-uri": "http://45.55.25.245:8123/csp",
        "referrer": "http://example.com",
        "violated-directive": "img-src https://45.55.25.245:8123/",
        "effective-directive": "v" * 201,  # over the tag-value length cap
        "original-policy": "default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion",
        "blocked-uri": "http://google.com\n",  # newline invalidates the tag
        "status-code": 200,
        "_meta": {"release": "abc123"},
    }

    validated = self.helper.validate_data(self.project, payload)

    assert 'tags' not in validated
    assert len(validated['errors']) == 2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.