CombinedText stringlengths 4 3.42M |
|---|
from flask import render_template, redirect,url_for, g
from flask_googleauth import GoogleAuth
from app import app
# Google OAuth helper bound to the Flask app; provides the auth.required
# decorator used by the login view below.
auth = GoogleAuth(app)
def getEmail(user):
    """Return the e-mail address stored on *user*, or None when missing."""
    email = user.get(u'email')
    return email
def isCMU(email):
    """Return True when *email*'s domain is exactly andrew.cmu.edu.

    Bug fix: getEmail() may return None, and the original crashed with
    TypeError/IndexError on None or an address without '@'; treat those
    as not-CMU instead of raising.
    """
    if not email or '@' not in email:
        return False
    return email.split('@')[1] == 'andrew.cmu.edu'
def andrewID(email):
    """Return the local part (Andrew ID) of an e-mail address."""
    local, _sep, _domain = email.partition('@')
    return local
@app.route('/')
def index():
    """Landing page: CMU accounts see home, others are sent to /error."""
    if not g.user:
        return render_template("index.html")
    email = getEmail(g.user)
    if not isCMU(email):
        return redirect(url_for("error"))
    return render_template("home.html", andrewID=andrewID(email))
@app.route('/error')
def error():
    """Plain-text page shown to signed-in users without a CMU address."""
    sad_face = ":("
    return sad_face
@app.route('/home')
def home():
    """Render the home page."""
    return render_template("home.html")


@app.route('/buy')
def buy():
    """Render the buy page."""
    return render_template("buy.html")


@app.route('/sell')
def sell():
    """Render the sell page with the current user in the template context."""
    return render_template("sell.html", user=g.user)


@app.route('/about')
def about():
    """Render the about page."""
    return render_template("about.html")
@app.route('/login')
@auth.required
def login():
    """Force a Google sign-in, then land the user on the index page.

    Bug fix: the decorators were ordered @auth.required above @app.route,
    which registered the *unwrapped* function with Flask, so authentication
    was never enforced on the route; @app.route must be outermost.  The
    view also returned None, which Flask treats as an error -- redirect to
    the index instead.
    """
    return redirect(url_for("index"))
fix views again?
from flask import render_template, redirect,url_for, g
from flask_googleauth import GoogleAuth
from app import app
# Google OAuth helper bound to the Flask app; provides the auth.required
# decorator used by the login view below.
auth = GoogleAuth(app)
def getEmail(user):
    """Look up the e-mail address on the user record; None when absent."""
    address = user.get(u'email', None)
    return address
def isCMU(email):
    """Return True when *email*'s domain is exactly andrew.cmu.edu.

    Bug fix: getEmail() may return None, and the original crashed with
    TypeError/IndexError on None or an address without '@'; treat those
    as not-CMU instead of raising.
    """
    if not email or '@' not in email:
        return False
    return email.split('@')[1] == 'andrew.cmu.edu'
def andrewID(email):
    """Everything before the '@' -- the user's Andrew ID."""
    return email.split('@', 1)[0]
@app.route('/')
def index():
    """Landing page: CMU accounts see home, others are sent to /error."""
    if g.user:
        email = getEmail(g.user)
        if isCMU(email):
            # Bug fix: removed a stray Python-2 `print` debug statement that
            # leaked the Andrew ID to stdout on every request (and is a
            # syntax error under Python 3).
            return render_template("home.html", andrewID=andrewID(email))
        return redirect(url_for("error"))
    return render_template("index.html")
@app.route('/error')
def error():
    """Plain-text page shown to signed-in users without a CMU address."""
    return "%s(" % ":"
@app.route('/home')
def home():
    """Render the home page."""
    return render_template("home.html")


@app.route('/buy')
def buy():
    """Render the buy page."""
    return render_template("buy.html")


@app.route('/sell')
def sell():
    """Render the sell page with the current user in the template context."""
    return render_template("sell.html", user=g.user)


@app.route('/about')
def about():
    """Render the about page."""
    return render_template("about.html")
@app.route('/login')
@auth.required
def login():
    """Force a Google sign-in, then land the user on the index page.

    Bug fix: the decorators were ordered @auth.required above @app.route,
    which registered the *unwrapped* function with Flask, so authentication
    was never enforced on the route; @app.route must be outermost.  The
    view also returned None, which Flask treats as an error -- redirect to
    the index instead.
    """
    return redirect(url_for("index"))
|
from sys import version_info
from django.conf import settings
from django.db.models import Exists
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models.query import Q
from django.http.response import HttpResponseBadRequest
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import FilterSet
from django_filters.rest_framework import ModelChoiceFilter
from morango.models import InstanceIDModel
from morango.models import SyncSession
from rest_framework import mixins
from rest_framework import status
from rest_framework import views
from rest_framework import viewsets
from rest_framework.response import Response
import kolibri
from .models import DevicePermissions
from .models import DeviceSettings
from .models import UserSyncStatus
from .permissions import NotProvisionedCanPost
from .permissions import UserHasAnyDevicePermissions
from .serializers import DevicePermissionsSerializer
from .serializers import DeviceProvisionSerializer
from .serializers import DeviceSettingsSerializer
from kolibri.core.api import ReadOnlyValuesViewset
from kolibri.core.auth.api import KolibriAuthPermissions
from kolibri.core.auth.api import KolibriAuthPermissionsFilter
from kolibri.core.auth.models import Collection
from kolibri.core.content.permissions import CanManageContent
from kolibri.utils.conf import OPTIONS
from kolibri.utils.server import get_urls
from kolibri.utils.server import installation_type
from kolibri.utils.system import get_free_space
from kolibri.utils.time_utils import local_now
class DevicePermissionsViewSet(viewsets.ModelViewSet):
    # CRUD API over DevicePermissions rows; access is governed by the
    # standard Kolibri auth permission and filter classes.
    queryset = DevicePermissions.objects.all()
    serializer_class = DevicePermissionsSerializer
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter,)
class DeviceProvisionView(viewsets.GenericViewSet):
    """Endpoint used to provision this device during initial setup."""

    permission_classes = (NotProvisionedCanPost,)
    serializer_class = DeviceProvisionSerializer

    def create(self, request, *args, **kwargs):
        """Validate the provisioning payload, apply it, and echo it back."""
        in_serializer = self.get_serializer(data=request.data)
        in_serializer.is_valid(raise_exception=True)
        saved = in_serializer.save()
        out_serializer = self.get_serializer(saved)
        return Response(out_serializer.data, status=status.HTTP_201_CREATED)
class FreeSpaceView(mixins.ListModelMixin, viewsets.GenericViewSet):
    """Report free disk space, optionally for a specific path."""

    permission_classes = (CanManageContent,)

    def list(self, request):
        path = request.query_params.get("path")
        if path == "Content":
            # Special-cased token meaning "the configured content directory".
            free = get_free_space(OPTIONS["Paths"]["CONTENT_DIR"])
        elif path is not None:
            free = get_free_space(path)
        else:
            free = get_free_space()
        return Response({"freespace": free})
class DeviceInfoView(views.APIView):
    """Return diagnostic information about this Kolibri device.

    Superusers get the full report; any other user with device permissions
    only sees the Kolibri version and free content-storage space.
    """

    permission_classes = (UserHasAnyDevicePermissions,)

    def get(self, request, format=None):
        info = {}
        info["version"] = kolibri.__version__
        # Bug fix: the original unpacked into a local named `status`, which
        # shadowed the imported rest_framework `status` module inside this
        # method.  The server-status value is unused, so discard it.
        _server_status, urls = get_urls()
        if not urls:
            # Will not return anything when running the debug server, so at
            # least return the current URL.
            urls = [
                request.build_absolute_uri(OPTIONS["Deployment"]["URL_PATH_PREFIX"])
            ]
        filtered_urls = [
            url for url in urls if "127.0.0.1" not in url and "localhost" not in url
        ]
        if filtered_urls:
            urls = filtered_urls
        info["urls"] = urls
        db_engine = settings.DATABASES["default"]["ENGINE"]
        if db_engine.endswith("sqlite3"):
            # Return path to .sqlite file (usually in KOLIBRI_HOME folder)
            info["database_path"] = settings.DATABASES["default"]["NAME"]
        elif db_engine.endswith("postgresql"):
            info["database_path"] = "postgresql"
        else:
            info["database_path"] = "unknown"
        instance_model = InstanceIDModel.get_or_create_current_instance()[0]
        info["device_id"] = instance_model.id
        info["os"] = instance_model.platform
        info["content_storage_free_space"] = get_free_space(
            OPTIONS["Paths"]["CONTENT_DIR"]
        )
        # Localized server time; the named timezone below carries the zone
        # (the time only includes the offset).
        info["server_time"] = local_now()
        info["server_timezone"] = settings.TIME_ZONE
        info["installer"] = installation_type()
        info["python_version"] = "{major}.{minor}.{micro}".format(
            major=version_info.major,
            minor=version_info.minor,
            micro=version_info.micro,
        )
        if not request.user.is_superuser:
            # If user is not superuser, return just free space available and
            # the Kolibri version.
            for key in (
                "urls",
                "database_path",
                "device_id",
                "os",
                "server_time",
                "server_timezone",
                "installer",
                "python_version",
            ):
                del info[key]
        return Response(info)
class DeviceSettingsView(views.APIView):
    """Read and update the singleton DeviceSettings record."""

    permission_classes = (UserHasAnyDevicePermissions,)

    def get(self, request):
        # Local renamed from `settings` so django.conf.settings is not
        # shadowed inside the method.
        device_settings = DeviceSettings.objects.get()
        return Response(DeviceSettingsSerializer(device_settings).data)

    def patch(self, request):
        device_settings = DeviceSettings.objects.get()
        serializer = DeviceSettingsSerializer(device_settings, data=request.data)
        if not serializer.is_valid():
            return HttpResponseBadRequest(serializer.errors)
        serializer.save()
        return Response(serializer.data)
class DeviceNameView(views.APIView):
    """Read and update the device's display name."""

    permission_classes = (UserHasAnyDevicePermissions,)

    def get(self, request):
        device_settings = DeviceSettings.objects.get()
        return Response({"name": device_settings.name})

    def patch(self, request):
        device_settings = DeviceSettings.objects.get()
        try:
            device_settings.name = request.data["name"]
        except KeyError:
            # Bug fix: a payload without "name" previously raised KeyError
            # and surfaced as a 500; report it as a client error instead.
            return HttpResponseBadRequest('"name" is required')
        device_settings.save()
        return Response({"name": device_settings.name})
class SyncStatusFilter(FilterSet):
    """Filter UserSyncStatus rows, optionally by collection membership."""

    member_of = ModelChoiceFilter(
        method="filter_member_of", queryset=Collection.objects.all()
    )

    def filter_member_of(self, queryset, name, value):
        # A user belongs to a collection either through an explicit
        # membership or because the collection is their facility.
        by_membership = Q(user__memberships__collection=value)
        by_facility = Q(user__facility=value)
        return queryset.filter(by_membership | by_facility)

    class Meta:
        model = UserSyncStatus
        fields = ["user", "member_of"]
class UserSyncStatusViewSet(ReadOnlyValuesViewset):
    """Read-only listing of per-user sync status."""

    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
    queryset = UserSyncStatus.objects.all()
    filter_class = SyncStatusFilter

    values = (
        "id",
        "queued",
        "last_synced",
        "active",
        "user",
        "user_id",
    )

    def get_queryset(self):
        # An unfiltered clone of the base queryset (equivalent to .all()).
        return UserSyncStatus.objects.filter()

    def annotate_queryset(self, queryset):
        """Attach `last_synced` and `active` annotations to each row."""
        annotated = queryset.annotate(
            last_synced=Max("sync_session__last_activity_timestamp")
        )
        active_sessions = SyncSession.objects.filter(
            id=OuterRef("sync_session__pk"), active=True
        )
        return annotated.annotate(active=Exists(active_sessions))
Use TransferSession active for more granular indication of a sync currently being in progress.
from sys import version_info
from django.conf import settings
from django.db.models import Exists
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models.query import Q
from django.http.response import HttpResponseBadRequest
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import FilterSet
from django_filters.rest_framework import ModelChoiceFilter
from morango.models import InstanceIDModel
from morango.models import TransferSession
from rest_framework import mixins
from rest_framework import status
from rest_framework import views
from rest_framework import viewsets
from rest_framework.response import Response
import kolibri
from .models import DevicePermissions
from .models import DeviceSettings
from .models import UserSyncStatus
from .permissions import NotProvisionedCanPost
from .permissions import UserHasAnyDevicePermissions
from .serializers import DevicePermissionsSerializer
from .serializers import DeviceProvisionSerializer
from .serializers import DeviceSettingsSerializer
from kolibri.core.api import ReadOnlyValuesViewset
from kolibri.core.auth.api import KolibriAuthPermissions
from kolibri.core.auth.api import KolibriAuthPermissionsFilter
from kolibri.core.auth.models import Collection
from kolibri.core.content.permissions import CanManageContent
from kolibri.utils.conf import OPTIONS
from kolibri.utils.server import get_urls
from kolibri.utils.server import installation_type
from kolibri.utils.system import get_free_space
from kolibri.utils.time_utils import local_now
class DevicePermissionsViewSet(viewsets.ModelViewSet):
    # CRUD API over DevicePermissions rows; access is governed by the
    # standard Kolibri auth permission and filter classes.
    queryset = DevicePermissions.objects.all()
    serializer_class = DevicePermissionsSerializer
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter,)
class DeviceProvisionView(viewsets.GenericViewSet):
    # Endpoint used during initial device setup; NotProvisionedCanPost
    # presumably rejects requests once the device is provisioned -- confirm
    # against the permission class.
    permission_classes = (NotProvisionedCanPost,)
    serializer_class = DeviceProvisionSerializer

    def create(self, request, *args, **kwargs):
        # Validate the provisioning payload, apply it, and echo the saved
        # data back with a 201.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.save()
        output_serializer = self.get_serializer(data)
        return Response(output_serializer.data, status=status.HTTP_201_CREATED)
class FreeSpaceView(mixins.ListModelMixin, viewsets.GenericViewSet):
    # Reports free disk space for an optional ?path= query parameter.
    permission_classes = (CanManageContent,)

    def list(self, request):
        path = request.query_params.get("path")
        if path is None:
            # No path given: free space at the default location.
            free = get_free_space()
        elif path == "Content":
            # Sentinel value meaning the configured content directory.
            free = get_free_space(OPTIONS["Paths"]["CONTENT_DIR"])
        else:
            free = get_free_space(path)
        return Response({"freespace": free})
class DeviceInfoView(views.APIView):
    """Return diagnostic information about this Kolibri device.

    Superusers get the full report; any other user with device permissions
    only sees the Kolibri version and free content-storage space.
    """

    permission_classes = (UserHasAnyDevicePermissions,)

    def get(self, request, format=None):
        info = {}
        info["version"] = kolibri.__version__
        # Bug fix: the original unpacked into a local named `status`, which
        # shadowed the imported rest_framework `status` module inside this
        # method.  The server-status value is unused, so discard it.
        _server_status, urls = get_urls()
        if not urls:
            # Will not return anything when running the debug server, so at
            # least return the current URL.
            urls = [
                request.build_absolute_uri(OPTIONS["Deployment"]["URL_PATH_PREFIX"])
            ]
        filtered_urls = [
            url for url in urls if "127.0.0.1" not in url and "localhost" not in url
        ]
        if filtered_urls:
            urls = filtered_urls
        info["urls"] = urls
        db_engine = settings.DATABASES["default"]["ENGINE"]
        if db_engine.endswith("sqlite3"):
            # Return path to .sqlite file (usually in KOLIBRI_HOME folder)
            info["database_path"] = settings.DATABASES["default"]["NAME"]
        elif db_engine.endswith("postgresql"):
            info["database_path"] = "postgresql"
        else:
            info["database_path"] = "unknown"
        instance_model = InstanceIDModel.get_or_create_current_instance()[0]
        info["device_id"] = instance_model.id
        info["os"] = instance_model.platform
        info["content_storage_free_space"] = get_free_space(
            OPTIONS["Paths"]["CONTENT_DIR"]
        )
        # Localized server time; the named timezone below carries the zone
        # (the time only includes the offset).
        info["server_time"] = local_now()
        info["server_timezone"] = settings.TIME_ZONE
        info["installer"] = installation_type()
        info["python_version"] = "{major}.{minor}.{micro}".format(
            major=version_info.major,
            minor=version_info.minor,
            micro=version_info.micro,
        )
        if not request.user.is_superuser:
            # If user is not superuser, return just free space available and
            # the Kolibri version.
            for key in (
                "urls",
                "database_path",
                "device_id",
                "os",
                "server_time",
                "server_timezone",
                "installer",
                "python_version",
            ):
                del info[key]
        return Response(info)
class DeviceSettingsView(views.APIView):
    # Read/update the singleton DeviceSettings record.
    permission_classes = (UserHasAnyDevicePermissions,)

    def get(self, request):
        # NOTE(review): this local shadows django.conf.settings within the
        # method; harmless here but worth renaming.
        settings = DeviceSettings.objects.get()
        return Response(DeviceSettingsSerializer(settings).data)

    def patch(self, request):
        settings = DeviceSettings.objects.get()
        serializer = DeviceSettingsSerializer(settings, data=request.data)
        if not serializer.is_valid():
            # Invalid payload: report field errors as a 400.
            return HttpResponseBadRequest(serializer.errors)
        serializer.save()
        return Response(serializer.data)
class DeviceNameView(views.APIView):
    """Read and update the device's display name."""

    permission_classes = (UserHasAnyDevicePermissions,)

    def get(self, request):
        device_settings = DeviceSettings.objects.get()
        return Response({"name": device_settings.name})

    def patch(self, request):
        device_settings = DeviceSettings.objects.get()
        try:
            device_settings.name = request.data["name"]
        except KeyError:
            # Bug fix: a payload without "name" previously raised KeyError
            # and surfaced as a 500; report it as a client error instead.
            return HttpResponseBadRequest('"name" is required')
        device_settings.save()
        return Response({"name": device_settings.name})
class SyncStatusFilter(FilterSet):
    # Adds a `member_of` collection filter on top of the plain field filters.
    member_of = ModelChoiceFilter(
        method="filter_member_of", queryset=Collection.objects.all()
    )

    def filter_member_of(self, queryset, name, value):
        # A user matches if they hold a membership in the collection or the
        # collection is their facility.
        return queryset.filter(
            Q(user__memberships__collection=value) | Q(user__facility=value)
        )

    class Meta:
        model = UserSyncStatus
        fields = ["user", "member_of"]
class UserSyncStatusViewSet(ReadOnlyValuesViewset):
    # Read-only per-user sync status listing.
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
    queryset = UserSyncStatus.objects.all()
    filter_class = SyncStatusFilter

    values = (
        "id",
        "queued",
        "last_synced",
        "active",
        "user",
        "user_id",
    )

    def get_queryset(self):
        # An unfiltered clone of the base queryset (equivalent to .all()).
        return UserSyncStatus.objects.filter()

    def annotate_queryset(self, queryset):
        # Most recent activity across the user's sync sessions.
        queryset = queryset.annotate(
            last_synced=Max("sync_session__last_activity_timestamp")
        )
        # An active TransferSession (rather than the SyncSession itself)
        # gives a more granular indication that a sync is currently in
        # progress.
        active_transfer_sessions = TransferSession.objects.filter(
            sync_session=OuterRef("sync_session"), active=True
        )
        queryset = queryset.annotate(active=Exists(active_transfer_sessions))
        return queryset
|
""" Utility functions related to HTTP requests """
import re
import logging
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.test.client import RequestFactory
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
# accommodates course api urls, excluding any course api routes that do not fall under v*/courses, such as v1/blocks.
COURSE_REGEX = re.compile(r'^(.*?/courses/)(?!v[0-9]+/[^/]+){}'.format(settings.COURSE_ID_PATTERN))
def safe_get_host(request):
    """
    Get the host name for this request, as safely as possible.

    If ALLOWED_HOSTS is properly set, this calls request.get_host;
    otherwise, this returns whatever settings.SITE_NAME is set to.
    This ensures we will never accept an untrusted value of get_host()
    """
    allowed = settings.ALLOWED_HOSTS
    hosts_are_restricted = isinstance(allowed, (list, tuple)) and '*' not in allowed
    if hosts_are_restricted:
        return request.get_host()
    return configuration_helpers.get_value('site_domain', settings.SITE_NAME)
def course_id_from_url(url):
    """
    Extracts the course_id from the given `url`.
    """
    if not url:
        return None

    # Old-style (slash-separated) course ids get the deprecated pattern and
    # parser.  Local name avoids shadowing the module-level COURSE_REGEX.
    if '/' in url:
        course_pattern = re.compile(r'^.*/courses/(?P<course_id>[^/]+/[^/]+/[^/]+)')
        key_generator = SlashSeparatedCourseKey.from_deprecated_string
    else:
        course_pattern = re.compile(r'^.*?/courses/(?P<course_id>[a-zA-Z0-9_+\/:]+)')
        key_generator = CourseKey.from_string

    match = course_pattern.match(url)
    if match is None:
        return None
    course_id = match.group('course_id')
    if course_id is None:
        return None

    try:
        return key_generator(course_id)
    except InvalidKeyError:
        log.warning(
            'unable to parse course_id "{}"'.format(course_id),
            exc_info=True
        )
        return None
class RequestMock(RequestFactory):
    """
    RequestMock is used to create generic/dummy request objects in
    scenarios where a regular request might not be available for use
    """
    def request(self, **request):
        "Construct a generic request object."
        request = RequestFactory.request(self, **request)
        handler = BaseHandler()
        handler.load_middleware()
        # Run request middleware by hand (normally the handler does this).
        # NOTE(review): relies on the private `_request_middleware` list,
        # which only exists after load_middleware() and was removed in
        # newer Django versions -- confirm against the Django in use.
        for middleware_method in handler._request_middleware:
            if middleware_method(request):
                # A middleware short-circuited with a response; a mock
                # request that never reached the view is useless here.
                raise Exception("Couldn't create request mock object - "
                                "request middleware returned a response")
        return request
class RequestMockWithoutMiddleware(RequestMock):
    """
    Creates generic/dummy request objects like its parent, but skips
    loading middleware entirely and just guarantees a `session` attribute.
    """
    def request(self, **request):
        "Construct a generic request object."
        built = RequestFactory.request(self, **request)
        if not hasattr(built, 'session'):
            built.session = {}
        return built
mattdrayer/api-gradebook-generator-fix: Added session dummy to mock
""" Utility functions related to HTTP requests """
import re
import logging
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.test.client import RequestFactory
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
# accommodates course api urls, excluding any course api routes that do not fall under v*/courses, such as v1/blocks.
COURSE_REGEX = re.compile(r'^(.*?/courses/)(?!v[0-9]+/[^/]+){}'.format(settings.COURSE_ID_PATTERN))
def safe_get_host(request):
    """
    Get the host name for this request, as safely as possible.
    If ALLOWED_HOSTS is properly set, this calls request.get_host;
    otherwise, this returns whatever settings.SITE_NAME is set to.
    This ensures we will never accept an untrusted value of get_host()
    """
    # Only trust request.get_host() when ALLOWED_HOSTS is an explicit
    # list/tuple that does not wildcard everything.
    if isinstance(settings.ALLOWED_HOSTS, (list, tuple)) and '*' not in settings.ALLOWED_HOSTS:
        return request.get_host()
    else:
        return configuration_helpers.get_value('site_domain', settings.SITE_NAME)
def course_id_from_url(url):
    """
    Extracts the course_id from the given `url`.

    Returns a course key object, or None when the url is empty, does not
    match, or the extracted id cannot be parsed.
    """
    if not url:
        return None

    # Old-style ("deprecated") slash-separated course ids.
    # NOTE(review): `'/' in url` is true for nearly any URL path, so this
    # effectively always takes the deprecated branch -- confirm intent.
    deprecated = False
    if '/' in url:
        deprecated = True

    if deprecated:
        # NOTE(review): this local COURSE_REGEX shadows the module-level
        # constant of the same name.
        COURSE_REGEX = re.compile(r'^.*/courses/(?P<course_id>[^/]+/[^/]+/[^/]+)')
        key_generator = SlashSeparatedCourseKey.from_deprecated_string
    else:
        COURSE_REGEX = re.compile(r'^.*?/courses/(?P<course_id>[a-zA-Z0-9_+\/:]+)')
        key_generator = CourseKey.from_string

    match = COURSE_REGEX.match(url)
    if match is None:
        return None
    course_id = match.group('course_id')
    if course_id is None:
        return None

    try:
        course_key = key_generator(course_id)
    except InvalidKeyError:
        # Log and swallow: an unparseable id is treated the same as no match.
        log.warning(
            'unable to parse course_id "{}"'.format(course_id),
            exc_info=True
        )
        return None
    return course_key
class RequestMock(RequestFactory):
    """
    RequestMock is used to create generic/dummy request objects in
    scenarios where a regular request might not be available for use
    """
    def request(self, **request):
        "Construct a generic request object."
        request = RequestFactory.request(self, **request)
        handler = BaseHandler()
        handler.load_middleware()
        # Run request middleware by hand (normally the handler does this).
        # NOTE(review): relies on the private `_request_middleware` list,
        # which only exists after load_middleware() and was removed in
        # newer Django versions -- confirm against the Django in use.
        for middleware_method in handler._request_middleware:
            if middleware_method(request):
                # A middleware short-circuited with a response; a mock
                # request that never reached the view is useless here.
                raise Exception("Couldn't create request mock object - "
                                "request middleware returned a response")
        return request
class RequestMockWithoutMiddleware(RequestMock):
    """
    RequestMockWithoutMiddleware is used to create generic/dummy request
    objects in scenarios where a regular request might not be available for use.
    It's similar to its parent except for the fact that it skips the loading
    of middleware.
    """
    def request(self, **request):
        "Construct a generic request object."
        request = RequestFactory.request(self, **request)
        # Some consumers expect a session; provide a dummy dict when the
        # bare factory request lacks one.
        if not hasattr(request, 'session'):
            request.session = {}
        return request
|
from __future__ import unicode_literals
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
def execute():
    """Migration: Auto Repeat is no longer submittable in v12.

    Strips submit/cancel/amend permissions, flattens docstatus back to
    draft, and mirrors each Auto Repeat onto its reference doctype via a
    hidden `auto_repeat` custom Link field.
    """
    #auto repeat is not submittable in v12
    frappe.reload_doc("automation", "doctype", "Auto Repeat")
    # Remove submit/cancel/amend rights and reset any submitted (1) or
    # cancelled (2) documents to draft (0).
    frappe.db.sql("update `tabDocPerm` set submit=0, cancel=0, amend=0 where parent='Auto Repeat'")
    frappe.db.sql("update `tabAuto Repeat` set docstatus=0 where docstatus=1 or docstatus=2")
    for entry in frappe.get_all("Auto Repeat"):
        doc = frappe.get_doc("Auto Repeat", entry.name)
        #create custom field for allow auto repeat
        fields = frappe.get_meta(doc.reference_doctype).fields
        # Append the new field after the last existing field of the doctype.
        insert_after = fields[len(fields) - 1].fieldname
        df = dict(fieldname="auto_repeat", label="Auto Repeat", fieldtype="Link", insert_after=insert_after,
                  options="Auto Repeat", hidden=1, print_hide=1, read_only=1)
        create_custom_field(doc.reference_doctype, df)
        if doc.status in ['Draft', 'Stopped', 'Cancelled']:
            doc.disabled = 1
        #updates current status as Active, Disabled or Completed on validate
        doc.save()
fix: codacy
from __future__ import unicode_literals
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
def execute():
    """Migration: Auto Repeat is no longer submittable in v12.

    Strips submit/cancel/amend permissions, flattens docstatus back to
    draft, and mirrors each Auto Repeat onto its reference doctype via a
    hidden `auto_repeat` custom Link field.
    """
    #auto repeat is not submittable in v12
    frappe.reload_doc("automation", "doctype", "Auto Repeat")
    # Remove submit/cancel/amend rights and reset any submitted (1) or
    # cancelled (2) documents to draft (0).
    frappe.db.sql("update `tabDocPerm` set submit=0, cancel=0, amend=0 where parent='Auto Repeat'")
    frappe.db.sql("update `tabAuto Repeat` set docstatus=0 where docstatus=1 or docstatus=2")
    for entry in frappe.get_all("Auto Repeat"):
        doc = frappe.get_doc("Auto Repeat", entry.name)
        #create custom field for allow auto repeat
        fields = frappe.get_meta(doc.reference_doctype).fields
        # Append the new field after the last existing field of the doctype.
        insert_after = fields[len(fields) - 1].fieldname
        df = dict(fieldname="auto_repeat", label="Auto Repeat", fieldtype="Link", insert_after=insert_after,
                  options="Auto Repeat", hidden=1, print_hide=1, read_only=1)
        create_custom_field(doc.reference_doctype, df)
        if doc.status in ["Draft", "Stopped", "Cancelled"]:
            doc.disabled = 1
        #updates current status as Active, Disabled or Completed on validate
        doc.save()
import re
import keyword
def hash_schema(schema, hashfunc=hash):
    """Compute a unique hash for a (nested) schema

    Unlike the built-in hash() function, this handles
    dicts, lists, and sets in addition to tuples.
    """
    def _freeze(val):
        # Recursively convert unhashable containers to hashable equivalents.
        if isinstance(val, dict):
            return frozenset((key, _freeze(sub)) for key, sub in val.items())
        if isinstance(val, set):
            return frozenset(_freeze(sub) for sub in val)
        if isinstance(val, list):
            return tuple(_freeze(sub) for sub in val)
        return val

    return hashfunc(_freeze(schema))
def regularize_name(name):
    """Regularize a string to be a valid Python identifier

    Examples
    --------
    >>> regularize_name("classname<(string|int)>")
    'classname_string_int_'
    >>> regularize_name("foo.bar")
    'foo_bar'
    >>> regularize_name("9abc")
    '_9abc'
    """
    # Collapse every run of invalid characters into a single underscore.
    # (Fix: the original used re.subn and discarded the unused count.)
    name = re.sub(r'[^_a-zA-Z0-9]+', '_', name)
    # Identifiers cannot start with a digit, and keywords cannot be used as
    # names; prefix with an underscore in either case.
    if name[0].isdigit():
        name = '_' + name
    if keyword.iskeyword(name):
        name = '_' + name
    return name
def format_description(content, width=70, indent=8, indent_first=False):
    """Format a documentation description for fixed-width display.

    Each input line is re-wrapped to *width* and indented by *indent*
    spaces; bullet lines (starting with '-') get a hanging indent.  When
    *indent_first* is False the leading indent of the result is stripped.
    """
    # TODO: document, test, and use
    import textwrap  # bug fix: textwrap was used below but never imported

    lines = content.splitlines()

    def format_line(line):
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so "__Default value:__" was never rewritten.
        line = line.replace("__Default value:__", "default value:")
        if line.startswith('-'):
            return textwrap.indent(textwrap.fill(line, width - indent - 2),
                                   (indent + 2) * ' ')[2:]
        return textwrap.indent(textwrap.fill(line, width - indent), indent * ' ')

    result = '\n'.join(map(format_line, lines))
    if not indent_first:
        result = result.lstrip()
    return result
Add utility to convert string to ASCII compatible
import re
import keyword
def hash_schema(schema, hashfunc=hash):
    """Compute a unique hash for a (nested) schema

    Unlike the built-in hash() function, this handles
    dicts, lists, and sets in addition to tuples.
    """
    def make_hashable(val):
        # Lists become tuples, sets become frozensets, and dicts become
        # frozensets of (key, hashable-value) pairs; leaves pass through.
        if isinstance(val, list):
            return tuple(map(make_hashable, val))
        if isinstance(val, set):
            return frozenset(map(make_hashable, val))
        if isinstance(val, dict):
            return frozenset((k, make_hashable(v)) for k, v in val.items())
        return val

    return hashfunc(make_hashable(schema))
def regularize_name(name):
    """Regularize a string to be a valid Python identifier

    Examples
    --------
    >>> regularize_name("classname<(string|int)>")
    'classname_string_int_'
    >>> regularize_name("foo.bar")
    'foo_bar'
    >>> regularize_name("9abc")
    '_9abc'
    """
    # Collapse every run of invalid characters into a single underscore.
    # (Fix: the original used re.subn and discarded the unused count.)
    name = re.sub(r'[^_a-zA-Z0-9]+', '_', name)
    # Identifiers cannot start with a digit, and keywords cannot be used as
    # names; prefix with an underscore in either case.
    if name[0].isdigit():
        name = '_' + name
    if keyword.iskeyword(name):
        name = '_' + name
    return name
def make_ascii_compatible(s):
    """Ensure a string is ascii-compatible.

    This is not an issue for Python 3, but if used in code will break Python 2
    """
    # Known non-ascii characters and their ascii stand-ins; extend this
    # table if the encode() check below starts failing.
    replacements = {'\u2013': '-'}
    for char, sub in replacements.items():
        s = s.replace(char, sub)
    s.encode('ascii')  # if this errors, then add more replacements above
    return s
def format_description(content, width=70, indent=8, indent_first=False):
    """Format a documentation description for fixed-width display.

    Each input line is re-wrapped to *width* and indented by *indent*
    spaces; bullet lines (starting with '-') get a hanging indent.  When
    *indent_first* is False the leading indent of the result is stripped.
    """
    # TODO: document, test, and use
    import textwrap  # bug fix: textwrap was used below but never imported

    lines = content.splitlines()

    def format_line(line):
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so "__Default value:__" was never rewritten.
        line = line.replace("__Default value:__", "default value:")
        if line.startswith('-'):
            return textwrap.indent(textwrap.fill(line, width - indent - 2),
                                   (indent + 2) * ' ')[2:]
        return textwrap.indent(textwrap.fill(line, width - indent), indent * ' ')

    result = '\n'.join(map(format_line, lines))
    if not indent_first:
        result = result.lstrip()
    return result
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyColormath(PythonPackage):
    """Color math and conversion library."""

    homepage = "https://pypi.python.org/pypi/colormath/2.1.1"
    url = "https://pypi.io/packages/source/c/colormath/colormath-2.1.1.tar.gz"

    # 32-hex-digit (md5-style) checksum of the 2.1.1 source tarball.
    version('2.1.1', '10a0fb17e3c24363d0e1a3f2dccaa33b')

    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-networkx', type=('build', 'run'))
py-colormath: new version, requires newer networkx (#9237)
* py-colormath: new version requires newer networkx
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyColormath(PythonPackage):
    """Color math and conversion library."""

    homepage = "https://pypi.python.org/pypi/colormath/2.1.1"
    url = "https://pypi.io/packages/source/c/colormath/colormath-2.1.1.tar.gz"

    # 3.0.0 carries a 64-hex-digit (sha256-style) checksum; 2.1.1 keeps the
    # older 32-digit one.
    version('3.0.0', '3d4605af344527da0e4f9f504fad7ddbebda35322c566a6c72e28edb1ff31217')
    version('2.1.1', '10a0fb17e3c24363d0e1a3f2dccaa33b')

    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    # 3.0.0 and later additionally require networkx >= 2.0.
    depends_on('py-networkx', type=('build', 'run'))
    depends_on('py-networkx@2.0:', type=('build', 'run'), when='@3.0.0:')
|
"""Module to initialise and configure the flask app and the db."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from configuration.config import app_config
# Shared SQLAlchemy handle; bound to a concrete app inside create_app().
db = SQLAlchemy()
def create_app(configuration):
    """Build a Flask app for *configuration* and bind the shared db to it."""
    flask_app = Flask(__name__)
    flask_app.config.from_object(app_config[configuration])
    db.init_app(flask_app)
    return flask_app
# Module-level application instance using the "development" configuration.
app = create_app("development")
[ft #147187301] Api instance
"""Module to initialise and configure the flask app and the db."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from configuration.config import app_config
from flask.ext.restful import Api
# Shared SQLAlchemy handle; bound to a concrete app inside create_app().
db = SQLAlchemy()
def create_app(configuration):
    """Initialise and configure the app and db.

    Looks up the Flask config object for *configuration* in app_config,
    binds the shared SQLAlchemy handle to the new app, and returns it.
    """
    app = Flask(__name__)
    app.config.from_object(app_config[configuration])
    db.init_app(app)
    return app
# Module-level app plus the versioned REST API root at /api/v1.
app = create_app("development")
# NOTE(review): the flask.ext.* import style used above is deprecated;
# prefer importing from flask_restful directly.
api = Api(app=app, prefix="/api/v1")
|
import simplejson
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import (HttpResponse, HttpResponseRedirect,
Http404, HttpResponsePermanentRedirect)
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list, object_detail
from django.views.static import serve
from core.views import serve_docs
from projects.models import Project
from projects.utils import highest_version
from watching.models import PageView
from taggit.models import Tag
def project_index(request, username=None, tag=None):
    """
    The list of projects, which will optionally filter by user or tag,
    in which case a 'person' or 'tag' will be added to the context
    """
    queryset = Project.objects.live()

    user = None
    if username:
        user = get_object_or_404(User, username=username)
        queryset = queryset.filter(user=user)

    if tag:
        tag = get_object_or_404(Tag, slug=tag)
        queryset = queryset.filter(tags__name__in=[tag.slug])
    else:
        tag = None

    return object_list(
        request,
        queryset=queryset,
        extra_context={'person': user, 'tag': tag},
        page=int(request.GET.get('page', 1)),
        template_object_name='project',
    )
def slug_detail(request, project_slug, filename):
    """Serve a documentation file, resolving an optional embedded version.

    When the filename carries a version segment that matches one of the
    project's versions, that version is served and the segment is stripped
    from the filename; otherwise the 'latest' version is used.
    """
    version_slug = 'latest'
    if not filename:
        filename = "index.html"
    split_filename = filename.split('/')
    if len(split_filename) > 1:
        version = split_filename[1]
        proj = get_object_or_404(Project, slug=project_slug)
        if proj.versions.filter(slug=version).count():
            version_slug = version
            filename = '/'.join(split_filename[1:])
    # Bug fix: the original passed `version_slug=version`, which raised
    # NameError whenever the filename had no embedded version segment and
    # ignored the computed 'latest' fallback; pass the resolved slug.
    return serve_docs(request=request, project_slug=project_slug,
                      version_slug=version_slug, filename=filename)
def project_detail(request, project_slug):
    """
    Detail page for a single live project, with its ten most recent
    page views in the context; old /projects/<user>/ URLs are redirected
    to the user's profile when the slug matches a username.
    """
    queryset = Project.objects.live()
    updated = PageView.objects.filter(project__slug=project_slug)[:10]
    if not Project.objects.filter(slug=project_slug).count():
        # /projects/<user>/ used to be the user list; it now lives at
        # /profiles/<user>/, so redirect when the slug is a username.
        users = User.objects.filter(username=project_slug)
        if users.count():
            return HttpResponseRedirect(users[0].get_absolute_url())
    return object_detail(
        request,
        queryset=queryset,
        slug_field='slug',
        slug=project_slug,
        template_object_name='project',
        extra_context={'pageview_list': updated},
    )
def legacy_project_detail(request, username, project_slug):
    """Permanently redirect old per-user project URLs to project_detail.

    The unused ``queryset`` the original built here was dead code and
    has been removed.
    """
    return HttpResponsePermanentRedirect(reverse(
        project_detail, kwargs={
            'project_slug': project_slug,
        }
    ))
def tag_index(request):
    """Render every project tag, ordered from most to least used."""
    return object_list(
        request,
        queryset=Project.tags.most_common(),
        page=int(request.GET.get('page', 1)),
        template_object_name='tag',
        template_name='projects/tag_list.html',
    )
def search(request):
    """
    Naive site search over live project names (see roadmap for the real
    one). 404s without a ``q`` parameter; redirects straight to the
    project page on a unique match.
    """
    try:
        term = request.GET['q']
    except KeyError:
        raise Http404
    queryset = Project.objects.live(name__icontains=term)
    if queryset.count() == 1:
        return HttpResponseRedirect(queryset[0].get_absolute_url())
    return object_list(
        request,
        queryset=queryset,
        template_object_name='term',
        extra_context={'term': term},
        template_name='projects/search.html',
    )
def search_autocomplete(request):
    """Return a JSON array of up to 20 live project names matching ``term``."""
    try:
        term = request.GET['term']
    except KeyError:
        raise Http404
    names = Project.objects.live(name__icontains=term)[:20]
    payload = simplejson.dumps(list(names.values_list('name', flat=True)))
    return HttpResponse(payload, mimetype='text/javascript')
def subdomain_handler(request, subdomain, filename):
    """
    Fall-back routing for subdomain requests.

    Made primarily to redirect old subdomain URLs to their versioned
    equivalents under /en/<version>/..., serving docs directly when the
    path already names a valid language and version.
    """
    if not filename:
        filename = "index.html"
    split_filename = filename.split('/')
    # The subdomain itself names the project.
    proj = get_object_or_404(Project, slug=subdomain)
    if len(split_filename) > 2:
        # A correct URL, with a language and version.
        language = split_filename[0]
        version = split_filename[1]
        # Version aliases (renamed releases) redirect to their target slug.
        other_aliases = proj.aliases.filter(from_slug=version)
        if other_aliases.count():
            return HttpResponseRedirect('/en/%s/%s' %
                                        (other_aliases[0].to_slug,
                                         '/'.join(split_filename[1:])))
        other_projects = proj.versions.filter(slug=version).count()
        if not other_projects:
            # Maybe the first segment was actually the version (no language).
            other_projects = proj.versions.filter(slug=language).count()
            if other_projects:
                version = language
        # Hard code this for now. (English-only serving.)
        if other_projects or version == 'latest' and language == 'en':
            version_slug = version
            filename = '/'.join(split_filename[2:])
            return serve_docs(request=request,
                              project_slug=subdomain,
                              lang_slug='en',
                              version_slug=version_slug,
                              filename=filename)
        else:
            raise Http404('No version matching query')
    elif len(split_filename) == 2:
        # Only a version segment, no language.
        version = split_filename[0]
        other_aliases = proj.aliases.filter(from_slug=version)
        if other_aliases.count():
            if other_aliases[0].largest:
                # 'largest' aliases resolve to the highest matching active version.
                highest_ver = highest_version(proj.versions.filter(slug__contains=version, active=True))
                version_slug = highest_ver[0].slug
            else:
                version_slug = other_aliases[0].to_slug
            return HttpResponseRedirect('/en/%s/%s' %
                                        (version_slug,
                                         '/'.join(split_filename[1:])))
        valid_version = proj.versions.filter(slug=version)
        if valid_version:
            return HttpResponseRedirect('/en/%s/%s' %
                                        (version,
                                         '/'.join(split_filename[1:])))
    # Nothing matched: send the whole path to the project's default version.
    default_version = proj.get_default_version()
    return HttpResponseRedirect('/en/%s/%s' % (default_version, filename))
# NOTE: Testing CNAMEs is hard locally.
import simplejson
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import (HttpResponse, HttpResponseRedirect,
Http404, HttpResponsePermanentRedirect)
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list, object_detail
from django.views.static import serve
from core.views import serve_docs
from projects.models import Project
from projects.utils import highest_version
from watching.models import PageView
from taggit.models import Tag
def project_index(request, username=None, tag=None):
    """Project listing view, filterable by owner or by tag.

    A matching user is passed to the template context as ``person`` and
    a matching tag as ``tag``.
    """
    queryset = Project.objects.live()
    if not username:
        user = None
    else:
        user = get_object_or_404(User, username=username)
        queryset = queryset.filter(user=user)
    if not tag:
        tag = None
    else:
        tag = get_object_or_404(Tag, slug=tag)
        queryset = queryset.filter(tags__name__in=[tag.slug])
    page_number = int(request.GET.get('page', 1))
    return object_list(request,
                       queryset=queryset,
                       extra_context={'person': user, 'tag': tag},
                       page=page_number,
                       template_object_name='project')
def slug_detail(request, project_slug, filename):
    """
    Serve a documentation page for *project_slug* at the 'latest'
    version, unless *filename* names a valid version of the project.

    BUG FIX: serve_docs was previously called with the raw ``version``
    local, which is unbound (NameError) when *filename* contains no '/'
    and unvalidated otherwise; use the validated ``version_slug``.
    """
    version_slug = 'latest'
    if not filename:
        filename = "index.html"
    split_filename = filename.split('/')
    if len(split_filename) > 1:
        version = split_filename[1]
        proj = get_object_or_404(Project, slug=project_slug)
        valid_version = proj.versions.filter(slug=version).count()
        if valid_version:
            version_slug = version
            filename = '/'.join(split_filename[1:])
    return serve_docs(request=request, project_slug=project_slug,
                      version_slug=version_slug, filename=filename)
def project_detail(request, project_slug):
    """Show one live project; falls back to redirecting old per-user URLs."""
    live_projects = Project.objects.live()
    recent_views = PageView.objects.filter(project__slug=project_slug)[:10]
    matching = Project.objects.filter(slug=project_slug)
    if matching.count() == 0:
        # /projects/<user>/ used to list a user's projects; those moved
        # to /profiles/<user>/, so redirect when the slug is a username.
        candidates = User.objects.filter(username=project_slug)
        if candidates.count():
            return HttpResponseRedirect(candidates[0].get_absolute_url())
    return object_detail(
        request,
        queryset=live_projects,
        slug_field='slug',
        slug=project_slug,
        template_object_name='project',
        extra_context={'pageview_list': recent_views},
    )
def legacy_project_detail(request, username, project_slug):
    """Permanent redirect from legacy user-scoped URLs to project_detail.

    Removed the unused ``queryset`` local the original computed here.
    """
    return HttpResponsePermanentRedirect(reverse(
        project_detail, kwargs={
            'project_slug': project_slug,
        }
    ))
def tag_index(request):
    """List all project tags, most common first."""
    common_tags = Project.tags.most_common()
    current_page = int(request.GET.get('page', 1))
    return object_list(request,
                       queryset=common_tags,
                       page=current_page,
                       template_object_name='tag',
                       template_name='projects/tag_list.html')
def search(request):
    """Simple name-based site search; 404 without ``q``, redirect on a
    unique hit, otherwise render the match list."""
    if 'q' not in request.GET:
        raise Http404
    term = request.GET['q']
    matches = Project.objects.live(name__icontains=term)
    if matches.count() == 1:
        return HttpResponseRedirect(matches[0].get_absolute_url())
    return object_list(
        request,
        queryset=matches,
        template_object_name='term',
        extra_context={'term': term},
        template_name='projects/search.html',
    )
def search_autocomplete(request):
    """JSON list of up to twenty live project names containing ``term``."""
    if 'term' not in request.GET:
        raise Http404
    term = request.GET['term']
    matching = Project.objects.live(name__icontains=term)[:20]
    project_names = matching.values_list('name', flat=True)
    return HttpResponse(simplejson.dumps(list(project_names)),
                        mimetype='text/javascript')
def subdomain_handler(request, subdomain, filename):
    """
    Fall-back routing for subdomain requests.

    Made primarily to redirect old subdomain URLs to their versioned
    equivalents under /en/<version>/..., serving docs directly when the
    path already names a valid language and version.
    """
    if not filename:
        filename = "index.html"
    split_filename = filename.split('/')
    # The subdomain names the project.
    proj = get_object_or_404(Project, slug=subdomain)
    if len(split_filename) > 2:
        # A correct URL, with a language and version.
        language = split_filename[0]
        version = split_filename[1]
        # Version aliases (renamed releases) redirect to their target slug.
        other_aliases = proj.aliases.filter(from_slug=version)
        if other_aliases.count():
            return HttpResponseRedirect('/en/%s/%s' %
                                        (other_aliases[0].to_slug,
                                         '/'.join(split_filename[1:])))
        other_projects = proj.versions.filter(slug=version).count()
        if not other_projects:
            # Maybe the first segment was really the version: redirect to
            # the canonical /en/<version>/ form.
            other_projects = proj.versions.filter(slug=language).count()
            if other_projects:
                return HttpResponseRedirect('/en/%s/%s' %
                                            (language,
                                             '/'.join(split_filename[1:])))
        # Hard code this for now. (English-only serving.)
        if other_projects or version == 'latest' and language == 'en':
            version_slug = version
            filename = '/'.join(split_filename[2:])
            return serve_docs(request=request,
                              project_slug=subdomain,
                              lang_slug='en',
                              version_slug=version_slug,
                              filename=filename)
        else:
            raise Http404('No version matching query')
    elif len(split_filename) == 2:
        # Only a version segment, no language.
        version = split_filename[0]
        other_aliases = proj.aliases.filter(from_slug=version)
        if other_aliases.count():
            if other_aliases[0].largest:
                # 'largest' aliases resolve to the highest matching active version.
                highest_ver = highest_version(proj.versions.filter(slug__contains=version, active=True))
                version_slug = highest_ver[0].slug
            else:
                version_slug = other_aliases[0].to_slug
            return HttpResponseRedirect('/en/%s/%s' %
                                        (version_slug,
                                         '/'.join(split_filename[1:])))
        valid_version = proj.versions.filter(slug=version)
        if valid_version:
            return HttpResponseRedirect('/en/%s/%s' %
                                        (version,
                                         '/'.join(split_filename[1:])))
    # Nothing matched: send the path to the project's default version.
    default_version = proj.get_default_version()
    return HttpResponseRedirect('/en/%s/%s' % (default_version, filename))
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
import numpy as np
import os
import random
from .helpers.corner import corner
import matplotlib.pyplot as plt
from matplotlib import colorbar
def draw_spectra(model, dataset):
    """ Generate best-fit spectra for all the test objects

    Parameters
    ----------
    model: CannonModel
        The Cannon spectral model
    dataset: Dataset
        Dataset that needs label inference

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes
    best_ivars:
        The best-fit test inverse variances
    """
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = model.model
    nstars = len(dataset.test_SNR)
    cannon_flux = np.zeros(dataset.test_flux.shape)
    cannon_ivar = np.zeros(dataset.test_ivar.shape)
    for i in range(nstars):
        # Label vector for star i; contracted against the coefficients to
        # give the model flux at every pixel.
        x = label_vector[:,i,:]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i,:] = spec_fit
        # NOTE(review): SMALL is assumed to be a module-level constant
        # (the ivar floor used for flagged pixels) defined elsewhere in
        # this package — confirm.
        bad = dataset.test_ivar[i,:] == SMALL**2
        # Good pixels get ivar from the model's per-pixel scatter; bad
        # pixels stay at zero.
        cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
def overlay_spectra(model, dataset):
    """ Run a series of diagnostics on the fitted spectra

    Overplots the original and best-fit spectrum for ten randomly
    chosen test stars and saves one diagnostic PNG per star.

    Parameters
    ----------
    model: model
        best-fit Cannon spectral model
    dataset: Dataset
        original spectra

    Fixes applied: fig.colorbar() was called with no mappable (a
    TypeError at runtime) — the scatter's mappable is now attached as
    the commented-out code intended; a second set_title() call that
    clobbered the star-specific title was removed; dead locals (res,
    an unused initial npix, a shadowed spec_orig) were dropped.
    """
    best_flux, best_ivar = draw_spectra(model, dataset)
    coeffs_all, covs, scatters, all_chisqs, pivots, label_vector = model.model
    # Overplot original spectra with best-fit spectra
    print("Overplotting spectra for ten random stars")
    lambdas = dataset.wl
    nstars = best_flux.shape[0]
    pickstars = []
    for i in range(10):
        pickstars.append(random.randrange(0, nstars-1))
    for i in pickstars:
        print("Star %s" % i)
        ID = dataset.test_ID[i]
        # Pixels with zero flux are treated as missing and masked out.
        bad = dataset.test_flux[i,:] == 0
        lambdas = np.ma.array(lambdas, mask=bad, dtype=float)
        npix = len(lambdas.compressed())
        spec_orig = np.ma.array(dataset.test_flux[i,:], mask=bad)
        spec_fit = np.ma.array(best_flux[i,:], mask=bad)
        ivars_orig = np.ma.array(dataset.test_ivar[i,:], mask=bad)
        ivars_fit = np.ma.array(best_ivar[i,:], mask=bad)
        red_chisq = np.sum(all_chisqs[:,i], axis=0) / (npix - coeffs_all.shape[1])
        red_chisq = np.round(red_chisq, 2)
        fig, axarr = plt.subplots(2)
        ax1 = axarr[0]
        im = ax1.scatter(lambdas, spec_orig, label="Orig Spec",
                         c=1 / np.sqrt(ivars_orig), s=10)
        ax1.scatter(lambdas, spec_fit, label="Cannon Spec", c='r', s=10)
        ax1.errorbar(lambdas, spec_fit,
                     yerr=1/np.sqrt(ivars_fit), fmt='ro', ms=1, alpha=0.7)
        ax1.set_xlabel(r"Wavelength $\lambda (\AA)$")
        ax1.set_ylabel("Normalized flux")
        ax1.set_title("Spectrum Fit: %s" % ID)
        ax1.set_xlim(min(lambdas.compressed())-10, max(lambdas.compressed())+10)
        ax1.legend(loc='lower center', fancybox=True, shadow=True)
        ax2 = axarr[1]
        ax2.scatter(spec_orig, spec_fit, c=1/np.sqrt(ivars_orig), alpha=0.7)
        ax2.errorbar(spec_orig, spec_fit, yerr=1 / np.sqrt(ivars_fit),
                     ecolor='k', fmt="none", ms=1, alpha=0.7)
        # BUG FIX: colorbar needs a mappable; attach the scatter's.
        fig.colorbar(
            im, ax=ax2,
            label="Uncertainties on the Fluxes from the Original Spectrum")
        xlims = ax2.get_xlim()
        ylims = ax2.get_ylim()
        lims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
        # 1:1 reference line for orig-vs-fit fluxes.
        ax2.plot(lims, lims, 'k-', alpha=0.75)
        textstr = "Red Chi Sq: %s" % red_chisq
        props = dict(boxstyle='round', facecolor='palevioletred', alpha=0.5)
        ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
                 verticalalignment='top', bbox=props)
        ax2.set_xlim(xlims)
        ax2.set_ylim(ylims)
        ax2.set_xlabel("Orig Fluxes")
        ax2.set_ylabel("Fitted Fluxes")
        plt.tight_layout()
        filename = "best_fit_spec_Star%s.png" % i
        print("Saved as %s" % filename)
        fig.savefig(filename)
        plt.close(fig)
def residuals(cannon_set, dataset):
    """ Stack spectrum fit residuals, sort by each label. Include histogram of
    the RMS at each pixel.

    Parameters
    ----------
    cannon_set: Dataset
        best-fit Cannon spectra
    dataset: Dataset
        original spectra
    """
    print("Stacking spectrum fit residuals")
    res = dataset.test_fluxes - cannon_set.test_fluxes
    # NOTE(review): SMALL is assumed to be a module constant defined
    # elsewhere in this package — confirm; `bad` is computed but unused.
    bad = dataset.test_ivars == SMALL**2
    err = np.zeros(len(dataset.test_ivars))
    # Combined (quadrature) error of data and fit, used to normalize.
    err = np.sqrt(1. / dataset.test_ivars + 1. / cannon_set.test_ivars)
    res_norm = res / err
    # Mask out columns (pixels) whose residuals have zero scatter across
    # stars, then drop them entirely.
    res_norm = np.ma.array(res_norm,
                           mask=(np.ones_like(res_norm) *
                                 (np.std(res_norm,axis=0) == 0)))
    res_norm = np.ma.compress_cols(res_norm)
    for i in range(len(cannon_set.get_plotting_labels())):
        label_name = cannon_set.get_plotting_labels()[i]
        print("Plotting residuals sorted by %s" % label_name)
        label_vals = cannon_set.tr_label_vals[:,i]
        # Sort stars (rows) by this label so label-correlated structure
        # shows up as vertical trends in the image.
        sorted_res = res_norm[np.argsort(label_vals)]
        mu = np.mean(sorted_res.flatten())
        sigma = np.std(sorted_res.flatten())
        # Figure layout: main image plus marginal histograms (top, right).
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left+width+0.1
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.1]
        rect_histy = [left_h, bottom, 0.1, height]
        plt.figure()
        axScatter = plt.axes(rect_scatter)
        axHistx = plt.axes(rect_histx)
        axHisty = plt.axes(rect_histy)
        im = axScatter.imshow(sorted_res, cmap=plt.cm.bwr_r,
                              interpolation="nearest", vmin=mu - 3. * sigma,
                              vmax=mu + 3. * sigma, aspect='auto',
                              origin='lower', extent=[0, len(dataset.wl),
                                                      min(label_vals),
                                                      max(label_vals)])
        cax, kw = colorbar.make_axes(axScatter.axes, location='bottom')
        plt.colorbar(im, cax=cax, orientation='horizontal')
        axScatter.set_title(
            r"Spectral Residuals Sorted by ${0:s}$".format(label_name))
        axScatter.set_xlabel("Pixels")
        axScatter.set_ylabel(r"$%s$" % label_name)
        # Right-hand histogram: stdev of each star's residuals.
        axHisty.hist(np.std(res_norm,axis=1)[~np.isnan(np.std(res_norm, axis=1))], orientation='horizontal', range=[0,2])
        axHisty.axhline(y=1, c='k', linewidth=3, label="y=1")
        axHisty.legend(bbox_to_anchor=(0., 0.8, 1., .102),
                       prop={'family':'serif', 'size':'small'})
        axHisty.text(1.0, 0.5, "Distribution of Stdev of Star Residuals",
                     verticalalignment='center', transform=axHisty.transAxes,
                     rotation=270)
        axHisty.set_ylabel("Standard Deviation")
        start, end = axHisty.get_xlim()
        axHisty.xaxis.set_ticks(np.linspace(start, end, 3))
        axHisty.set_xlabel("Number of Stars")
        axHisty.xaxis.set_label_position("top")
        # Top histogram: stdev of each pixel's residuals.
        axHistx.hist(np.std(res_norm, axis=0)[~np.isnan(np.std(res_norm, axis=0))], range=[0.8,1.1])
        axHistx.axvline(x=1, c='k', linewidth=3, label="x=1")
        axHistx.set_title("Distribution of Stdev of Pixel Residuals")
        axHistx.set_xlabel("Standard Deviation")
        axHistx.set_ylabel("Number of Pixels")
        start, end = axHistx.get_ylim()
        axHistx.yaxis.set_ticks(np.linspace(start, end, 3))
        axHistx.legend()
        filename = "residuals_sorted_by_label_%s.png" % i
        plt.savefig(filename)
        print("File saved as %s" % filename)
        plt.close()
    # Auto-correlation of mean residuals
    print("Plotting Auto-Correlation of Mean Residuals")
    mean_res = res_norm.mean(axis=0)
    autocorr = np.correlate(mean_res, mean_res, mode="full")
    pkwidth = int(len(autocorr)/2-np.argmin(autocorr))
    xmin = int(len(autocorr)/2)-pkwidth
    xmax = int(len(autocorr)/2)+pkwidth
    zoom_x = np.linspace(xmin, xmax, len(autocorr[xmin:xmax]))
    fig, axarr = plt.subplots(2)
    axarr[0].plot(autocorr)
    axarr[0].set_title("Autocorrelation of Mean Spectral Residual")
    axarr[0].set_xlabel("Lag (# Pixels)")
    axarr[0].set_ylabel("Autocorrelation")
    axarr[1].plot(zoom_x, autocorr[xmin:xmax])
    axarr[1].set_title("Central Peak, Zoomed")
    axarr[1].set_xlabel("Lag (# Pixels)")
    axarr[1].set_ylabel("Autocorrelation")
    filename = "residuals_autocorr.png"
    plt.savefig(filename)
    print("saved %s" % filename)
    plt.close()
# TODO(review): not sure — this module appears twice in the file; confirm which copy is current.
from __future__ import (absolute_import, division, print_function, unicode_literals)
import numpy as np
import os
import random
from .helpers.corner import corner
import matplotlib.pyplot as plt
from matplotlib import colorbar
def draw_spectra(md, ds):
    """ Generate best-fit spectra for all the test objects

    Parameters
    ----------
    md: model
        The Cannon spectral model
    ds: Dataset
        Dataset object

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes
    best_ivars:
        The best-fit test inverse variances

    BUG FIX: the parameters were renamed to ``md``/``ds`` but the body
    still referenced ``model``/``dataset``, so every call raised
    NameError; the body now uses the declared parameter names.
    """
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        # Star i's label vector contracted against the coefficients gives
        # the model flux at every pixel.
        x = label_vector[:,i,:]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i,:] = spec_fit
        # NOTE(review): SMALL is assumed to be a module-level constant
        # (the ivar floor for flagged pixels) defined elsewhere — confirm.
        bad = ds.test_ivar[i,:] == SMALL**2
        cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
def overlay_spectra(model, dataset):
    """ Run a series of diagnostics on the fitted spectra

    Overplots the original and best-fit spectrum for ten randomly
    chosen test stars and saves one diagnostic PNG per star.

    Parameters
    ----------
    model: model
        best-fit Cannon spectral model
    dataset: Dataset
        original spectra

    Fixes applied: fig.colorbar() was called with no mappable (a
    TypeError at runtime) — the scatter's mappable is now attached as
    the commented-out code intended; a second set_title() call that
    clobbered the star-specific title was removed; dead locals (res,
    an unused initial npix, a shadowed spec_orig) were dropped.
    """
    best_flux, best_ivar = draw_spectra(model, dataset)
    coeffs_all, covs, scatters, all_chisqs, pivots, label_vector = model.model
    # Overplot original spectra with best-fit spectra
    print("Overplotting spectra for ten random stars")
    lambdas = dataset.wl
    nstars = best_flux.shape[0]
    pickstars = []
    for i in range(10):
        pickstars.append(random.randrange(0, nstars-1))
    for i in pickstars:
        print("Star %s" % i)
        ID = dataset.test_ID[i]
        # Pixels with zero flux are treated as missing and masked out.
        bad = dataset.test_flux[i,:] == 0
        lambdas = np.ma.array(lambdas, mask=bad, dtype=float)
        npix = len(lambdas.compressed())
        spec_orig = np.ma.array(dataset.test_flux[i,:], mask=bad)
        spec_fit = np.ma.array(best_flux[i,:], mask=bad)
        ivars_orig = np.ma.array(dataset.test_ivar[i,:], mask=bad)
        ivars_fit = np.ma.array(best_ivar[i,:], mask=bad)
        red_chisq = np.sum(all_chisqs[:,i], axis=0) / (npix - coeffs_all.shape[1])
        red_chisq = np.round(red_chisq, 2)
        fig, axarr = plt.subplots(2)
        ax1 = axarr[0]
        im = ax1.scatter(lambdas, spec_orig, label="Orig Spec",
                         c=1 / np.sqrt(ivars_orig), s=10)
        ax1.scatter(lambdas, spec_fit, label="Cannon Spec", c='r', s=10)
        ax1.errorbar(lambdas, spec_fit,
                     yerr=1/np.sqrt(ivars_fit), fmt='ro', ms=1, alpha=0.7)
        ax1.set_xlabel(r"Wavelength $\lambda (\AA)$")
        ax1.set_ylabel("Normalized flux")
        ax1.set_title("Spectrum Fit: %s" % ID)
        ax1.set_xlim(min(lambdas.compressed())-10, max(lambdas.compressed())+10)
        ax1.legend(loc='lower center', fancybox=True, shadow=True)
        ax2 = axarr[1]
        ax2.scatter(spec_orig, spec_fit, c=1/np.sqrt(ivars_orig), alpha=0.7)
        ax2.errorbar(spec_orig, spec_fit, yerr=1 / np.sqrt(ivars_fit),
                     ecolor='k', fmt="none", ms=1, alpha=0.7)
        # BUG FIX: colorbar needs a mappable; attach the scatter's.
        fig.colorbar(
            im, ax=ax2,
            label="Uncertainties on the Fluxes from the Original Spectrum")
        xlims = ax2.get_xlim()
        ylims = ax2.get_ylim()
        lims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
        # 1:1 reference line for orig-vs-fit fluxes.
        ax2.plot(lims, lims, 'k-', alpha=0.75)
        textstr = "Red Chi Sq: %s" % red_chisq
        props = dict(boxstyle='round', facecolor='palevioletred', alpha=0.5)
        ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
                 verticalalignment='top', bbox=props)
        ax2.set_xlim(xlims)
        ax2.set_ylim(ylims)
        ax2.set_xlabel("Orig Fluxes")
        ax2.set_ylabel("Fitted Fluxes")
        plt.tight_layout()
        filename = "best_fit_spec_Star%s.png" % i
        print("Saved as %s" % filename)
        fig.savefig(filename)
        plt.close(fig)
def residuals(cannon_set, dataset):
    """ Stack spectrum fit residuals, sort by each label. Include histogram of
    the RMS at each pixel.

    Parameters
    ----------
    cannon_set: Dataset
        best-fit Cannon spectra
    dataset: Dataset
        original spectra
    """
    print("Stacking spectrum fit residuals")
    res = dataset.test_fluxes - cannon_set.test_fluxes
    # NOTE(review): SMALL is assumed to be a module constant defined
    # elsewhere in this package — confirm; `bad` is computed but unused.
    bad = dataset.test_ivars == SMALL**2
    err = np.zeros(len(dataset.test_ivars))
    # Combined (quadrature) error of data and fit, used to normalize.
    err = np.sqrt(1. / dataset.test_ivars + 1. / cannon_set.test_ivars)
    res_norm = res / err
    # Mask out pixels whose residuals have zero scatter across stars,
    # then drop those columns entirely.
    res_norm = np.ma.array(res_norm,
                           mask=(np.ones_like(res_norm) *
                                 (np.std(res_norm,axis=0) == 0)))
    res_norm = np.ma.compress_cols(res_norm)
    for i in range(len(cannon_set.get_plotting_labels())):
        label_name = cannon_set.get_plotting_labels()[i]
        print("Plotting residuals sorted by %s" % label_name)
        label_vals = cannon_set.tr_label_vals[:,i]
        # Sort stars (rows) by this label so label-correlated structure
        # shows up as vertical trends in the image.
        sorted_res = res_norm[np.argsort(label_vals)]
        mu = np.mean(sorted_res.flatten())
        sigma = np.std(sorted_res.flatten())
        # Figure layout: main image plus marginal histograms (top, right).
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left+width+0.1
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.1]
        rect_histy = [left_h, bottom, 0.1, height]
        plt.figure()
        axScatter = plt.axes(rect_scatter)
        axHistx = plt.axes(rect_histx)
        axHisty = plt.axes(rect_histy)
        im = axScatter.imshow(sorted_res, cmap=plt.cm.bwr_r,
                              interpolation="nearest", vmin=mu - 3. * sigma,
                              vmax=mu + 3. * sigma, aspect='auto',
                              origin='lower', extent=[0, len(dataset.wl),
                                                      min(label_vals),
                                                      max(label_vals)])
        cax, kw = colorbar.make_axes(axScatter.axes, location='bottom')
        plt.colorbar(im, cax=cax, orientation='horizontal')
        axScatter.set_title(
            r"Spectral Residuals Sorted by ${0:s}$".format(label_name))
        axScatter.set_xlabel("Pixels")
        axScatter.set_ylabel(r"$%s$" % label_name)
        # Right-hand histogram: stdev of each star's residuals.
        axHisty.hist(np.std(res_norm,axis=1)[~np.isnan(np.std(res_norm, axis=1))], orientation='horizontal', range=[0,2])
        axHisty.axhline(y=1, c='k', linewidth=3, label="y=1")
        axHisty.legend(bbox_to_anchor=(0., 0.8, 1., .102),
                       prop={'family':'serif', 'size':'small'})
        axHisty.text(1.0, 0.5, "Distribution of Stdev of Star Residuals",
                     verticalalignment='center', transform=axHisty.transAxes,
                     rotation=270)
        axHisty.set_ylabel("Standard Deviation")
        start, end = axHisty.get_xlim()
        axHisty.xaxis.set_ticks(np.linspace(start, end, 3))
        axHisty.set_xlabel("Number of Stars")
        axHisty.xaxis.set_label_position("top")
        # Top histogram: stdev of each pixel's residuals.
        axHistx.hist(np.std(res_norm, axis=0)[~np.isnan(np.std(res_norm, axis=0))], range=[0.8,1.1])
        axHistx.axvline(x=1, c='k', linewidth=3, label="x=1")
        axHistx.set_title("Distribution of Stdev of Pixel Residuals")
        axHistx.set_xlabel("Standard Deviation")
        axHistx.set_ylabel("Number of Pixels")
        start, end = axHistx.get_ylim()
        axHistx.yaxis.set_ticks(np.linspace(start, end, 3))
        axHistx.legend()
        filename = "residuals_sorted_by_label_%s.png" % i
        plt.savefig(filename)
        print("File saved as %s" % filename)
        plt.close()
    # Auto-correlation of mean residuals
    print("Plotting Auto-Correlation of Mean Residuals")
    mean_res = res_norm.mean(axis=0)
    autocorr = np.correlate(mean_res, mean_res, mode="full")
    pkwidth = int(len(autocorr)/2-np.argmin(autocorr))
    xmin = int(len(autocorr)/2)-pkwidth
    xmax = int(len(autocorr)/2)+pkwidth
    zoom_x = np.linspace(xmin, xmax, len(autocorr[xmin:xmax]))
    fig, axarr = plt.subplots(2)
    axarr[0].plot(autocorr)
    axarr[0].set_title("Autocorrelation of Mean Spectral Residual")
    axarr[0].set_xlabel("Lag (# Pixels)")
    axarr[0].set_ylabel("Autocorrelation")
    axarr[1].plot(zoom_x, autocorr[xmin:xmax])
    axarr[1].set_title("Central Peak, Zoomed")
    axarr[1].set_xlabel("Lag (# Pixels)")
    axarr[1].set_ylabel("Autocorrelation")
    filename = "residuals_autocorr.png"
    plt.savefig(filename)
    print("saved %s" % filename)
    plt.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
interact with a local/remote libvirt daemon
"""
from defaults import TEMPLATES
import docker
from distutils.spawn import find_executable
from iptools import IpRange
from netaddr import IPAddress, IPNetwork
from libvirt import open as libvirtopen
import os
import socket
import string
import xml.etree.ElementTree as ET
__version__ = "4.1"
# NOTE(review): despite the names, KB here is 1024*1024 bytes (a MiB) and
# MB is 1024*KB (a GiB); callers appear to depend on these values — confirm
# before renaming.
KB = 1024 * 1024
MB = 1024 * KB
# Guest OS identifier strings used when defining domains.
guestrhel532 = "rhel_5"
guestrhel564 = "rhel_5x64"
guestrhel632 = "rhel_6"
guestrhel664 = "rhel_6x64"
guestrhel764 = "rhel_7x64"
guestother = "other"
guestotherlinux = "other_linux"
guestwindowsxp = "windows_xp"
guestwindows7 = "windows_7"
guestwindows764 = "windows_7x64"
guestwindows2003 = "windows_2003"
guestwindows200364 = "windows_2003x64"
guestwindows2008 = "windows_2008"
guestwindows200864 = "windows_2008x64"
class Kvirt:
    def __init__(self, host='127.0.0.1', port=None, user='root', protocol='ssh', url=None):
        """Open a libvirt connection, building the URI from host/user/port
        unless an explicit *url* is given.

        On connection failure ``self.conn`` is left as None rather than
        raising, so callers must check it.
        """
        if url is None:
            # Local hosts use the plain system socket.
            if host == '127.0.0.1' or host == 'localhost':
                url = "qemu:///system"
            # NOTE(review): this ssh branch matches before the user/port
            # branches below, so a custom port is ignored for the default
            # protocol 'ssh' — confirm whether that is intended.
            elif protocol == 'ssh':
                url = "qemu+%s://%s@%s/system?socket=/var/run/libvirt/libvirt-sock" % (protocol, user, host)
            elif user and port:
                url = "qemu+%s://%s@%s:%s/system?socket=/var/run/libvirt/libvirt-sock" % (protocol, user, host, port)
            elif port:
                url = "qemu+%s://%s:%s/system?socket=/var/run/libvirt/libvirt-sock" % (protocol, host, port)
            else:
                url = "qemu:///system"
        try:
            self.conn = libvirtopen(url)
        except Exception:
            # Leave conn unset on failure; methods will fail on None.
            self.conn = None
        self.host = host
        self.user = user
        self.port = port
        self.protocol = protocol
        # Default ssh port when none was supplied.
        if self.protocol == 'ssh' and port is None:
            self.port = '22'
def close(self):
conn = self.conn
conn.close()
self.conn = None
def exists(self, name):
conn = self.conn
for vm in conn.listAllDomains():
if vm.name() == name:
return True
return False
def net_exists(self, name):
conn = self.conn
try:
conn.networkLookupByName(name)
return True
except:
return False
def disk_exists(self, pool, name):
conn = self.conn
try:
storage = conn.storagePoolLookupByName(pool)
storage.refresh()
for stor in sorted(storage.listVolumes()):
if stor == name:
return True
except:
return False
    def create(self, name, virttype='kvm', title='', description='kvirt', numcpus=2, memory=512, guestid='guestrhel764', pool='default', template=None, disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, start=True, keys=None, cmds=None, ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None):
        """Define and optionally start a new domain *name*.

        Builds volume and domain XML from the disk/net specs, creates the
        volumes, defines the domain, optionally generates a cloudinit ISO
        and reserves IPs, then boots the VM when *start* is true.

        Returns a dict {'result': 'success'} or
        {'result': 'failure', 'reason': ...}.

        NOTE(review): ``disks`` uses a mutable default argument and
        ``nets``/``ips`` entries are mutated in place — confirm callers
        expect this. ``guestid`` and ``netmasks`` are currently unused
        here.
        """
        default_diskinterface = diskinterface
        default_diskthin = diskthin
        default_disksize = disksize
        default_pool = pool
        conn = self.conn
        # Resolve the default storage pool and its type/path.
        try:
            default_storagepool = conn.storagePoolLookupByName(default_pool)
        except:
            return {'result': 'failure', 'reason': "Pool %s not found" % default_pool}
        default_poolxml = default_storagepool.XMLDesc(0)
        root = ET.fromstring(default_poolxml)
        default_pooltype = root.getiterator('pool')[0].get('type')
        default_poolpath = None
        for element in root.getiterator('path'):
            default_poolpath = element.text
            break
        if vnc:
            display = 'vnc'
        else:
            display = 'spice'
        # Index every volume across all pools, by name and by path.
        volumes = {}
        volumespaths = {}
        for p in conn.listStoragePools():
            poo = conn.storagePoolLookupByName(p)
            poo.refresh(0)
            for vol in poo.listAllVolumes():
                volumes[vol.name()] = {'pool': poo, 'object': vol}
                volumespaths[vol.path()] = {'pool': poo, 'object': vol}
        # Known virtual networks and host bridges (everything but lo).
        networks = []
        bridges = []
        for net in conn.listNetworks():
            networks.append(net)
        for net in conn.listInterfaces():
            if net != 'lo':
                bridges.append(net)
        machine = 'pc'
        sysinfo = "<smbios mode='sysinfo'/>"
        disksxml = ''
        volsxml = {}
        # Each disk spec may be None (all defaults), an int (size), or a dict.
        for index, disk in enumerate(disks):
            if disk is None:
                disksize = default_disksize
                diskthin = default_diskthin
                diskinterface = default_diskinterface
                diskpool = default_pool
                diskpooltype = default_pooltype
                diskpoolpath = default_poolpath
            elif isinstance(disk, int):
                disksize = disk
                diskthin = default_diskthin
                diskinterface = default_diskinterface
                diskpool = default_pool
                diskpooltype = default_pooltype
                diskpoolpath = default_poolpath
            elif isinstance(disk, dict):
                disksize = disk.get('size', default_disksize)
                diskthin = disk.get('thin', default_diskthin)
                diskinterface = disk.get('interface', default_diskinterface)
                diskpool = disk.get('pool', default_pool)
                try:
                    storagediskpool = conn.storagePoolLookupByName(diskpool)
                except:
                    return {'result': 'failure', 'reason': "Pool %s not found" % diskpool}
                diskpoolxml = storagediskpool.XMLDesc(0)
                root = ET.fromstring(diskpoolxml)
                diskpooltype = root.getiterator('pool')[0].get('type')
                diskpoolpath = None
                for element in root.getiterator('path'):
                    diskpoolpath = element.text
                    break
            else:
                return {'result': 'failure', 'reason': "Invalid disk entry"}
            # virtio disks show up as vdX, anything else as hdX on ide.
            letter = chr(index + ord('a'))
            diskdev, diskbus = 'vd%s' % letter, 'virtio'
            if diskinterface != 'virtio':
                diskdev, diskbus = 'hd%s' % letter, 'ide'
            diskformat = 'qcow2'
            if not diskthin:
                diskformat = 'raw'
            storagename = "%s_%d.img" % (name, index + 1)
            diskpath = "%s/%s" % (diskpoolpath, storagename)
            # Only the first disk may be backed by a template image.
            if template is not None and index == 0:
                try:
                    default_storagepool.refresh(0)
                    if '/' in template:
                        backingvolume = volumespaths[template]['object']
                    else:
                        backingvolume = volumes[template]['object']
                    backingxml = backingvolume.XMLDesc(0)
                    root = ET.fromstring(backingxml)
                except:
                    return {'result': 'failure', 'reason': "Invalid template %s" % template}
                backing = backingvolume.path()
                # Template/pool type must agree (file vs lvm).
                if '/dev' in backing and diskpooltype == 'dir':
                    return {'result': 'failure', 'reason': "lvm template can not be used with a dir pool.Leaving..."}
                if '/dev' not in backing and diskpooltype == 'logical':
                    return {'result': 'failure', 'reason': "file template can not be used with a lvm pool.Leaving..."}
                backingxml = """<backingStore type='file' index='1'>
                                <format type='qcow2'/>
                                <source file='%s'/>
                                <backingStore/>
                                </backingStore>""" % backing
            else:
                backing = None
                backingxml = '<backingStore/>'
            volxml = self._xmlvolume(path=diskpath, size=disksize, pooltype=diskpooltype, backing=backing, diskformat=diskformat)
            # Group the volume XML by pool so they can be created per pool.
            if diskpool in volsxml:
                volsxml[diskpool].append(volxml)
            else:
                volsxml[diskpool] = [volxml]
            if diskpooltype == 'logical':
                diskformat = 'raw'
            disksxml = """%s<disk type='file' device='disk'>
                        <driver name='qemu' type='%s'/>
                        <source file='%s'/>
                        %s
                        <target dev='%s' bus='%s'/>
                        </disk>""" % (disksxml, diskformat, diskpath, backingxml, diskdev, diskbus)
        netxml = ''
        version = ''
        # Each net spec may be a plain name or a dict with name/ip/mac.
        for index, net in enumerate(nets):
            macxml = ''
            if isinstance(net, str):
                netname = net
            elif isinstance(net, dict) and 'name' in net:
                netname = net['name']
                ip = None
                if ips and len(ips) > index and ips[index] is not None:
                    ip = ips[index]
                    nets[index]['ip'] = ip
                elif 'ip' in nets[index]:
                    ip = nets[index]['ip']
                if 'mac' in nets[index]:
                    mac = nets[index]['mac']
                    macxml = "<mac address='%s'/>" % mac
                # The first NIC's ip is smuggled through smbios for guests.
                if index == 0 and ip is not None:
                    version = "<entry name='version'>%s</entry>" % ip
            if netname in bridges:
                sourcenet = 'bridge'
            elif netname in networks:
                sourcenet = 'network'
            else:
                return {'result': 'failure', 'reason': "Invalid network %s" % netname}
            netxml = """%s
                     <interface type='%s'>
                     %s
                     <source %s='%s'/>
                     <model type='virtio'/>
                     </interface>""" % (netxml, sourcenet, macxml, sourcenet, netname)
        version = """<sysinfo type='smbios'>
                  <system>
                  %s
                  <entry name='product'>%s</entry>
                  </system>
                  </sysinfo>""" % (version, title)
        # Resolve the cdrom: default cloudinit ISO path, or a named volume.
        if iso is None:
            if cloudinit:
                iso = "%s/%s.iso" % (default_poolpath, name)
            else:
                iso = ''
        else:
            try:
                if os.path.isabs(iso):
                    shortiso = os.path.basename(iso)
                else:
                    shortiso = iso
                isovolume = volumes[shortiso]['object']
                iso = isovolume.path()
                # iso = "%s/%s" % (default_poolpath, iso)
                # iso = "%s/%s" % (isopath, iso)
            except:
                return {'result': 'failure', 'reason': "Invalid iso %s" % iso}
        isoxml = """<disk type='file' device='cdrom'>
                  <driver name='qemu' type='raw'/>
                  <source file='%s'/>
                  <target dev='hdc' bus='ide'/>
                  <readonly/>
                  </disk>""" % (iso)
        displayxml = """<input type='tablet' bus='usb'/>
                        <input type='mouse' bus='ps2'/>
                        <graphics type='%s' port='-1' autoport='yes' listen='0.0.0.0'>
                        <listen type='address' address='0.0.0.0'/>
                        </graphics>
                        <memballoon model='virtio'/>""" % (display)
        # Expose vmx so nested virtualization works inside the guest.
        if nested and virttype == 'kvm':
            nestedxml = """<cpu match='exact'>
                  <model>Westmere</model>
                   <feature policy='require' name='vmx'/>
                  </cpu>"""
        else:
            nestedxml = ""
        # Local hosts get a pty console; remote ones a telnet serial port.
        if self.host in ['localhost', '127.0.0.1']:
            serialxml = """<serial type='pty'>
                       <target port='0'/>
                       </serial>
                       <console type='pty'>
                       <target type='serial' port='0'/>
                       </console>"""
        else:
            serialxml = """ <serial type="tcp">
                       <source mode="bind" host="127.0.0.1" service="%s"/>
                       <protocol type="telnet"/>
                       <target port="0"/>
                       </serial>""" % self._get_free_port()
        vmxml = """<domain type='%s'>
                <name>%s</name>
                <description>%s</description>
                %s
                <memory unit='MiB'>%d</memory>
                <vcpu>%d</vcpu>
                <os>
                <type arch='x86_64' machine='%s'>hvm</type>
                <boot dev='hd'/>
                <boot dev='cdrom'/>
                <bootmenu enable='yes'/>
                %s
                </os>
                <features>
                <acpi/>
                <apic/>
                <pae/>
                </features>
                <clock offset='utc'/>
                <on_poweroff>destroy</on_poweroff>
                <on_reboot>restart</on_reboot>
                <on_crash>restart</on_crash>
                <devices>
                %s
                %s
                %s
                %s
                %s
                </devices>
                %s
                </domain>""" % (virttype, name, description, version, memory, numcpus, machine, sysinfo, disksxml, netxml, isoxml, displayxml, serialxml, nestedxml)
        # Create all volumes, then define the domain and enable autostart.
        for pool in volsxml:
            storagepool = conn.storagePoolLookupByName(pool)
            storagepool.refresh(0)
            for volxml in volsxml[pool]:
                storagepool.createXML(volxml, 0)
        conn.defineXML(vmxml)
        vm = conn.lookupByName(name)
        vm.setAutostart(1)
        if cloudinit:
            self._cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain, reserveip=reserveip)
            self._uploadimage(name, pool=default_storagepool)
        # Reserve DHCP leases for the MACs libvirt actually assigned.
        if reserveip:
            xml = vm.XMLDesc(0)
            vmxml = ET.fromstring(xml)
            macs = []
            for element in vmxml.getiterator('interface'):
                mac = element.find('mac').get('address')
                macs.append(mac)
            self._reserve_ip(name, nets, macs)
        if start:
            vm.create()
        return {'result': 'success'}
def start(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
try:
vm = conn.lookupByName(name)
vm = conn.lookupByName(name)
if status[vm.isActive()] == "up":
return {'result': 'success'}
else:
vm.create()
return {'result': 'success'}
except:
return {'result': 'failure', 'reason': "VM %s not found" % name}
def stop(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
try:
vm = conn.lookupByName(name)
if status[vm.isActive()] == "down":
return {'result': 'success'}
else:
vm.destroy()
return {'result': 'success'}
except:
return {'result': 'failure', 'reason': "VM %s not found" % name}
def restart(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
vm = conn.lookupByName(name)
if status[vm.isActive()] == "down":
return {'result': 'success'}
else:
vm.restart()
return {'result': 'success'}
    def report(self):
        """Print a plain-text summary of the hypervisor: host cpu/memory,
        every storage pool with its usage, and all bridged/routed networks."""
        conn = self.conn
        hostname = conn.getHostname()
        cpus = conn.getCPUMap()[0]
        # getInfo()[1] is the host memory size in MB
        memory = conn.getInfo()[1]
        print("Host:%s Cpu:%s Memory:%sMB\n" % (hostname, cpus, memory))
        for pool in conn.listStoragePools():
            poolname = pool
            pool = conn.storagePoolLookupByName(pool)
            poolxml = pool.XMLDesc(0)
            root = ET.fromstring(poolxml)
            pooltype = root.getiterator('pool')[0].get('type')
            # dir pools report a filesystem path, others their source device
            if pooltype == 'dir':
                poolpath = root.getiterator('path')[0].text
            else:
                poolpath = root.getiterator('device')[0].get('path')
            # pool.info() -> [state, capacity, allocation, available] in bytes
            s = pool.info()
            used = "%.2f" % (float(s[2]) / 1024 / 1024 / 1024)
            available = "%.2f" % (float(s[3]) / 1024 / 1024 / 1024)
            # Type,Status, Total space in Gb, Available space in Gb
            used = float(used)
            available = float(available)
            print("Storage:%s Type:%s Path:%s Used space:%sGB Available space:%sGB" % (poolname, pooltype, poolpath, used, available))
            # bare python2 print statement: emits a blank separator line
            print
        for interface in conn.listAllInterfaces():
            interfacename = interface.name()
            if interfacename == 'lo':
                continue
            print("Network:%s Type:bridged" % (interfacename))
        for network in conn.listAllNetworks():
            networkname = network.name()
            netxml = network.XMLDesc(0)
            cidr = 'N/A'
            root = ET.fromstring(netxml)
            ip = root.getiterator('ip')
            if ip:
                attributes = ip[0].attrib
                firstip = attributes.get('address')
                netmask = attributes.get('netmask')
                # netaddr renders address/netmask as a printable cidr
                ip = IPNetwork('%s/%s' % (firstip, netmask))
                cidr = ip.cidr
            dhcp = root.getiterator('dhcp')
            if dhcp:
                dhcp = True
            else:
                dhcp = False
            print("Network:%s Type:routed Cidr:%s Dhcp:%s" % (networkname, cidr, dhcp))
def status(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
try:
vm = conn.lookupByName(name)
except:
return None
return status[vm.isActive()]
    def list(self):
        """Return [name, state, ip, template, description, title] rows for
        every domain known to the hypervisor."""
        vms = []
        leases = {}
        conn = self.conn
        # map mac -> leased ip across every libvirt network
        for network in conn.listAllNetworks():
            for lease in network.DHCPLeases():
                ip = lease['ipaddr']
                mac = lease['mac']
                leases[mac] = ip
        status = {0: 'down', 1: 'up'}
        for vm in conn.listAllDomains(0):
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
            description = root.getiterator('description')
            if description:
                description = description[0].text
            else:
                description = ''
            name = vm.name()
            state = status[vm.isActive()]
            ips = []
            title = ''
            for element in root.getiterator('interface'):
                mac = element.find('mac').get('address')
                if vm.isActive():
                    if mac in leases:
                        ips.append(leases[mac])
            # keep the last lease found; empty string when no ip is known
            if ips:
                ip = ips[-1]
            else:
                ip = ''
            # smbios entries carry a statically reserved ip ('version')
            # and the creation profile/template title ('product')
            for entry in root.getiterator('entry'):
                attributes = entry.attrib
                if attributes['name'] == 'version':
                    ip = entry.text
                if attributes['name'] == 'product':
                    title = entry.text
            source = ''
            # the first disk backingStore reveals the template it came from
            for element in root.getiterator('backingStore'):
                s = element.find('source')
                if s is not None:
                    source = os.path.basename(s.get('file'))
                    break
            vms.append([name, state, ip, source, description, title])
        return vms
    def console(self, name):
        """Open a graphical (vnc/spice) console to VM *name* by launching
        remote-viewer against the display port libvirt allocated."""
        conn = self.conn
        vm = conn.lookupByName(name)
        if not vm.isActive():
            print("VM down")
            return
        else:
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
            for element in root.getiterator('graphics'):
                attributes = element.attrib
                # a display bound to loopback must be reached via loopback
                if attributes['listen'] == '127.0.0.1':
                    host = '127.0.0.1'
                else:
                    host = self.host
                protocol = attributes['type']
                port = attributes['port']
                url = "%s://%s:%s" % (protocol, host, port)
                # fire and forget; '&' keeps the viewer from blocking us
                os.popen("remote-viewer %s &" % url)
    def serialconsole(self, name):
        """Attach to the serial console of VM *name*.

        Locally this shells out to ``virsh console``; remotely it bridges
        through ssh + nc to the tcp serial port allocated at creation time.
        """
        conn = self.conn
        vm = conn.lookupByName(name)
        if not vm.isActive():
            print("VM down")
            return
        else:
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
            serial = root.getiterator('serial')
            if not serial:
                print("No serial Console found. Leaving...")
                return
            elif self.host in ['localhost', '127.0.0.1']:
                os.system('virsh console %s' % name)
            else:
                for element in serial:
                    serialport = element.find('source').get('service')
                    if serialport:
                        if self.protocol != 'ssh':
                            print("Remote serial Console requires using ssh . Leaving...")
                            return
                        else:
                            # nc on the hypervisor relays the local tcp
                            # serial port over the ssh channel
                            serialcommand = "ssh -p %s %s@%s nc 127.0.0.1 %s" % (self.port, self.user, self.host, serialport)
                            os.system(serialcommand)
    def info(self, name):
        """Print a human readable report of VM *name*: state, cpu/memory,
        nics (with leased or reserved ip) and disks."""
        # ips = []
        leases = {}
        conn = self.conn
        # map mac -> leased ip across every libvirt network
        for network in conn.listAllNetworks():
            for lease in network.DHCPLeases():
                ip = lease['ipaddr']
                mac = lease['mac']
                leases[mac] = ip
        try:
            vm = conn.lookupByName(name)
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
        except:
            print("VM %s not found" % name)
            return
        state = 'down'
        memory = root.getiterator('memory')[0]
        unit = memory.attrib['unit']
        memory = memory.text
        # normalize KiB (libvirt's default unit) to MB for display
        if unit == 'KiB':
            memory = float(memory) / 1024
            memory = int(memory)
        numcpus = root.getiterator('vcpu')[0]
        numcpus = numcpus.text
        if vm.isActive():
            state = 'up'
        print("name: %s" % name)
        print("status: %s" % state)
        description = root.getiterator('description')
        if description:
            description = description[0].text
        else:
            description = ''
        title = None
        # the smbios 'product' entry stores the profile used at creation
        for entry in root.getiterator('entry'):
            attributes = entry.attrib
            if attributes['name'] == 'product':
                title = entry.text
        print("description: %s" % description)
        if title is not None:
            print("profile: %s" % title)
        print("cpus: %s" % numcpus)
        print("memory: %sMB" % memory)
        nicnumber = 0
        for element in root.getiterator('interface'):
            networktype = element.get('type')
            device = "eth%s" % nicnumber
            mac = element.find('mac').get('address')
            if networktype == 'bridge':
                bridge = element.find('source').get('bridge')
                print("net interfaces: %s mac: %s net: %s type: bridge" % (device, mac, bridge))
            else:
                network = element.find('source').get('network')
                print("net interfaces:%s mac: %s net: %s type: routed" % (device, mac, network))
                network = conn.networkLookupByName(network)
            if vm.isActive():
                if mac in leases:
                    # ips.append(leases[mac])
                    print("ip: %s" % leases[mac])
            nicnumber = nicnumber + 1
        # the smbios 'version' entry stores a statically reserved ip
        for entry in root.getiterator('entry'):
            attributes = entry.attrib
            if attributes['name'] == 'version':
                ip = entry.text
                print("ip: %s" % ip)
                break
        for element in root.getiterator('disk'):
            disktype = element.get('device')
            # skip cdroms (cloudinit / install isos)
            if disktype == 'cdrom':
                continue
            device = element.find('target').get('dev')
            diskformat = 'file'
            drivertype = element.find('driver').get('type')
            path = element.find('source').get('file')
            volume = conn.storageVolLookupByPath(path)
            # volume.info()[1] is the capacity in bytes
            disksize = int(float(volume.info()[1]) / 1024 / 1024 / 1024)
            print("diskname: %s disksize: %sGB diskformat: %s type: %s path: %s" % (device, disksize, diskformat, drivertype, path))
    def volumes(self, iso=False):
        """Return iso paths (iso=True) or template/qcow2 volume paths found
        across all storage pools.

        :param iso: when True list *.iso volumes instead of templates
        :return: list of full volume paths
        """
        isos = []
        templates = []
        # TEMPLATES maps template shortnames to urls; comparing basenames
        # recognizes downloaded templates regardless of their extension
        default_templates = [os.path.basename(t) for t in TEMPLATES.values()]
        conn = self.conn
        for storage in conn.listStoragePools():
            storage = conn.storagePoolLookupByName(storage)
            storage.refresh(0)
            storagexml = storage.XMLDesc(0)
            root = ET.fromstring(storagexml)
            # first <path> element is the pool's target directory
            for element in root.getiterator('path'):
                storagepath = element.text
                break
            for volume in storage.listVolumes():
                if volume.endswith('iso'):
                    isos.append("%s/%s" % (storagepath, volume))
                elif volume.endswith('qcow2') or volume in default_templates:
                    templates.append("%s/%s" % (storagepath, volume))
        if iso:
            return isos
        else:
            return templates
    def delete(self, name):
        """Remove VM *name* completely: destroy if running, undefine the
        domain, delete its disks/cloudinit iso and drop any static dhcp
        reservation matching its macs."""
        conn = self.conn
        try:
            vm = conn.lookupByName(name)
        except:
            return
        status = {0: 'down', 1: 'up'}
        vmxml = vm.XMLDesc(0)
        root = ET.fromstring(vmxml)
        disks = []
        # collect only the disks that belong to this vm (name convention)
        for element in root.getiterator('disk'):
            source = element.find('source')
            if source is not None:
                imagefile = element.find('source').get('file')
                if imagefile == "%s.iso" % name or name in imagefile:
                    disks.append(imagefile)
                else:
                    continue
        if status[vm.isActive()] != "down":
            vm.destroy()
        vm.undefine()
        for storage in conn.listStoragePools():
            deleted = False
            storage = conn.storagePoolLookupByName(storage)
            storage.refresh(0)
            for stor in storage.listVolumes():
                for disk in disks:
                    if stor in disk:
                        try:
                            volume = storage.storageVolLookupByName(stor)
                        except:
                            continue
                        volume.delete(0)
                        deleted = True
            # refresh again so the pool forgets the deleted volumes
            if deleted:
                storage.refresh(0)
        # drop any static dhcp <host> entry reserved for this vm's macs
        for element in root.getiterator('interface'):
            mac = element.find('mac').get('address')
            networktype = element.get('type')
            if networktype != 'bridge':
                network = element.find('source').get('network')
                network = conn.networkLookupByName(network)
                netxml = network.XMLDesc(0)
                root = ET.fromstring(netxml)
                for host in root.getiterator('host'):
                    hostmac = host.get('mac')
                    ip = host.get('ip')
                    # NOTE(review): rebinds the *name* parameter
                    name = host.get('name')
                    if hostmac == mac:
                        hostentry = "<host mac='%s' name='%s' ip='%s'/>" % (mac, name, ip)
                        # update(2, 4, ...): presumably delete-command on the
                        # ip-dhcp-host section — TODO confirm constants
                        network.update(2, 4, 0, hostentry, 1)
def _xmldisk(self, diskpath, diskdev, diskbus='virtio', diskformat='qcow2', shareable=False):
if shareable:
sharexml = '<shareable/>'
else:
sharexml = ''
diskxml = """<disk type='file' device='disk'>
<driver name='qemu' type='%s' cache='none'/>
<source file='%s'/>
<target bus='%s' dev='%s'/>
%s
</disk>""" % (diskformat, diskpath, diskbus, diskdev, sharexml)
return diskxml
    def _xmlvolume(self, path, size, pooltype='file', backing=None, diskformat='qcow2'):
        """Build libvirt volume XML for *path*.

        :param path: target volume path; its basename becomes the name
        :param size: capacity in GB (scaled by the module-level MB constant);
                     0 is bumped to a minimal capacity (used for tiny
                     cloudinit isos)
        :param pooltype: 'file' or 'block' (lvm)
        :param backing: optional backing volume path for thin provisioning
        :param diskformat: qcow2/raw
        :return: volume XML string
        """
        size = int(size) * MB
        if int(size) == 0:
            # zero-size request: fall back to a minimal capacity
            size = 500 * 1024
        name = os.path.basename(path)
        if pooltype == 'block':
            volume = """<volume type='block'>
                        <name>%s</name>
                        <capacity unit="bytes">%d</capacity>
                        <target>
                        <path>%s</path>
                        <compat>1.1</compat>
                        </target>
                        </volume>""" % (name, size, path)
            return volume
        if backing is not None:
            backingstore = """
                        <backingStore>
                        <path>%s</path>
                        <format type='%s'/>
                        </backingStore>""" % (backing, diskformat)
        else:
            backingstore = "<backingStore/>"
        volume = """
                    <volume type='file'>
                    <name>%s</name>
                    <capacity unit="bytes">%d</capacity>
                    <target>
                    <path>%s</path>
                    <format type='%s'/>
                    <permissions>
                    <mode>0644</mode>
                    </permissions>
                    <compat>1.1</compat>
                    </target>
                    %s
                    </volume>""" % (name, size, path, diskformat, backingstore)
        return volume
def clone(self, old, new, full=False, start=False):
conn = self.conn
oldvm = conn.lookupByName(old)
oldxml = oldvm.XMLDesc(0)
tree = ET.fromstring(oldxml)
uuid = tree.getiterator('uuid')[0]
tree.remove(uuid)
for vmname in tree.getiterator('name'):
vmname.text = new
firstdisk = True
for disk in tree.getiterator('disk'):
if firstdisk or full:
source = disk.find('source')
oldpath = source.get('file')
backingstore = disk.find('backingStore')
backing = None
for b in backingstore.getiterator():
backingstoresource = b.find('source')
if backingstoresource is not None:
backing = backingstoresource.get('file')
newpath = oldpath.replace(old, new)
source.set('file', newpath)
oldvolume = conn.storageVolLookupByPath(oldpath)
oldinfo = oldvolume.info()
oldvolumesize = (float(oldinfo[1]) / 1024 / 1024 / 1024)
newvolumexml = self._xmlvolume(newpath, oldvolumesize, backing)
pool = oldvolume.storagePoolLookupByVolume()
pool.createXMLFrom(newvolumexml, oldvolume, 0)
firstdisk = False
else:
devices = tree.getiterator('devices')[0]
devices.remove(disk)
for interface in tree.getiterator('interface'):
mac = interface.find('mac')
interface.remove(mac)
if self.host not in ['127.0.0.1', 'localhost']:
for serial in tree.getiterator('serial'):
source = serial.find('source')
source.set('service', str(self._get_free_port()))
newxml = ET.tostring(tree)
conn.defineXML(newxml)
vm = conn.lookupByName(new)
if start:
vm.setAutostart(1)
vm.create()
    def _reserve_ip(self, name, nets, macs):
        """Add a static dhcp <host> reservation for every net spec carrying
        an 'ip', pairing it with the vm's mac for that nic.

        :param name: vm name stored in the reservation
        :param nets: list of net specs (plain strings are skipped)
        :param macs: vm nic macs, in the same order as *nets*
        """
        conn = self.conn
        for index, net in enumerate(nets):
            if not isinstance(net, dict):
                continue
            ip = net.get('ip')
            network = net.get('name')
            mac = macs[index]
            if ip is None or network is None:
                continue
            network = conn.networkLookupByName(network)
            oldnetxml = network.XMLDesc()
            root = ET.fromstring(oldnetxml)
            ipentry = root.getiterator('ip')
            if ipentry:
                attributes = ipentry[0].attrib
                firstip = attributes.get('address')
                netmask = attributes.get('netmask')
                netip = IPNetwork('%s/%s' % (firstip, netmask))
            dhcp = root.getiterator('dhcp')
            # a network without dhcp cannot hold host reservations
            if not dhcp:
                continue
            # only reserve when the requested ip belongs to the net's cidr
            if not IPAddress(ip) in netip:
                continue
            # update(4, 4, ...): presumably add-last on the ip-dhcp-host
            # section — TODO confirm constants against libvirt docs
            network.update(4, 4, 0, '<host mac="%s" name="%s" ip="%s" />' % (mac, name, ip), 1)
def _cloudinit(self, name, keys=None, cmds=None, nets=[], gateway=None, dns=None, domain=None, reserveip=False):
default_gateway = gateway
with open('/tmp/meta-data', 'w') as metadatafile:
if domain is not None:
localhostname = "%s.%s" % (name, domain)
else:
localhostname = name
metadatafile.write('instance-id: XXX\nlocal-hostname: %s\n' % localhostname)
metadata = ''
if nets:
for index, net in enumerate(nets):
if isinstance(net, str):
if index == 0:
continue
nicname = "eth%d" % index
ip = None
netmask = None
elif isinstance(net, dict):
nicname = net.get('nic', "eth%d" % index)
ip = net.get('ip')
netmask = net.get('mask')
metadata += " auto %s\n" % nicname
if ip is not None and netmask is not None and not reserveip:
metadata += " iface %s inet static\n" % nicname
metadata += " address %s\n" % ip
metadata += " netmask %s\n" % netmask
gateway = net.get('gateway')
if index == 0 and default_gateway is not None:
metadata += " gateway %s\n" % default_gateway
elif gateway is not None:
metadata += " gateway %s\n" % gateway
dns = net.get('dns')
if dns is not None:
metadata += " dns-nameservers %s\n" % dns
domain = net.get('domain')
if domain is not None:
metadatafile.write(" dns-search %s\n" % domain)
else:
metadata += " iface %s inet dhcp\n" % nicname
if metadata:
metadatafile.write("network-interfaces: |\n")
metadatafile.write(metadata)
# if dns is not None:
# metadatafile.write(" dns-nameservers %s\n" % dns)
# if domain is not None:
# metadatafile.write(" dns-search %s\n" % domain)
with open('/tmp/user-data', 'w') as userdata:
userdata.write('#cloud-config\nhostname: %s\n' % name)
if domain is not None:
userdata.write("fqdn: %s.%s\n" % (name, domain))
if keys is not None:
userdata.write("ssh_authorized_keys:\n")
for key in keys:
userdata.write("- %s\n" % key)
elif os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']):
publickeyfile = "%s/.ssh/id_rsa.pub" % os.environ['HOME']
with open(publickeyfile, 'r') as ssh:
key = ssh.read().rstrip()
userdata.write("ssh_authorized_keys:\n")
userdata.write("- %s\n" % key)
elif os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
publickeyfile = "%s/.ssh/id_dsa.pub" % os.environ['HOME']
with open(publickeyfile, 'r') as ssh:
key = ssh.read().rstrip()
userdata.write("ssh_authorized_keys:\n")
userdata.write("- %s\n" % key)
else:
print("neither id_rsa.pub or id_dsa public keys found in your .ssh directory, you might have trouble accessing the vm")
if cmds is not None:
userdata.write("runcmd:\n")
for cmd in cmds:
if cmd.startswith('#'):
continue
else:
userdata.write("- %s\n" % cmd)
isocmd = 'mkisofs'
if find_executable('genisoimage') is not None:
isocmd = 'genisoimage'
os.system("%s --quiet -o /tmp/%s.iso --volid cidata --joliet --rock /tmp/user-data /tmp/meta-data" % (isocmd, name))
def handler(self, stream, data, file_):
return file_.read(data)
    def _uploadimage(self, name, pool='default', origin='/tmp', suffix='.iso'):
        """Upload <origin>/<name><suffix> (typically the cloudinit iso built
        by _cloudinit) into *pool* through a libvirt stream.

        NOTE(review): *pool* is used as a storage-pool object
        (pool.XMLDesc/createXML), so the 'default' string default would
        crash — callers must pass a pool object. TODO confirm intent.
        """
        name = "%s%s" % (name, suffix)
        conn = self.conn
        poolxml = pool.XMLDesc(0)
        root = ET.fromstring(poolxml)
        # first <path> element is the pool's target directory
        for element in root.getiterator('path'):
            poolpath = element.text
            break
        imagepath = "%s/%s" % (poolpath, name)
        # size=0: _xmlvolume bumps it to a minimal capacity for the iso
        imagexml = self._xmlvolume(path=imagepath, size=0, diskformat='raw')
        pool.createXML(imagexml, 0)
        imagevolume = conn.storageVolLookupByPath(imagepath)
        stream = conn.newStream(0)
        imagevolume.upload(stream, 0, 0)
        with open("%s/%s" % (origin, name)) as ori:
            stream.sendAll(self.handler, ori)
            stream.finish()
    def update_ip(self, name, ip):
        """Store *ip* in the domain's smbios sysinfo 'version' entry so
        list()/info() can report it without dhcp.

        Creates the <smbios>/<sysinfo>/<system> scaffolding when missing and
        re-defines the domain (a running VM picks it up on next boot).
        """
        conn = self.conn
        vm = conn.lookupByName(name)
        xml = vm.XMLDesc(0)
        root = ET.fromstring(xml)
        # NOTE(review): lookupByName raises on a missing vm, so this check
        # can never trigger; kept as-is
        if not vm:
            print("VM %s not found" % name)
        if vm.isActive() == 1:
            print("Machine up. Change will only appear upon next reboot")
        osentry = root.getiterator('os')[0]
        smbios = osentry.find('smbios')
        if smbios is None:
            newsmbios = ET.Element("smbios", mode="sysinfo")
            osentry.append(newsmbios)
        sysinfo = root.getiterator('sysinfo')
        system = root.getiterator('system')
        if not sysinfo:
            sysinfo = ET.Element("sysinfo", type="smbios")
            root.append(sysinfo)
        sysinfo = root.getiterator('sysinfo')[0]
        if not system:
            system = ET.Element("system")
            sysinfo.append(system)
        system = root.getiterator('system')[0]
        versionfound = False
        # update an existing 'version' entry in place when present
        for entry in root.getiterator('entry'):
            attributes = entry.attrib
            if attributes['name'] == 'version':
                entry.text = ip
                versionfound = True
        if not versionfound:
            version = ET.Element("entry", name="version")
            version.text = ip
            system.append(version)
        newxml = ET.tostring(root)
        conn.defineXML(newxml)
def update_memory(self, name, memory):
conn = self.conn
memory = str(int(memory) * 1024)
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
memorynode = root.getiterator('memory')[0]
memorynode.text = memory
currentmemory = root.getiterator('currentMemory')[0]
currentmemory.text = memory
newxml = ET.tostring(root)
conn.defineXML(newxml)
def update_cpu(self, name, numcpus):
conn = self.conn
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
cpunode = root.getiterator('vcpu')[0]
cpunode.text = numcpus
newxml = ET.tostring(root)
conn.defineXML(newxml)
def update_start(self, name, start=True):
conn = self.conn
try:
vm = conn.lookupByName(name)
except:
print("VM %s not found" % name)
return {'result': 'failure', 'reason': "VM %s not found" % name}
if start:
vm.setAutostart(1)
else:
vm.setAutostart(0)
return {'result': 'success'}
    def create_disk(self, name, size, pool=None, thin=True, template=None):
        """Create a standalone disk volume *name* of *size* GB in *pool*.

        :param name: volume (file) name inside the pool
        :param size: size in GB; must be >= 1
        :param pool: storage pool name (required in practice)
        :param thin: qcow2 when True, raw otherwise (lvm pools force raw)
        :param template: optional backing volume, by name or full path
        :return: the created disk path, or None on validation errors
        """
        conn = self.conn
        diskformat = 'qcow2'
        if size < 1:
            print("Incorrect size.Leaving...")
            return
        if not thin:
            diskformat = 'raw'
        if pool is not None:
            pool = conn.storagePoolLookupByName(pool)
            poolxml = pool.XMLDesc(0)
            poolroot = ET.fromstring(poolxml)
            pooltype = poolroot.getiterator('pool')[0].get('type')
            for element in poolroot.getiterator('path'):
                poolpath = element.text
                break
        else:
            print("Pool not found. Leaving....")
            return
        if template is not None:
            # resolve a template name to its full path across all pools
            volumes = {}
            for p in conn.listStoragePools():
                poo = conn.storagePoolLookupByName(p)
                for vol in poo.listAllVolumes():
                    volumes[vol.name()] = vol.path()
            # NOTE(review): an invalid template only prints a warning and
            # falls through — confirm whether it should abort instead
            if template not in volumes and template not in volumes.values():
                print("Invalid template %s.Leaving..." % template)
            if template in volumes:
                template = volumes[template]
        pool.refresh(0)
        diskpath = "%s/%s" % (poolpath, name)
        # lvm volumes cannot be qcow2
        if pooltype == 'logical':
            diskformat = 'raw'
        volxml = self._xmlvolume(path=diskpath, size=size, pooltype=pooltype,
                                 diskformat=diskformat, backing=template)
        pool.createXML(volxml, 0)
        return diskpath
# def add_disk(self, name, size, pool=None, thin=True, template=None, shareable=False):
# conn = self.conn
# diskformat = 'qcow2'
# diskbus = 'virtio'
# if size < 1:
# print("Incorrect size.Leaving...")
# return
# if not thin:
# diskformat = 'raw'
# try:
# vm = conn.lookupByName(name)
# xml = vm.XMLDesc(0)
# root = ET.fromstring(xml)
# except:
# print("VM %s not found" % name)
# return
# currentdisk = 0
# for element in root.getiterator('disk'):
# disktype = element.get('device')
# if disktype == 'cdrom':
# continue
# currentdisk = currentdisk + 1
# diskindex = currentdisk + 1
# diskdev = "vd%s" % string.ascii_lowercase[currentdisk]
# if pool is not None:
# pool = conn.storagePoolLookupByName(pool)
# poolxml = pool.XMLDesc(0)
# poolroot = ET.fromstring(poolxml)
# pooltype = poolroot.getiterator('pool')[0].get('type')
# for element in poolroot.getiterator('path'):
# poolpath = element.text
# break
# else:
# print("Pool not found. Leaving....")
# return
# if template is not None:
# volumes = {}
# for p in conn.listStoragePools():
# poo = conn.storagePoolLookupByName(p)
# for vol in poo.listAllVolumes():
# volumes[vol.name()] = vol.path()
# if template not in volumes and template not in volumes.values():
# print("Invalid template %s.Leaving..." % template)
# if template in volumes:
# template = volumes[template]
# pool.refresh(0)
# storagename = "%s_%d.img" % (name, diskindex)
# diskpath = "%s/%s" % (poolpath, storagename)
# volxml = self._xmlvolume(path=diskpath, size=size, pooltype=pooltype,
# diskformat=diskformat, backing=template)
# if pooltype == 'logical':
# diskformat = 'raw'
# diskxml = self._xmldisk(diskpath=diskpath, diskdev=diskdev, diskbus=diskbus, diskformat=diskformat, shareable=shareable)
# pool.createXML(volxml, 0)
# vm.attachDevice(diskxml)
# vm = conn.lookupByName(name)
# vmxml = vm.XMLDesc(0)
# conn.defineXML(vmxml)
def add_disk(self, name, size, pool=None, thin=True, template=None, shareable=False, existing=None):
conn = self.conn
diskformat = 'qcow2'
diskbus = 'virtio'
if size < 1:
print("Incorrect size.Leaving...")
return
if not thin:
diskformat = 'raw'
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
currentdisk = 0
for element in root.getiterator('disk'):
disktype = element.get('device')
if disktype == 'cdrom':
continue
currentdisk = currentdisk + 1
diskindex = currentdisk + 1
diskdev = "vd%s" % string.ascii_lowercase[currentdisk]
if existing is None:
storagename = "%s_%d.img" % (name, diskindex)
diskpath = self.create_disk(name=storagename, size=size, pool=pool, thin=thin, template=template)
else:
diskpath = existing
diskxml = self._xmldisk(diskpath=diskpath, diskdev=diskdev, diskbus=diskbus, diskformat=diskformat, shareable=shareable)
vm.attachDevice(diskxml)
vm = conn.lookupByName(name)
vmxml = vm.XMLDesc(0)
conn.defineXML(vmxml)
    def delete_disk(self, name, diskname):
        """Detach disk *diskname* (volume name or path) from VM *name*,
        delete the backing volume and re-define the domain."""
        conn = self.conn
        try:
            vm = conn.lookupByName(name)
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
        except:
            print("VM %s not found" % name)
            return
        for element in root.getiterator('disk'):
            disktype = element.get('device')
            diskdev = element.find('target').get('dev')
            diskbus = element.find('target').get('bus')
            diskformat = element.find('driver').get('type')
            # cdroms (isos) are never deleted through this path
            if disktype == 'cdrom':
                continue
            diskpath = element.find('source').get('file')
            volume = self.conn.storageVolLookupByPath(diskpath)
            if volume.name() == diskname or volume.path() == diskname:
                # rebuild the exact device XML so detachDevice matches it
                diskxml = self._xmldisk(diskpath=diskpath, diskdev=diskdev, diskbus=diskbus, diskformat=diskformat)
                vm.detachDevice(diskxml)
                volume.delete(0)
                vm = conn.lookupByName(name)
                vmxml = vm.XMLDesc(0)
                conn.defineXML(vmxml)
                return
        print("Disk %s not found in %s" % (diskname, name))
def list_disks(self):
volumes = {}
for p in self.conn.listStoragePools():
poo = self.conn.storagePoolLookupByName(p)
for volume in poo.listAllVolumes():
volumes[volume.name()] = {'pool': poo.name(), 'path': volume.path()}
return volumes
def add_nic(self, name, network):
conn = self.conn
networks = {}
for interface in conn.listAllInterfaces():
networks[interface.name()] = 'bridge'
for net in conn.listAllNetworks():
networks[net.name()] = 'network'
try:
vm = conn.lookupByName(name)
except:
print("VM %s not found" % name)
return
if network not in networks:
print("Network %s not found" % network)
return
else:
networktype = networks[network]
source = "<source %s='%s'/>" % (networktype, network)
nicxml = """<interface type='%s'>
%s
<model type='virtio'/>
</interface>""" % (networktype, source)
vm.attachDevice(nicxml)
vm = conn.lookupByName(name)
vmxml = vm.XMLDesc(0)
conn.defineXML(vmxml)
def delete_nic(self, name, interface):
conn = self.conn
networks = {}
nicnumber = 0
for n in conn.listAllInterfaces():
networks[n.name()] = 'bridge'
for n in conn.listAllNetworks():
networks[n.name()] = 'network'
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
for element in root.getiterator('interface'):
device = "eth%s" % nicnumber
if device == interface:
mac = element.find('mac').get('address')
networktype = element.get('type')
if networktype == 'bridge':
network = element.find('source').get('bridge')
source = "<source %s='%s'/>" % (networktype, network)
else:
network = element.find('source').get('network')
source = "<source %s='%s'/>" % (networktype, network)
break
else:
nicnumber += 1
nicxml = """<interface type='%s'>
<mac address='%s'/>
%s
<model type='virtio'/>
</interface>""" % (networktype, mac, source)
print nicxml
vm.detachDevice(nicxml)
vm = conn.lookupByName(name)
vmxml = vm.XMLDesc(0)
conn.defineXML(vmxml)
    def ssh(self, name, local=None, remote=None):
        """ssh into VM *name*, guessing the cloud user from its template
        and the ip from list(); supports optional port forwardings.

        :param local: argument for ssh -L (local forwarding)
        :param remote: argument for ssh -R (remote forwarding)
        """
        ubuntus = ['utopic', 'vivid', 'wily', 'xenial', 'yakkety']
        user = 'root'
        conn = self.conn
        try:
            vm = conn.lookupByName(name)
        except:
            print("VM %s not found" % name)
            return
        if vm.isActive() != 1:
            print("Machine down. Cannot ssh...")
            return
        # reuse list() rows: [name, state, ip, template, description, title]
        vm = [v for v in self.list() if v[0] == name][0]
        template = vm[3]
        # each distro cloud image ships a different default user
        if template != '':
            if 'centos' in template.lower():
                user = 'centos'
            elif 'cirros' in template.lower():
                user = 'cirros'
            elif [x for x in ubuntus if x in template.lower()]:
                user = 'ubuntu'
            elif 'fedora' in template.lower():
                user = 'fedora'
            elif 'rhel' in template.lower():
                user = 'cloud-user'
            elif 'debian' in template.lower():
                user = 'debian'
            elif 'arch' in template.lower():
                user = 'arch'
        ip = vm[2]
        if ip == '':
            print("No ip found. Cannot ssh...")
        else:
            sshcommand = "%s@%s" % (user, ip)
            if local is not None:
                sshcommand = "-L %s %s" % (local, sshcommand)
            if remote is not None:
                sshcommand = "-R %s %s" % (remote, sshcommand)
            sshcommand = "ssh %s" % sshcommand
            os.system(sshcommand)
def _get_free_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
def create_pool(self, name, poolpath, pooltype='dir', user='qemu'):
conn = self.conn
for pool in conn.listStoragePools():
if pool == name:
print("Pool %s already there.Leaving..." % name)
return
if pooltype == 'dir':
if self.host == 'localhost' or self.host == '127.0.0.1':
if not os.path.exists(poolpath):
os.makedirs(poolpath)
elif self.protocol == 'ssh':
cmd1 = 'ssh -p %s %s@%s "test -d %s || mkdir %s"' % (self.port, self.user, self.host, poolpath, poolpath)
cmd2 = 'ssh %s@%s "chown %s %s"' % (self.user, self.host, user, poolpath)
os.system(cmd1)
os.system(cmd2)
else:
print("Make sur %s directory exists on hypervisor" % name)
poolxml = """<pool type='dir'>
<name>%s</name>
<source>
</source>
<target>
<path>%s</path>
</target>
</pool>""" % (name, poolpath)
elif pooltype == 'logical':
poolxml = """<pool type='logical'>
<name>%s</name>
<source>
<device path='%s'/>
<name>%s</name>
<format type='lvm2'/>
</source>
<target>
<path>/dev/%s</path>
</target>
</pool>""" % (name, poolpath, name, name)
else:
print("Invalid pool type %s.Leaving..." % pooltype)
return
pool = conn.storagePoolDefineXML(poolxml, 0)
pool.setAutostart(True)
if pooltype == 'logical':
pool.build()
pool.create()
def add_image(self, image, pool):
poolname = pool
shortimage = os.path.basename(image)
conn = self.conn
volumes = []
try:
pool = conn.storagePoolLookupByName(pool)
for vol in pool.listAllVolumes():
volumes.append(vol.name())
except:
return {'result': 'failure', 'reason': "Pool %s not found" % poolname}
poolxml = pool.XMLDesc(0)
root = ET.fromstring(poolxml)
pooltype = root.getiterator('pool')[0].get('type')
if pooltype == 'dir':
poolpath = root.getiterator('path')[0].text
else:
poolpath = root.getiterator('device')[0].get('path')
return {'result': 'failure', 'reason': "Upload to a lvm pool not implemented not found"}
if shortimage in volumes:
return {'result': 'failure', 'reason': "Template %s already exists in pool %s" % (shortimage, poolname)}
if self.host == 'localhost' or self.host == '127.0.0.1':
cmd = 'wget -P %s %s' % (poolpath, image)
elif self.protocol == 'ssh':
cmd = 'ssh -p %s %s@%s "wget -P %s %s"' % (self.port, self.user, self.host, poolpath, image)
os.system(cmd)
pool.refresh()
# self._uploadimage(shortimage, pool=pool, suffix='')
return {'result': 'success'}
    def create_network(self, name, cidr, dhcp=True, nat=True):
        """Create a routed libvirt network *name* on *cidr*.

        The gateway takes the first host address of the range; the dhcp
        range (when enabled) spans the rest of it.

        :param name: network name (also used as its dns domain)
        :param cidr: network cidr, e.g. 192.168.122.0/24
        :param dhcp: enable a dhcp range
        :param nat: add nat forwarding
        :return: result dict
        """
        conn = self.conn
        networks = self.list_networks()
        cidrs = [network['cidr'] for network in networks.values()]
        if name in networks:
            return {'result': 'failure', 'reason': "Network %s already exists" % name}
        try:
            # NOTE(review): shadows the builtin range (python2-era code)
            range = IpRange(cidr)
        except TypeError:
            return {'result': 'failure', 'reason': "Invalid Cidr %s" % cidr}
        if IPNetwork(cidr) in cidrs:
            return {'result': 'failure', 'reason': "Cidr %s already exists" % cidr}
        netmask = IPNetwork(cidr).netmask
        gateway = range[1]
        if dhcp:
            start = range[2]
            end = range[-2]
            dhcpxml = """<dhcp>
                    <range start='%s' end='%s'/>
                    </dhcp>""" % (start, end)
        else:
            dhcpxml = ''
        if nat:
            natxml = "<forward mode='nat'><nat><port start='1024' end='65535'/></nat></forward>"
        else:
            natxml = ''
        networkxml = """<network><name>%s</name>
                    %s
                    <domain name='%s'/>
                    <ip address='%s' netmask='%s'>
                    %s
                    </ip>
                    </network>""" % (name, natxml, name, gateway, netmask, dhcpxml)
        new_net = conn.networkDefineXML(networkxml)
        new_net.setAutostart(True)
        new_net.create()
        return {'result': 'success'}
def delete_network(self, name=None):
conn = self.conn
try:
network = conn.networkLookupByName(name)
except:
return {'result': 'failure', 'reason': "Network %s not found" % name}
machines = self.network_ports(name)
if machines:
machines = ','.join(machines)
return {'result': 'failure', 'reason': "Network %s is beeing used by %s" % (name, machines)}
if network.isActive():
network.destroy()
network.undefine()
return {'result': 'success'}
def list_pools(self):
pools = []
conn = self.conn
for pool in conn.listStoragePools():
pools.append(pool)
return pools
    def list_networks(self):
        """Return {name: {'cidr', 'dhcp', 'type', 'mode'}} covering every
        libvirt network (type 'routed') and host bridge (type 'bridged')."""
        networks = {}
        conn = self.conn
        for network in conn.listAllNetworks():
            networkname = network.name()
            netxml = network.XMLDesc(0)
            cidr = 'N/A'
            root = ET.fromstring(netxml)
            ip = root.getiterator('ip')
            if ip:
                attributes = ip[0].attrib
                firstip = attributes.get('address')
                netmask = attributes.get('netmask')
                # netaddr renders address/netmask as a printable cidr
                ip = IPNetwork('%s/%s' % (firstip, netmask))
                cidr = ip.cidr
            dhcp = root.getiterator('dhcp')
            if dhcp:
                dhcp = True
            else:
                dhcp = False
            # forward mode (nat/route/...); 'isolated' when no <forward>
            forward = root.getiterator('forward')
            if forward:
                attributes = forward[0].attrib
                mode = attributes.get('mode')
            else:
                mode = 'isolated'
            networks[networkname] = {'cidr': cidr, 'dhcp': dhcp, 'type': 'routed', 'mode': mode}
        for interface in conn.listAllInterfaces():
            interfacename = interface.name()
            if interfacename == 'lo':
                continue
            netxml = interface.XMLDesc(0)
            root = ET.fromstring(netxml)
            ip = root.getiterator('ip')
            if ip:
                attributes = ip[0].attrib
                ip = attributes.get('address')
                # bridges report a prefix length instead of a netmask
                prefix = attributes.get('prefix')
                ip = IPNetwork('%s/%s' % (ip, prefix))
                cidr = ip.cidr
            else:
                cidr = 'N/A'
            networks[interfacename] = {'cidr': cidr, 'dhcp': 'N/A', 'type': 'bridged', 'mode': 'N/A'}
        return networks
def delete_pool(self, name, full=False):
conn = self.conn
try:
pool = conn.storagePoolLookupByName(name)
except:
print("Pool %s not found. Leaving..." % name)
return
if full:
for vol in pool.listAllVolumes():
vol.delete(0)
if pool.isActive():
pool.destroy()
pool.undefine()
def bootstrap(self, pool=None, poolpath=None, pooltype='dir', nets={}, image=None):
conn = self.conn
volumes = {}
try:
poolname = pool
pool = conn.storagePoolLookupByName(pool)
for vol in pool.listAllVolumes():
volumes[vol.name()] = {'object': vol}
except:
if poolpath is not None:
print("Pool %s not found...Creating it" % pool)
self.create_pool(name=pool, poolpath=poolpath, pooltype=pooltype)
if image is not None and os.path.basename(image) not in volumes:
self.add_image(image, poolname)
networks = []
for net in conn.listNetworks():
networks.append(net)
for net in nets:
if net not in networks:
print("Network %s not found...Creating it" % net)
cidr = nets[net].get('cidr')
dhcp = bool(nets[net].get('dchp', True))
self.create_network(name=net, cidr=cidr, dhcp=dhcp)
def network_ports(self, name):
conn = self.conn
machines = []
for vm in conn.listAllDomains(0):
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
for element in root.getiterator('interface'):
networktype = element.get('type')
if networktype == 'bridge':
network = element.find('source').get('bridge')
else:
network = element.find('source').get('network')
if network == name:
machines.append(vm.name())
return machines
def vm_ports(self, name):
conn = self.conn
networks = []
try:
vm = conn.lookupByName(name)
except:
print("VM %s not found" % name)
return
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
for element in root.getiterator('interface'):
networktype = element.get('type')
if networktype == 'bridge':
network = element.find('source').get('bridge')
else:
network = element.find('source').get('network')
networks.append(network)
return networks
    def create_container(self, name, image, nets=None, cmd=None, ports=[], volumes=[], label=None):
        """Run a container *name* from *image*.

        When host is 127.0.0.1 the docker python API is used; otherwise an
        equivalent `docker run` command line is executed over ssh.

        NOTE(review): ports/volumes use mutable default arguments, and the
        local branch rewrites the caller's volumes list in place — confirm
        callers do not reuse those lists between calls.
        """
        # if not nets:
        #     return
        # for i, net in enumerate(nets):
        #     print net
        #     if isinstance(net, str):
        #         netname = net
        #     elif isinstance(net, dict) and 'name' in net:
        #         netname = net['name']
        #     nets[i] = self._get_bridge(netname)
        if self.host == '127.0.0.1':
            # normalize every volume entry to the docker-py mapping form
            # {origin: {'bind': destination, 'mode': mode}}
            for i, volume in enumerate(volumes):
                if isinstance(volume, str):
                    if len(volume.split(':')) == 2:
                        origin, destination = volume.split(':')
                        volumes[i] = {origin: {'bind': destination, 'mode': 'rw'}}
                    else:
                        volumes[i] = {volume: {'bind': volume, 'mode': 'rw'}}
                elif isinstance(volume, dict):
                    path = volume.get('path')
                    origin = volume.get('origin')
                    destination = volume.get('destination')
                    mode = volume.get('mode', 'rw')
                    if origin is None or destination is None:
                        if path is None:
                            continue
                        volumes[i] = {path: {'bind': path, 'mode': mode}}
                    else:
                        volumes[i] = {origin: {'bind': destination, 'mode': mode}}
            if ports is not None:
                # each tcp port is published on the same host port
                ports = {'%s/tcp' % k: k for k in ports}
            if label is not None and isinstance(label, str) and len(label.split('=')) == 2:
                key, value = label.split('=')
                labels = {key: value}
            else:
                labels = None
            base_url = 'unix://var/run/docker.sock'
            d = docker.DockerClient(base_url=base_url, version='1.22')
            # d.containers.run(image, name=name, command=cmd, networks=nets, detach=True, ports=ports)
            d.containers.run(image, name=name, command=cmd, detach=True, ports=ports, volumes=volumes, stdin_open=True, tty=True, labels=labels)
        else:
            # remote host: build the docker run command line piece by piece
            # netinfo = ''
            # for net in nets:
            #     netinfo = "%s --net=%s" % (netinfo, net)
            portinfo = ''
            if ports is not None:
                for port in ports:
                    if isinstance(port, int):
                        oriport = port
                        destport = port
                    elif isinstance(port, str):
                        if len(port.split(':')) == 2:
                            oriport, destport = port.split(':')
                        else:
                            oriport = port
                            destport = port
                    elif isinstance(port, dict) and 'origin' in port and 'destination' in port:
                        oriport = port['origin']
                        destport = port['destination']
                    else:
                        continue
                    portinfo = "%s -p %s:%s" % (portinfo, oriport, destport)
            volumeinfo = ''
            if volumes is not None:
                for volume in volumes:
                    if isinstance(volume, str):
                        if len(volume.split(':')) == 2:
                            origin, destination = volume.split(':')
                        else:
                            origin = volume
                            destination = volume
                    elif isinstance(volume, dict):
                        path = volume.get('path')
                        origin = volume.get('origin')
                        destination = volume.get('destination')
                        if origin is None or destination is None:
                            if path is None:
                                continue
                            origin = path
                            destination = path
                    volumeinfo = "%s -v %s:%s" % (volumeinfo, origin, destination)
            dockercommand = "docker run -it %s %s --name %s -l %s -d %s" % (volumeinfo, portinfo, name, label, image)
            if cmd is not None:
                dockercommand = "%s %s" % (dockercommand, cmd)
            command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
            os.system(command)
def delete_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container for container in d.containers.list() if container.name == name]
if containers:
for container in containers:
container.remove(force=True)
else:
dockercommand = "docker rm -f %s" % name
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
def start_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container for container in d.containers.list(all=True) if container.name == name]
if containers:
for container in containers:
container.start()
else:
dockercommand = "docker start %s" % name
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
def stop_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container for container in d.containers.list() if container.name == name]
if containers:
for container in containers:
container.stop()
else:
dockercommand = "docker stop %s" % name
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
def console_container(self, name):
if self.host == '127.0.0.1':
# base_url = 'unix://var/run/docker.sock'
dockercommand = "docker attach %s" % name
os.system(dockercommand)
# d = docker.DockerClient(base_url=base_url)
# containers = [container.id for container in d.containers.list() if container.name == name]
# if containers:
# for container in containers:
# container.attach()
else:
dockercommand = "docker attach %s" % name
command = "ssh -t -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
    def list_containers(self):
        """Return rows [name, state, source image, plan, command, ports] for every
        container (including stopped ones).

        Locally the docker python API is used; remotely a custom
        '?'-separated `docker ps -a` format is parsed over ssh.
        NOTE(review): the remote parser breaks if any field contains '?'.
        """
        containers = []
        if self.host == '127.0.0.1':
            base_url = 'unix://var/run/docker.sock'
            d = docker.DockerClient(base_url=base_url, version='1.22')
            # containers = [container.name for container in d.containers.list()]
            for container in d.containers.list(all=True):
                name = container.name
                state = container.status
                # keep only the first word, e.g. 'Up 3 days' -> 'Up'
                state = state.split(' ')[0]
                source = container.attrs['Config']['Image']
                labels = container.attrs['Config']['Labels']
                if 'plan' in labels:
                    plan = labels['plan']
                else:
                    plan = ''
                command = container.attrs['Config']['Cmd']
                if command is None:
                    command = ''
                else:
                    command = command[0]
                ports = container.attrs['NetworkSettings']['Ports']
                if ports:
                    portinfo = []
                    for port in ports:
                        if ports[port] is None:
                            # exposed but not published
                            newport = port
                        else:
                            hostport = ports[port][0]['HostPort']
                            hostip = ports[port][0]['HostIp']
                            newport = "%s:%s->%s" % (hostip, hostport, port)
                        portinfo.append(newport)
                    portinfo = ','.join(portinfo)
                else:
                    portinfo = ''
                containers.append([name, state, source, plan, command, portinfo])
        else:
            containers = []
            # dockercommand = "docker ps --format '{{.Names}}'"
            # '?' acts as a field separator in the remote format string
            dockercommand = "docker ps -a --format \"'{{.Names}}?{{.Status}}?{{.Image}}?{{.Command}}?{{.Ports}}?{{.Label \\\"plan\\\"}}'\""
            command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
            results = os.popen(command).readlines()
            for container in results:
                # containers.append(container.strip())
                name, state, source, command, ports, plan = container.split('?')
                if state.startswith('Up'):
                    state = 'up'
                else:
                    state = 'down'
                # labels = {i.split('=')[0]: i.split('=')[1] for i in labels.split(',')}
                # if 'plan' in labels:
                #     plan = labels['plan']
                # else:
                #     plan = ''
                command = command.strip().replace('"', '')
                containers.append([name, state, source, plan, command, ports])
        return containers
def exists_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container.id for container in d.containers.list(all=True) if container.name == name]
if containers:
return True
else:
dockercommand = "docker ps -a --format '{{.Names}}'"
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
results = os.popen(command).readlines()
for container in results:
containername = container.strip()
if containername == name:
return True
return False
def _get_bridge(self, name):
conn = self.conn
bridges = [interface.name() for interface in conn.listAllInterfaces()]
if name in bridges:
return name
try:
net = self.conn.networkLookupByName(name)
except:
return None
netxml = net.XMLDesc(0)
root = ET.fromstring(netxml)
bridge = root.getiterator('bridge')
if bridge:
attributes = bridge[0].attrib
bridge = attributes.get('name')
return bridge
Refactored most commands for ease of use: in particular, moved `kcli create` to `kcli vm`. Also created a `kcli container` command and applied some container fixes when running locally with the API.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
interact with a local/remote libvirt daemon
"""
from defaults import TEMPLATES
import docker
from distutils.spawn import find_executable
from iptools import IpRange
from netaddr import IPAddress, IPNetwork
from libvirt import open as libvirtopen
import os
import socket
import string
import xml.etree.ElementTree as ET
__version__ = "4.1"
# NOTE(review): misnamed unit constants — KB is actually one MiB in bytes and
# MB is one GiB; _xmlvolume multiplies a size given in GB by MB.
KB = 1024 * 1024
MB = 1024 * KB
# guest OS identifiers (oVirt/RHEV-style os type strings)
guestrhel532 = "rhel_5"
guestrhel564 = "rhel_5x64"
guestrhel632 = "rhel_6"
guestrhel664 = "rhel_6x64"
guestrhel764 = "rhel_7x64"
guestother = "other"
guestotherlinux = "other_linux"
guestwindowsxp = "windows_xp"
guestwindows7 = "windows_7"
guestwindows764 = "windows_7x64"
guestwindows2003 = "windows_2003"
guestwindows200364 = "windows_2003x64"
guestwindows2008 = "windows_2008"
guestwindows200864 = "windows_2008x64"
class Kvirt:
def __init__(self, host='127.0.0.1', port=None, user='root', protocol='ssh', url=None):
if url is None:
if host == '127.0.0.1' or host == 'localhost':
url = "qemu:///system"
elif protocol == 'ssh':
url = "qemu+%s://%s@%s/system?socket=/var/run/libvirt/libvirt-sock" % (protocol, user, host)
elif user and port:
url = "qemu+%s://%s@%s:%s/system?socket=/var/run/libvirt/libvirt-sock" % (protocol, user, host, port)
elif port:
url = "qemu+%s://%s:%s/system?socket=/var/run/libvirt/libvirt-sock" % (protocol, host, port)
else:
url = "qemu:///system"
try:
self.conn = libvirtopen(url)
except Exception:
self.conn = None
self.host = host
self.user = user
self.port = port
self.protocol = protocol
if self.protocol == 'ssh' and port is None:
self.port = '22'
def close(self):
conn = self.conn
conn.close()
self.conn = None
def exists(self, name):
conn = self.conn
for vm in conn.listAllDomains():
if vm.name() == name:
return True
return False
def net_exists(self, name):
conn = self.conn
try:
conn.networkLookupByName(name)
return True
except:
return False
def disk_exists(self, pool, name):
conn = self.conn
try:
storage = conn.storagePoolLookupByName(pool)
storage.refresh()
for stor in sorted(storage.listVolumes()):
if stor == name:
return True
except:
return False
    def create(self, name, virttype='kvm', title='', description='kvirt', numcpus=2, memory=512, guestid='guestrhel764', pool='default', template=None, disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, start=True, keys=None, cmds=None, ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None):
        """Define (and optionally start) a new VM, building its disks, nics,
        cloudinit iso and domain XML.

        Returns {'result': 'success'} or {'result': 'failure', 'reason': ...}.
        NOTE(review): disks defaults to a shared mutable list; getiterator was
        removed in Python 3.9 (this module appears to target Python 2).
        """
        # resolve the default pool and remember its type/path for per-disk overrides
        default_diskinterface = diskinterface
        default_diskthin = diskthin
        default_disksize = disksize
        default_pool = pool
        conn = self.conn
        try:
            default_storagepool = conn.storagePoolLookupByName(default_pool)
        except:
            return {'result': 'failure', 'reason': "Pool %s not found" % default_pool}
        default_poolxml = default_storagepool.XMLDesc(0)
        root = ET.fromstring(default_poolxml)
        default_pooltype = root.getiterator('pool')[0].get('type')
        default_poolpath = None
        for element in root.getiterator('path'):
            default_poolpath = element.text
            break
        if vnc:
            display = 'vnc'
        else:
            display = 'spice'
        # inventory all existing volumes (by name and by path) across every pool
        volumes = {}
        volumespaths = {}
        for p in conn.listStoragePools():
            poo = conn.storagePoolLookupByName(p)
            poo.refresh(0)
            for vol in poo.listAllVolumes():
                volumes[vol.name()] = {'pool': poo, 'object': vol}
                volumespaths[vol.path()] = {'pool': poo, 'object': vol}
        networks = []
        bridges = []
        for net in conn.listNetworks():
            networks.append(net)
        for net in conn.listInterfaces():
            if net != 'lo':
                bridges.append(net)
        machine = 'pc'
        sysinfo = "<smbios mode='sysinfo'/>"
        # build one <disk> element (and its backing volume xml) per disk entry
        disksxml = ''
        volsxml = {}
        for index, disk in enumerate(disks):
            if disk is None:
                disksize = default_disksize
                diskthin = default_diskthin
                diskinterface = default_diskinterface
                diskpool = default_pool
                diskpooltype = default_pooltype
                diskpoolpath = default_poolpath
            elif isinstance(disk, int):
                disksize = disk
                diskthin = default_diskthin
                diskinterface = default_diskinterface
                diskpool = default_pool
                diskpooltype = default_pooltype
                diskpoolpath = default_poolpath
            elif isinstance(disk, dict):
                disksize = disk.get('size', default_disksize)
                diskthin = disk.get('thin', default_diskthin)
                diskinterface = disk.get('interface', default_diskinterface)
                diskpool = disk.get('pool', default_pool)
                try:
                    storagediskpool = conn.storagePoolLookupByName(diskpool)
                except:
                    return {'result': 'failure', 'reason': "Pool %s not found" % diskpool}
                diskpoolxml = storagediskpool.XMLDesc(0)
                root = ET.fromstring(diskpoolxml)
                diskpooltype = root.getiterator('pool')[0].get('type')
                diskpoolpath = None
                for element in root.getiterator('path'):
                    diskpoolpath = element.text
                    break
            else:
                return {'result': 'failure', 'reason': "Invalid disk entry"}
            # vda/vdb/... for virtio, hda/hdb/... for ide
            letter = chr(index + ord('a'))
            diskdev, diskbus = 'vd%s' % letter, 'virtio'
            if diskinterface != 'virtio':
                diskdev, diskbus = 'hd%s' % letter, 'ide'
            diskformat = 'qcow2'
            if not diskthin:
                diskformat = 'raw'
            storagename = "%s_%d.img" % (name, index + 1)
            diskpath = "%s/%s" % (diskpoolpath, storagename)
            if template is not None and index == 0:
                # first disk is backed by the template image
                try:
                    default_storagepool.refresh(0)
                    if '/' in template:
                        backingvolume = volumespaths[template]['object']
                    else:
                        backingvolume = volumes[template]['object']
                    backingxml = backingvolume.XMLDesc(0)
                    root = ET.fromstring(backingxml)
                except:
                    return {'result': 'failure', 'reason': "Invalid template %s" % template}
                backing = backingvolume.path()
                if '/dev' in backing and diskpooltype == 'dir':
                    return {'result': 'failure', 'reason': "lvm template can not be used with a dir pool.Leaving..."}
                if '/dev' not in backing and diskpooltype == 'logical':
                    return {'result': 'failure', 'reason': "file template can not be used with a lvm pool.Leaving..."}
                backingxml = """<backingStore type='file' index='1'>
                                <format type='qcow2'/>
                                <source file='%s'/>
                                <backingStore/>
                              </backingStore>""" % backing
            else:
                backing = None
                backingxml = '<backingStore/>'
            volxml = self._xmlvolume(path=diskpath, size=disksize, pooltype=diskpooltype, backing=backing, diskformat=diskformat)
            if diskpool in volsxml:
                volsxml[diskpool].append(volxml)
            else:
                volsxml[diskpool] = [volxml]
            if diskpooltype == 'logical':
                diskformat = 'raw'
            disksxml = """%s<disk type='file' device='disk'>
                    <driver name='qemu' type='%s'/>
                    <source file='%s'/>
                    %s
                    <target dev='%s' bus='%s'/>
                    </disk>""" % (disksxml, diskformat, diskpath, backingxml, diskdev, diskbus)
        # build one <interface> element per nic; a static first ip is recorded
        # in smbios 'version' so list()/info() can report it without dhcp
        netxml = ''
        version = ''
        for index, net in enumerate(nets):
            macxml = ''
            if isinstance(net, str):
                netname = net
            elif isinstance(net, dict) and 'name' in net:
                netname = net['name']
                ip = None
                if ips and len(ips) > index and ips[index] is not None:
                    ip = ips[index]
                    nets[index]['ip'] = ip
                elif 'ip' in nets[index]:
                    ip = nets[index]['ip']
                if 'mac' in nets[index]:
                    mac = nets[index]['mac']
                    macxml = "<mac address='%s'/>" % mac
                if index == 0 and ip is not None:
                    version = "<entry name='version'>%s</entry>" % ip
            if netname in bridges:
                sourcenet = 'bridge'
            elif netname in networks:
                sourcenet = 'network'
            else:
                return {'result': 'failure', 'reason': "Invalid network %s" % netname}
            netxml = """%s
                     <interface type='%s'>
                     %s
                     <source %s='%s'/>
                     <model type='virtio'/>
                     </interface>""" % (netxml, sourcenet, macxml, sourcenet, netname)
        version = """<sysinfo type='smbios'>
                     <system>
                     %s
                     <entry name='product'>%s</entry>
                     </system>
                    </sysinfo>""" % (version, title)
        # cloudinit implies a generated iso named after the vm in the default pool
        if iso is None:
            if cloudinit:
                iso = "%s/%s.iso" % (default_poolpath, name)
            else:
                iso = ''
        else:
            try:
                if os.path.isabs(iso):
                    shortiso = os.path.basename(iso)
                else:
                    shortiso = iso
                isovolume = volumes[shortiso]['object']
                iso = isovolume.path()
                # iso = "%s/%s" % (default_poolpath, iso)
                # iso = "%s/%s" % (isopath, iso)
            except:
                return {'result': 'failure', 'reason': "Invalid iso %s" % iso}
        isoxml = """<disk type='file' device='cdrom'>
                      <driver name='qemu' type='raw'/>
                      <source file='%s'/>
                      <target dev='hdc' bus='ide'/>
                      <readonly/>
                    </disk>""" % (iso)
        displayxml = """<input type='tablet' bus='usb'/>
                        <input type='mouse' bus='ps2'/>
                        <graphics type='%s' port='-1' autoport='yes' listen='0.0.0.0'>
                        <listen type='address' address='0.0.0.0'/>
                        </graphics>
                        <memballoon model='virtio'/>""" % (display)
        if nested and virttype == 'kvm':
            # expose vmx so nested virtualization works inside the guest
            nestedxml = """<cpu match='exact'>
                  <model>Westmere</model>
                   <feature policy='require' name='vmx'/>
                </cpu>"""
        else:
            nestedxml = ""
        if self.host in ['localhost', '127.0.0.1']:
            serialxml = """<serial type='pty'>
                       <target port='0'/>
                       </serial>
                       <console type='pty'>
                       <target type='serial' port='0'/>
                       </console>"""
        else:
            # remote host: expose the serial console on a free tcp port for ssh+nc access
            serialxml = """ <serial type="tcp">
                     <source mode="bind" host="127.0.0.1" service="%s"/>
                     <protocol type="telnet"/>
                     <target port="0"/>
                     </serial>""" % self._get_free_port()
        # assemble the final domain xml from all the pieces above
        vmxml = """<domain type='%s'>
                  <name>%s</name>
                  <description>%s</description>
                  %s
                  <memory unit='MiB'>%d</memory>
                  <vcpu>%d</vcpu>
                  <os>
                    <type arch='x86_64' machine='%s'>hvm</type>
                    <boot dev='hd'/>
                    <boot dev='cdrom'/>
                    <bootmenu enable='yes'/>
                    %s
                  </os>
                  <features>
                    <acpi/>
                    <apic/>
                    <pae/>
                  </features>
                  <clock offset='utc'/>
                  <on_poweroff>destroy</on_poweroff>
                  <on_reboot>restart</on_reboot>
                  <on_crash>restart</on_crash>
                  <devices>
                    %s
                    %s
                    %s
                    %s
                    %s
                  </devices>
                    %s
                    </domain>""" % (virttype, name, description, version, memory, numcpus, machine, sysinfo, disksxml, netxml, isoxml, displayxml, serialxml, nestedxml)
        # create the backing volumes, then define the domain
        for pool in volsxml:
            storagepool = conn.storagePoolLookupByName(pool)
            storagepool.refresh(0)
            for volxml in volsxml[pool]:
                storagepool.createXML(volxml, 0)
        conn.defineXML(vmxml)
        vm = conn.lookupByName(name)
        vm.setAutostart(1)
        if cloudinit:
            # generate and upload the cloudinit iso into the default pool
            self._cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain, reserveip=reserveip)
            self._uploadimage(name, pool=default_storagepool)
        if reserveip:
            # read back the generated macs to pin requested ips in dhcp
            xml = vm.XMLDesc(0)
            vmxml = ET.fromstring(xml)
            macs = []
            for element in vmxml.getiterator('interface'):
                mac = element.find('mac').get('address')
                macs.append(mac)
            self._reserve_ip(name, nets, macs)
        if start:
            vm.create()
        return {'result': 'success'}
def start(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
try:
vm = conn.lookupByName(name)
vm = conn.lookupByName(name)
if status[vm.isActive()] == "up":
return {'result': 'success'}
else:
vm.create()
return {'result': 'success'}
except:
return {'result': 'failure', 'reason': "VM %s not found" % name}
def stop(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
try:
vm = conn.lookupByName(name)
if status[vm.isActive()] == "down":
return {'result': 'success'}
else:
vm.destroy()
return {'result': 'success'}
except:
return {'result': 'failure', 'reason': "VM %s not found" % name}
def restart(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
vm = conn.lookupByName(name)
if status[vm.isActive()] == "down":
return {'result': 'success'}
else:
vm.restart()
return {'result': 'success'}
def report(self):
conn = self.conn
hostname = conn.getHostname()
cpus = conn.getCPUMap()[0]
memory = conn.getInfo()[1]
print("Host:%s Cpu:%s Memory:%sMB\n" % (hostname, cpus, memory))
for pool in conn.listStoragePools():
poolname = pool
pool = conn.storagePoolLookupByName(pool)
poolxml = pool.XMLDesc(0)
root = ET.fromstring(poolxml)
pooltype = root.getiterator('pool')[0].get('type')
if pooltype == 'dir':
poolpath = root.getiterator('path')[0].text
else:
poolpath = root.getiterator('device')[0].get('path')
s = pool.info()
used = "%.2f" % (float(s[2]) / 1024 / 1024 / 1024)
available = "%.2f" % (float(s[3]) / 1024 / 1024 / 1024)
# Type,Status, Total space in Gb, Available space in Gb
used = float(used)
available = float(available)
print("Storage:%s Type:%s Path:%s Used space:%sGB Available space:%sGB" % (poolname, pooltype, poolpath, used, available))
print
for interface in conn.listAllInterfaces():
interfacename = interface.name()
if interfacename == 'lo':
continue
print("Network:%s Type:bridged" % (interfacename))
for network in conn.listAllNetworks():
networkname = network.name()
netxml = network.XMLDesc(0)
cidr = 'N/A'
root = ET.fromstring(netxml)
ip = root.getiterator('ip')
if ip:
attributes = ip[0].attrib
firstip = attributes.get('address')
netmask = attributes.get('netmask')
ip = IPNetwork('%s/%s' % (firstip, netmask))
cidr = ip.cidr
dhcp = root.getiterator('dhcp')
if dhcp:
dhcp = True
else:
dhcp = False
print("Network:%s Type:routed Cidr:%s Dhcp:%s" % (networkname, cidr, dhcp))
def status(self, name):
conn = self.conn
status = {0: 'down', 1: 'up'}
try:
vm = conn.lookupByName(name)
except:
return None
return status[vm.isActive()]
def list(self):
vms = []
leases = {}
conn = self.conn
for network in conn.listAllNetworks():
for lease in network.DHCPLeases():
ip = lease['ipaddr']
mac = lease['mac']
leases[mac] = ip
status = {0: 'down', 1: 'up'}
for vm in conn.listAllDomains(0):
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
description = root.getiterator('description')
if description:
description = description[0].text
else:
description = ''
name = vm.name()
state = status[vm.isActive()]
ips = []
title = ''
for element in root.getiterator('interface'):
mac = element.find('mac').get('address')
if vm.isActive():
if mac in leases:
ips.append(leases[mac])
if ips:
ip = ips[-1]
else:
ip = ''
for entry in root.getiterator('entry'):
attributes = entry.attrib
if attributes['name'] == 'version':
ip = entry.text
if attributes['name'] == 'product':
title = entry.text
source = ''
for element in root.getiterator('backingStore'):
s = element.find('source')
if s is not None:
source = os.path.basename(s.get('file'))
break
vms.append([name, state, ip, source, description, title])
return vms
def console(self, name):
conn = self.conn
vm = conn.lookupByName(name)
if not vm.isActive():
print("VM down")
return
else:
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
for element in root.getiterator('graphics'):
attributes = element.attrib
if attributes['listen'] == '127.0.0.1':
host = '127.0.0.1'
else:
host = self.host
protocol = attributes['type']
port = attributes['port']
url = "%s://%s:%s" % (protocol, host, port)
os.popen("remote-viewer %s &" % url)
def serialconsole(self, name):
conn = self.conn
vm = conn.lookupByName(name)
if not vm.isActive():
print("VM down")
return
else:
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
serial = root.getiterator('serial')
if not serial:
print("No serial Console found. Leaving...")
return
elif self.host in ['localhost', '127.0.0.1']:
os.system('virsh console %s' % name)
else:
for element in serial:
serialport = element.find('source').get('service')
if serialport:
if self.protocol != 'ssh':
print("Remote serial Console requires using ssh . Leaving...")
return
else:
serialcommand = "ssh -p %s %s@%s nc 127.0.0.1 %s" % (self.port, self.user, self.host, serialport)
os.system(serialcommand)
    def info(self, name):
        """Print name, status, description, profile, cpus, memory, nics (with
        dhcp-leased or smbios-recorded ips) and disks of the given vm."""
        # ips = []
        leases = {}
        conn = self.conn
        # map mac -> leased ip across all networks
        for network in conn.listAllNetworks():
            for lease in network.DHCPLeases():
                ip = lease['ipaddr']
                mac = lease['mac']
                leases[mac] = ip
        try:
            vm = conn.lookupByName(name)
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
        except:
            print("VM %s not found" % name)
            return
        state = 'down'
        memory = root.getiterator('memory')[0]
        unit = memory.attrib['unit']
        memory = memory.text
        if unit == 'KiB':
            # normalize to MB for display
            memory = float(memory) / 1024
            memory = int(memory)
        numcpus = root.getiterator('vcpu')[0]
        numcpus = numcpus.text
        if vm.isActive():
            state = 'up'
        print("name: %s" % name)
        print("status: %s" % state)
        description = root.getiterator('description')
        if description:
            description = description[0].text
        else:
            description = ''
        title = None
        # smbios 'product' entry carries the profile name when one was set
        for entry in root.getiterator('entry'):
            attributes = entry.attrib
            if attributes['name'] == 'product':
                title = entry.text
        print("description: %s" % description)
        if title is not None:
            print("profile: %s" % title)
        print("cpus: %s" % numcpus)
        print("memory: %sMB" % memory)
        nicnumber = 0
        for element in root.getiterator('interface'):
            networktype = element.get('type')
            device = "eth%s" % nicnumber
            mac = element.find('mac').get('address')
            if networktype == 'bridge':
                bridge = element.find('source').get('bridge')
                print("net interfaces: %s mac: %s net: %s type: bridge" % (device, mac, bridge))
            else:
                network = element.find('source').get('network')
                print("net interfaces:%s mac: %s net: %s type: routed" % (device, mac, network))
                # NOTE(review): lookup result is discarded; likely leftover code
                network = conn.networkLookupByName(network)
            if vm.isActive():
                if mac in leases:
                    # ips.append(leases[mac])
                    print("ip: %s" % leases[mac])
            nicnumber = nicnumber + 1
        # fall back to the static ip recorded in smbios at creation time
        for entry in root.getiterator('entry'):
            attributes = entry.attrib
            if attributes['name'] == 'version':
                ip = entry.text
                print("ip: %s" % ip)
                break
        for element in root.getiterator('disk'):
            disktype = element.get('device')
            if disktype == 'cdrom':
                continue
            device = element.find('target').get('dev')
            diskformat = 'file'
            drivertype = element.find('driver').get('type')
            path = element.find('source').get('file')
            volume = conn.storageVolLookupByPath(path)
            disksize = int(float(volume.info()[1]) / 1024 / 1024 / 1024)
            print("diskname: %s disksize: %sGB diskformat: %s type: %s path: %s" % (device, disksize, diskformat, drivertype, path))
def volumes(self, iso=False):
isos = []
templates = []
default_templates = [os.path.basename(t) for t in TEMPLATES.values()]
conn = self.conn
for storage in conn.listStoragePools():
storage = conn.storagePoolLookupByName(storage)
storage.refresh(0)
storagexml = storage.XMLDesc(0)
root = ET.fromstring(storagexml)
for element in root.getiterator('path'):
storagepath = element.text
break
for volume in storage.listVolumes():
if volume.endswith('iso'):
isos.append("%s/%s" % (storagepath, volume))
elif volume.endswith('qcow2') or volume in default_templates:
templates.append("%s/%s" % (storagepath, volume))
if iso:
return isos
else:
return templates
    def delete(self, name):
        """Destroy and undefine the vm, deleting its disks and any dhcp host
        reservation that points at its macs. Silently returns when missing."""
        conn = self.conn
        try:
            vm = conn.lookupByName(name)
        except:
            return
        status = {0: 'down', 1: 'up'}
        vmxml = vm.XMLDesc(0)
        root = ET.fromstring(vmxml)
        disks = []
        for element in root.getiterator('disk'):
            source = element.find('source')
            if source is not None:
                imagefile = element.find('source').get('file')
                # keep the cloudinit iso and any disk whose path embeds the vm name
                if imagefile == "%s.iso" % name or name in imagefile:
                    disks.append(imagefile)
                else:
                    continue
        if status[vm.isActive()] != "down":
            vm.destroy()
        vm.undefine()
        # remove the collected disk volumes from whichever pool holds them
        for storage in conn.listStoragePools():
            deleted = False
            storage = conn.storagePoolLookupByName(storage)
            storage.refresh(0)
            for stor in storage.listVolumes():
                for disk in disks:
                    if stor in disk:
                        try:
                            volume = storage.storageVolLookupByName(stor)
                        except:
                            continue
                        volume.delete(0)
                        deleted = True
            if deleted:
                storage.refresh(0)
        # drop dhcp host reservations matching this vm's macs
        for element in root.getiterator('interface'):
            mac = element.find('mac').get('address')
            networktype = element.get('type')
            if networktype != 'bridge':
                network = element.find('source').get('network')
                network = conn.networkLookupByName(network)
                netxml = network.XMLDesc(0)
                # NOTE(review): this rebinds root and, below, the *name* parameter
                root = ET.fromstring(netxml)
                for host in root.getiterator('host'):
                    hostmac = host.get('mac')
                    ip = host.get('ip')
                    name = host.get('name')
                    if hostmac == mac:
                        hostentry = "<host mac='%s' name='%s' ip='%s'/>" % (mac, name, ip)
                        # presumably (2, 4, ...) = delete command on the ip-dhcp-host
                        # section per virNetworkUpdate — confirm against libvirt docs
                        network.update(2, 4, 0, hostentry, 1)
def _xmldisk(self, diskpath, diskdev, diskbus='virtio', diskformat='qcow2', shareable=False):
if shareable:
sharexml = '<shareable/>'
else:
sharexml = ''
diskxml = """<disk type='file' device='disk'>
<driver name='qemu' type='%s' cache='none'/>
<source file='%s'/>
<target bus='%s' dev='%s'/>
%s
</disk>""" % (diskformat, diskpath, diskbus, diskdev, sharexml)
return diskxml
def _xmlvolume(self, path, size, pooltype='file', backing=None, diskformat='qcow2'):
size = int(size) * MB
if int(size) == 0:
size = 500 * 1024
name = os.path.basename(path)
if pooltype == 'block':
volume = """<volume type='block'>
<name>%s</name>
<capacity unit="bytes">%d</capacity>
<target>
<path>%s</path>
<compat>1.1</compat>
</target>
</volume>""" % (name, size, path)
return volume
if backing is not None:
backingstore = """
<backingStore>
<path>%s</path>
<format type='%s'/>
</backingStore>""" % (backing, diskformat)
else:
backingstore = "<backingStore/>"
volume = """
<volume type='file'>
<name>%s</name>
<capacity unit="bytes">%d</capacity>
<target>
<path>%s</path>
<format type='%s'/>
<permissions>
<mode>0644</mode>
</permissions>
<compat>1.1</compat>
</target>
%s
</volume>""" % (name, size, path, diskformat, backingstore)
return volume
def clone(self, old, new, full=False, start=False):
conn = self.conn
oldvm = conn.lookupByName(old)
oldxml = oldvm.XMLDesc(0)
tree = ET.fromstring(oldxml)
uuid = tree.getiterator('uuid')[0]
tree.remove(uuid)
for vmname in tree.getiterator('name'):
vmname.text = new
firstdisk = True
for disk in tree.getiterator('disk'):
if firstdisk or full:
source = disk.find('source')
oldpath = source.get('file')
backingstore = disk.find('backingStore')
backing = None
for b in backingstore.getiterator():
backingstoresource = b.find('source')
if backingstoresource is not None:
backing = backingstoresource.get('file')
newpath = oldpath.replace(old, new)
source.set('file', newpath)
oldvolume = conn.storageVolLookupByPath(oldpath)
oldinfo = oldvolume.info()
oldvolumesize = (float(oldinfo[1]) / 1024 / 1024 / 1024)
newvolumexml = self._xmlvolume(newpath, oldvolumesize, backing)
pool = oldvolume.storagePoolLookupByVolume()
pool.createXMLFrom(newvolumexml, oldvolume, 0)
firstdisk = False
else:
devices = tree.getiterator('devices')[0]
devices.remove(disk)
for interface in tree.getiterator('interface'):
mac = interface.find('mac')
interface.remove(mac)
if self.host not in ['127.0.0.1', 'localhost']:
for serial in tree.getiterator('serial'):
source = serial.find('source')
source.set('service', str(self._get_free_port()))
newxml = ET.tostring(tree)
conn.defineXML(newxml)
vm = conn.lookupByName(new)
if start:
vm.setAutostart(1)
vm.create()
    def _reserve_ip(self, name, nets, macs):
        """Pin each requested static ip to the vm's nic mac via a dhcp host entry.

        nets: the create() nets list (dict entries may carry 'ip' and 'name');
        macs: macs read back from the defined domain, index-aligned with nets.
        NOTE(review): netip is unbound when the network xml has no <ip>
        element, which would raise NameError at the containment check below.
        """
        conn = self.conn
        for index, net in enumerate(nets):
            if not isinstance(net, dict):
                continue
            ip = net.get('ip')
            network = net.get('name')
            mac = macs[index]
            if ip is None or network is None:
                continue
            network = conn.networkLookupByName(network)
            oldnetxml = network.XMLDesc()
            root = ET.fromstring(oldnetxml)
            ipentry = root.getiterator('ip')
            if ipentry:
                attributes = ipentry[0].attrib
                firstip = attributes.get('address')
                netmask = attributes.get('netmask')
                netip = IPNetwork('%s/%s' % (firstip, netmask))
            dhcp = root.getiterator('dhcp')
            if not dhcp:
                continue
            # skip requested ips that fall outside the network's own range
            if not IPAddress(ip) in netip:
                continue
            # presumably (4, 4, ...) = add-last on the ip-dhcp-host section per
            # virNetworkUpdate — confirm against libvirt docs
            network.update(4, 4, 0, '<host mac="%s" name="%s" ip="%s" />' % (mac, name, ip), 1)
def _cloudinit(self, name, keys=None, cmds=None, nets=[], gateway=None, dns=None, domain=None, reserveip=False):
default_gateway = gateway
with open('/tmp/meta-data', 'w') as metadatafile:
if domain is not None:
localhostname = "%s.%s" % (name, domain)
else:
localhostname = name
metadatafile.write('instance-id: XXX\nlocal-hostname: %s\n' % localhostname)
metadata = ''
if nets:
for index, net in enumerate(nets):
if isinstance(net, str):
if index == 0:
continue
nicname = "eth%d" % index
ip = None
netmask = None
elif isinstance(net, dict):
nicname = net.get('nic', "eth%d" % index)
ip = net.get('ip')
netmask = net.get('mask')
metadata += " auto %s\n" % nicname
if ip is not None and netmask is not None and not reserveip:
metadata += " iface %s inet static\n" % nicname
metadata += " address %s\n" % ip
metadata += " netmask %s\n" % netmask
gateway = net.get('gateway')
if index == 0 and default_gateway is not None:
metadata += " gateway %s\n" % default_gateway
elif gateway is not None:
metadata += " gateway %s\n" % gateway
dns = net.get('dns')
if dns is not None:
metadata += " dns-nameservers %s\n" % dns
domain = net.get('domain')
if domain is not None:
metadatafile.write(" dns-search %s\n" % domain)
else:
metadata += " iface %s inet dhcp\n" % nicname
if metadata:
metadatafile.write("network-interfaces: |\n")
metadatafile.write(metadata)
# if dns is not None:
# metadatafile.write(" dns-nameservers %s\n" % dns)
# if domain is not None:
# metadatafile.write(" dns-search %s\n" % domain)
with open('/tmp/user-data', 'w') as userdata:
userdata.write('#cloud-config\nhostname: %s\n' % name)
if domain is not None:
userdata.write("fqdn: %s.%s\n" % (name, domain))
if keys is not None:
userdata.write("ssh_authorized_keys:\n")
for key in keys:
userdata.write("- %s\n" % key)
elif os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']):
publickeyfile = "%s/.ssh/id_rsa.pub" % os.environ['HOME']
with open(publickeyfile, 'r') as ssh:
key = ssh.read().rstrip()
userdata.write("ssh_authorized_keys:\n")
userdata.write("- %s\n" % key)
elif os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
publickeyfile = "%s/.ssh/id_dsa.pub" % os.environ['HOME']
with open(publickeyfile, 'r') as ssh:
key = ssh.read().rstrip()
userdata.write("ssh_authorized_keys:\n")
userdata.write("- %s\n" % key)
else:
print("neither id_rsa.pub or id_dsa public keys found in your .ssh directory, you might have trouble accessing the vm")
if cmds is not None:
userdata.write("runcmd:\n")
for cmd in cmds:
if cmd.startswith('#'):
continue
else:
userdata.write("- %s\n" % cmd)
isocmd = 'mkisofs'
if find_executable('genisoimage') is not None:
isocmd = 'genisoimage'
os.system("%s --quiet -o /tmp/%s.iso --volid cidata --joliet --rock /tmp/user-data /tmp/meta-data" % (isocmd, name))
def handler(self, stream, data, file_):
return file_.read(data)
    def _uploadimage(self, name, pool='default', origin='/tmp', suffix='.iso'):
        """Upload <origin>/<name><suffix> into *pool* as a new raw volume.

        NOTE(review): despite the 'default' string default, *pool* is used as a
        libvirt storagePool object (pool.XMLDesc is called on it), so callers
        must pass the object, not a pool name - confirm before relying on it.
        """
        name = "%s%s" % (name, suffix)
        conn = self.conn
        poolxml = pool.XMLDesc(0)
        root = ET.fromstring(poolxml)
        # first <path> element of the pool xml is its target directory
        for element in root.getiterator('path'):
            poolpath = element.text
            break
        imagepath = "%s/%s" % (poolpath, name)
        # create an empty raw volume, then stream the local file into it
        imagexml = self._xmlvolume(path=imagepath, size=0, diskformat='raw')
        pool.createXML(imagexml, 0)
        imagevolume = conn.storageVolLookupByPath(imagepath)
        stream = conn.newStream(0)
        imagevolume.upload(stream, 0, 0)
        with open("%s/%s" % (origin, name)) as ori:
            stream.sendAll(self.handler, ori)
            stream.finish()
def update_ip(self, name, ip):
conn = self.conn
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
if not vm:
print("VM %s not found" % name)
if vm.isActive() == 1:
print("Machine up. Change will only appear upon next reboot")
osentry = root.getiterator('os')[0]
smbios = osentry.find('smbios')
if smbios is None:
newsmbios = ET.Element("smbios", mode="sysinfo")
osentry.append(newsmbios)
sysinfo = root.getiterator('sysinfo')
system = root.getiterator('system')
if not sysinfo:
sysinfo = ET.Element("sysinfo", type="smbios")
root.append(sysinfo)
sysinfo = root.getiterator('sysinfo')[0]
if not system:
system = ET.Element("system")
sysinfo.append(system)
system = root.getiterator('system')[0]
versionfound = False
for entry in root.getiterator('entry'):
attributes = entry.attrib
if attributes['name'] == 'version':
entry.text = ip
versionfound = True
if not versionfound:
version = ET.Element("entry", name="version")
version.text = ip
system.append(version)
newxml = ET.tostring(root)
conn.defineXML(newxml)
def update_memory(self, name, memory):
conn = self.conn
memory = str(int(memory) * 1024)
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
memorynode = root.getiterator('memory')[0]
memorynode.text = memory
currentmemory = root.getiterator('currentMemory')[0]
currentmemory.text = memory
newxml = ET.tostring(root)
conn.defineXML(newxml)
def update_cpu(self, name, numcpus):
conn = self.conn
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
cpunode = root.getiterator('vcpu')[0]
cpunode.text = numcpus
newxml = ET.tostring(root)
conn.defineXML(newxml)
def update_start(self, name, start=True):
conn = self.conn
try:
vm = conn.lookupByName(name)
except:
print("VM %s not found" % name)
return {'result': 'failure', 'reason': "VM %s not found" % name}
if start:
vm.setAutostart(1)
else:
vm.setAutostart(0)
return {'result': 'success'}
def create_disk(self, name, size, pool=None, thin=True, template=None):
conn = self.conn
diskformat = 'qcow2'
if size < 1:
print("Incorrect size.Leaving...")
return
if not thin:
diskformat = 'raw'
if pool is not None:
pool = conn.storagePoolLookupByName(pool)
poolxml = pool.XMLDesc(0)
poolroot = ET.fromstring(poolxml)
pooltype = poolroot.getiterator('pool')[0].get('type')
for element in poolroot.getiterator('path'):
poolpath = element.text
break
else:
print("Pool not found. Leaving....")
return
if template is not None:
volumes = {}
for p in conn.listStoragePools():
poo = conn.storagePoolLookupByName(p)
for vol in poo.listAllVolumes():
volumes[vol.name()] = vol.path()
if template not in volumes and template not in volumes.values():
print("Invalid template %s.Leaving..." % template)
if template in volumes:
template = volumes[template]
pool.refresh(0)
diskpath = "%s/%s" % (poolpath, name)
if pooltype == 'logical':
diskformat = 'raw'
volxml = self._xmlvolume(path=diskpath, size=size, pooltype=pooltype,
diskformat=diskformat, backing=template)
pool.createXML(volxml, 0)
return diskpath
# def add_disk(self, name, size, pool=None, thin=True, template=None, shareable=False):
# conn = self.conn
# diskformat = 'qcow2'
# diskbus = 'virtio'
# if size < 1:
# print("Incorrect size.Leaving...")
# return
# if not thin:
# diskformat = 'raw'
# try:
# vm = conn.lookupByName(name)
# xml = vm.XMLDesc(0)
# root = ET.fromstring(xml)
# except:
# print("VM %s not found" % name)
# return
# currentdisk = 0
# for element in root.getiterator('disk'):
# disktype = element.get('device')
# if disktype == 'cdrom':
# continue
# currentdisk = currentdisk + 1
# diskindex = currentdisk + 1
# diskdev = "vd%s" % string.ascii_lowercase[currentdisk]
# if pool is not None:
# pool = conn.storagePoolLookupByName(pool)
# poolxml = pool.XMLDesc(0)
# poolroot = ET.fromstring(poolxml)
# pooltype = poolroot.getiterator('pool')[0].get('type')
# for element in poolroot.getiterator('path'):
# poolpath = element.text
# break
# else:
# print("Pool not found. Leaving....")
# return
# if template is not None:
# volumes = {}
# for p in conn.listStoragePools():
# poo = conn.storagePoolLookupByName(p)
# for vol in poo.listAllVolumes():
# volumes[vol.name()] = vol.path()
# if template not in volumes and template not in volumes.values():
# print("Invalid template %s.Leaving..." % template)
# if template in volumes:
# template = volumes[template]
# pool.refresh(0)
# storagename = "%s_%d.img" % (name, diskindex)
# diskpath = "%s/%s" % (poolpath, storagename)
# volxml = self._xmlvolume(path=diskpath, size=size, pooltype=pooltype,
# diskformat=diskformat, backing=template)
# if pooltype == 'logical':
# diskformat = 'raw'
# diskxml = self._xmldisk(diskpath=diskpath, diskdev=diskdev, diskbus=diskbus, diskformat=diskformat, shareable=shareable)
# pool.createXML(volxml, 0)
# vm.attachDevice(diskxml)
# vm = conn.lookupByName(name)
# vmxml = vm.XMLDesc(0)
# conn.defineXML(vmxml)
def add_disk(self, name, size, pool=None, thin=True, template=None, shareable=False, existing=None):
conn = self.conn
diskformat = 'qcow2'
diskbus = 'virtio'
if size < 1:
print("Incorrect size.Leaving...")
return
if not thin:
diskformat = 'raw'
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
currentdisk = 0
for element in root.getiterator('disk'):
disktype = element.get('device')
if disktype == 'cdrom':
continue
currentdisk = currentdisk + 1
diskindex = currentdisk + 1
diskdev = "vd%s" % string.ascii_lowercase[currentdisk]
if existing is None:
storagename = "%s_%d.img" % (name, diskindex)
diskpath = self.create_disk(name=storagename, size=size, pool=pool, thin=thin, template=template)
else:
diskpath = existing
diskxml = self._xmldisk(diskpath=diskpath, diskdev=diskdev, diskbus=diskbus, diskformat=diskformat, shareable=shareable)
vm.attachDevice(diskxml)
vm = conn.lookupByName(name)
vmxml = vm.XMLDesc(0)
conn.defineXML(vmxml)
    def delete_disk(self, name, diskname):
        """Detach disk *diskname* (volume name or path) from vm *name* and delete its volume."""
        conn = self.conn
        try:
            vm = conn.lookupByName(name)
            xml = vm.XMLDesc(0)
            root = ET.fromstring(xml)
        except:
            print("VM %s not found" % name)
            return
        for element in root.getiterator('disk'):
            disktype = element.get('device')
            diskdev = element.find('target').get('dev')
            diskbus = element.find('target').get('bus')
            diskformat = element.find('driver').get('type')
            if disktype == 'cdrom':
                continue
            diskpath = element.find('source').get('file')
            volume = self.conn.storageVolLookupByPath(diskpath)
            if volume.name() == diskname or volume.path() == diskname:
                # rebuild the exact device xml so detachDevice can match it
                diskxml = self._xmldisk(diskpath=diskpath, diskdev=diskdev, diskbus=diskbus, diskformat=diskformat)
                vm.detachDevice(diskxml)
                volume.delete(0)
                # re-read and re-define to persist the domain without the disk
                vm = conn.lookupByName(name)
                vmxml = vm.XMLDesc(0)
                conn.defineXML(vmxml)
                return
        print("Disk %s not found in %s" % (diskname, name))
def list_disks(self):
volumes = {}
for p in self.conn.listStoragePools():
poo = self.conn.storagePoolLookupByName(p)
for volume in poo.listAllVolumes():
volumes[volume.name()] = {'pool': poo.name(), 'path': volume.path()}
return volumes
def add_nic(self, name, network):
conn = self.conn
networks = {}
for interface in conn.listAllInterfaces():
networks[interface.name()] = 'bridge'
for net in conn.listAllNetworks():
networks[net.name()] = 'network'
try:
vm = conn.lookupByName(name)
except:
print("VM %s not found" % name)
return
if network not in networks:
print("Network %s not found" % network)
return
else:
networktype = networks[network]
source = "<source %s='%s'/>" % (networktype, network)
nicxml = """<interface type='%s'>
%s
<model type='virtio'/>
</interface>""" % (networktype, source)
vm.attachDevice(nicxml)
vm = conn.lookupByName(name)
vmxml = vm.XMLDesc(0)
conn.defineXML(vmxml)
def delete_nic(self, name, interface):
conn = self.conn
networks = {}
nicnumber = 0
for n in conn.listAllInterfaces():
networks[n.name()] = 'bridge'
for n in conn.listAllNetworks():
networks[n.name()] = 'network'
try:
vm = conn.lookupByName(name)
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
except:
print("VM %s not found" % name)
return
for element in root.getiterator('interface'):
device = "eth%s" % nicnumber
if device == interface:
mac = element.find('mac').get('address')
networktype = element.get('type')
if networktype == 'bridge':
network = element.find('source').get('bridge')
source = "<source %s='%s'/>" % (networktype, network)
else:
network = element.find('source').get('network')
source = "<source %s='%s'/>" % (networktype, network)
break
else:
nicnumber += 1
nicxml = """<interface type='%s'>
<mac address='%s'/>
%s
<model type='virtio'/>
</interface>""" % (networktype, mac, source)
print nicxml
vm.detachDevice(nicxml)
vm = conn.lookupByName(name)
vmxml = vm.XMLDesc(0)
conn.defineXML(vmxml)
    def ssh(self, name, local=None, remote=None):
        """Open an interactive ssh session to vm *name*.

        The login user is guessed from the vm's template name; *local* and
        *remote* are optional ssh port-forward specs passed as -L / -R.
        """
        ubuntus = ['utopic', 'vivid', 'wily', 'xenial', 'yakkety']
        user = 'root'
        conn = self.conn
        try:
            vm = conn.lookupByName(name)
        except:
            print("VM %s not found" % name)
            return
        if vm.isActive() != 1:
            print("Machine down. Cannot ssh...")
            return
        # self.list() rows are indexed below as [0]=name, [2]=ip, [3]=template
        # (assumed from usage - confirm against self.list())
        vm = [v for v in self.list() if v[0] == name][0]
        template = vm[3]
        if template != '':
            # cloud images ship a distro-specific default user
            if 'centos' in template.lower():
                user = 'centos'
            elif 'cirros' in template.lower():
                user = 'cirros'
            elif [x for x in ubuntus if x in template.lower()]:
                user = 'ubuntu'
            elif 'fedora' in template.lower():
                user = 'fedora'
            elif 'rhel' in template.lower():
                user = 'cloud-user'
            elif 'debian' in template.lower():
                user = 'debian'
            elif 'arch' in template.lower():
                user = 'arch'
        ip = vm[2]
        if ip == '':
            print("No ip found. Cannot ssh...")
        else:
            sshcommand = "%s@%s" % (user, ip)
            if local is not None:
                sshcommand = "-L %s %s" % (local, sshcommand)
            if remote is not None:
                sshcommand = "-R %s %s" % (remote, sshcommand)
            sshcommand = "ssh %s" % sshcommand
            os.system(sshcommand)
def _get_free_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
def create_pool(self, name, poolpath, pooltype='dir', user='qemu'):
conn = self.conn
for pool in conn.listStoragePools():
if pool == name:
print("Pool %s already there.Leaving..." % name)
return
if pooltype == 'dir':
if self.host == 'localhost' or self.host == '127.0.0.1':
if not os.path.exists(poolpath):
os.makedirs(poolpath)
elif self.protocol == 'ssh':
cmd1 = 'ssh -p %s %s@%s "test -d %s || mkdir %s"' % (self.port, self.user, self.host, poolpath, poolpath)
cmd2 = 'ssh %s@%s "chown %s %s"' % (self.user, self.host, user, poolpath)
os.system(cmd1)
os.system(cmd2)
else:
print("Make sur %s directory exists on hypervisor" % name)
poolxml = """<pool type='dir'>
<name>%s</name>
<source>
</source>
<target>
<path>%s</path>
</target>
</pool>""" % (name, poolpath)
elif pooltype == 'logical':
poolxml = """<pool type='logical'>
<name>%s</name>
<source>
<device path='%s'/>
<name>%s</name>
<format type='lvm2'/>
</source>
<target>
<path>/dev/%s</path>
</target>
</pool>""" % (name, poolpath, name, name)
else:
print("Invalid pool type %s.Leaving..." % pooltype)
return
pool = conn.storagePoolDefineXML(poolxml, 0)
pool.setAutostart(True)
if pooltype == 'logical':
pool.build()
pool.create()
def add_image(self, image, pool):
poolname = pool
shortimage = os.path.basename(image)
conn = self.conn
volumes = []
try:
pool = conn.storagePoolLookupByName(pool)
for vol in pool.listAllVolumes():
volumes.append(vol.name())
except:
return {'result': 'failure', 'reason': "Pool %s not found" % poolname}
poolxml = pool.XMLDesc(0)
root = ET.fromstring(poolxml)
pooltype = root.getiterator('pool')[0].get('type')
if pooltype == 'dir':
poolpath = root.getiterator('path')[0].text
else:
poolpath = root.getiterator('device')[0].get('path')
return {'result': 'failure', 'reason': "Upload to a lvm pool not implemented not found"}
if shortimage in volumes:
return {'result': 'failure', 'reason': "Template %s already exists in pool %s" % (shortimage, poolname)}
if self.host == 'localhost' or self.host == '127.0.0.1':
cmd = 'wget -P %s %s' % (poolpath, image)
elif self.protocol == 'ssh':
cmd = 'ssh -p %s %s@%s "wget -P %s %s"' % (self.port, self.user, self.host, poolpath, image)
os.system(cmd)
pool.refresh()
# self._uploadimage(shortimage, pool=pool, suffix='')
return {'result': 'success'}
def create_network(self, name, cidr, dhcp=True, nat=True):
conn = self.conn
networks = self.list_networks()
cidrs = [network['cidr'] for network in networks.values()]
if name in networks:
return {'result': 'failure', 'reason': "Network %s already exists" % name}
try:
range = IpRange(cidr)
except TypeError:
return {'result': 'failure', 'reason': "Invalid Cidr %s" % cidr}
if IPNetwork(cidr) in cidrs:
return {'result': 'failure', 'reason': "Cidr %s already exists" % cidr}
netmask = IPNetwork(cidr).netmask
gateway = range[1]
if dhcp:
start = range[2]
end = range[-2]
dhcpxml = """<dhcp>
<range start='%s' end='%s'/>
</dhcp>""" % (start, end)
else:
dhcpxml = ''
if nat:
natxml = "<forward mode='nat'><nat><port start='1024' end='65535'/></nat></forward>"
else:
natxml = ''
networkxml = """<network><name>%s</name>
%s
<domain name='%s'/>
<ip address='%s' netmask='%s'>
%s
</ip>
</network>""" % (name, natxml, name, gateway, netmask, dhcpxml)
new_net = conn.networkDefineXML(networkxml)
new_net.setAutostart(True)
new_net.create()
return {'result': 'success'}
def delete_network(self, name=None):
conn = self.conn
try:
network = conn.networkLookupByName(name)
except:
return {'result': 'failure', 'reason': "Network %s not found" % name}
machines = self.network_ports(name)
if machines:
machines = ','.join(machines)
return {'result': 'failure', 'reason': "Network %s is beeing used by %s" % (name, machines)}
if network.isActive():
network.destroy()
network.undefine()
return {'result': 'success'}
def list_pools(self):
pools = []
conn = self.conn
for pool in conn.listStoragePools():
pools.append(pool)
return pools
    def list_networks(self):
        """Return {name: {'cidr', 'dhcp', 'type', 'mode'}} for libvirt networks and host bridges."""
        networks = {}
        conn = self.conn
        for network in conn.listAllNetworks():
            networkname = network.name()
            netxml = network.XMLDesc(0)
            cidr = 'N/A'
            root = ET.fromstring(netxml)
            ip = root.getiterator('ip')
            if ip:
                attributes = ip[0].attrib
                firstip = attributes.get('address')
                netmask = attributes.get('netmask')
                # derive the network cidr from the gateway address + netmask
                ip = IPNetwork('%s/%s' % (firstip, netmask))
                cidr = ip.cidr
            dhcp = root.getiterator('dhcp')
            if dhcp:
                dhcp = True
            else:
                dhcp = False
            forward = root.getiterator('forward')
            if forward:
                attributes = forward[0].attrib
                mode = attributes.get('mode')
            else:
                # no <forward> element means an isolated network
                mode = 'isolated'
            networks[networkname] = {'cidr': cidr, 'dhcp': dhcp, 'type': 'routed', 'mode': mode}
        # host bridges are listed too, with whatever addressing they carry
        for interface in conn.listAllInterfaces():
            interfacename = interface.name()
            if interfacename == 'lo':
                continue
            netxml = interface.XMLDesc(0)
            root = ET.fromstring(netxml)
            ip = root.getiterator('ip')
            if ip:
                attributes = ip[0].attrib
                ip = attributes.get('address')
                prefix = attributes.get('prefix')
                ip = IPNetwork('%s/%s' % (ip, prefix))
                cidr = ip.cidr
            else:
                cidr = 'N/A'
            networks[interfacename] = {'cidr': cidr, 'dhcp': 'N/A', 'type': 'bridged', 'mode': 'N/A'}
        return networks
def delete_pool(self, name, full=False):
conn = self.conn
try:
pool = conn.storagePoolLookupByName(name)
except:
print("Pool %s not found. Leaving..." % name)
return
if full:
for vol in pool.listAllVolumes():
vol.delete(0)
if pool.isActive():
pool.destroy()
pool.undefine()
def bootstrap(self, pool=None, poolpath=None, pooltype='dir', nets={}, image=None):
conn = self.conn
volumes = {}
try:
poolname = pool
pool = conn.storagePoolLookupByName(pool)
for vol in pool.listAllVolumes():
volumes[vol.name()] = {'object': vol}
except:
if poolpath is not None:
print("Pool %s not found...Creating it" % pool)
self.create_pool(name=pool, poolpath=poolpath, pooltype=pooltype)
if image is not None and os.path.basename(image) not in volumes:
self.add_image(image, poolname)
networks = []
for net in conn.listNetworks():
networks.append(net)
for net in nets:
if net not in networks:
print("Network %s not found...Creating it" % net)
cidr = nets[net].get('cidr')
dhcp = bool(nets[net].get('dchp', True))
self.create_network(name=net, cidr=cidr, dhcp=dhcp)
def network_ports(self, name):
conn = self.conn
machines = []
for vm in conn.listAllDomains(0):
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
for element in root.getiterator('interface'):
networktype = element.get('type')
if networktype == 'bridge':
network = element.find('source').get('bridge')
else:
network = element.find('source').get('network')
if network == name:
machines.append(vm.name())
return machines
def vm_ports(self, name):
conn = self.conn
networks = []
try:
vm = conn.lookupByName(name)
except:
print("VM %s not found" % name)
return
xml = vm.XMLDesc(0)
root = ET.fromstring(xml)
for element in root.getiterator('interface'):
networktype = element.get('type')
if networktype == 'bridge':
network = element.find('source').get('bridge')
else:
network = element.find('source').get('network')
networks.append(network)
return networks
    def create_container(self, name, image, nets=None, cmd=None, ports=[], volumes=[], label=None):
        """Run container *name* from *image*, via the docker API locally or docker-over-ssh remotely.

        ports: ints, 'host:dest' strings or {'origin','destination'} dicts.
        volumes: 'origin:dest' strings or dicts with path/origin/destination/mode.
        NOTE(review): mutable defaults ports=[]/volumes=[] are mutated in place below.
        """
        # if not nets:
        #     return
        # for i, net in enumerate(nets):
        #     print net
        #     if isinstance(net, str):
        #         netname = net
        #     elif isinstance(net, dict) and 'name' in net:
        #         netname = net['name']
        #     nets[i] = self._get_bridge(netname)
        if self.host == '127.0.0.1':
            # local: normalize volumes/ports to the docker-py binding format
            for i, volume in enumerate(volumes):
                if isinstance(volume, str):
                    if len(volume.split(':')) == 2:
                        origin, destination = volume.split(':')
                        volumes[i] = {origin: {'bind': destination, 'mode': 'rw'}}
                    else:
                        volumes[i] = {volume: {'bind': volume, 'mode': 'rw'}}
                elif isinstance(volume, dict):
                    path = volume.get('path')
                    origin = volume.get('origin')
                    destination = volume.get('destination')
                    mode = volume.get('mode', 'rw')
                    if origin is None or destination is None:
                        if path is None:
                            continue
                        volumes[i] = {path: {'bind': path, 'mode': mode}}
                    else:
                        volumes[i] = {origin: {'bind': destination, 'mode': mode}}
            if ports is not None:
                ports = {'%s/tcp' % k: k for k in ports}
            if label is not None and isinstance(label, str) and len(label.split('=')) == 2:
                key, value = label.split('=')
                labels = {key: value}
            else:
                labels = None
            base_url = 'unix://var/run/docker.sock'
            d = docker.DockerClient(base_url=base_url, version='1.22')
            # d.containers.run(image, name=name, command=cmd, networks=nets, detach=True, ports=ports)
            d.containers.run(image, name=name, command=cmd, detach=True, ports=ports, volumes=volumes, stdin_open=True, tty=True, labels=labels)
        else:
            # remote: build a docker run command line and execute it over ssh
            # netinfo = ''
            # for net in nets:
            #     netinfo = "%s --net=%s" % (netinfo, net)
            portinfo = ''
            if ports is not None:
                for port in ports:
                    if isinstance(port, int):
                        oriport = port
                        destport = port
                    elif isinstance(port, str):
                        if len(port.split(':')) == 2:
                            oriport, destport = port.split(':')
                        else:
                            oriport = port
                            destport = port
                    elif isinstance(port, dict) and 'origin' in port and 'destination' in port:
                        oriport = port['origin']
                        destport = port['destination']
                    else:
                        continue
                    portinfo = "%s -p %s:%s" % (portinfo, oriport, destport)
            volumeinfo = ''
            if volumes is not None:
                for volume in volumes:
                    if isinstance(volume, str):
                        if len(volume.split(':')) == 2:
                            origin, destination = volume.split(':')
                        else:
                            origin = volume
                            destination = volume
                    elif isinstance(volume, dict):
                        path = volume.get('path')
                        origin = volume.get('origin')
                        destination = volume.get('destination')
                        if origin is None or destination is None:
                            if path is None:
                                continue
                            origin = path
                            destination = path
                    volumeinfo = "%s -v %s:%s" % (volumeinfo, origin, destination)
            # NOTE(review): when label is None this emits '-l None' - confirm intended
            dockercommand = "docker run -it %s %s --name %s -l %s -d %s" % (volumeinfo, portinfo, name, label, image)
            if cmd is not None:
                dockercommand = "%s %s" % (dockercommand, cmd)
            command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
            os.system(command)
def delete_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container for container in d.containers.list() if container.name == name]
if containers:
for container in containers:
container.remove(force=True)
else:
dockercommand = "docker rm -f %s" % name
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
def start_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container for container in d.containers.list(all=True) if container.name == name]
if containers:
for container in containers:
container.start()
else:
dockercommand = "docker start %s" % name
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
def stop_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container for container in d.containers.list() if container.name == name]
if containers:
for container in containers:
container.stop()
else:
dockercommand = "docker stop %s" % name
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
def console_container(self, name):
if self.host == '127.0.0.1':
# base_url = 'unix://var/run/docker.sock'
dockercommand = "docker attach %s" % name
os.system(dockercommand)
# d = docker.DockerClient(base_url=base_url)
# containers = [container.id for container in d.containers.list() if container.name == name]
# if containers:
# for container in containers:
# container.attach()
else:
dockercommand = "docker attach %s" % name
command = "ssh -t -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
os.system(command)
    def list_containers(self):
        """Return rows [name, state, source image, plan label, command, ports] for all containers."""
        containers = []
        if self.host == '127.0.0.1':
            base_url = 'unix://var/run/docker.sock'
            d = docker.DockerClient(base_url=base_url, version='1.22')
            # containers = [container.name for container in d.containers.list()]
            for container in d.containers.list(all=True):
                name = container.name
                state = container.status
                state = state.split(' ')[0]
                # normalize docker status to a simple up/down flag
                if state.startswith('running'):
                    state = 'up'
                else:
                    state = 'down'
                source = container.attrs['Config']['Image']
                labels = container.attrs['Config']['Labels']
                if 'plan' in labels:
                    plan = labels['plan']
                else:
                    plan = ''
                command = container.attrs['Config']['Cmd']
                if command is None:
                    command = ''
                else:
                    command = command[0]
                ports = container.attrs['NetworkSettings']['Ports']
                if ports:
                    portinfo = []
                    for port in ports:
                        if ports[port] is None:
                            # exposed but unpublished port
                            newport = port
                        else:
                            hostport = ports[port][0]['HostPort']
                            hostip = ports[port][0]['HostIp']
                            newport = "%s:%s->%s" % (hostip, hostport, port)
                        portinfo.append(newport)
                    portinfo = ','.join(portinfo)
                else:
                    portinfo = ''
                containers.append([name, state, source, plan, command, portinfo])
        else:
            containers = []
            # dockercommand = "docker ps --format '{{.Names}}'"
            # remote: emit a '?'-separated record per container and parse it below
            dockercommand = "docker ps -a --format \"'{{.Names}}?{{.Status}}?{{.Image}}?{{.Command}}?{{.Ports}}?{{.Label \\\"plan\\\"}}'\""
            command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
            results = os.popen(command).readlines()
            for container in results:
                # containers.append(container.strip())
                name, state, source, command, ports, plan = container.split('?')
                if state.startswith('Up'):
                    state = 'up'
                else:
                    state = 'down'
                # labels = {i.split('=')[0]: i.split('=')[1] for i in labels.split(',')}
                # if 'plan' in labels:
                #     plan = labels['plan']
                # else:
                #     plan = ''
                command = command.strip().replace('"', '')
                containers.append([name, state, source, plan, command, ports])
        return containers
def exists_container(self, name):
if self.host == '127.0.0.1':
base_url = 'unix://var/run/docker.sock'
d = docker.DockerClient(base_url=base_url, version='1.22')
containers = [container.id for container in d.containers.list(all=True) if container.name == name]
if containers:
return True
else:
dockercommand = "docker ps -a --format '{{.Names}}'"
command = "ssh -p %s %s@%s %s" % (self.port, self.user, self.host, dockercommand)
results = os.popen(command).readlines()
for container in results:
containername = container.strip()
if containername == name:
return True
return False
def _get_bridge(self, name):
conn = self.conn
bridges = [interface.name() for interface in conn.listAllInterfaces()]
if name in bridges:
return name
try:
net = self.conn.networkLookupByName(name)
except:
return None
netxml = net.XMLDesc(0)
root = ET.fromstring(netxml)
bridge = root.getiterator('bridge')
if bridge:
attributes = bridge[0].attrib
bridge = attributes.get('name')
return bridge
|
# Copyright (C) 2019-2020 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from vyos.configsession import ConfigSession
from netifaces import ifaddresses, AF_INET, AF_INET6
from vyos.validate import is_intf_addr_assigned, is_ipv6_link_local
from vyos.ifconfig import Interface
class BasicInterfaceTest:
    """Wrapper namespace so unittest does not collect and run BaseTest itself."""
    class BaseTest(unittest.TestCase):
        """Shared smoke tests for VyOS interface types.

        Subclasses supply _base_path, _interfaces, per-interface _options and
        the _test_* feature flags.
        """
        _test_mtu = False
        _test_vlan = False
        _test_qinq = False
        _base_path = []
        _options = {}
        _interfaces = []
        _vlan_range = ['100', '200', '300', '2000']
        # choose IPv6 minimum MTU value for tests - this must always work
        _mtu = '1280'
        def setUp(self):
            # one config session per test, keyed by our pid
            self.session = ConfigSession(os.getpid())
            self._test_addr = ['192.0.2.1/26', '192.0.2.255/31', '192.0.2.64/32',
                               '2001:db8:1::ffff/64', '2001:db8:101::1/112']
            self._test_mtu = False
            self._options = {}
        def tearDown(self):
            # we should not remove ethernet from the overall CLI
            if 'ethernet' in self._base_path:
                for intf in self._interfaces:
                    # when using a dedicated interface to test via TEST_ETH environment
                    # variable only this one will be cleared in the end - usable to test
                    # ethernet interfaces via SSH
                    self.session.delete(self._base_path + [intf])
                    self.session.set(self._base_path + [intf])
            else:
                self.session.delete(self._base_path)
            self.session.commit()
            del self.session
        def test_add_description(self):
            """
            Check if description can be added to interface
            """
            for intf in self._interfaces:
                test_string='Description-Test-{}'.format(intf)
                self.session.set(self._base_path + [intf, 'description', test_string])
                for option in self._options.get(intf, []):
                    self.session.set(self._base_path + [intf] + option.split())
            self.session.commit()
            # Validate interface description
            for intf in self._interfaces:
                test_string='Description-Test-{}'.format(intf)
                with open('/sys/class/net/{}/ifalias'.format(intf), 'r') as f:
                    tmp = f.read().rstrip()
                    # NOTE(review): assertTrue(a, b) only checks truthiness of a;
                    # assertEqual was probably intended - confirm
                    self.assertTrue(tmp, test_string)
        def test_add_address_single(self):
            """
            Check if a single address can be added to interface.
            """
            addr = '192.0.2.0/31'
            for intf in self._interfaces:
                self.session.set(self._base_path + [intf, 'address', addr])
                for option in self._options.get(intf, []):
                    self.session.set(self._base_path + [intf] + option.split())
            self.session.commit()
            for intf in self._interfaces:
                self.assertTrue(is_intf_addr_assigned(intf, addr))
        def test_add_address_multi(self):
            """
            Check if IPv4/IPv6 addresses can be added to interface.
            """
            # Add address
            for intf in self._interfaces:
                for addr in self._test_addr:
                    self.session.set(self._base_path + [intf, 'address', addr])
                for option in self._options.get(intf, []):
                    self.session.set(self._base_path + [intf] + option.split())
            self.session.commit()
            # Validate address
            for intf in self._interfaces:
                for af in AF_INET, AF_INET6:
                    for addr in ifaddresses(intf)[af]:
                        # checking link local addresses makes no sense
                        if is_ipv6_link_local(addr['addr']):
                            continue
                        self.assertTrue(is_intf_addr_assigned(intf, addr['addr']))
        def _mtu_test(self, intf):
            """ helper function to verify MTU size """
            with open('/sys/class/net/{}/mtu'.format(intf), 'r') as f:
                tmp = f.read().rstrip()
                self.assertEqual(tmp, self._mtu)
        def test_change_mtu(self):
            """ Testcase if MTU can be changed on interface """
            if not self._test_mtu:
                return None
            for intf in self._interfaces:
                base = self._base_path + [intf]
                self.session.set(base + ['mtu', self._mtu])
                for option in self._options.get(intf, []):
                    self.session.set(base + option.split())
            self.session.commit()
            for intf in self._interfaces:
                self._mtu_test(intf)
        def _vlan_config(self, intf):
            # configure mtu plus every test address on each 802.1q vif
            for vlan in self._vlan_range:
                base = self._base_path + [intf, 'vif', vlan]
                self.session.set(base + ['mtu', self._mtu])
                for address in self._test_addr:
                    self.session.set(base + ['address', address])
        def _vlan_test(self, intf):
            # verify addresses and mtu on each 802.1q vif created by _vlan_config
            for vlan in self._vlan_range:
                vif = f'{intf}.{vlan}'
                for address in self._test_addr:
                    self.assertTrue(is_intf_addr_assigned(vif, address))
                with open(f'/sys/class/net/{vif}/mtu', 'r') as f:
                    tmp = f.read().rstrip()
                    self.assertEqual(tmp, self._mtu)
        def test_8021q_vlan(self):
            if not self._test_vlan:
                return None
            for intf in self._interfaces:
                self._vlan_config(intf)
            self.session.commit()
            for intf in self._interfaces:
                self._vlan_test(intf)
# interfaces: VLAN tests can not be "functionized" any further
# Copyright (C) 2019-2020 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from vyos.configsession import ConfigSession
from netifaces import ifaddresses, AF_INET, AF_INET6
from vyos.validate import is_intf_addr_assigned, is_ipv6_link_local
from vyos.ifconfig import Interface
class BasicInterfaceTest:
    """Namespace wrapper hiding the generic test cases from unittest discovery.

    Only the per-interface-type subclasses of :class:`BaseTest` defined at
    module level in the concrete smoketest files are collected and executed.
    """

    class BaseTest(unittest.TestCase):
        """Generic CLI test cases shared by all interface types.

        Subclasses point ``_base_path`` at a specific interface type (e.g.
        ``['interfaces', 'ethernet']``), list the interfaces to exercise in
        ``_interfaces`` and enable the optional feature tests below.
        """
        # feature switches - to be enabled by subclasses that support them
        _test_mtu = False
        _test_vlan = False
        _test_qinq = False
        # CLI path of the interface type under test
        _base_path = []
        # per-interface extra CLI options required before a commit succeeds
        _options = {}
        _interfaces = []
        _vlan_range = ['100', '200', '300', '2000']
        # choose IPv6 minimum MTU value for tests - this must always work
        _mtu = '1280'

        def setUp(self):
            """Open a config session and prepare common test fixtures."""
            self.session = ConfigSession(os.getpid())
            self._test_addr = ['192.0.2.1/26', '192.0.2.255/31', '192.0.2.64/32',
                               '2001:db8:1::ffff/64', '2001:db8:101::1/112']
            # NOTE(review): these resets shadow class-level overrides done by
            # subclasses - presumably subclasses re-enable them in their own
            # setUp() after calling super().setUp(); confirm before relying on it
            self._test_mtu = False
            self._options = {}

        def tearDown(self):
            """Remove the test configuration and close the session."""
            # we should not remove ethernet from the overall CLI
            if 'ethernet' in self._base_path:
                for intf in self._interfaces:
                    # when using a dedicated interface to test via TEST_ETH environment
                    # variable only this one will be cleared in the end - usable to test
                    # ethernet interfaces via SSH
                    self.session.delete(self._base_path + [intf])
                    self.session.set(self._base_path + [intf])
            else:
                self.session.delete(self._base_path)
            self.session.commit()
            del self.session

        def test_add_description(self):
            """
            Check if description can be added to interface
            """
            for intf in self._interfaces:
                test_string = 'Description-Test-{}'.format(intf)
                self.session.set(self._base_path + [intf, 'description', test_string])
                for option in self._options.get(intf, []):
                    self.session.set(self._base_path + [intf] + option.split())
            self.session.commit()
            # Validate interface description
            for intf in self._interfaces:
                test_string = 'Description-Test-{}'.format(intf)
                with open('/sys/class/net/{}/ifalias'.format(intf), 'r') as f:
                    tmp = f.read().rstrip()
                    # bug fix: assertTrue(a, b) only checks truthiness of `a`
                    # (with `b` as the failure message) - compare the values
                    self.assertEqual(tmp, test_string)

        def test_add_address_single(self):
            """
            Check if a single address can be added to interface.
            """
            addr = '192.0.2.0/31'
            for intf in self._interfaces:
                self.session.set(self._base_path + [intf, 'address', addr])
                for option in self._options.get(intf, []):
                    self.session.set(self._base_path + [intf] + option.split())
            self.session.commit()
            for intf in self._interfaces:
                self.assertTrue(is_intf_addr_assigned(intf, addr))

        def test_add_address_multi(self):
            """
            Check if IPv4/IPv6 addresses can be added to interface.
            """
            # Add address
            for intf in self._interfaces:
                for addr in self._test_addr:
                    self.session.set(self._base_path + [intf, 'address', addr])
                for option in self._options.get(intf, []):
                    self.session.set(self._base_path + [intf] + option.split())
            self.session.commit()
            # Validate address
            for intf in self._interfaces:
                for af in AF_INET, AF_INET6:
                    for addr in ifaddresses(intf)[af]:
                        # checking link local addresses makes no sense
                        if is_ipv6_link_local(addr['addr']):
                            continue
                        self.assertTrue(is_intf_addr_assigned(intf, addr['addr']))

        def _mtu_test(self, intf):
            """ helper function to verify MTU size """
            with open('/sys/class/net/{}/mtu'.format(intf), 'r') as f:
                tmp = f.read().rstrip()
                self.assertEqual(tmp, self._mtu)

        def test_change_mtu(self):
            """ Testcase if MTU can be changed on interface """
            if not self._test_mtu:
                return None
            for intf in self._interfaces:
                base = self._base_path + [intf]
                self.session.set(base + ['mtu', self._mtu])
                for option in self._options.get(intf, []):
                    self.session.set(base + option.split())
            self.session.commit()
            for intf in self._interfaces:
                self._mtu_test(intf)

        def test_8021q_vlan(self):
            """ Testcase for 802.1q VLAN interfaces """
            if not self._test_vlan:
                return None
            for intf in self._interfaces:
                for vlan in self._vlan_range:
                    base = self._base_path + [intf, 'vif', vlan]
                    self.session.set(base + ['mtu', self._mtu])
                    for address in self._test_addr:
                        self.session.set(base + ['address', address])
            self.session.commit()
            for intf in self._interfaces:
                for vlan in self._vlan_range:
                    vif = f'{intf}.{vlan}'
                    for address in self._test_addr:
                        self.assertTrue(is_intf_addr_assigned(vif, address))
                    self._mtu_test(vif)
|
# Generated by Django 2.2.9 on 2020-01-21 08:09
from django.db import migrations
# notification template types seeded by this data migration
NOTIFICATION_TYPES = (
    'unpublished_event_deleted',
    'event_published',
    'draft_posted'
)
# languages for which the translated template fields (e.g. subject_fi) are filled
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
# footer appended below every message body (Finnish)
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
# subject/body templates below use Django template syntax; presumably they are
# rendered with an `event` object in the context when a notification is sent -
# TODO confirm against the notification renderer
UNPUBLISHED_EVENT_DELETED_SUBJECT_FI = 'Tapahtumailmoituksesi "{{ event.name }}" on poistettu – Helsingin kaupunki\n\n'
UNPUBLISHED_EVENT_DELETED_HTML_BODY_FI = \
"""Helsingin kaupungille {{ event.created_at|date:"j.n.Y \\k\\l\\o H:i" }} ilmoittamasi tapahtuma "{{ event.name }}" on poistettu.
Ilmoituksesi poistettiin, joko 1) koska se ei noudattanut Helsingin kaupungin <a href="https://linkedevents.hel.fi/terms">tapahtumarajapinnan käyttöehtoja</a> tai 2) koska tapahtuman ei muusta syystä katsottu sopivan kaupungin tapahtumarajapintaan.
Jos haluat lisätietoja, voit jättää asiasta kysymyksen osoitteessa <a href="https://hel.fi/palaute">hel.fi/palaute.</a> Mainitse palautteessasi tapahtuman nimi ja julkaisuaika: "{{ event.name }}", {{ event.created_at|date:"j.n.Y \\k\\l\\o H:i" }}"""
EVENT_PUBLISHED_SUBJECT_FI = 'Tapahtumailmoituksesi "{{ event.name }}" on julkaistu – Helsingin kaupunki'
EVENT_PUBLISHED_HTML_BODY_FI = \
"""Helsingin kaupungille {{ event.created_at|date:"j.n.Y \\k\\l\\o H:i" }} ilmoittamasi tapahtuma "{{ event.name }}" on julkaistu.
Tapahtuma tulee näkyville <a href="https://hel.fi/tapahtumat">Helsingin tapahtumakalenteriin</a> enintään tunnin viiveellä. Tapahtuma voidaan näyttää myös muissa kalentereissa, jotka hakevat tietoja Helsingin kaupungin tapahtumarajapinnasta.
Et voi enää muokata tapahtumaa julkaisun jälkeen. Jos haluat muuttaa tapahtuman tietoja, jätä muutospyyntö osoitteessa <a href="https://hel.fi/palaute">hel.fi/palaute</a>. Mainitse palautteessasi tapahtuman nimi ja julkaisuaika:"{{ event.name }}", {{ event.created_at|date:"j.n.Y \\k\\l\\o H:i" }}"""
DRAFT_POSTED_SUBJECT_FI = 'Uusi tapahtumaluonnos "{{ event.name }}", {{ event.created_at|date:"j.n.Y \\k\\l\\o H:i" }} – Helsingin kaupungin tapahtumarajapinta'
DRAFT_POSTED_HTML_BODY_FI = \
"""Helsingin kaupungin tapahtumarajapintaan on luotu uusi tapahtumaluonnos:
"{{ event.name }}", {{ event.created_at|date:"j.n.Y \\k\\l\\o H:i" }}
<a href="https://linkedevents.hel.fi/event/{{ event.id }}">Siirry moderoimaan tapahtumia »</a>
Sait tämän viestin, koska olet moderaattori organisaatiossa {{ event.publisher.name }}."""
def _append_footer(text, language):
    """Return *text* with the language-specific footer appended after a blank line."""
    key = 'FOOTER_{}'.format(language.upper())
    footer = globals().get(key)
    assert footer, '{} undefined'.format(key)
    return text + '\n\n' + footer
def _get_text(notification_type, language, field):
    """Look up the module-level template text for the given type, field and language."""
    key = '_'.join((notification_type, field, language)).upper()
    text = globals().get(key)
    assert text, '{} undefined'.format(key)
    return text
def create_existing_notifications(NotificationTemplate):
    """Seed a default template for every notification type that is still missing."""
    for notification_type in NOTIFICATION_TYPES:
        subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
        html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
        html_body = _append_footer(html_body, DEFAULT_LANGUAGE)
        try:
            # a template of this type already exists - leave it untouched
            notification = NotificationTemplate.objects.get(type=notification_type)
            continue
        except NotificationTemplate.DoesNotExist:
            pass
        # reached only when no template of this type exists, so get_or_create
        # always creates here (it matches on all three fields)
        notification, created = NotificationTemplate.objects.get_or_create(
            type=notification_type,
            subject=subject,
            html_body=html_body)
        if created:
            # fill the per-language translated columns (subject_fi, html_body_fi, ...)
            # - presumably django-modeltranslation style suffixes; TODO confirm
            for language in LANGUAGES:
                subject = _get_text(notification_type, language, 'subject')
                html_body = _get_text(notification_type, language, 'html_body')
                html_body = _append_footer(html_body, language)
                setattr(notification, 'subject_{}'.format(language), subject)
                setattr(notification, 'html_body_{}'.format(language), html_body)
            notification.save()
def forwards(apps, schema_editor):
    """Migration entry point: create the default notification templates."""
    template_model = apps.get_model('notifications', 'NotificationTemplate')
    create_existing_notifications(template_model)
class Migration(migrations.Migration):
    # Data migration seeding the default Finnish notification templates.
    # The reverse operation is a deliberate no-op: created templates are kept.
    dependencies = [
        ('notifications', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop)
    ]
Update Finnish default templates
# Generated by Django 2.2.9 on 2020-01-21 08:09
from django.db import migrations
# notification template types seeded by this data migration
NOTIFICATION_TYPES = (
    'unpublished_event_deleted',
    'event_published',
    'draft_posted'
)
# languages for which the translated template fields (e.g. subject_fi) are filled
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
# footer appended below every message body (Finnish)
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
# HTML separator inserted between body and footer
HTML_SEPARATOR = '\n<br/><br/>\n'
# subject/body templates below use Django template syntax; `format_datetime`
# is presumably a project-provided template filter - TODO confirm against the
# notification renderer
UNPUBLISHED_EVENT_DELETED_SUBJECT_FI = 'Tapahtumailmoituksesi "{{ event.name }}" on poistettu – Helsingin kaupunki'
UNPUBLISHED_EVENT_DELETED_HTML_BODY_FI = \
"""Helsingin kaupungille {{ event.created_time|format_datetime('fi') }} ilmoittamasi tapahtuma "{{ event.name }}" on poistettu.
<br/><br/>
Ilmoituksesi poistettiin, joko 1) koska se ei noudattanut Helsingin kaupungin <a href="https://linkedevents.hel.fi/terms">tapahtumarajapinnan käyttöehtoja</a> tai 2) koska tapahtuman ei muusta syystä katsottu sopivan kaupungin tapahtumarajapintaan.
<br/><br/>
Jos haluat lisätietoja, voit jättää asiasta kysymyksen osoitteessa <a href="https://hel.fi/palaute">hel.fi/palaute.</a> Mainitse palautteessasi tapahtuman nimi ja julkaisuaika: "{{ event.name }}", {{ event.created_time|format_datetime('fi') }}"""
EVENT_PUBLISHED_SUBJECT_FI = 'Tapahtumailmoituksesi "{{ event.name }}" on julkaistu – Helsingin kaupunki'
EVENT_PUBLISHED_HTML_BODY_FI = \
"""Helsingin kaupungille {{ event.created_time|format_datetime('fi') }} ilmoittamasi tapahtuma "{{ event.name }}" on julkaistu.
<br/><br/>
Tapahtuma tulee näkyville <a href="https://hel.fi/tapahtumat">Helsingin tapahtumakalenteriin</a> enintään tunnin viiveellä. Tapahtuma voidaan näyttää myös muissa kalentereissa, jotka hakevat tietoja Helsingin kaupungin tapahtumarajapinnasta.
<br/><br/>
Et voi enää muokata tapahtumaa julkaisun jälkeen. Jos haluat muuttaa tapahtuman tietoja, jätä muutospyyntö osoitteessa <a href="https://hel.fi/palaute">hel.fi/palaute</a>. Mainitse palautteessasi tapahtuman nimi ja julkaisuaika:"{{ event.name }}", {{ event.created_time|format_datetime('fi') }}"""
DRAFT_POSTED_SUBJECT_FI = 'Uusi tapahtumaluonnos "{{ event.name }}", {{ event.created_time|format_datetime(\'fi\') }} – Helsingin kaupungin tapahtumarajapinta'
DRAFT_POSTED_HTML_BODY_FI = \
"""Helsingin kaupungin tapahtumarajapintaan on luotu uusi tapahtumaluonnos:
<br/><br/>
"{{ event.name }}", {{ event.created_time|format_datetime('fi') }}
<br/><br/>
<a href="https://linkedevents.hel.fi/event/{{ event.id }}">Siirry moderoimaan tapahtumia »</a>
<br/><br/>
Sait tämän viestin, koska olet moderaattori organisaatiossa {{ event.publisher.name }}."""
def _append_footer(text, language, separator):
    """Return *text* joined to the language-specific footer by *separator*."""
    key = 'FOOTER_{}'.format(language.upper())
    footer = globals().get(key)
    assert footer, '{} undefined'.format(key)
    return text + separator + footer
def _get_text(notification_type, language, field):
    """Look up the module-level template text for the given type, field and language."""
    key = '_'.join((notification_type, field, language)).upper()
    text = globals().get(key)
    assert text, '{} undefined'.format(key)
    return text
def create_existing_notifications(NotificationTemplate):
    """Seed a default template for every notification type that is still missing."""
    for notification_type in NOTIFICATION_TYPES:
        subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
        html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
        html_body = _append_footer(html_body, DEFAULT_LANGUAGE, HTML_SEPARATOR)
        try:
            # a template of this type already exists - leave it untouched
            notification = NotificationTemplate.objects.get(type=notification_type)
            continue
        except NotificationTemplate.DoesNotExist:
            pass
        # reached only when no template of this type exists, so get_or_create
        # always creates here (it matches on all three fields)
        notification, created = NotificationTemplate.objects.get_or_create(
            type=notification_type,
            subject=subject,
            html_body=html_body)
        if created:
            # fill the per-language translated columns (subject_fi, html_body_fi, ...)
            # - presumably django-modeltranslation style suffixes; TODO confirm
            for language in LANGUAGES:
                subject = _get_text(notification_type, language, 'subject')
                html_body = _get_text(notification_type, language, 'html_body')
                html_body = _append_footer(html_body, language, HTML_SEPARATOR)
                setattr(notification, 'subject_{}'.format(language), subject)
                setattr(notification, 'html_body_{}'.format(language), html_body)
            notification.save()
def forwards(apps, schema_editor):
    """Migration entry point: create the default notification templates."""
    template_model = apps.get_model('notifications', 'NotificationTemplate')
    create_existing_notifications(template_model)
class Migration(migrations.Migration):
    # Data migration seeding the default Finnish notification templates.
    # The reverse operation is a deliberate no-op: created templates are kept.
    dependencies = [
        ('notifications', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop)
    ]
|
import numpy as np
import os
# imports for type hinting
from typing import Any, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from .network import Network
from functools import wraps
try:
from . import plotting # will perform the correct setup for matplotlib before it is called below
import matplotlib.pyplot as mplt
from matplotlib.ticker import EngFormatter
except ImportError:
mplt = None
import logging
import warnings
from timeit import default_timer as timer
def check_plotting(func):
    """
    This decorator checks if matplotlib.pyplot is available under the name mplt.
    If not, raise a RuntimeError.

    Raises
    ------
    RuntimeError
        When trying to run the decorated function without matplotlib
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        if mplt is None:
            raise RuntimeError('Plotting is not available')
        # bug fix: propagate the wrapped function's return value (e.g. a
        # matplotlib Axes object) instead of silently dropping it
        return func(*args, **kwargs)
    return wrapper
class VectorFitting:
"""
This class provides a Python implementation of the Vector Fitting algorithm and various functions for the fit
analysis, passivity evaluation and enforcement, and export of SPICE equivalent circuits.
Parameters
----------
network : :class:`skrf.network.Network`
Network instance of the :math:`N`-port holding the frequency responses to be fitted, for example a
scattering, impedance or admittance matrix.
Examples
--------
Load the `Network`, create a `VectorFitting` instance, perform the fit with a given number of real and
complex-conjugate starting poles:
>>> nw_3port = skrf.Network('my3port.s3p')
>>> vf = skrf.VectorFitting(nw_3port)
>>> vf.vector_fit(n_poles_real=1, n_poles_cmplx=4)
Notes
-----
The fitting code is based on the original algorithm [#Gustavsen_vectfit]_ and on two improvements for relaxed pole
relocation [#Gustavsen_relaxed]_ and efficient (fast) solving [#Deschrijver_fast]_. See also the Vector Fitting
website [#vectfit_website]_ for further information and download of the papers listed below. A Matlab implementation
is also available there for reference.
References
----------
.. [#Gustavsen_vectfit] B. Gustavsen, A. Semlyen, "Rational Approximation of Frequency Domain Responses by Vector
Fitting", IEEE Transactions on Power Delivery, vol. 14, no. 3, pp. 1052-1061, July 1999,
DOI: https://doi.org/10.1109/61.772353
.. [#Gustavsen_relaxed] B. Gustavsen, "Improving the Pole Relocating Properties of Vector Fitting", IEEE
Transactions on Power Delivery, vol. 21, no. 3, pp. 1587-1592, July 2006,
DOI: https://doi.org/10.1109/TPWRD.2005.860281
.. [#Deschrijver_fast] D. Deschrijver, M. Mrozowski, T. Dhaene, D. De Zutter, "Marcomodeling of Multiport Systems
Using a Fast Implementation of the Vector Fitting Method", IEEE Microwave and Wireless Components Letters,
vol. 18, no. 6, pp. 383-385, June 2008, DOI: https://doi.org/10.1109/LMWC.2008.922585
.. [#vectfit_website] Vector Fitting website: https://www.sintef.no/projectweb/vectorfitting/
"""
    def __init__(self, network: 'Network'):
        """
        Parameters
        ----------
        network : :class:`skrf.network.Network`
            The N-port network whose frequency responses will be fitted.
        """
        self.network = network
        # initial pole set used to seed the most recent fit (set by vector_fit)
        self.initial_poles = None
        self.poles = None
        """ Instance variable holding the list of fitted poles. Will be initialized by :func:`vector_fit`. """
        self.residues = None
        """ Instance variable holding the list of fitted residues. Will be initialized by :func:`vector_fit`. """
        self.proportional_coeff = None
        """ Instance variable holding the list of fitted proportional coefficients. Will be initialized by
        :func:`vector_fit`. """
        self.constant_coeff = None
        """ Instance variable holding the list of fitted constants. Will be initialized by :func:`vector_fit`. """
        self.max_iterations = 100
        """ Instance variable specifying the maximum number of iterations for the fitting process and for the passivity
        enforcement. To be changed by the user before calling :func:`vector_fit` and/or :func:`passivity_enforce`. """
        self.max_tol = 1e-6
        """ Instance variable specifying the convergence criterion in terms of relative tolerance. To be changed by the
        user before calling :func:`vector_fit`. """
        self.wall_clock_time = 0
        """ Instance variable holding the wall-clock time (in seconds) consumed by the most recent fitting process with
        :func:`vector_fit`. Subsequent calls of :func:`vector_fit` will overwrite this value. """
        # per-iteration diagnostic histories, re-initialized by vector_fit()
        self.d_res_history = []
        self.delta_max_history = []
        # history_max_sigma is presumably filled by passivity-related routines
        # outside this excerpt - TODO confirm
        self.history_max_sigma = []
        self.history_cond_A = []
# legacy getter and setter methods to support deprecated 'zeros' attribute (now correctly called 'residues')
@property
def zeros(self):
"""
**Deprecated**; Please use :attr:`residues` instead.
"""
warnings.warn('Attribute `zeros` is deprecated and will be removed in a future version. Please use the new '
'attribute `residues` instead.', DeprecationWarning, stacklevel=2)
return self.residues
@zeros.setter
def zeros(self, value):
warnings.warn('Attribute `zeros` is deprecated and will be removed in a future version. Please use the new '
'attribute `residues` instead.', DeprecationWarning, stacklevel=2)
self.residues = value
def _get_real_pole_mask(self, poles: np.ndarray) -> np.ndarray:
"""
Returns a mask need for addressing the A matrices
"""
A_sub_real_mask = []
for rm in poles.imag == 0:
if rm:
A_sub_real_mask += [True]
else:
A_sub_real_mask += [False, False]
return np.array(A_sub_real_mask)
    def vector_fit(self, n_poles_real: int = 2, n_poles_cmplx: int = 2, init_pole_spacing: str = 'lin',
                   parameter_type: str = 's', fit_constant: bool = True, fit_proportional: bool = False) -> None:
        """
        Main work routine performing the vector fit. The results will be stored in the class variables
        :attr:`poles`, :attr:`residues`, :attr:`proportional_coeff` and :attr:`constant_coeff`.
        Parameters
        ----------
        n_poles_real : int, optional
            Number of initial real poles. See notes.
        n_poles_cmplx : int, optional
            Number of initial complex conjugate poles. See notes.
        init_pole_spacing : str, optional
            Type of initial pole spacing across the frequency interval of the S-matrix. Either linear (lin) or
            logarithmic (log).
        parameter_type : str, optional
            Representation type of the frequency responses to be fitted. Either *scattering* (:attr:`s` or :attr:`S`),
            *impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). As scikit-rf can currently
            only read S parameters from a Touchstone file, the fit should also be performed on the original S
            parameters. Otherwise, scikit-rf will convert the responses from S to Z or Y, which might work for the fit
            but can cause other issues.
        fit_constant : bool, optional
            Include a constant term **d** in the fit.
        fit_proportional : bool, optional
            Include a proportional term **e** in the fit.
        Returns
        -------
        None
            No return value.
        Notes
        -----
        The required number of real or complex conjugate starting poles depends on the behaviour of the frequency
        responses. To fit a smooth response such as a low-pass characteristic, 1-3 real poles and no complex conjugate
        poles is usually sufficient. If resonances or other types of peaks are present in some or all of the responses,
        a similar number of complex conjugate poles is required. Be careful not to use too many poles, as excessive
        poles will not only increase the computation workload during the fitting and the subsequent use of the model,
        but they can also introduce unwanted resonances at frequencies well outside the fit interval.
        """
        timer_start = timer()
        # create initial poles and space them across the frequencies in the provided Touchstone file
        # use normalized frequencies during the iterations (seems to be more stable during least-squares fit)
        norm = np.average(self.network.f)
        freqs_norm = np.array(self.network.f) / norm
        fmin = np.amin(freqs_norm)
        fmax = np.amax(freqs_norm)
        if init_pole_spacing == 'log':
            pole_freqs_real = np.geomspace(fmin, fmax, n_poles_real)
            pole_freqs_cmplx = np.geomspace(fmin, fmax, n_poles_cmplx)
        elif init_pole_spacing == 'lin':
            pole_freqs_real = np.linspace(fmin, fmax, n_poles_real)
            pole_freqs_cmplx = np.linspace(fmin, fmax, n_poles_cmplx)
        else:
            warnings.warn('Invalid choice of initial pole spacing; proceeding with linear spacing.', UserWarning,
                          stacklevel=2)
            pole_freqs_real = np.linspace(fmin, fmax, n_poles_real)
            pole_freqs_cmplx = np.linspace(fmin, fmax, n_poles_cmplx)
        # init poles array of correct length
        poles = np.zeros(n_poles_real + n_poles_cmplx, dtype=complex)
        # add real poles
        for i, f in enumerate(pole_freqs_real):
            omega = 2 * np.pi * f
            poles[i] = - omega
        # add complex-conjugate poles (store only positive imaginary parts)
        i_offset = len(pole_freqs_real)
        for i, f in enumerate(pole_freqs_cmplx):
            omega = 2 * np.pi * f
            poles[i_offset + i] = (-0.01 + 1j) * omega
        # save initial poles (un-normalize first)
        self.initial_poles = poles * norm
        max_singular = 1
        logging.info('### Starting pole relocation process.\n')
        # stack frequency responses as a single vector
        # stacking order (row-major):
        # s11, s12, s13, ..., s21, s22, s23, ...
        freq_responses = []
        for i in range(self.network.nports):
            for j in range(self.network.nports):
                if parameter_type.lower() == 's':
                    freq_responses.append(self.network.s[:, i, j])
                elif parameter_type.lower() == 'z':
                    freq_responses.append(self.network.z[:, i, j])
                elif parameter_type.lower() == 'y':
                    freq_responses.append(self.network.y[:, i, j])
                else:
                    warnings.warn('Invalid choice of matrix parameter type (S, Z, or Y); proceeding with scattering '
                                  'representation.', UserWarning, stacklevel=2)
                    freq_responses.append(self.network.s[:, i, j])
        freq_responses = np.array(freq_responses)
        # ITERATIVE FITTING OF POLES to the provided frequency responses
        # initial set of poles will be replaced with new poles after every iteration
        iterations = self.max_iterations
        self.d_res_history = []
        self.delta_max_history = []
        self.history_cond_A = []
        converged = False
        omega = 2 * np.pi * freqs_norm
        while iterations > 0:
            logging.info('Iteration {}'.format(self.max_iterations - iterations + 1))
            # count number of rows and columns in final coefficient matrix to solve for (c_res, d_res)
            # (ratio #real/#complex poles might change during iterations)
            # each real pole occupies one column, each complex-conjugate pair two;
            # 'unused'/'used' refer to the columns discarded/kept after the QR reduction below
            n_cols_unused = np.sum((poles.imag != 0) + 1)
            n_cols_used = n_cols_unused
            n_cols_used += 1
            if fit_constant:
                n_cols_unused += 1
            if fit_proportional:
                n_cols_unused += 1
            n_rows_A = n_cols_used * len(freq_responses)
            # generate coefficients of approximation function for each target frequency response
            # responses will be reduced independently using QR decomposition
            # simplified coeff. matrices of all responses will be stacked in matrix A for least-squares solver
            A = np.empty((n_rows_A, n_cols_used))
            b = np.zeros(n_rows_A)
            for i_response, freq_response in enumerate(freq_responses):
                # calculate coefficients for each frequency response
                # A_sub which will be reduced first (QR decomposition) and then filled into the main
                # coefficient matrix A
                A_sub = np.empty((len(freqs_norm), n_cols_unused + n_cols_used), dtype=complex)
                A_row_extra = np.empty(n_cols_used)
                # responses will be weighted according to their norm;
                # alternative: equal weights with weight_response = 1.0
                # or anti-proportional weights with weight_response = 1 / np.linalg.norm(freq_response)
                weight_response = np.linalg.norm(freq_response)
                # Split up real and complex poles and store the correspondend column from A_sub
                real_mask = poles.imag == 0
                poles_real = poles[np.nonzero(real_mask)]
                poles_cplx = poles[np.nonzero(~real_mask)]
                A_sub_real_idx = np.nonzero(self._get_real_pole_mask(poles))[0]
                A_sub_cplx_idx = np.nonzero(~self._get_real_pole_mask(poles))[0][::2]
                # add coefficients for a pair of complex conjugate poles
                # part 1: first sum of rational functions (residue variable c)
                # merged with
                # part 3: second sum of rational functions (variable c_res)
                coeff = 1 / (1j * omega[:, None] - poles_real)
                # part 1: coeff = 1 / (s_k - p') = coeff_re + j coeff_im
                A_sub[:, A_sub_real_idx] = coeff
                # part 3: coeff = -1 * H(s_k) / (s_k - pole)
                # Re{coeff} = -1 * coeff_re * resp_re + coeff_im * resp_im
                # Im{coeff} = -1 * coeff_re * resp_im - coeff_im * resp_re
                A_sub[:, A_sub_real_idx + n_cols_unused] = - coeff * freq_response[:, None]
                # extra equation to avoid trivial solution:
                # coeff += Re(1 / (s_k - pole)) = coeff_re
                A_row_extra[A_sub_real_idx] = np.sum(coeff.real, axis=0)
                # coefficient for a complex pole of a conjugated pair: p = p' + jp''
                # row 1: add coefficient for real part of residue
                # part 1: coeff = 1 / (s_k - pole) + 1 / (s_k - conj(pole))
                coeff = 1 / (1j * omega[:, None] - poles_cplx) + 1 / (1j * omega[:, None] - np.conj(poles_cplx))
                A_sub[:, A_sub_cplx_idx] = coeff
                # extra equation to avoid trivial solution:
                # coeff += Re{1 / (s_k - pole) + 1 / (s_k - conj(pole))}
                A_row_extra[A_sub_cplx_idx] = np.sum(coeff.real, axis=0)
                # part 3: coeff = -1 * H(s_k) * [1 / (s_k - pole) + 1 / (s_k - conj(pole))]
                A_sub[:, A_sub_cplx_idx + n_cols_unused] = - coeff * freq_response[:, None]
                # part 1: coeff = 1j / (s_k - pole) - 1j / (s_k - conj(pole))
                coeff = 1j / (1j * omega[:, None] - poles_cplx) - 1j / (1j * omega[:, None] - np.conj(poles_cplx))
                A_sub[:, A_sub_cplx_idx + 1] = coeff
                # extra equation to avoid trivial solution:
                # coeff += Re(1j / (s_k - pole) - 1j / (s_k - conj(pole)))
                A_row_extra[A_sub_cplx_idx + 1] = np.sum(coeff.real, axis=0)
                # part 3: coeff = -1 * H(s_k) * [1j / (s_k - pole) - 1j / (s_k - conj(pole))]
                A_sub[:, A_sub_cplx_idx + 1 + n_cols_unused] = -coeff * freq_response[:, None]
                # part 4: constant (variable d_res)
                # coeff = -1 * H(s_k)
                A_sub[:,-1] = - freq_response
                # part 2: constant (variable d) and proportional term (variable e)
                # these are filled from the back of the 'unused' column range
                offset = n_cols_unused - 1
                if fit_constant:
                    # coeff = 1 + j0
                    A_sub[:, offset] = 1
                    offset -=1
                if fit_proportional:
                    # coeff = s_k = j omega_k
                    A_sub[:, offset] = 1j * omega
                A_row_extra[-1] = len(freqs_norm)
                # QR decomposition
                # interleave real and imaginary parts row-wise to obtain a purely real LS system
                A_ri = np.empty((2 * A_sub.shape[0], A_sub.shape[1]))
                A_ri[::2] = A_sub.real
                A_ri[1::2] = A_sub.imag
                R = np.linalg.qr(A_ri, 'r')
                # only R22 is required to solve for c_res and d_res
                R22 = R[n_cols_unused:, n_cols_unused:]
                # similarly, only right half of Q is required (not used here, because RHS is zero)
                # Q2 = Q[:, n_cols_unused:]
                # apply weight of this response and add coefficients to the system matrix
                A[i_response * n_cols_used:(i_response + 1) * n_cols_used, :] = np.sqrt(weight_response) * R22
                # multiplication of Q2 by rhs=0 omitted; right-hand side would also require weighting
                # b[i_response * n_cols_used:(i_response + 1) * n_cols_used] = np.matmul(np.transpose(Q2), rhs)
                # add extra equation to avoid trivial solution
                weight_extra = np.linalg.norm(weight_response * freq_response) / len(freq_response)
                A[(i_response + 1) * n_cols_used - 1, :] = np.sqrt(weight_extra) * A_row_extra
                b[(i_response + 1) * n_cols_used - 1] = np.sqrt(weight_extra) * len(freq_response)
            cond_A = np.linalg.cond(A)
            logging.info('Condition number of coeff. matrix A = {}'.format(cond_A))
            self.history_cond_A.append(cond_A)
            # solve least squares for real parts
            x, residuals, rank, singular_vals = np.linalg.lstsq(A, b, rcond=None)
            # assemble individual result vectors from single LS result x
            c_res = x[:-1]
            d_res = x[-1]
            # check if d_res is suited for zeros calculation
            tol_res = 1e-8
            if np.abs(d_res) < tol_res:
                # d_res is too small, discard solution and proceed the |d_res| = tol_res
                d_res = tol_res * (d_res / np.abs(d_res))
                warnings.warn('Replacing d_res solution as it was too small. This is not a good sign and probably '
                              'means that more starting poles are required', RuntimeWarning, stacklevel=2)
            self.d_res_history.append(d_res)
            logging.info('d_res = {}'.format(d_res))
            # build test matrix H, which will hold the new poles as eigenvalues
            H = np.zeros((len(c_res), len(c_res)))
            i = 0
            for i_pole in range(len(poles)):
                # fill diagonal with previous poles
                pole_re = poles.real[i_pole]
                pole_im = poles.imag[i_pole]
                if pole_im == 0.0:
                    # one row for a real pole
                    H[i, i] = pole_re
                    H[i] -= c_res / d_res
                    i += 1
                else:
                    # two rows for a complex pole of a conjugated pair
                    H[i, i] = pole_re
                    H[i, i + 1] = pole_im
                    H[i + 1, i] = -1 * pole_im
                    H[i + 1, i + 1] = pole_re
                    H[i] -= 2 * c_res / d_res
                    i += 2
            poles_new = np.linalg.eigvals(H)
            # replace poles for next iteration
            poles_lst = []
            for k, pole in enumerate(poles_new):
                if pole.imag >= 0.0:
                    # complex poles need to come in complex conjugate pairs; append only the positive part
                    poles_lst.append(pole)
            poles = np.array(poles_lst)
            # flip real part of unstable poles (real part needs to be negative for stability)
            poles.real = - np.abs(poles.real)
            # calculate relative changes in the singular values; stop iteration loop once poles have converged
            new_max_singular = np.amax(singular_vals)
            delta_max = np.abs(1 - new_max_singular / max_singular)
            self.delta_max_history.append(delta_max)
            logging.info('Max. relative change in residues = {}\n'.format(delta_max))
            max_singular = new_max_singular
            stop = False
            if delta_max < self.max_tol:
                if converged:
                    # is really converged, finish
                    logging.info('Pole relocation process converged after {} iterations.'.format(
                        self.max_iterations - iterations + 1))
                    stop = True
                else:
                    # might be converged, but do one last run to be sure
                    converged = True
            else:
                if converged:
                    # is not really converged, continue
                    converged = False
            iterations -= 1
            if iterations == 0:
                # max. iterations reached without a confirmed stop - warn the user
                max_cond = np.amax(self.history_cond_A)
                if max_cond > 1e10:
                    msg_illcond = 'Hint: the linear system was ill-conditioned (max. condition number = {}). ' \
                                  'This often means that more poles are required.'.format(max_cond)
                else:
                    msg_illcond = ''
                if converged and stop is False:
                    warnings.warn('Vector Fitting: The pole relocation process barely converged to tolerance. '
                                  'It took the max. number of iterations (N_max = {}). '
                                  'The results might not have converged properly. '.format(self.max_iterations)
                                  + msg_illcond, RuntimeWarning, stacklevel=2)
                else:
                    warnings.warn('Vector Fitting: The pole relocation process stopped after reaching the '
                                  'maximum number of iterations (N_max = {}). '
                                  'The results did not converge properly. '.format(self.max_iterations)
                                  + msg_illcond, RuntimeWarning, stacklevel=2)
            if stop:
                iterations = 0
        # ITERATIONS DONE
        logging.info('Initial poles before relocation:')
        logging.info(self.initial_poles)
        logging.info('Final poles:')
        logging.info(poles * norm)
        logging.info('\n### Starting residues calculation process.\n')
        # finally, solve for the residues with the previously calculated poles
        residues = []
        constant_coeff = []
        proportional_coeff = []
        for freq_response in freq_responses:
            # calculate coefficients (row A_k in matrix) for each frequency sample s_k of the target response
            # row will be appended to submatrix A_sub of complete coeff matrix A_matrix
            # 2 rows per pole in result vector (1st for real part, 2nd for imaginary part)
            # --> 2 columns per pole in coeff matrix
            n_cols = np.sum((poles.imag != 0) + 1)
            if fit_constant:
                n_cols += 1
            if fit_proportional:
                n_cols += 1
            A_matrix = np.empty((len(freqs_norm), n_cols), dtype=complex)
            # Split up real and complex poles and store the correspondend column from A_sub
            real_mask = poles.imag == 0
            poles_real = poles[np.nonzero(real_mask)]
            poles_cplx = poles[np.nonzero(~real_mask)]
            A_sub_real_idx = np.nonzero(self._get_real_pole_mask(poles))[0]
            A_sub_cplx_idx = np.nonzero(~self._get_real_pole_mask(poles))[0][::2]
            # add coefficients for a pair of complex conjugate poles
            # part 1: first sum of rational functions (residue variable c)
            A_matrix[:, A_sub_real_idx] = 1 / (1j * omega[:, None] - poles_real)
            # coefficient for real part of residue
            A_matrix[:, A_sub_cplx_idx] = (1 / (1j * omega[:, None] - poles_cplx) +
                                           1 / (1j * omega[:, None] - np.conj(poles_cplx)))
            # coefficient for imaginary part of residue
            A_matrix[:, A_sub_cplx_idx + 1] = (1j / (1j * omega[:, None] - poles_cplx)
                                               - 1j / (1j * omega[:, None] - np.conj(poles_cplx)))
            offset = np.sum((poles.imag != 0) + 1)
            if fit_constant:
                A_matrix[:, offset] = 1
                offset += 1
            if fit_proportional:
                A_matrix[:, offset] = 1j * omega
            logging.info('A_matrix: condition number = {}'.format(np.linalg.cond(A_matrix)))
            # solve least squares and obtain results as stack of real part vector and imaginary part vector
            x, residuals, rank, singular_vals = np.linalg.lstsq(np.vstack((A_matrix.real, A_matrix.imag)), np.hstack((freq_response.real, freq_response.imag)), rcond=None)
            # reassemble complex residues from the interleaved real solution vector
            i = 0
            residues_response = []
            for pole_im in poles.imag:
                if pole_im == 0.0:
                    residues_response.append(x[i] + 0j)
                    i += 1
                else:
                    residues_response.append(x[i] + 1j * x[i + 1])
                    i += 2
            residues.append(residues_response)
            if fit_constant and fit_proportional:
                # both constant d and proportional e were fitted
                constant_coeff.append(x[-2])
                proportional_coeff.append(x[-1])
            elif fit_constant:
                # only constant d was fitted
                constant_coeff.append(x[-1])
                proportional_coeff.append(0.0)
            elif fit_proportional:
                # only proportional e was fitted
                constant_coeff.append(0.0)
                proportional_coeff.append(x[-1])
            else:
                # neither constant d nor proportional e was fitted
                constant_coeff.append(0.0)
                proportional_coeff.append(0.0)
        # save poles, residues, d, e in actual frequencies (un-normalized)
        self.poles = poles * norm
        self.residues = np.array(residues) * norm
        self.constant_coeff = np.array(constant_coeff)
        self.proportional_coeff = np.array(proportional_coeff) / norm
        timer_stop = timer()
        self.wall_clock_time = timer_stop - timer_start
        logging.info('\n### Vector fitting finished in {} seconds.\n'.format(self.wall_clock_time))
def get_rms_error(self, i=-1, j=-1, parameter_type: str = 's'):
r"""
Returns the root-mean-square (rms) error magnitude of the fit, i.e.
:math:`\sqrt{ \mathrm{mean}(|S - S_\mathrm{fit} |^2) }`,
either for an individual response :math:`S_{i+1,j+1}` or for larger slices of the network.
Parameters
----------
i : int, optional
Row indices of the responses to be evaluated. Either a single row selected by an integer
:math:`i \in [0, N_\mathrm{ports}-1]`, or multiple rows selected by a list of integers, or all rows
selected by :math:`i = -1` (*default*).
j : int, optional
Column indices of the responses to be evaluated. Either a single column selected by an integer
:math:`j \in [0, N_\mathrm{ports}-1]`, or multiple columns selected by a list of integers, or all columns
selected by :math:`j = -1` (*default*).
parameter_type: str, optional
Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
*impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`).
Returns
-------
rms_error : ndarray
The rms error magnitude between the vector fitted model and the original network data.
Raises
------
ValueError
If the specified parameter representation type is not :attr:`s`, :attr:`z`, nor :attr:`y`.
"""
if i == -1:
list_i = range(self.network.nports)
elif isinstance(i, int):
list_i = [i]
else:
list_i = i
if j == -1:
list_j = range(self.network.nports)
elif isinstance(j, int):
list_j = [j]
else:
list_j = j
if parameter_type.lower() == 's':
nw_responses = self.network.s
elif parameter_type.lower() == 'z':
nw_responses = self.network.z
elif parameter_type.lower() == 'y':
nw_responses = self.network.y
else:
raise ValueError('Invalid parameter type `{}`. Valid options: `s`, `z`, or `y`'.format(parameter_type))
error_mean_squared = 0
for i in list_i:
for j in list_j:
nw_ij = nw_responses[:, i, j]
fit_ij = self.get_model_response(i, j, self.network.f)
error_mean_squared += np.mean(np.square(np.abs(nw_ij - fit_ij)))
return np.sqrt(error_mean_squared)
    def _get_ABCDE(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Private method.
        Returns the real-valued system matrices of the state-space representation of the current rational model, as
        defined in [#]_.

        Returns
        -------
        A : ndarray
            State-space matrix A holding the poles on the diagonal as real values with imaginary parts on the sub-
            diagonal
        B : ndarray
            State-space matrix B holding coefficients (1, 2, or 0), depending on the respective type of pole in A
        C : ndarray
            State-space matrix C holding the residues
        D : ndarray
            State-space matrix D holding the constants
        E : ndarray
            State-space matrix E holding the proportional coefficients (usually 0 in case of fitted S-parameters)

        Raises
        ------
        ValueError
            If the model parameters have not been initialized (by running :func:`vector_fit()` or :func:`read_npz()`).

        References
        ----------
        .. [#] B. Gustavsen and A. Semlyen, "Fast Passivity Assessment for S-Parameter Rational Models Via a Half-Size
            Test Matrix," in IEEE Transactions on Microwave Theory and Techniques, vol. 56, no. 12, pp. 2701-2708,
            Dec. 2008, DOI: 10.1109/TMTT.2008.2007319.
        """
        # initial checks: all four parameter sets must exist before a state-space model can be built
        if self.poles is None:
            raise ValueError('self.poles = None; nothing to do. You need to run vector_fit() first.')
        if self.residues is None:
            raise ValueError('self.residues = None; nothing to do. You need to run vector_fit() first.')
        if self.proportional_coeff is None:
            raise ValueError('self.proportional_coeff = None; nothing to do. You need to run vector_fit() first.')
        if self.constant_coeff is None:
            raise ValueError('self.constant_coeff = None; nothing to do. You need to run vector_fit() first.')
        # assemble real-valued state-space matrices A, B, C, D, E from fitted complex-valued pole-residue model
        # determine size of the matrix system
        # there is one constant coefficient per response, and responses form an n_ports x n_ports matrix
        n_ports = int(np.sqrt(len(self.constant_coeff)))
        n_poles_real = 0
        n_poles_cplx = 0
        for pole in self.poles:
            if np.imag(pole) == 0.0:
                n_poles_real += 1
            else:
                n_poles_cplx += 1
        # each real pole occupies one state, each complex-conjugate pair occupies two (real + imag part)
        n_matrix = (n_poles_real + 2 * n_poles_cplx) * n_ports
        # state-space matrix A holds the poles on the diagonal as real values with imaginary parts on the sub-diagonal
        # state-space matrix B holds coefficients (1, 2, or 0), depending on the respective type of pole in A
        # assemble A = [[poles_real, 0, 0],
        #               [0, real(poles_cplx), imag(poles_cplx],
        #               [0, -imag(poles_cplx), real(poles_cplx]]
        A = np.identity(n_matrix)
        B = np.zeros(shape=(n_matrix, n_ports))
        i_A = 0  # running index on the diagonal of A
        for j in range(n_ports):
            # the full pole set is repeated once per port (block-diagonal structure of A)
            for pole in self.poles:
                if np.imag(pole) == 0.0:
                    # adding a real pole: single 1x1 block, B entry is 1
                    A[i_A, i_A] = np.real(pole)
                    B[i_A, j] = 1
                    i_A += 1
                else:
                    # adding a complex-conjugate pole: 2x2 rotation-like block, B entry is 2
                    # (the factor 2 accounts for the conjugate partner that is not stored explicitly)
                    A[i_A, i_A] = np.real(pole)
                    A[i_A, i_A + 1] = np.imag(pole)
                    A[i_A + 1, i_A] = -1 * np.imag(pole)
                    A[i_A + 1, i_A + 1] = np.real(pole)
                    B[i_A, j] = 2
                    i_A += 2
        # state-space matrix C holds the residues
        # assemble C = [[R1.11, R1.12, R1.13, ...], [R2.11, R2.12, R2.13, ...], ...]
        # column layout must match the state ordering produced for A above:
        # real residues take one column, complex residues take two (real part, then imag part)
        C = np.zeros(shape=(n_ports, n_matrix))
        for i in range(n_ports):
            for j in range(n_ports):
                # i: row index
                # j: column index
                i_response = i * n_ports + j
                j_residues = 0  # running column offset inside the j-th port block
                for zero in self.residues[i_response]:
                    if np.imag(zero) == 0.0:
                        C[i, j * (n_poles_real + 2 * n_poles_cplx) + j_residues] = np.real(zero)
                        j_residues += 1
                    else:
                        C[i, j * (n_poles_real + 2 * n_poles_cplx) + j_residues] = np.real(zero)
                        C[i, j * (n_poles_real + 2 * n_poles_cplx) + j_residues + 1] = np.imag(zero)
                        j_residues += 2
        # state-space matrix D holds the constants
        # assemble D = [[d11, d12, ...], [d21, d22, ...], ...]
        D = np.zeros(shape=(n_ports, n_ports))
        for i in range(n_ports):
            for j in range(n_ports):
                # i: row index
                # j: column index
                i_response = i * n_ports + j
                D[i, j] = self.constant_coeff[i_response]
        # state-space matrix E holds the proportional coefficients (usually 0 in case of fitted S-parameters)
        # assemble E = [[e11, e12, ...], [e21, e22, ...], ...]
        E = np.zeros(shape=(n_ports, n_ports))
        for i in range(n_ports):
            for j in range(n_ports):
                # i: row index
                # j: column index
                i_response = i * n_ports + j
                E[i, j] = self.proportional_coeff[i_response]
        return A, B, C, D, E
@staticmethod
def _get_s_from_ABCDE(freq: float,
A: np.ndarray, B: np.ndarray, C: np.ndarray, D: np.ndarray, E: np.ndarray) -> np.ndarray:
"""
Private method.
Returns the S-matrix of the vector fitted model calculated from the real-valued system matrices of the state-
space representation, as provided by `_get_ABCDE()`.
Parameters
----------
freq : float
Frequency (in Hz) at which to calculate the S-matrix.
A : ndarray
B : ndarray
C : ndarray
D : ndarray
E : ndarray
Returns
-------
ndarray
Complex-valued S-matrix (NxN) calculated at frequency `freq`.
"""
dim_A = np.shape(A)[0]
stsp_poles = np.linalg.inv(2j * np.pi * freq * np.identity(dim_A) - A)
stsp_S = np.matmul(np.matmul(C, stsp_poles), B)
stsp_S += D + 2j * np.pi * freq * E
return stsp_S
def passivity_test(self, parameter_type: str = 's') -> np.ndarray:
"""
Evaluates the passivity of reciprocal vector fitted models by means of a half-size test matrix [#]_. Any
existing frequency bands of passivity violations will be returned as a sorted list.
Parameters
----------
parameter_type: str, optional
Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
*impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). Currently, only scattering
parameters are supported for passivity evaluation.
Raises
------
NotImplementedError
If the function is called for `parameter_type` different than `S` (scattering).
ValueError
If the function is used with a model containing nonzero proportional coefficients.
Returns
-------
violation_bands : ndarray
NumPy array with frequency bands of passivity violation:
`[[f_start_1, f_stop_1], [f_start_2, f_stop_2], ...]`.
See Also
--------
is_passive : Query the model passivity as a boolean value.
passivity_enforce : Enforces the passivity of the vector fitted model, if required.
References
----------
.. [#] B. Gustavsen and A. Semlyen, "Fast Passivity Assessment for S-Parameter Rational Models Via a Half-Size
Test Matrix," in IEEE Transactions on Microwave Theory and Techniques, vol. 56, no. 12, pp. 2701-2708,
Dec. 2008, DOI: 10.1109/TMTT.2008.2007319.
"""
if parameter_type.lower() != 's':
raise NotImplementedError('Passivity testing is currently only supported for scattering (S) parameters.')
if parameter_type.lower() == 's' and len(np.flatnonzero(self.proportional_coeff)) > 0:
raise ValueError('Passivity testing of scattering parameters with nonzero proportional coefficients does '
'not make any sense; you need to run vector_fit() with option `fit_proportional=False` '
'first.')
# # the network needs to be reciprocal for this passivity test method to work: S = transpose(S)
# if not np.allclose(self.residues, np.transpose(self.residues)) or \
# not np.allclose(self.constant_coeff, np.transpose(self.constant_coeff)) or \
# not np.allclose(self.proportional_coeff, np.transpose(self.proportional_coeff)):
# logging.error('Passivity testing with unsymmetrical model parameters is not supported. '
# 'The model needs to be reciprocal.')
# return
# get state-space matrices
A, B, C, D, E = self._get_ABCDE()
n_ports = np.shape(D)[0]
# build half-size test matrix P from state-space matrices A, B, C, D
inv_neg = np.linalg.inv(D - np.identity(n_ports))
inv_pos = np.linalg.inv(D + np.identity(n_ports))
prod_neg = np.matmul(np.matmul(B, inv_neg), C)
prod_pos = np.matmul(np.matmul(B, inv_pos), C)
P = np.matmul(A - prod_neg, A - prod_pos)
# extract eigenvalues of P
P_eigs = np.linalg.eigvals(P)
# purely imaginary square roots of eigenvalues identify frequencies (2*pi*f) of borders of passivity violations
freqs_violation = []
for sqrt_eigenval in np.sqrt(P_eigs):
if np.real(sqrt_eigenval) == 0.0:
freqs_violation.append(np.imag(sqrt_eigenval) / 2 / np.pi)
# sort the output from lower to higher frequencies
freqs_violation = np.sort(freqs_violation)
# identify frequency bands of passivity violations
# sweep the bands between crossover frequencies and identify bands of passivity violations
violation_bands = []
for i, freq in enumerate(freqs_violation):
if i == 0:
f_start = 0
f_stop = freq
else:
f_start = freqs_violation[i - 1]
f_stop = freq
# calculate singular values at the center frequency between crossover frequencies to identify violations
f_center = 0.5 * (f_start + f_stop)
s_center = self._get_s_from_ABCDE(f_center, A, B, C, D, E)
u, sigma, vh = np.linalg.svd(s_center)
passive = True
for singval in sigma:
if singval > 1:
# passivity violation in this band
passive = False
if not passive:
# add this band to the list of passivity violations
if violation_bands is None:
violation_bands = [[f_start, f_stop]]
else:
violation_bands.append([f_start, f_stop])
return np.array(violation_bands)
def is_passive(self, parameter_type: str = 's') -> bool:
"""
Returns the passivity status of the model as a boolean value.
Parameters
----------
parameter_type : str, optional
Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
*impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). Currently, only scattering
parameters are supported for passivity evaluation.
Returns
-------
passivity : bool
:attr:`True` if model is passive, else :attr:`False`.
See Also
--------
passivity_test : Verbose passivity evaluation routine.
passivity_enforce : Enforces the passivity of the vector fitted model, if required.
"""
viol_bands = self.passivity_test(parameter_type)
if len(viol_bands) == 0:
return True
else:
return False
    def passivity_enforce(self, n_samples: int = 100, parameter_type: str = 's') -> None:
        """
        Enforces the passivity of the vector fitted model, if required. This is an implementation of the method
        presented in [#]_.

        Parameters
        ----------
        n_samples : int, optional
            Number of linearly spaced frequency samples at which passivity will be evaluated and enforced.
            (Default: 100)
        parameter_type : str, optional
            Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
            *impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). Currently, only scattering
            parameters are supported for passivity evaluation.

        Returns
        -------
        None

        Raises
        ------
        NotImplementedError
            If the function is called for `parameter_type` different than `S` (scattering).
        ValueError
            If the function is used with a model containing nonzero proportional coefficients.

        See Also
        --------
        is_passive : Returns the passivity status of the model as a boolean value.
        passivity_test : Verbose passivity evaluation routine.
        plot_passivation : Convergence plot for passivity enforcement iterations.

        References
        ----------
        .. [#] T. Dhaene, D. Deschrijver and N. Stevens, "Efficient Algorithm for Passivity Enforcement of S-Parameter-
            Based Macromodels," in IEEE Transactions on Microwave Theory and Techniques, vol. 57, no. 2, pp. 415-420,
            Feb. 2009, DOI: 10.1109/TMTT.2008.2011201.
        """
        if parameter_type.lower() != 's':
            raise NotImplementedError('Passivity testing is currently only supported for scattering (S) parameters.')
        if parameter_type.lower() == 's' and len(np.flatnonzero(self.proportional_coeff)) > 0:
            raise ValueError('Passivity testing of scattering parameters with nonzero proportional coefficients does '
                             'not make any sense; you need to run vector_fit() with option `fit_proportional=False` '
                             'first.')
        # always run passivity test first; this will write 'self.violation_bands'
        violation_bands = self.passivity_test()
        if len(violation_bands) == 0:
            # model is already passive; do nothing and return
            logging.info('Passivity enforcement: The model is already passive. Nothing to do.')
            return
        # sample from dc up to 20% above the highest violation band edge
        freqs_eval = np.linspace(0, 1.2 * violation_bands[-1, -1], n_samples)
        A, B, C, D, E = self._get_ABCDE()
        dim_A = np.shape(A)[0]
        # C_t is the residue matrix being perturbed iteratively; A, B, D, E stay fixed
        C_t = C
        delta = 0.999  # predefined tolerance parameter (users should not need to change this)
        # iterative compensation of passivity violations
        t = 0
        self.history_max_sigma = []
        while t < self.max_iterations:
            logging.info('Passivity enforcement; Iteration {}'.format(t + 1))
            A_matrix = []
            b_vector = []
            sigma_max = 0
            # sweep through evaluation frequencies
            for i_eval, freq_eval in enumerate(freqs_eval):
                # calculate S-matrix at this frequency
                s_eval = self._get_s_from_ABCDE(freq_eval, A, B, C_t, D, E)
                # singular value decomposition
                u, sigma, vh = np.linalg.svd(s_eval)
                # keep track of the greatest singular value in every iteration step
                if np.amax(sigma) > sigma_max:
                    sigma_max = np.amax(sigma)
                # prepare and fill the square matrices 'gamma' and 'psi' marking passivity violations:
                # singular values above the tolerance delta are flagged (gamma=1) and clipped to delta (psi)
                gamma = np.diag(sigma)
                psi = np.diag(sigma)
                for i, sigma_i in enumerate(sigma):
                    if sigma_i <= delta:
                        gamma[i, i] = 0
                        psi[i, i] = 0
                    else:
                        gamma[i, i] = 1
                        psi[i, i] = delta
                # calculate violation S-matrix
                # s_viol is again a complex NxN S-matrix (N: number of network ports)
                sigma_viol = np.matmul(np.diag(sigma), gamma) - psi
                s_viol = np.matmul(np.matmul(u, sigma_viol), vh)
                # Laplace frequency of this sample in the sweep
                s_k = 2j * np.pi * freq_eval
                # build matrix system for least-squares fitting of new set of violation residues C_viol
                # using rule for transpose of matrix products: transpose(A * B) = transpose(B) * transpose(A)
                # hence, S = C * coeffs <===> transpose(S) = transpose(coeffs) * transpose(C)
                coeffs = np.transpose(np.matmul(np.linalg.inv(s_k * np.identity(dim_A) - A), B))
                # real and imaginary parts are stacked so the least-squares problem stays real-valued
                if i_eval == 0:
                    A_matrix = np.vstack([np.real(coeffs), np.imag(coeffs)])
                    b_vector = np.vstack([np.real(np.transpose(s_viol)), np.imag(np.transpose(s_viol))])
                else:
                    A_matrix = np.concatenate((A_matrix, np.vstack([np.real(coeffs),
                                                                    np.imag(coeffs)])), axis=0)
                    b_vector = np.concatenate((b_vector, np.vstack([np.real(np.transpose(s_viol)),
                                                                    np.imag(np.transpose(s_viol))])), axis=0)
            # solve least squares
            x, residuals, rank, singular_vals = np.linalg.lstsq(A_matrix, b_vector, rcond=None)
            C_viol = np.transpose(x)
            # calculate and update C_t for next iteration: subtract the fitted violation residues
            C_t = C_t - C_viol
            t += 1
            self.history_max_sigma.append(sigma_max)
            # stop iterations when model is passive (all singular values at or below 1)
            if sigma_max < 1.0:
                break
        # PASSIVATION PROCESS DONE; model is either passive or max. number of iterations have been exceeded
        if t == self.max_iterations:
            warnings.warn('Passivity enforcement: Aborting after the max. number of iterations has been exceeded.',
                          RuntimeWarning, stacklevel=2)
        # save/update model parameters (perturbed residues)
        self.history_max_sigma = np.array(self.history_max_sigma)
        n_ports = np.shape(D)[0]
        # write the perturbed residues in C_t back into self.residues, reversing the
        # column layout used by _get_ABCDE (one column per real pole, two per complex pole)
        for i in range(n_ports):
            k = 0  # column index in C_t (runs across all j-blocks of row i)
            for j in range(n_ports):
                i_response = i * n_ports + j
                z = 0  # column index in self.residues (one entry per pole)
                for pole in self.poles:
                    if np.imag(pole) == 0.0:
                        # real pole --> real residue
                        self.residues[i_response, z] = C_t[i, k]
                        k += 1
                    else:
                        # complex-conjugate pole --> complex-conjugate residue
                        self.residues[i_response, z] = C_t[i, k] + 1j * C_t[i, k + 1]
                        k += 2
                    z += 1
def write_npz(self, path: str) -> None:
"""
Writes the model parameters in :attr:`poles`, :attr:`residues`,
:attr:`proportional_coeff` and :attr:`constant_coeff` to a labeled NumPy .npz file.
Parameters
----------
path : str
Target path without filename for the export. The filename will be added automatically based on the network
name in :attr:`network`
Returns
-------
None
See Also
--------
read_npz : Reads all model parameters from a .npz file
"""
if self.poles is None:
warnings.warn('Nothing to export; Poles have not been fitted.', RuntimeWarning, stacklevel=2)
return
if self.residues is None:
warnings.warn('Nothing to export; Residues have not been fitted.', RuntimeWarning, stacklevel=2)
return
if self.proportional_coeff is None:
warnings.warn('Nothing to export; Proportional coefficients have not been fitted.', RuntimeWarning,
stacklevel=2)
return
if self.constant_coeff is None:
warnings.warn('Nothing to export; Constants have not been fitted.', RuntimeWarning, stacklevel=2)
return
filename = self.network.name
logging.info('Exporting results as compressed NumPy array to {}'.format(path))
np.savez_compressed(os.path.join(path, 'coefficients_{}'.format(filename)),
poles=self.poles, residues=self.residues, proportionals=self.proportional_coeff,
constants=self.constant_coeff)
def read_npz(self, file: str) -> None:
"""
Reads all model parameters :attr:`poles`, :attr:`residues`, :attr:`proportional_coeff` and
:attr:`constant_coeff` from a labeled NumPy .npz file.
Parameters
----------
file : str
NumPy .npz file containing the parameters. See notes.
Returns
-------
None
Raises
------
ValueError
If the length of the parameters from the file does not match the size of the Network in :attr:`network`.
Notes
-----
The .npz file needs to include the model parameters as individual NumPy arrays (ndarray) labeled '*poles*',
'*residues*', '*proportionals*' and '*constants*'. The shapes of those arrays need to match the network
properties in :class:`network` (correct number of ports). Preferably, the .npz file was created by
:func:`write_npz`.
See Also
--------
write_npz : Writes all model parameters to a .npz file
"""
with np.load(file) as data:
poles = data['poles']
# legacy support for exported residues
if 'zeros' in data:
# old .npz file from deprecated write_npz() with residues called 'zeros'
residues = data['zeros']
else:
# new .npz file from current write_npz()
residues = data['residues']
proportional_coeff = data['proportionals']
constant_coeff = data['constants']
n_ports = int(np.sqrt(len(constant_coeff)))
n_resp = n_ports ** 2
if np.shape(residues)[0] == np.shape(proportional_coeff)[0] == np.shape(constant_coeff)[0] == n_resp:
self.poles = poles
self.residues = residues
self.proportional_coeff = proportional_coeff
self.constant_coeff = constant_coeff
else:
raise ValueError('Length of the provided parameters does not match the network size. Please initialize '
'VectorFitting with a suited Network first.')
def get_model_response(self, i: int, j: int, freqs: Any = None) -> np.ndarray:
"""
Returns one of the frequency responses :math:`H_{i+1,j+1}` of the fitted model :math:`H`.
Parameters
----------
i : int
Row index of the response in the response matrix.
j : int
Column index of the response in the response matrix.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
Returns
-------
response : ndarray
Model response :math:`H_{i+1,j+1}` at the frequencies specified in `freqs` (complex-valued Numpy array).
Examples
--------
Get fitted S11 at 101 frequencies from 0 Hz to 10 GHz:
>>> import skrf
>>> vf = skrf.VectorFitting(skrf.data.ring_slot)
>>> vf.vector_fit(3, 0)
>>> s11_fit = vf.get_model_response(0, 0, numpy.linspace(0, 10e9, 101))
"""
if self.poles is None:
warnings.warn('Returning a zero-vector; Poles have not been fitted.', RuntimeWarning, stacklevel=2)
return np.zeros_like(freqs)
if self.residues is None:
warnings.warn('Returning a zero-vector; Residues have not been fitted.', RuntimeWarning, stacklevel=2)
return np.zeros_like(freqs)
if self.proportional_coeff is None:
warnings.warn('Returning a zero-vector; Proportional coefficients have not been fitted.', RuntimeWarning,
stacklevel=2)
return np.zeros_like(freqs)
if self.constant_coeff is None:
warnings.warn('Returning a zero-vector; Constants have not been fitted.', RuntimeWarning, stacklevel=2)
return np.zeros_like(freqs)
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
s = 2j * np.pi * np.array(freqs)
n_ports = int(np.sqrt(len(self.constant_coeff)))
i_response = i * n_ports + j
residues = self.residues[i_response]
resp = self.proportional_coeff[i_response] * s + self.constant_coeff[i_response]
for i, pole in enumerate(self.poles):
if np.imag(pole) == 0.0:
# real pole
resp += residues[i] / (s - pole)
else:
# complex conjugate pole
resp += residues[i] / (s - pole) + np.conjugate(residues[i]) / (s - np.conjugate(pole))
return resp
@check_plotting
def plot_s_db(self, i: int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the magnitude in dB of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, 20 * np.log10(np.abs(self.network.s[:, i, j])), color='r', label='Samples')
ax.plot(freqs, 20 * np.log10(np.abs(self.get_model_response(i, j, freqs))), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Magnitude (dB)')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_mag(self, i: int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the magnitude in linear scale of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.abs(self.network.s[:, i, j]), color='r', label='Samples')
ax.plot(freqs, np.abs(self.get_model_response(i, j, freqs)), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Magnitude')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_deg(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the phase in degrees of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.rad2deg(np.angle(self.network.s[:, i, j])), color='r', label='Samples')
ax.plot(freqs, np.rad2deg(np.angle(self.get_model_response(i, j, freqs))), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Phase (Degrees)')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_deg_unwrap(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the unwrapped phase in degrees of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.rad2deg(np.unwrap(np.angle(self.network.s[:, i, j]))), color='r', label='Samples')
ax.plot(freqs, np.rad2deg(np.unwrap(np.angle(self.get_model_response(i, j, freqs)))), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Phase (Degrees)')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_re(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the real part of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.real(self.network.s[:, i, j]), color='r', label='Samples')
ax.plot(freqs, np.real(self.get_model_response(i, j, freqs)), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Real Part')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_im(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the imaginary part of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.imag(self.network.s[:, i, j]), color='r', label='Samples')
ax.plot(freqs, np.imag(self.get_model_response(i, j, freqs)), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Imaginary Part')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_singular(self, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the singular values of the vector fitted S-matrix in linear scale.
Parameters
----------
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
# get system matrices of state-space representation
A, B, C, D, E = self._get_ABCDE()
n_ports = np.shape(D)[0]
singvals = np.zeros((n_ports, len(freqs)))
# calculate and save singular values for each frequency
for i, f in enumerate(freqs):
u, sigma, vh = np.linalg.svd(self._get_s_from_ABCDE(f, A, B, C, D, E))
singvals[:, i] = sigma
# plot the frequency response of each singular value
for n in range(n_ports):
ax.plot(freqs, singvals[n, :], label=r'$\sigma_{}$'.format(n + 1))
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Magnitude')
ax.legend(loc='best')
return ax
@check_plotting
def plot_convergence(self, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the history of the model residue parameter **d_res** during the iterative pole relocation process of the
vector fitting, which should eventually converge to a fixed value. Additionally, the relative change of the
maximum singular value of the coefficient matrix **A** are plotted, which serve as a convergence indicator.
Parameters
----------
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if ax is None:
ax = mplt.gca()
ax.semilogy(np.arange(len(self.delta_max_history)) + 1, self.delta_max_history, color='darkblue')
ax.set_xlabel('Iteration step')
ax.set_ylabel('Max. relative change', color='darkblue')
ax2 = ax.twinx()
ax2.plot(np.arange(len(self.d_res_history)) + 1, self.d_res_history, color='orangered')
ax2.set_ylabel('Residue', color='orangered')
return ax
@check_plotting
def plot_passivation(self, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the history of the greatest singular value during the iterative passivity enforcement process, which
should eventually converge to a value slightly lower than 1.0 or stop after reaching the maximum number of
iterations specified in the class variable :attr:`max_iterations`.
Parameters
----------
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if ax is None:
ax = mplt.gca()
ax.plot(np.arange(len(self.history_max_sigma)) + 1, self.history_max_sigma)
ax.set_xlabel('Iteration step')
ax.set_ylabel('Max. singular value')
return ax
    def write_spice_subcircuit_s(self, file: str) -> None:
        """
        Creates an equivalent N-port SPICE subcircuit based on its vector fitted S parameter responses.

        Parameters
        ----------
        file : str
            Path and filename including file extension (usually .sp) for the SPICE subcircuit file.

        Returns
        -------
        None

        Notes
        -----
        In the SPICE subcircuit, all ports will share a common reference node (global SPICE ground on node 0). The
        equivalent circuit uses linear dependent current sources on all ports, which are controlled by the currents
        through equivalent admittances modelling the parameters from a vector fit. This approach is based on [#]_.

        Examples
        --------
        Load and fit the `Network`, then export the equivalent SPICE subcircuit:

        >>> nw_3port = skrf.Network('my3port.s3p')
        >>> vf = skrf.VectorFitting(nw_3port)
        >>> vf.vector_fit(n_poles_real=1, n_poles_cmplx=4)
        >>> vf.write_spice_subcircuit_s('/my3port_model.sp')

        References
        ----------
        .. [#] G. Antonini, "SPICE Equivalent Circuits of Frequency-Domain Responses", IEEE Transactions on
            Electromagnetic Compatibility, vol. 45, no. 3, pp. 502-512, August 2003,
            DOI: https://doi.org/10.1109/TEMC.2003.815528
        """
        # list of subcircuits for the equivalent admittances
        subcircuits = []
        # provides a unique SPICE subcircuit identifier (X1, X2, X3, ...)
        def get_new_subckt_identifier():
            subcircuits.append('X{}'.format(len(subcircuits) + 1))
            return subcircuits[-1]
        # use engineering notation for the numbers in the SPICE file (1000 --> 1k)
        formatter = EngFormatter(sep="", places=3, usetex=False)
        # replace "micron" sign by "u" and "mega" sign by "meg"
        # (SPICE uses plain-ASCII "u" and "meg" prefixes instead of matplotlib's defaults)
        letters_dict = formatter.ENG_PREFIXES
        letters_dict.update({-6: 'u', 6: 'meg'})
        formatter.ENG_PREFIXES = letters_dict
        with open(file, 'w') as f:
            # write title line
            f.write('* EQUIVALENT CIRCUIT FOR VECTOR FITTED S-MATRIX\n')
            f.write('* Created using scikit-rf vectorFitting.py\n')
            f.write('*\n')
            # define the complete equivalent circuit as a subcircuit with one input node per port
            # those port nodes are labeled p1, p2, p3, ...
            # all ports share a common node for ground reference (node 0)
            str_input_nodes = ''
            for n in range(self.network.nports):
                str_input_nodes += 'p{} '.format(n + 1)
            f.write('.SUBCKT s_equivalent {}\n'.format(str_input_nodes))
            for n in range(self.network.nports):
                f.write('*\n')
                f.write('* port {}\n'.format(n + 1))
                # node a<n> is the internal port node behind the dummy ammeter V<n>
                # add port reference impedance z0 (has to be resistive, no imaginary part)
                f.write('R{} a{} 0 {}\n'.format(n + 1, n + 1, np.real(self.network.z0[0, n])))
                # add dummy voltage sources (V=0) to measure the input current
                f.write('V{} p{} a{} 0\n'.format(n + 1, n + 1, n + 1))
                # CCVS and VCVS driving the transfer admittances with a = V/2/sqrt(Z0) + I/2*sqrt(Z0)
                # In
                f.write('H{} nt{} nts{} V{} {}\n'.format(n + 1, n + 1, n + 1, n + 1, np.real(self.network.z0[0, n])))
                # Vn
                f.write('E{} nts{} 0 p{} 0 {}\n'.format(n + 1, n + 1, n + 1, 1))
                for j in range(self.network.nports):
                    f.write('* transfer network for s{}{}\n'.format(n + 1, j + 1))
                    # stacking order in VectorFitting class variables:
                    # s11, s12, s13, ..., s21, s22, s23, ...
                    i_response = n * self.network.nports + j
                    # add CCCS to generate the scattered current I_nj at port n
                    # control current is measured by the dummy voltage source at the transfer network Y_nj
                    # the scattered current is injected into the port (source positive connected to ground)
                    # the *_inv twin carries the contributions whose residues had a negative real part
                    f.write('F{}{} 0 a{} V{}{} {}\n'.format(n + 1, j + 1, n + 1, n + 1, j + 1,
                                                            formatter(1 / np.real(self.network.z0[0, n]))))
                    f.write('F{}{}_inv a{} 0 V{}{}_inv {}\n'.format(n + 1, j + 1, n + 1, n + 1, j + 1,
                                                                    formatter(1 / np.real(self.network.z0[0, n]))))
                    # add dummy voltage source (V=0) in series with Y_nj to measure current through transfer admittance
                    f.write('V{}{} nt{} nt{}{} 0\n'.format(n + 1, j + 1, j + 1, n + 1, j + 1))
                    f.write('V{}{}_inv nt{} nt{}{}_inv 0\n'.format(n + 1, j + 1, j + 1, n + 1, j + 1))
                    # add corresponding transfer admittance Y_nj, which is modulating the control current
                    # the transfer admittance is a parallel circuit (sum) of individual admittances
                    f.write('* transfer admittances for S{}{}\n'.format(n + 1, j + 1))
                    # start with proportional and constant term of the model
                    # H(s) = d + s * e model
                    # Y(s) = G + s * C equivalent admittance
                    g = self.constant_coeff[i_response]
                    c = self.proportional_coeff[i_response]
                    # add R for constant term
                    # (negative coefficients go onto the inverted node; exact zero adds no element)
                    if g < 0:
                        f.write('R{}{} nt{}{}_inv 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(np.abs(1 / g))))
                    elif g > 0:
                        f.write('R{}{} nt{}{} 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(1 / g)))
                    # add C for proportional term
                    if c < 0:
                        f.write('C{}{} nt{}{}_inv 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(np.abs(c))))
                    elif c > 0:
                        f.write('C{}{} nt{}{} 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(c)))
                    # add pairs of poles and residues
                    for i_pole in range(len(self.poles)):
                        pole = self.poles[i_pole]
                        residue = self.residues[i_response, i_pole]
                        node = get_new_subckt_identifier() + ' nt{}{}'.format(n + 1, j + 1)
                        if np.real(residue) < 0.0:
                            # multiplication with -1 required, otherwise the values for RLC would be negative
                            # this gets compensated by inverting the transfer current direction for this subcircuit
                            residue = -1 * residue
                            node += '_inv'
                        if np.imag(pole) == 0.0:
                            # real pole; add rl_admittance
                            l = 1 / np.real(residue)
                            r = -1 * np.real(pole) / np.real(residue)
                            f.write(node + ' 0 rl_admittance res={} ind={}\n'.format(formatter(r), formatter(l)))
                        else:
                            # complex pole of a conjugate pair; add rcl_vccs_admittance
                            l = 1 / (2 * np.real(residue))
                            b = -2 * (np.real(residue) * np.real(pole) + np.imag(residue) * np.imag(pole))
                            r = -1 * np.real(pole) / np.real(residue)
                            c = 2 * np.real(residue) / (np.abs(pole) ** 2)
                            gm_add = b * l * c
                            if gm_add < 0:
                                m = -1
                            else:
                                m = 1
                            f.write(node + ' 0 rcl_vccs_admittance res={} cap={} ind={} gm={} mult={}\n'.format(
                                formatter(r),
                                formatter(c),
                                formatter(l),
                                formatter(np.abs(gm_add)),
                                int(m)))
            f.write('.ENDS s_equivalent\n')
            f.write('*\n')
            # subcircuit for an active RCL+VCCS equivalent admittance Y(s) of a complex-conjugate pole-residue pair H(s)
            # Residue: c = c' + j * c"
            # Pole: p = p' + j * p"
            # H(s) = c / (s - p) + conj(c) / (s - conj(p))
            #      = (2 * c' * s - 2 * (c'p' + c"p")) / (s ** 2 - 2 * p' * s + |p| ** 2)
            # Y(S) = (1 / L * s + b) / (s ** 2 + R / L * s + 1 / (L * C))
            f.write('.SUBCKT rcl_vccs_admittance n_pos n_neg res=1k cap=1n ind=100p gm=1m mult=1\n')
            f.write('L1 n_pos 1 {ind}\n')
            f.write('C1 1 2 {cap}\n')
            f.write('R1 2 n_neg {res}\n')
            f.write('G1 n_pos n_neg 1 2 {gm} m={mult}\n')
            f.write('.ENDS rcl_vccs_admittance\n')
            f.write('*\n')
            # subcircuit for a passive RL equivalent admittance Y(s) of a real pole-residue pair H(s)
            # H(s) = c / (s - p)
            # Y(s) = 1 / L / (s + s * R / L)
            f.write('.SUBCKT rl_admittance n_pos n_neg res=1k ind=100p\n')
            f.write('L1 n_pos 1 {ind}\n')
            f.write('R1 1 n_neg {res}\n')
            f.write('.ENDS rl_admittance\n')
# Define s instead of omega
import numpy as np
import os
# imports for type hinting
from typing import Any, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from .network import Network
from functools import wraps
try:
from . import plotting # will perform the correct setup for matplotlib before it is called below
import matplotlib.pyplot as mplt
from matplotlib.ticker import EngFormatter
except ImportError:
mplt = None
import logging
import warnings
from timeit import default_timer as timer
def check_plotting(func):
    """
    Decorator ensuring that matplotlib.pyplot is available under the name mplt.

    If matplotlib could not be imported at module load time (mplt is None),
    calling the decorated function raises a RuntimeError instead of failing
    later with a NameError.

    Raises
    ------
    RuntimeError
        When trying to run the decorated function without matplotlib
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        if mplt is None:
            raise RuntimeError('Plotting is not available')
        # bug fix: propagate the wrapped function's return value. Previously the
        # result was discarded, so every decorated plot method (which returns a
        # matplotlib Axes) returned None to its caller.
        return func(*args, **kwargs)

    return wrapper
class VectorFitting:
"""
This class provides a Python implementation of the Vector Fitting algorithm and various functions for the fit
analysis, passivity evaluation and enforcement, and export of SPICE equivalent circuits.
Parameters
----------
network : :class:`skrf.network.Network`
Network instance of the :math:`N`-port holding the frequency responses to be fitted, for example a
scattering, impedance or admittance matrix.
Examples
--------
Load the `Network`, create a `VectorFitting` instance, perform the fit with a given number of real and
complex-conjugate starting poles:
>>> nw_3port = skrf.Network('my3port.s3p')
>>> vf = skrf.VectorFitting(nw_3port)
>>> vf.vector_fit(n_poles_real=1, n_poles_cmplx=4)
Notes
-----
The fitting code is based on the original algorithm [#Gustavsen_vectfit]_ and on two improvements for relaxed pole
relocation [#Gustavsen_relaxed]_ and efficient (fast) solving [#Deschrijver_fast]_. See also the Vector Fitting
website [#vectfit_website]_ for further information and download of the papers listed below. A Matlab implementation
is also available there for reference.
References
----------
.. [#Gustavsen_vectfit] B. Gustavsen, A. Semlyen, "Rational Approximation of Frequency Domain Responses by Vector
Fitting", IEEE Transactions on Power Delivery, vol. 14, no. 3, pp. 1052-1061, July 1999,
DOI: https://doi.org/10.1109/61.772353
.. [#Gustavsen_relaxed] B. Gustavsen, "Improving the Pole Relocating Properties of Vector Fitting", IEEE
Transactions on Power Delivery, vol. 21, no. 3, pp. 1587-1592, July 2006,
DOI: https://doi.org/10.1109/TPWRD.2005.860281
    .. [#Deschrijver_fast] D. Deschrijver, M. Mrozowski, T. Dhaene, D. De Zutter, "Macromodeling of Multiport Systems
Using a Fast Implementation of the Vector Fitting Method", IEEE Microwave and Wireless Components Letters,
vol. 18, no. 6, pp. 383-385, June 2008, DOI: https://doi.org/10.1109/LMWC.2008.922585
.. [#vectfit_website] Vector Fitting website: https://www.sintef.no/projectweb/vectorfitting/
"""
    def __init__(self, network: 'Network'):
        """Stores the network to be fitted and initializes all result and diagnostics attributes."""
        # the N-port network whose frequency responses will be fitted
        self.network = network
        # copy of the starting poles (before relocation); set by vector_fit()
        self.initial_poles = None
        self.poles = None
        """ Instance variable holding the list of fitted poles. Will be initialized by :func:`vector_fit`. """
        self.residues = None
        """ Instance variable holding the list of fitted residues. Will be initialized by :func:`vector_fit`. """
        self.proportional_coeff = None
        """ Instance variable holding the list of fitted proportional coefficients. Will be initialized by
        :func:`vector_fit`. """
        self.constant_coeff = None
        """ Instance variable holding the list of fitted constants. Will be initialized by :func:`vector_fit`. """
        self.max_iterations = 100
        """ Instance variable specifying the maximum number of iterations for the fitting process and for the passivity
        enforcement. To be changed by the user before calling :func:`vector_fit` and/or :func:`passivity_enforce`. """
        self.max_tol = 1e-6
        """ Instance variable specifying the convergence criterion in terms of relative tolerance. To be changed by the
        user before calling :func:`vector_fit`. """
        self.wall_clock_time = 0
        """ Instance variable holding the wall-clock time (in seconds) consumed by the most recent fitting process with
        :func:`vector_fit`. Subsequent calls of :func:`vector_fit` will overwrite this value. """
        # fitting diagnostics, (re-)populated during the iterative processes:
        self.d_res_history = []  # d_res solution after each pole relocation iteration (see vector_fit)
        self.delta_max_history = []  # max. relative change of the largest singular value per iteration
        self.history_max_sigma = []  # largest singular value per passivity enforcement iteration
        self.history_cond_A = []  # condition number of the coefficient matrix A per iteration
# legacy getter and setter methods to support deprecated 'zeros' attribute (now correctly called 'residues')
@property
def zeros(self):
"""
**Deprecated**; Please use :attr:`residues` instead.
"""
warnings.warn('Attribute `zeros` is deprecated and will be removed in a future version. Please use the new '
'attribute `residues` instead.', DeprecationWarning, stacklevel=2)
return self.residues
@zeros.setter
def zeros(self, value):
warnings.warn('Attribute `zeros` is deprecated and will be removed in a future version. Please use the new '
'attribute `residues` instead.', DeprecationWarning, stacklevel=2)
self.residues = value
def _get_real_pole_mask(self, poles: np.ndarray) -> np.ndarray:
"""
Returns a mask need for addressing the A matrices
"""
A_sub_real_mask = []
for rm in poles.imag == 0:
if rm:
A_sub_real_mask += [True]
else:
A_sub_real_mask += [False, False]
return np.array(A_sub_real_mask)
    def vector_fit(self, n_poles_real: int = 2, n_poles_cmplx: int = 2, init_pole_spacing: str = 'lin',
                   parameter_type: str = 's', fit_constant: bool = True, fit_proportional: bool = False) -> None:
        """
        Main work routine performing the vector fit. The results will be stored in the class variables
        :attr:`poles`, :attr:`residues`, :attr:`proportional_coeff` and :attr:`constant_coeff`.

        Parameters
        ----------
        n_poles_real : int, optional
            Number of initial real poles. See notes.

        n_poles_cmplx : int, optional
            Number of initial complex conjugate poles. See notes.

        init_pole_spacing : str, optional
            Type of initial pole spacing across the frequency interval of the S-matrix. Either linear (lin) or
            logarithmic (log).

        parameter_type : str, optional
            Representation type of the frequency responses to be fitted. Either *scattering* (:attr:`s` or :attr:`S`),
            *impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). As scikit-rf can currently
            only read S parameters from a Touchstone file, the fit should also be performed on the original S
            parameters. Otherwise, scikit-rf will convert the responses from S to Z or Y, which might work for the fit
            but can cause other issues.

        fit_constant : bool, optional
            Include a constant term **d** in the fit.

        fit_proportional : bool, optional
            Include a proportional term **e** in the fit.

        Returns
        -------
        None
            No return value. Results are stored in :attr:`poles`, :attr:`residues`, :attr:`constant_coeff` and
            :attr:`proportional_coeff`; diagnostics in :attr:`d_res_history`, :attr:`delta_max_history` and
            :attr:`history_cond_A`.

        Notes
        -----
        The required number of real or complex conjugate starting poles depends on the behaviour of the frequency
        responses. To fit a smooth response such as a low-pass characteristic, 1-3 real poles and no complex conjugate
        poles is usually sufficient. If resonances or other types of peaks are present in some or all of the responses,
        a similar number of complex conjugate poles is required. Be careful not to use too many poles, as excessive
        poles will not only increase the computation workload during the fitting and the subsequent use of the model,
        but they can also introduce unwanted resonances at frequencies well outside the fit interval.
        """
        timer_start = timer()
        # create initial poles and space them across the frequencies in the provided Touchstone file
        # use normalized frequencies during the iterations (seems to be more stable during least-squares fit)
        norm = np.average(self.network.f)
        freqs_norm = np.array(self.network.f) / norm
        fmin = np.amin(freqs_norm)
        fmax = np.amax(freqs_norm)
        if init_pole_spacing == 'log':
            pole_freqs_real = np.geomspace(fmin, fmax, n_poles_real)
            pole_freqs_cmplx = np.geomspace(fmin, fmax, n_poles_cmplx)
        elif init_pole_spacing == 'lin':
            pole_freqs_real = np.linspace(fmin, fmax, n_poles_real)
            pole_freqs_cmplx = np.linspace(fmin, fmax, n_poles_cmplx)
        else:
            warnings.warn('Invalid choice of initial pole spacing; proceeding with linear spacing.', UserWarning,
                          stacklevel=2)
            pole_freqs_real = np.linspace(fmin, fmax, n_poles_real)
            pole_freqs_cmplx = np.linspace(fmin, fmax, n_poles_cmplx)
        # init poles array of correct length
        poles = np.zeros(n_poles_real + n_poles_cmplx, dtype=complex)
        # add real poles
        for i, f in enumerate(pole_freqs_real):
            omega = 2 * np.pi * f
            poles[i] = - omega
        # add complex-conjugate poles (store only positive imaginary parts)
        i_offset = len(pole_freqs_real)
        for i, f in enumerate(pole_freqs_cmplx):
            omega = 2 * np.pi * f
            poles[i_offset + i] = (-0.01 + 1j) * omega
        # save initial poles (un-normalize first)
        self.initial_poles = poles * norm
        max_singular = 1
        logging.info('### Starting pole relocation process.\n')
        # stack frequency responses as a single vector
        # stacking order (row-major):
        # s11, s12, s13, ..., s21, s22, s23, ...
        freq_responses = []
        for i in range(self.network.nports):
            for j in range(self.network.nports):
                if parameter_type.lower() == 's':
                    freq_responses.append(self.network.s[:, i, j])
                elif parameter_type.lower() == 'z':
                    freq_responses.append(self.network.z[:, i, j])
                elif parameter_type.lower() == 'y':
                    freq_responses.append(self.network.y[:, i, j])
                else:
                    warnings.warn('Invalid choice of matrix parameter type (S, Z, or Y); proceeding with scattering '
                                  'representation.', UserWarning, stacklevel=2)
                    freq_responses.append(self.network.s[:, i, j])
        freq_responses = np.array(freq_responses)
        # ITERATIVE FITTING OF POLES to the provided frequency responses
        # initial set of poles will be replaced with new poles after every iteration
        iterations = self.max_iterations
        self.d_res_history = []
        self.delta_max_history = []
        self.history_cond_A = []
        converged = False
        # complex (normalized) frequency samples s_k = j * omega_k used throughout the fit
        omega = 2 * np.pi * freqs_norm
        s = 1j * omega
        while iterations > 0:
            logging.info('Iteration {}'.format(self.max_iterations - iterations + 1))
            # count number of rows and columns in final coefficient matrix to solve for (c_res, d_res)
            # (ratio #real/#complex poles might change during iterations)
            n_cols_unused = np.sum((poles.imag != 0) + 1)
            n_cols_used = n_cols_unused
            n_cols_used += 1
            if fit_constant:
                n_cols_unused += 1
            if fit_proportional:
                n_cols_unused += 1
            n_rows_A = n_cols_used * len(freq_responses)
            # generate coefficients of approximation function for each target frequency response
            # responses will be reduced independently using QR decomposition
            # simplified coeff. matrices of all responses will be stacked in matrix A for least-squares solver
            A = np.empty((n_rows_A, n_cols_used))
            b = np.zeros(n_rows_A)
            for i_response, freq_response in enumerate(freq_responses):
                # calculate coefficients for each frequency response
                # A_sub which will be reduced first (QR decomposition) and then filled into the main
                # coefficient matrix A
                A_sub = np.empty((len(freqs_norm), n_cols_unused + n_cols_used), dtype=complex)
                A_row_extra = np.empty(n_cols_used)
                # responses will be weighted according to their norm;
                # alternative: equal weights with weight_response = 1.0
                # or anti-proportional weights with weight_response = 1 / np.linalg.norm(freq_response)
                weight_response = np.linalg.norm(freq_response)
                # Split up real and complex poles and store the correspondent column from A_sub
                real_mask = poles.imag == 0
                poles_real = poles[np.nonzero(real_mask)]
                poles_cplx = poles[np.nonzero(~real_mask)]
                A_sub_real_idx = np.nonzero(self._get_real_pole_mask(poles))[0]
                A_sub_cplx_idx = np.nonzero(~self._get_real_pole_mask(poles))[0][::2]
                # add coefficients for a pair of complex conjugate poles
                # part 1: first sum of rational functions (residue variable c)
                # merged with
                # part 3: second sum of rational functions (variable c_res)
                coeff = 1 / (s[:, None] - poles_real)
                # part 1: coeff = 1 / (s_k - p') = coeff_re + j coeff_im
                A_sub[:, A_sub_real_idx] = coeff
                # part 3: coeff = -1 * H(s_k) / (s_k - pole)
                # Re{coeff} = -1 * coeff_re * resp_re + coeff_im * resp_im
                # Im{coeff} = -1 * coeff_re * resp_im - coeff_im * resp_re
                A_sub[:, A_sub_real_idx + n_cols_unused] = - coeff * freq_response[:, None]
                # extra equation to avoid trivial solution:
                # coeff += Re(1 / (s_k - pole)) = coeff_re
                A_row_extra[A_sub_real_idx] = np.sum(coeff.real, axis=0)
                # coefficient for a complex pole of a conjugated pair: p = p' + jp''
                # row 1: add coefficient for real part of residue
                # part 1: coeff = 1 / (s_k - pole) + 1 / (s_k - conj(pole))
                coeff = 1 / (s[:, None] - poles_cplx) + 1 / (s[:, None] - np.conj(poles_cplx))
                A_sub[:, A_sub_cplx_idx] = coeff
                # extra equation to avoid trivial solution:
                # coeff += Re{1 / (s_k - pole) + 1 / (s_k - conj(pole))}
                A_row_extra[A_sub_cplx_idx] = np.sum(coeff.real, axis=0)
                # part 3: coeff = -1 * H(s_k) * [1 / (s_k - pole) + 1 / (s_k - conj(pole))]
                A_sub[:, A_sub_cplx_idx + n_cols_unused] = - coeff * freq_response[:, None]
                # part 1: coeff = 1j / (s_k - pole) - 1j / (s_k - conj(pole))
                coeff = 1j / (s[:, None] - poles_cplx) - 1j / (s[:, None] - np.conj(poles_cplx))
                A_sub[:, A_sub_cplx_idx + 1] = coeff
                # extra equation to avoid trivial solution:
                # coeff += Re(1j / (s_k - pole) - 1j / (s_k - conj(pole)))
                A_row_extra[A_sub_cplx_idx + 1] = np.sum(coeff.real, axis=0)
                # part 3: coeff = -1 * H(s_k) * [1j / (s_k - pole) - 1j / (s_k - conj(pole))]
                A_sub[:, A_sub_cplx_idx + 1 + n_cols_unused] = -coeff * freq_response[:, None]
                # part 4: constant (variable d_res)
                # coeff = -1 * H(s_k)
                A_sub[:,-1] = - freq_response
                # part 2: constant (variable d) and proportional term (variable e)
                offset = n_cols_unused - 1
                if fit_constant:
                    # coeff = 1 + j0
                    A_sub[:, offset] = 1
                    offset -=1
                if fit_proportional:
                    # coeff = s_k = j omega_k
                    A_sub[:, offset] = s
                # extra-equation coefficient for the d_res column (last used column)
                A_row_extra[-1] = len(freqs_norm)
                # QR decomposition
                # (real and imaginary parts are interleaved row-wise so the solver works on real numbers only)
                A_ri = np.empty((2 * A_sub.shape[0], A_sub.shape[1]))
                A_ri[::2] = A_sub.real
                A_ri[1::2] = A_sub.imag
                R = np.linalg.qr(A_ri, 'r')
                # only R22 is required to solve for c_res and d_res
                R22 = R[n_cols_unused:, n_cols_unused:]
                # similarly, only right half of Q is required (not used here, because RHS is zero)
                # Q2 = Q[:, n_cols_unused:]
                # apply weight of this response and add coefficients to the system matrix
                A[i_response * n_cols_used:(i_response + 1) * n_cols_used, :] = np.sqrt(weight_response) * R22
                # multiplication of Q2 by rhs=0 omitted; right-hand side would also require weighting
                # b[i_response * n_cols_used:(i_response + 1) * n_cols_used] = np.matmul(np.transpose(Q2), rhs)
                # add extra equation to avoid trivial solution
                weight_extra = np.linalg.norm(weight_response * freq_response) / len(freq_response)
                A[(i_response + 1) * n_cols_used - 1, :] = np.sqrt(weight_extra) * A_row_extra
                b[(i_response + 1) * n_cols_used - 1] = np.sqrt(weight_extra) * len(freq_response)
            cond_A = np.linalg.cond(A)
            logging.info('Condition number of coeff. matrix A = {}'.format(cond_A))
            self.history_cond_A.append(cond_A)
            # solve least squares for real parts
            x, residuals, rank, singular_vals = np.linalg.lstsq(A, b, rcond=None)
            # assemble individual result vectors from single LS result x
            c_res = x[:-1]
            d_res = x[-1]
            # check if d_res is suited for zeros calculation
            tol_res = 1e-8
            if np.abs(d_res) < tol_res:
                # d_res is too small, discard solution and proceed with |d_res| = tol_res (sign preserved)
                d_res = tol_res * (d_res / np.abs(d_res))
                warnings.warn('Replacing d_res solution as it was too small. This is not a good sign and probably '
                              'means that more starting poles are required', RuntimeWarning, stacklevel=2)
            self.d_res_history.append(d_res)
            logging.info('d_res = {}'.format(d_res))
            # build test matrix H, which will hold the new poles as eigenvalues
            H = np.zeros((len(c_res), len(c_res)))
            i = 0
            for i_pole in range(len(poles)):
                # fill diagonal with previous poles
                pole_re = poles.real[i_pole]
                pole_im = poles.imag[i_pole]
                if pole_im == 0.0:
                    # one row for a real pole
                    H[i, i] = pole_re
                    H[i] -= c_res / d_res
                    i += 1
                else:
                    # two rows for a complex pole of a conjugated pair
                    H[i, i] = pole_re
                    H[i, i + 1] = pole_im
                    H[i + 1, i] = -1 * pole_im
                    H[i + 1, i + 1] = pole_re
                    H[i] -= 2 * c_res / d_res
                    i += 2
            poles_new = np.linalg.eigvals(H)
            # replace poles for next iteration
            poles_lst = []
            for k, pole in enumerate(poles_new):
                if pole.imag >= 0.0:
                    # complex poles need to come in complex conjugate pairs; append only the positive part
                    poles_lst.append(pole)
            poles = np.array(poles_lst)
            # flip real part of unstable poles (real part needs to be negative for stability)
            poles.real = - np.abs(poles.real)
            # calculate relative changes in the singular values; stop iteration loop once poles have converged
            new_max_singular = np.amax(singular_vals)
            delta_max = np.abs(1 - new_max_singular / max_singular)
            self.delta_max_history.append(delta_max)
            logging.info('Max. relative change in residues = {}\n'.format(delta_max))
            max_singular = new_max_singular
            stop = False
            if delta_max < self.max_tol:
                if converged:
                    # is really converged, finish
                    logging.info('Pole relocation process converged after {} iterations.'.format(
                        self.max_iterations - iterations + 1))
                    stop = True
                else:
                    # might be converged, but do one last run to be sure
                    converged = True
            else:
                if converged:
                    # is not really converged, continue
                    converged = False
            iterations -= 1
            if iterations == 0:
                max_cond = np.amax(self.history_cond_A)
                if max_cond > 1e10:
                    msg_illcond = 'Hint: the linear system was ill-conditioned (max. condition number = {}). ' \
                                  'This often means that more poles are required.'.format(max_cond)
                else:
                    msg_illcond = ''
                if converged and stop is False:
                    warnings.warn('Vector Fitting: The pole relocation process barely converged to tolerance. '
                                  'It took the max. number of iterations (N_max = {}). '
                                  'The results might not have converged properly. '.format(self.max_iterations)
                                  + msg_illcond, RuntimeWarning, stacklevel=2)
                else:
                    warnings.warn('Vector Fitting: The pole relocation process stopped after reaching the '
                                  'maximum number of iterations (N_max = {}). '
                                  'The results did not converge properly. '.format(self.max_iterations)
                                  + msg_illcond, RuntimeWarning, stacklevel=2)
            if stop:
                iterations = 0
        # ITERATIONS DONE
        logging.info('Initial poles before relocation:')
        logging.info(self.initial_poles)
        logging.info('Final poles:')
        logging.info(poles * norm)
        logging.info('\n### Starting residues calculation process.\n')
        # finally, solve for the residues with the previously calculated poles
        residues = []
        constant_coeff = []
        proportional_coeff = []
        for freq_response in freq_responses:
            # calculate coefficients (row A_k in matrix) for each frequency sample s_k of the target response
            # row will be appended to submatrix A_sub of complete coeff matrix A_matrix
            # 2 rows per pole in result vector (1st for real part, 2nd for imaginary part)
            # --> 2 columns per pole in coeff matrix
            n_cols = np.sum((poles.imag != 0) + 1)
            if fit_constant:
                n_cols += 1
            if fit_proportional:
                n_cols += 1
            A_matrix = np.empty((len(freqs_norm), n_cols), dtype=complex)
            # Split up real and complex poles and store the correspondent column from A_sub
            real_mask = poles.imag == 0
            poles_real = poles[np.nonzero(real_mask)]
            poles_cplx = poles[np.nonzero(~real_mask)]
            A_sub_real_idx = np.nonzero(self._get_real_pole_mask(poles))[0]
            A_sub_cplx_idx = np.nonzero(~self._get_real_pole_mask(poles))[0][::2]
            # add coefficients for a pair of complex conjugate poles
            # part 1: first sum of rational functions (residue variable c)
            A_matrix[:, A_sub_real_idx] = 1 / (s[:, None] - poles_real)
            # coefficient for real part of residue
            A_matrix[:, A_sub_cplx_idx] = (1 / (s[:, None] - poles_cplx) +
                                           1 / (s[:, None] - np.conj(poles_cplx)))
            # coefficient for imaginary part of residue
            A_matrix[:, A_sub_cplx_idx + 1] = (1j / (s[:, None] - poles_cplx)
                                               - 1j / (s[:, None] - np.conj(poles_cplx)))
            offset = np.sum((poles.imag != 0) + 1)
            if fit_constant:
                A_matrix[:, offset] = 1
                offset += 1
            if fit_proportional:
                A_matrix[:, offset] = s
            logging.info('A_matrix: condition number = {}'.format(np.linalg.cond(A_matrix)))
            # solve least squares and obtain results as stack of real part vector and imaginary part vector
            x, residuals, rank, singular_vals = np.linalg.lstsq(np.vstack((A_matrix.real, A_matrix.imag)), np.hstack((freq_response.real, freq_response.imag)), rcond=None)
            i = 0
            residues_response = []
            for pole_im in poles.imag:
                if pole_im == 0.0:
                    residues_response.append(x[i] + 0j)
                    i += 1
                else:
                    residues_response.append(x[i] + 1j * x[i + 1])
                    i += 2
            residues.append(residues_response)
            if fit_constant and fit_proportional:
                # both constant d and proportional e were fitted
                constant_coeff.append(x[-2])
                proportional_coeff.append(x[-1])
            elif fit_constant:
                # only constant d was fitted
                constant_coeff.append(x[-1])
                proportional_coeff.append(0.0)
            elif fit_proportional:
                # only proportional e was fitted
                constant_coeff.append(0.0)
                proportional_coeff.append(x[-1])
            else:
                # neither constant d nor proportional e was fitted
                constant_coeff.append(0.0)
                proportional_coeff.append(0.0)
        # save poles, residues, d, e in actual frequencies (un-normalized)
        self.poles = poles * norm
        self.residues = np.array(residues) * norm
        self.constant_coeff = np.array(constant_coeff)
        self.proportional_coeff = np.array(proportional_coeff) / norm
        timer_stop = timer()
        self.wall_clock_time = timer_stop - timer_start
        logging.info('\n### Vector fitting finished in {} seconds.\n'.format(self.wall_clock_time))
def get_rms_error(self, i=-1, j=-1, parameter_type: str = 's'):
r"""
Returns the root-mean-square (rms) error magnitude of the fit, i.e.
:math:`\sqrt{ \mathrm{mean}(|S - S_\mathrm{fit} |^2) }`,
either for an individual response :math:`S_{i+1,j+1}` or for larger slices of the network.
Parameters
----------
i : int, optional
Row indices of the responses to be evaluated. Either a single row selected by an integer
:math:`i \in [0, N_\mathrm{ports}-1]`, or multiple rows selected by a list of integers, or all rows
selected by :math:`i = -1` (*default*).
j : int, optional
Column indices of the responses to be evaluated. Either a single column selected by an integer
:math:`j \in [0, N_\mathrm{ports}-1]`, or multiple columns selected by a list of integers, or all columns
selected by :math:`j = -1` (*default*).
parameter_type: str, optional
Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
*impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`).
Returns
-------
rms_error : ndarray
The rms error magnitude between the vector fitted model and the original network data.
Raises
------
ValueError
If the specified parameter representation type is not :attr:`s`, :attr:`z`, nor :attr:`y`.
"""
if i == -1:
list_i = range(self.network.nports)
elif isinstance(i, int):
list_i = [i]
else:
list_i = i
if j == -1:
list_j = range(self.network.nports)
elif isinstance(j, int):
list_j = [j]
else:
list_j = j
if parameter_type.lower() == 's':
nw_responses = self.network.s
elif parameter_type.lower() == 'z':
nw_responses = self.network.z
elif parameter_type.lower() == 'y':
nw_responses = self.network.y
else:
raise ValueError('Invalid parameter type `{}`. Valid options: `s`, `z`, or `y`'.format(parameter_type))
error_mean_squared = 0
for i in list_i:
for j in list_j:
nw_ij = nw_responses[:, i, j]
fit_ij = self.get_model_response(i, j, self.network.f)
error_mean_squared += np.mean(np.square(np.abs(nw_ij - fit_ij)))
return np.sqrt(error_mean_squared)
    def _get_ABCDE(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Private method.
        Returns the real-valued system matrices of the state-space representation of the current rational model, as
        defined in [#]_.

        Returns
        -------
        A : ndarray
            State-space matrix A holding the poles on the diagonal as real values with imaginary parts on the sub-
            diagonal
        B : ndarray
            State-space matrix B holding coefficients (1, 2, or 0), depending on the respective type of pole in A
        C : ndarray
            State-space matrix C holding the residues
        D : ndarray
            State-space matrix D holding the constants
        E : ndarray
            State-space matrix E holding the proportional coefficients (usually 0 in case of fitted S-parameters)

        Raises
        ------
        ValueError
            If the model parameters have not been initialized (by running :func:`vector_fit()` or :func:`read_npz()`).

        References
        ----------
        .. [#] B. Gustavsen and A. Semlyen, "Fast Passivity Assessment for S-Parameter Rational Models Via a Half-Size
            Test Matrix," in IEEE Transactions on Microwave Theory and Techniques, vol. 56, no. 12, pp. 2701-2708,
            Dec. 2008, DOI: 10.1109/TMTT.2008.2007319.
        """
        # initial checks
        if self.poles is None:
            raise ValueError('self.poles = None; nothing to do. You need to run vector_fit() first.')
        if self.residues is None:
            raise ValueError('self.residues = None; nothing to do. You need to run vector_fit() first.')
        if self.proportional_coeff is None:
            raise ValueError('self.proportional_coeff = None; nothing to do. You need to run vector_fit() first.')
        if self.constant_coeff is None:
            raise ValueError('self.constant_coeff = None; nothing to do. You need to run vector_fit() first.')

        # assemble real-valued state-space matrices A, B, C, D, E from fitted complex-valued pole-residue model
        # determine size of the matrix system
        # (the model stores one response per port pair, hence n_ports = sqrt(number of responses))
        n_ports = int(np.sqrt(len(self.constant_coeff)))
        n_poles_real = 0
        n_poles_cplx = 0
        for pole in self.poles:
            if np.imag(pole) == 0.0:
                n_poles_real += 1
            else:
                n_poles_cplx += 1
        # each real pole occupies one state, each complex-conjugate pair two states;
        # the full pole set is repeated once per port
        n_matrix = (n_poles_real + 2 * n_poles_cplx) * n_ports

        # state-space matrix A holds the poles on the diagonal as real values with imaginary parts on the sub-diagonal
        # state-space matrix B holds coefficients (1, 2, or 0), depending on the respective type of pole in A
        # assemble A = [[poles_real, 0, 0],
        #               [0, real(poles_cplx), imag(poles_cplx],
        #               [0, -imag(poles_cplx), real(poles_cplx]]
        # (every diagonal entry of the identity start value is overwritten below, since i_A
        # advances exactly n_matrix steps over the two nested loops)
        A = np.identity(n_matrix)
        B = np.zeros(shape=(n_matrix, n_ports))
        i_A = 0  # index on diagonal of A
        for j in range(n_ports):
            for pole in self.poles:
                if np.imag(pole) == 0.0:
                    # adding a real pole
                    A[i_A, i_A] = np.real(pole)
                    B[i_A, j] = 1
                    i_A += 1
                else:
                    # adding a complex-conjugate pole as a 2x2 real block
                    A[i_A, i_A] = np.real(pole)
                    A[i_A, i_A + 1] = np.imag(pole)
                    A[i_A + 1, i_A] = -1 * np.imag(pole)
                    A[i_A + 1, i_A + 1] = np.real(pole)
                    B[i_A, j] = 2
                    i_A += 2

        # state-space matrix C holds the residues
        # assemble C = [[R1.11, R1.12, R1.13, ...], [R2.11, R2.12, R2.13, ...], ...]
        # column layout mirrors the per-port pole ordering used for A and B above:
        # a real residue occupies one column, a complex residue two adjacent columns (real, imag)
        C = np.zeros(shape=(n_ports, n_matrix))
        for i in range(n_ports):
            for j in range(n_ports):
                # i: row index
                # j: column index
                i_response = i * n_ports + j
                j_residues = 0
                for zero in self.residues[i_response]:
                    if np.imag(zero) == 0.0:
                        C[i, j * (n_poles_real + 2 * n_poles_cplx) + j_residues] = np.real(zero)
                        j_residues += 1
                    else:
                        C[i, j * (n_poles_real + 2 * n_poles_cplx) + j_residues] = np.real(zero)
                        C[i, j * (n_poles_real + 2 * n_poles_cplx) + j_residues + 1] = np.imag(zero)
                        j_residues += 2

        # state-space matrix D holds the constants
        # assemble D = [[d11, d12, ...], [d21, d22, ...], ...]
        D = np.zeros(shape=(n_ports, n_ports))
        for i in range(n_ports):
            for j in range(n_ports):
                # i: row index
                # j: column index
                i_response = i * n_ports + j
                D[i, j] = self.constant_coeff[i_response]

        # state-space matrix E holds the proportional coefficients (usually 0 in case of fitted S-parameters)
        # assemble E = [[e11, e12, ...], [e21, e22, ...], ...]
        E = np.zeros(shape=(n_ports, n_ports))
        for i in range(n_ports):
            for j in range(n_ports):
                # i: row index
                # j: column index
                i_response = i * n_ports + j
                E[i, j] = self.proportional_coeff[i_response]

        return A, B, C, D, E
@staticmethod
def _get_s_from_ABCDE(freq: float,
A: np.ndarray, B: np.ndarray, C: np.ndarray, D: np.ndarray, E: np.ndarray) -> np.ndarray:
"""
Private method.
Returns the S-matrix of the vector fitted model calculated from the real-valued system matrices of the state-
space representation, as provided by `_get_ABCDE()`.
Parameters
----------
freq : float
Frequency (in Hz) at which to calculate the S-matrix.
A : ndarray
B : ndarray
C : ndarray
D : ndarray
E : ndarray
Returns
-------
ndarray
Complex-valued S-matrix (NxN) calculated at frequency `freq`.
"""
dim_A = np.shape(A)[0]
stsp_poles = np.linalg.inv(2j * np.pi * freq * np.identity(dim_A) - A)
stsp_S = np.matmul(np.matmul(C, stsp_poles), B)
stsp_S += D + 2j * np.pi * freq * E
return stsp_S
def passivity_test(self, parameter_type: str = 's') -> np.ndarray:
"""
Evaluates the passivity of reciprocal vector fitted models by means of a half-size test matrix [#]_. Any
existing frequency bands of passivity violations will be returned as a sorted list.
Parameters
----------
parameter_type: str, optional
Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
*impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). Currently, only scattering
parameters are supported for passivity evaluation.
Raises
------
NotImplementedError
If the function is called for `parameter_type` different than `S` (scattering).
ValueError
If the function is used with a model containing nonzero proportional coefficients.
Returns
-------
violation_bands : ndarray
NumPy array with frequency bands of passivity violation:
`[[f_start_1, f_stop_1], [f_start_2, f_stop_2], ...]`.
See Also
--------
is_passive : Query the model passivity as a boolean value.
passivity_enforce : Enforces the passivity of the vector fitted model, if required.
References
----------
.. [#] B. Gustavsen and A. Semlyen, "Fast Passivity Assessment for S-Parameter Rational Models Via a Half-Size
Test Matrix," in IEEE Transactions on Microwave Theory and Techniques, vol. 56, no. 12, pp. 2701-2708,
Dec. 2008, DOI: 10.1109/TMTT.2008.2007319.
"""
if parameter_type.lower() != 's':
raise NotImplementedError('Passivity testing is currently only supported for scattering (S) parameters.')
if parameter_type.lower() == 's' and len(np.flatnonzero(self.proportional_coeff)) > 0:
raise ValueError('Passivity testing of scattering parameters with nonzero proportional coefficients does '
'not make any sense; you need to run vector_fit() with option `fit_proportional=False` '
'first.')
# # the network needs to be reciprocal for this passivity test method to work: S = transpose(S)
# if not np.allclose(self.residues, np.transpose(self.residues)) or \
# not np.allclose(self.constant_coeff, np.transpose(self.constant_coeff)) or \
# not np.allclose(self.proportional_coeff, np.transpose(self.proportional_coeff)):
# logging.error('Passivity testing with unsymmetrical model parameters is not supported. '
# 'The model needs to be reciprocal.')
# return
# get state-space matrices
A, B, C, D, E = self._get_ABCDE()
n_ports = np.shape(D)[0]
# build half-size test matrix P from state-space matrices A, B, C, D
inv_neg = np.linalg.inv(D - np.identity(n_ports))
inv_pos = np.linalg.inv(D + np.identity(n_ports))
prod_neg = np.matmul(np.matmul(B, inv_neg), C)
prod_pos = np.matmul(np.matmul(B, inv_pos), C)
P = np.matmul(A - prod_neg, A - prod_pos)
# extract eigenvalues of P
P_eigs = np.linalg.eigvals(P)
# purely imaginary square roots of eigenvalues identify frequencies (2*pi*f) of borders of passivity violations
freqs_violation = []
for sqrt_eigenval in np.sqrt(P_eigs):
if np.real(sqrt_eigenval) == 0.0:
freqs_violation.append(np.imag(sqrt_eigenval) / 2 / np.pi)
# sort the output from lower to higher frequencies
freqs_violation = np.sort(freqs_violation)
# identify frequency bands of passivity violations
# sweep the bands between crossover frequencies and identify bands of passivity violations
violation_bands = []
for i, freq in enumerate(freqs_violation):
if i == 0:
f_start = 0
f_stop = freq
else:
f_start = freqs_violation[i - 1]
f_stop = freq
# calculate singular values at the center frequency between crossover frequencies to identify violations
f_center = 0.5 * (f_start + f_stop)
s_center = self._get_s_from_ABCDE(f_center, A, B, C, D, E)
u, sigma, vh = np.linalg.svd(s_center)
passive = True
for singval in sigma:
if singval > 1:
# passivity violation in this band
passive = False
if not passive:
# add this band to the list of passivity violations
if violation_bands is None:
violation_bands = [[f_start, f_stop]]
else:
violation_bands.append([f_start, f_stop])
return np.array(violation_bands)
def is_passive(self, parameter_type: str = 's') -> bool:
"""
Returns the passivity status of the model as a boolean value.
Parameters
----------
parameter_type : str, optional
Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
*impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). Currently, only scattering
parameters are supported for passivity evaluation.
Returns
-------
passivity : bool
:attr:`True` if model is passive, else :attr:`False`.
See Also
--------
passivity_test : Verbose passivity evaluation routine.
passivity_enforce : Enforces the passivity of the vector fitted model, if required.
"""
viol_bands = self.passivity_test(parameter_type)
if len(viol_bands) == 0:
return True
else:
return False
    def passivity_enforce(self, n_samples: int = 100, parameter_type: str = 's') -> None:
        """
        Enforces the passivity of the vector fitted model, if required. This is an implementation of the method
        presented in [#]_.

        Parameters
        ----------
        n_samples : int, optional
            Number of linearly spaced frequency samples at which passivity will be evaluated and enforced.
            (Default: 100)
        parameter_type : str, optional
            Representation type of the fitted frequency responses. Either *scattering* (:attr:`s` or :attr:`S`),
            *impedance* (:attr:`z` or :attr:`Z`) or *admittance* (:attr:`y` or :attr:`Y`). Currently, only scattering
            parameters are supported for passivity evaluation.

        Returns
        -------
        None

        Raises
        ------
        NotImplementedError
            If the function is called for `parameter_type` different than `S` (scattering).
        ValueError
            If the function is used with a model containing nonzero proportional coefficients.

        See Also
        --------
        is_passive : Returns the passivity status of the model as a boolean value.
        passivity_test : Verbose passivity evaluation routine.
        plot_passivation : Convergence plot for passivity enforcement iterations.

        References
        ----------
        .. [#] T. Dhaene, D. Deschrijver and N. Stevens, "Efficient Algorithm for Passivity Enforcement of S-Parameter-
            Based Macromodels," in IEEE Transactions on Microwave Theory and Techniques, vol. 57, no. 2, pp. 415-420,
            Feb. 2009, DOI: 10.1109/TMTT.2008.2011201.
        """
        if parameter_type.lower() != 's':
            raise NotImplementedError('Passivity testing is currently only supported for scattering (S) parameters.')
        if parameter_type.lower() == 's' and len(np.flatnonzero(self.proportional_coeff)) > 0:
            raise ValueError('Passivity testing of scattering parameters with nonzero proportional coefficients does '
                             'not make any sense; you need to run vector_fit() with option `fit_proportional=False` '
                             'first.')
        # always run passivity test first; this will write 'self.violation_bands'
        violation_bands = self.passivity_test()
        if len(violation_bands) == 0:
            # model is already passive; do nothing and return
            logging.info('Passivity enforcement: The model is already passive. Nothing to do.')
            return
        # evaluation grid from dc up to 20 % above the upper edge of the highest violation band
        freqs_eval = np.linspace(0, 1.2 * violation_bands[-1, -1], n_samples)
        A, B, C, D, E = self._get_ABCDE()
        dim_A = np.shape(A)[0]
        # C_t holds the residues being perturbed; starts from the fitted C and is updated each iteration
        C_t = C
        delta = 0.999  # predefined tolerance parameter (users should not need to change this)

        # iterative compensation of passivity violations
        t = 0
        self.history_max_sigma = []
        while t < self.max_iterations:
            logging.info('Passivity enforcement; Iteration {}'.format(t + 1))
            A_matrix = []
            b_vector = []
            sigma_max = 0
            # sweep through evaluation frequencies
            for i_eval, freq_eval in enumerate(freqs_eval):
                # calculate S-matrix at this frequency
                s_eval = self._get_s_from_ABCDE(freq_eval, A, B, C_t, D, E)
                # singular value decomposition
                u, sigma, vh = np.linalg.svd(s_eval)
                # keep track of the greatest singular value in every iteration step
                if np.amax(sigma) > sigma_max:
                    sigma_max = np.amax(sigma)
                # prepare and fill the square matrices 'gamma' and 'psi' marking passivity violations
                # (gamma selects the violating singular values; psi caps them at delta)
                gamma = np.diag(sigma)
                psi = np.diag(sigma)
                for i, sigma_i in enumerate(sigma):
                    if sigma_i <= delta:
                        gamma[i, i] = 0
                        psi[i, i] = 0
                    else:
                        gamma[i, i] = 1
                        psi[i, i] = delta
                # calculate violation S-matrix (the part of S exceeding the delta threshold)
                # s_viol is again a complex NxN S-matrix (N: number of network ports)
                sigma_viol = np.matmul(np.diag(sigma), gamma) - psi
                s_viol = np.matmul(np.matmul(u, sigma_viol), vh)
                # Laplace frequency of this sample in the sweep
                s_k = 2j * np.pi * freq_eval
                # build matrix system for least-squares fitting of new set of violation residues C_viol
                # using rule for transpose of matrix products: transpose(A * B) = transpose(B) * transpose(A)
                # hence, S = C * coeffs <===> transpose(S) = transpose(coeffs) * transpose(C)
                coeffs = np.transpose(np.matmul(np.linalg.inv(s_k * np.identity(dim_A) - A), B))
                # stack real and imaginary parts so the least-squares problem stays real-valued
                if i_eval == 0:
                    A_matrix = np.vstack([np.real(coeffs), np.imag(coeffs)])
                    b_vector = np.vstack([np.real(np.transpose(s_viol)), np.imag(np.transpose(s_viol))])
                else:
                    A_matrix = np.concatenate((A_matrix, np.vstack([np.real(coeffs),
                                                                    np.imag(coeffs)])), axis=0)
                    b_vector = np.concatenate((b_vector, np.vstack([np.real(np.transpose(s_viol)),
                                                                    np.imag(np.transpose(s_viol))])), axis=0)
            # solve least squares for the violation residues
            x, residuals, rank, singular_vals = np.linalg.lstsq(A_matrix, b_vector, rcond=None)
            C_viol = np.transpose(x)
            # calculate and update C_t for next iteration: subtract the violating contribution
            C_t = C_t - C_viol
            t += 1
            self.history_max_sigma.append(sigma_max)
            # stop iterations when model is passive
            if sigma_max < 1.0:
                break

        # PASSIVATION PROCESS DONE; model is either passive or max. number of iterations have been exceeded
        if t == self.max_iterations:
            warnings.warn('Passivity enforcement: Aborting after the max. number of iterations has been exceeded.',
                          RuntimeWarning, stacklevel=2)

        # save/update model parameters (perturbed residues)
        # the column walk below mirrors the per-port pole ordering of C in _get_ABCDE():
        # one column per real pole, two adjacent columns (real, imag) per complex pole
        self.history_max_sigma = np.array(self.history_max_sigma)

        n_ports = np.shape(D)[0]
        for i in range(n_ports):
            k = 0  # column index in C_t
            for j in range(n_ports):
                i_response = i * n_ports + j
                z = 0  # column index self.residues
                for pole in self.poles:
                    if np.imag(pole) == 0.0:
                        # real pole --> real residue
                        self.residues[i_response, z] = C_t[i, k]
                        k += 1
                    else:
                        # complex-conjugate pole --> complex-conjugate residue
                        self.residues[i_response, z] = C_t[i, k] + 1j * C_t[i, k + 1]
                        k += 2
                    z += 1
def write_npz(self, path: str) -> None:
"""
Writes the model parameters in :attr:`poles`, :attr:`residues`,
:attr:`proportional_coeff` and :attr:`constant_coeff` to a labeled NumPy .npz file.
Parameters
----------
path : str
Target path without filename for the export. The filename will be added automatically based on the network
name in :attr:`network`
Returns
-------
None
See Also
--------
read_npz : Reads all model parameters from a .npz file
"""
if self.poles is None:
warnings.warn('Nothing to export; Poles have not been fitted.', RuntimeWarning, stacklevel=2)
return
if self.residues is None:
warnings.warn('Nothing to export; Residues have not been fitted.', RuntimeWarning, stacklevel=2)
return
if self.proportional_coeff is None:
warnings.warn('Nothing to export; Proportional coefficients have not been fitted.', RuntimeWarning,
stacklevel=2)
return
if self.constant_coeff is None:
warnings.warn('Nothing to export; Constants have not been fitted.', RuntimeWarning, stacklevel=2)
return
filename = self.network.name
logging.info('Exporting results as compressed NumPy array to {}'.format(path))
np.savez_compressed(os.path.join(path, 'coefficients_{}'.format(filename)),
poles=self.poles, residues=self.residues, proportionals=self.proportional_coeff,
constants=self.constant_coeff)
def read_npz(self, file: str) -> None:
"""
Reads all model parameters :attr:`poles`, :attr:`residues`, :attr:`proportional_coeff` and
:attr:`constant_coeff` from a labeled NumPy .npz file.
Parameters
----------
file : str
NumPy .npz file containing the parameters. See notes.
Returns
-------
None
Raises
------
ValueError
If the length of the parameters from the file does not match the size of the Network in :attr:`network`.
Notes
-----
The .npz file needs to include the model parameters as individual NumPy arrays (ndarray) labeled '*poles*',
'*residues*', '*proportionals*' and '*constants*'. The shapes of those arrays need to match the network
properties in :class:`network` (correct number of ports). Preferably, the .npz file was created by
:func:`write_npz`.
See Also
--------
write_npz : Writes all model parameters to a .npz file
"""
with np.load(file) as data:
poles = data['poles']
# legacy support for exported residues
if 'zeros' in data:
# old .npz file from deprecated write_npz() with residues called 'zeros'
residues = data['zeros']
else:
# new .npz file from current write_npz()
residues = data['residues']
proportional_coeff = data['proportionals']
constant_coeff = data['constants']
n_ports = int(np.sqrt(len(constant_coeff)))
n_resp = n_ports ** 2
if np.shape(residues)[0] == np.shape(proportional_coeff)[0] == np.shape(constant_coeff)[0] == n_resp:
self.poles = poles
self.residues = residues
self.proportional_coeff = proportional_coeff
self.constant_coeff = constant_coeff
else:
raise ValueError('Length of the provided parameters does not match the network size. Please initialize '
'VectorFitting with a suited Network first.')
def get_model_response(self, i: int, j: int, freqs: Any = None) -> np.ndarray:
"""
Returns one of the frequency responses :math:`H_{i+1,j+1}` of the fitted model :math:`H`.
Parameters
----------
i : int
Row index of the response in the response matrix.
j : int
Column index of the response in the response matrix.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
Returns
-------
response : ndarray
Model response :math:`H_{i+1,j+1}` at the frequencies specified in `freqs` (complex-valued Numpy array).
Examples
--------
Get fitted S11 at 101 frequencies from 0 Hz to 10 GHz:
>>> import skrf
>>> vf = skrf.VectorFitting(skrf.data.ring_slot)
>>> vf.vector_fit(3, 0)
>>> s11_fit = vf.get_model_response(0, 0, numpy.linspace(0, 10e9, 101))
"""
if self.poles is None:
warnings.warn('Returning a zero-vector; Poles have not been fitted.', RuntimeWarning, stacklevel=2)
return np.zeros_like(freqs)
if self.residues is None:
warnings.warn('Returning a zero-vector; Residues have not been fitted.', RuntimeWarning, stacklevel=2)
return np.zeros_like(freqs)
if self.proportional_coeff is None:
warnings.warn('Returning a zero-vector; Proportional coefficients have not been fitted.', RuntimeWarning,
stacklevel=2)
return np.zeros_like(freqs)
if self.constant_coeff is None:
warnings.warn('Returning a zero-vector; Constants have not been fitted.', RuntimeWarning, stacklevel=2)
return np.zeros_like(freqs)
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
s = 2j * np.pi * np.array(freqs)
n_ports = int(np.sqrt(len(self.constant_coeff)))
i_response = i * n_ports + j
residues = self.residues[i_response]
resp = self.proportional_coeff[i_response] * s + self.constant_coeff[i_response]
for i, pole in enumerate(self.poles):
if np.imag(pole) == 0.0:
# real pole
resp += residues[i] / (s - pole)
else:
# complex conjugate pole
resp += residues[i] / (s - pole) + np.conjugate(residues[i]) / (s - np.conjugate(pole))
return resp
@check_plotting
def plot_s_db(self, i: int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the magnitude in dB of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, 20 * np.log10(np.abs(self.network.s[:, i, j])), color='r', label='Samples')
ax.plot(freqs, 20 * np.log10(np.abs(self.get_model_response(i, j, freqs))), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Magnitude (dB)')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_mag(self, i: int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the magnitude in linear scale of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.abs(self.network.s[:, i, j]), color='r', label='Samples')
ax.plot(freqs, np.abs(self.get_model_response(i, j, freqs)), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Magnitude')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_deg(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the phase in degrees of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.rad2deg(np.angle(self.network.s[:, i, j])), color='r', label='Samples')
ax.plot(freqs, np.rad2deg(np.angle(self.get_model_response(i, j, freqs))), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Phase (Degrees)')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_deg_unwrap(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the unwrapped phase in degrees of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.rad2deg(np.unwrap(np.angle(self.network.s[:, i, j]))), color='r', label='Samples')
ax.plot(freqs, np.rad2deg(np.unwrap(np.angle(self.get_model_response(i, j, freqs)))), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Phase (Degrees)')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_re(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the real part of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.real(self.network.s[:, i, j]), color='r', label='Samples')
ax.plot(freqs, np.real(self.get_model_response(i, j, freqs)), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Real Part')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_im(self, i : int, j: int, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the imaginary part of the scattering parameter response :math:`S_{i+1,j+1}` in the fit.
Parameters
----------
i : int
Row index of the response.
j : int
Column index of the response.
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
ax.scatter(self.network.f, np.imag(self.network.s[:, i, j]), color='r', label='Samples')
ax.plot(freqs, np.imag(self.get_model_response(i, j, freqs)), color='k', label='Fit')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Imaginary Part')
ax.legend(loc='best')
ax.set_title('Response i={}, j={}'.format(i, j))
return ax
@check_plotting
def plot_s_singular(self, freqs: Any = None, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the singular values of the vector fitted S-matrix in linear scale.
Parameters
----------
freqs : list of float or ndarray or None, optional
List of frequencies for the response plot. If None, the sample frequencies of the fitted network in
:attr:`network` are used.
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if freqs is None:
freqs = np.linspace(np.amin(self.network.f), np.amax(self.network.f), 1000)
if ax is None:
ax = mplt.gca()
# get system matrices of state-space representation
A, B, C, D, E = self._get_ABCDE()
n_ports = np.shape(D)[0]
singvals = np.zeros((n_ports, len(freqs)))
# calculate and save singular values for each frequency
for i, f in enumerate(freqs):
u, sigma, vh = np.linalg.svd(self._get_s_from_ABCDE(f, A, B, C, D, E))
singvals[:, i] = sigma
# plot the frequency response of each singular value
for n in range(n_ports):
ax.plot(freqs, singvals[n, :], label=r'$\sigma_{}$'.format(n + 1))
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Magnitude')
ax.legend(loc='best')
return ax
@check_plotting
def plot_convergence(self, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the history of the model residue parameter **d_res** during the iterative pole relocation process of the
vector fitting, which should eventually converge to a fixed value. Additionally, the relative change of the
maximum singular value of the coefficient matrix **A** are plotted, which serve as a convergence indicator.
Parameters
----------
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if ax is None:
ax = mplt.gca()
ax.semilogy(np.arange(len(self.delta_max_history)) + 1, self.delta_max_history, color='darkblue')
ax.set_xlabel('Iteration step')
ax.set_ylabel('Max. relative change', color='darkblue')
ax2 = ax.twinx()
ax2.plot(np.arange(len(self.d_res_history)) + 1, self.d_res_history, color='orangered')
ax2.set_ylabel('Residue', color='orangered')
return ax
@check_plotting
def plot_passivation(self, ax: mplt.Axes = None) -> mplt.Axes:
"""
Plots the history of the greatest singular value during the iterative passivity enforcement process, which
should eventually converge to a value slightly lower than 1.0 or stop after reaching the maximum number of
iterations specified in the class variable :attr:`max_iterations`.
Parameters
----------
ax : :class:`matplotlib.Axes` object or None
matplotlib axes to draw on. If None, the current axes is fetched with :func:`gca()`.
Returns
-------
:class:`matplotlib.Axes`
matplotlib axes used for drawing. Either the passed :attr:`ax` argument or the one fetch from the current
figure.
"""
if ax is None:
ax = mplt.gca()
ax.plot(np.arange(len(self.history_max_sigma)) + 1, self.history_max_sigma)
ax.set_xlabel('Iteration step')
ax.set_ylabel('Max. singular value')
return ax
    def write_spice_subcircuit_s(self, file: str) -> None:
        """
        Creates an equivalent N-port SPICE subcircuit based on its vector fitted S parameter responses.

        Parameters
        ----------
        file : str
            Path and filename including file extension (usually .sp) for the SPICE subcircuit file.

        Returns
        -------
        None

        Notes
        -----
        In the SPICE subcircuit, all ports will share a common reference node (global SPICE ground on node 0). The
        equivalent circuit uses linear dependent current sources on all ports, which are controlled by the currents
        through equivalent admittances modelling the parameters from a vector fit. This approach is based on [#]_.

        Examples
        --------
        Load and fit the `Network`, then export the equivalent SPICE subcircuit:

        >>> nw_3port = skrf.Network('my3port.s3p')
        >>> vf = skrf.VectorFitting(nw_3port)
        >>> vf.vector_fit(n_poles_real=1, n_poles_cmplx=4)
        >>> vf.write_spice_subcircuit_s('/my3port_model.sp')

        References
        ----------
        .. [#] G. Antonini, "SPICE Equivalent Circuits of Frequency-Domain Responses", IEEE Transactions on
            Electromagnetic Compatibility, vol. 45, no. 3, pp. 502-512, August 2003,
            DOI: https://doi.org/10.1109/TEMC.2003.815528
        """
        # list of subcircuits for the equivalent admittances
        subcircuits = []
        # provides a unique SPICE subcircuit identifier (X1, X2, X3, ...)
        def get_new_subckt_identifier():
            subcircuits.append('X{}'.format(len(subcircuits) + 1))
            return subcircuits[-1]
        # use engineering notation for the numbers in the SPICE file (1000 --> 1k)
        formatter = EngFormatter(sep="", places=3, usetex=False)
        # replace "micron" sign by "u" and "mega" sign by "meg"
        # (SPICE understands 'u' and 'meg', not the matplotlib defaults)
        letters_dict = formatter.ENG_PREFIXES
        letters_dict.update({-6: 'u', 6: 'meg'})
        formatter.ENG_PREFIXES = letters_dict
        with open(file, 'w') as f:
            # write title line
            f.write('* EQUIVALENT CIRCUIT FOR VECTOR FITTED S-MATRIX\n')
            f.write('* Created using scikit-rf vectorFitting.py\n')
            f.write('*\n')
            # define the complete equivalent circuit as a subcircuit with one input node per port
            # those port nodes are labeled p1, p2, p3, ...
            # all ports share a common node for ground reference (node 0)
            str_input_nodes = ''
            for n in range(self.network.nports):
                str_input_nodes += 'p{} '.format(n + 1)
            f.write('.SUBCKT s_equivalent {}\n'.format(str_input_nodes))
            for n in range(self.network.nports):
                f.write('*\n')
                f.write('* port {}\n'.format(n + 1))
                # add port reference impedance z0 (has to be resistive, no imaginary part)
                # NOTE(review): only z0 of the first frequency point is used — assumes z0
                # is frequency-independent; confirm for networks with swept z0.
                f.write('R{} a{} 0 {}\n'.format(n + 1, n + 1, np.real(self.network.z0[0, n])))
                # add dummy voltage sources (V=0) to measure the input current
                f.write('V{} p{} a{} 0\n'.format(n + 1, n + 1, n + 1))
                # CCVS and VCVS driving the transfer admittances with a = V/2/sqrt(Z0) + I/2*sqrt(Z0)
                # In
                f.write('H{} nt{} nts{} V{} {}\n'.format(n + 1, n + 1, n + 1, n + 1, np.real(self.network.z0[0, n])))
                # Vn
                f.write('E{} nts{} 0 p{} 0 {}\n'.format(n + 1, n + 1, n + 1, 1))
                for j in range(self.network.nports):
                    f.write('* transfer network for s{}{}\n'.format(n + 1, j + 1))
                    # stacking order in VectorFitting class variables:
                    # s11, s12, s13, ..., s21, s22, s23, ...
                    i_response = n * self.network.nports + j
                    # add CCCS to generate the scattered current I_nj at port n
                    # control current is measured by the dummy voltage source at the transfer network Y_nj
                    # the scattered current is injected into the port (source positive connected to ground)
                    f.write('F{}{} 0 a{} V{}{} {}\n'.format(n + 1, j + 1, n + 1, n + 1, j + 1,
                                                            formatter(1 / np.real(self.network.z0[0, n]))))
                    # the *_inv twin carries the sign-flipped contributions (see residue flip below)
                    f.write('F{}{}_inv a{} 0 V{}{}_inv {}\n'.format(n + 1, j + 1, n + 1, n + 1, j + 1,
                                                                    formatter(1 / np.real(self.network.z0[0, n]))))
                    # add dummy voltage source (V=0) in series with Y_nj to measure current through transfer admittance
                    f.write('V{}{} nt{} nt{}{} 0\n'.format(n + 1, j + 1, j + 1, n + 1, j + 1))
                    f.write('V{}{}_inv nt{} nt{}{}_inv 0\n'.format(n + 1, j + 1, j + 1, n + 1, j + 1))
                    # add corresponding transfer admittance Y_nj, which is modulating the control current
                    # the transfer admittance is a parallel circuit (sum) of individual admittances
                    f.write('* transfer admittances for S{}{}\n'.format(n + 1, j + 1))
                    # start with proportional and constant term of the model
                    # H(s) = d + s * e model
                    # Y(s) = G + s * C equivalent admittance
                    g = self.constant_coeff[i_response]
                    c = self.proportional_coeff[i_response]
                    # add R for constant term
                    # (negative terms hang off the *_inv node so the element value stays positive)
                    if g < 0:
                        f.write('R{}{} nt{}{}_inv 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(np.abs(1 / g))))
                    elif g > 0:
                        f.write('R{}{} nt{}{} 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(1 / g)))
                    # add C for proportional term
                    if c < 0:
                        f.write('C{}{} nt{}{}_inv 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(np.abs(c))))
                    elif c > 0:
                        f.write('C{}{} nt{}{} 0 {}\n'.format(n + 1, j + 1, n + 1, j + 1, formatter(c)))
                    # add pairs of poles and residues
                    for i_pole in range(len(self.poles)):
                        pole = self.poles[i_pole]
                        residue = self.residues[i_response, i_pole]
                        node = get_new_subckt_identifier() + ' nt{}{}'.format(n + 1, j + 1)
                        if np.real(residue) < 0.0:
                            # multiplication with -1 required, otherwise the values for RLC would be negative
                            # this gets compensated by inverting the transfer current direction for this subcircuit
                            residue = -1 * residue
                            node += '_inv'
                        if np.imag(pole) == 0.0:
                            # real pole; add rl_admittance
                            l = 1 / np.real(residue)
                            r = -1 * np.real(pole) / np.real(residue)
                            f.write(node + ' 0 rl_admittance res={} ind={}\n'.format(formatter(r), formatter(l)))
                        else:
                            # complex pole of a conjugate pair; add rcl_vccs_admittance
                            l = 1 / (2 * np.real(residue))
                            b = -2 * (np.real(residue) * np.real(pole) + np.imag(residue) * np.imag(pole))
                            r = -1 * np.real(pole) / np.real(residue)
                            c = 2 * np.real(residue) / (np.abs(pole) ** 2)
                            gm_add = b * l * c
                            # the VCCS multiplier only carries the sign; magnitude goes into gm
                            if gm_add < 0:
                                m = -1
                            else:
                                m = 1
                            f.write(node + ' 0 rcl_vccs_admittance res={} cap={} ind={} gm={} mult={}\n'.format(
                                formatter(r),
                                formatter(c),
                                formatter(l),
                                formatter(np.abs(gm_add)),
                                int(m)))
            f.write('.ENDS s_equivalent\n')
            f.write('*\n')
            # subcircuit for an active RCL+VCCS equivalent admittance Y(s) of a complex-conjugate pole-residue pair H(s)
            # Residue: c = c' + j * c"
            # Pole: p = p' + j * p"
            # H(s) = c / (s - p) + conj(c) / (s - conj(p))
            # = (2 * c' * s - 2 * (c'p' + c"p")) / (s ** 2 - 2 * p' * s + |p| ** 2)
            # Y(S) = (1 / L * s + b) / (s ** 2 + R / L * s + 1 / (L * C))
            # note: the literal {braces} below are SPICE subcircuit parameters, not Python format fields
            f.write('.SUBCKT rcl_vccs_admittance n_pos n_neg res=1k cap=1n ind=100p gm=1m mult=1\n')
            f.write('L1 n_pos 1 {ind}\n')
            f.write('C1 1 2 {cap}\n')
            f.write('R1 2 n_neg {res}\n')
            f.write('G1 n_pos n_neg 1 2 {gm} m={mult}\n')
            f.write('.ENDS rcl_vccs_admittance\n')
            f.write('*\n')
            # subcircuit for a passive RL equivalent admittance Y(s) of a real pole-residue pair H(s)
            # H(s) = c / (s - p)
            # Y(s) = 1 / L / (s + s * R / L)
            f.write('.SUBCKT rl_admittance n_pos n_neg res=1k ind=100p\n')
            f.write('L1 n_pos 1 {ind}\n')
            f.write('R1 1 n_neg {res}\n')
            f.write('.ENDS rl_admittance\n')
|
#
# this test corresponds to the branch4 (using prerefl) in test_shadow4_mirrors.ows in shadow4/oasys_workspaces
#
import numpy
from syned.beamline.beamline import BeamlineElement
from syned.beamline.optical_elements.mirrors.mirror import Mirror as SyMirror
from syned.beamline.element_coordinates import ElementCoordinates
from shadow4.syned.shape import Plane, Sphere, Ellipsoid, Paraboloid, Hyperboloid # TODO from syned.beamline.shape
from shadow4.beam.beam import Beam
from shadow4.optical_elements.screen import Screen
from Shadow.ShadowTools import plotxy
from shadow4.compatibility.beam3 import Beam3
from shadow4.syned.shape import MultiplePatch
from shadow4.syned.shape import Rectangle, Ellipse, TwoEllipses # TODO from syned.beamline.shape
from shadow4.optical_elements.mirror import Mirror
from numpy.testing import assert_almost_equal
def run_shadow3():
    """Run the reference shadow3 simulation: a point source traced through
    one plane mirror. Returns ``(source, beam, oe1)`` where *source* is the
    untraced source beam, *beam* the beam after the mirror, and *oe1* the
    shadow3 optical-element description.

    Generated originally with ShadowTools.make_python_script_from_list().
    """
    import Shadow
    import numpy  # kept from the generated script (unused here)

    # write (1) or not (0) SHADOW files start.xx end.xx star.xx
    iwrite = 0

    # source description; meaning of the variables:
    # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
    source_settings = {
        'FDISTR': 3, 'F_COHER': 1,
        'HDIV1': 0.0, 'HDIV2': 0.0,
        'IDO_VX': 0, 'IDO_VZ': 0,
        'IDO_X_S': 0, 'IDO_Y_S': 0, 'IDO_Z_S': 0,
        'ISTAR1': 5676561, 'NPOINT': 100000,
        'PH1': 5.0, 'POL_DEG': 0.5,
        'SIGDIX': 1e-06, 'SIGDIZ': 1e-06,
        'SIGMAX': 0.0, 'SIGMAZ': 0.0,
        'VDIV1': 0.0, 'VDIV2': 0.0,
    }
    # optical element description; meaning of the variables:
    # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
    oe_settings = {
        'DUMMY': 100.0, 'FHIT_C': 1,
        'FILE_REFL': b'/Users/srio/Oasys/SiC.dat',
        'FWRITE': 0,
        'F_REFLEC': 0,  # 1
        'RLEN1': 5e-05, 'RLEN2': 5e-05,
        'RWIDX1': 2e-05, 'RWIDX2': 2e-05,
        'T_IMAGE': 6.0, 'T_INCIDENCE': 88.8, 'T_REFLECTION': 88.8,
    }

    oe0 = Shadow.Source()
    oe1 = Shadow.OE()
    for attribute, value in source_settings.items():
        setattr(oe0, attribute, value)
    for attribute, value in oe_settings.items():
        setattr(oe1, attribute, value)

    # run SHADOW to create the source; generated twice from the same seeded
    # description: 'source' is kept pristine, 'beam' gets traced through oe1
    beam = Shadow.Beam()
    source = Shadow.Beam()
    if iwrite:
        oe0.write("start.00")
    source.genSource(oe0)
    beam.genSource(oe0)
    if iwrite:
        oe0.write("end.00")
        beam.write("begin.dat")

    # run optical element 1
    print(" Running optical element: %d" % (1))
    if iwrite:
        oe1.write("start.01")
    beam.traceOE(oe1, 1)
    if iwrite:
        oe1.write("end.01")
        beam.write("star.01")

    return source, beam, oe1
if __name__ == "__main__":
    from srxraylib.plot.gol import set_qt
    set_qt()
    # toggles: interactive plots off, numeric regression checks on
    do_plot = False
    do_assert = True
    OASYS_HOME = "/Users/srio/Oasys/"
    # reference run with shadow3; beam0 is the shadow4 copy of the source rays
    source3, beam3, oe1 = run_shadow3()
    beam0 = Beam.initialize_from_array(source3.rays)
    #
    # syned definitions
    #
    # surface shape
    surface_shape = Plane()
    # boundaries (mirror half-lengths/half-widths, matching oe1.RLEN*/RWIDX*)
    rlen1 = 5e-05
    rlen2 = 5e-05
    rwidx1 = 2e-05
    rwidx2 = 2e-05
    boundary_shape = Rectangle(x_left=-rwidx2,x_right=rwidx1,y_bottom=-rlen2,y_top=rlen1)
    symirror1 = SyMirror(
        name="M1",
        surface_shape=surface_shape,
        boundary_shape=boundary_shape,
        coating=None, #"%s/SiC.dat" % OASYS_HOME,
        coating_thickness=None)
    # p/q/angle match oe1 (T_SOURCE=10 implied, T_IMAGE=6, T_INCIDENCE=88.8 deg)
    coordinates_syned = ElementCoordinates(p = 10.0,
                                           q = 6.0,
                                           angle_radial = 88.8 * numpy.pi / 180,)
    beamline_element_syned = BeamlineElement(optical_element=symirror1, coordinates=coordinates_syned)
    #
    # shadow definitions
    #
    mirror1 = Mirror(beamline_element_syned=beamline_element_syned)
    print(mirror1.info())
    #
    # run
    #
    beam1, mirr1 = mirror1.trace_beam(beam0)
    print(mirr1.info())
    #
    # check
    #
    if do_plot:
        plotxy(beam3, 1, 3, title="Image 1 shadow3", nbins=101, nolost=1)
        beam1s3 = Beam3.initialize_from_shadow4_beam(beam1)
        plotxy(beam1s3, 1, 3, title="Image 1 shadow4", nbins=101, nolost=1)
    # (removed stale commented-out beamstopper/slit experiment that used
    # undefined names SyBeamStopper/patches)
    #
    # compare all 18 ray columns of the image beams, shadow4 vs shadow3
    print("col# shadow4 shadow3 source")
    for i in range(18):
        print("col%d %20.10f %20.10f %20.10f " % (i+1, beam1.rays[10,i], beam3.rays[10,i], source3.rays[10,i]))
        if do_assert:
            assert_almost_equal (beam1.rays[:,i], beam3.rays[:,i], 4)
    # same comparison for the mirror footprint (shadow3 wrote mirr.01 only if
    # FWRITE allows it -- assumes the file exists from a previous run)
    mirr3 = Beam3(N=beam0.rays.shape[0])
    mirr3.load("mirr.01")
    print("\ncol# m-shadow4 m-shadow3 source")
    for i in range(18):
        print("col%d %20.10f %20.10f %20.10f " % (i+1, mirr1.rays[10,i], mirr3.rays[10,i], source3.rays[10,i]))
        if do_assert:
            assert_almost_equal (mirr1.rays[:,i], mirr3.rays[:,i], 4)
cosmetics
#
# this test corresponds to the branch4 (using prerefl) in test_shadow4_mirrors.ows in shadow4/oasys_workspaces
#
import numpy
from syned.beamline.beamline import BeamlineElement
from syned.beamline.optical_elements.mirrors.mirror import Mirror as SyMirror
from syned.beamline.element_coordinates import ElementCoordinates
from shadow4.syned.shape import Plane, Sphere, Ellipsoid, Paraboloid, Hyperboloid # TODO from syned.beamline.shape
from shadow4.beam.beam import Beam
from shadow4.optical_elements.screen import Screen
from Shadow.ShadowTools import plotxy
from shadow4.compatibility.beam3 import Beam3
from shadow4.syned.shape import MultiplePatch
from shadow4.syned.shape import Rectangle, Ellipse, TwoEllipses # TODO from syned.beamline.shape
from shadow4.optical_elements.mirror import Mirror
from numpy.testing import assert_almost_equal
def run_shadow3():
    """Run the reference shadow3 simulation: a point source traced through
    one plane mirror. Returns ``(source, beam, oe1)`` where *source* is the
    untraced source beam, *beam* the beam after the mirror, and *oe1* the
    shadow3 optical-element description.

    Generated originally with ShadowTools.make_python_script_from_list().
    """
    import Shadow
    import numpy  # kept from the generated script (unused here)

    # write (1) or not (0) SHADOW files start.xx end.xx star.xx
    iwrite = 0

    # source description; meaning of the variables:
    # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
    source_settings = {
        'FDISTR': 3, 'F_COHER': 1,
        'HDIV1': 0.0, 'HDIV2': 0.0,
        'IDO_VX': 0, 'IDO_VZ': 0,
        'IDO_X_S': 0, 'IDO_Y_S': 0, 'IDO_Z_S': 0,
        'ISTAR1': 5676561, 'NPOINT': 100000,
        'PH1': 5.0, 'POL_DEG': 0.5,
        'SIGDIX': 1e-06, 'SIGDIZ': 1e-06,
        'SIGMAX': 0.0, 'SIGMAZ': 0.0,
        'VDIV1': 0.0, 'VDIV2': 0.0,
    }
    # optical element description; meaning of the variables:
    # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
    oe_settings = {
        'DUMMY': 100.0, 'FHIT_C': 1,
        'FILE_REFL': b'/Users/srio/Oasys/SiC.dat',
        'FWRITE': 0,
        'F_REFLEC': 0,  # 1
        'RLEN1': 5e-05, 'RLEN2': 5e-05,
        'RWIDX1': 2e-05, 'RWIDX2': 2e-05,
        'T_IMAGE': 6.0, 'T_INCIDENCE': 88.8, 'T_REFLECTION': 88.8,
    }

    oe0 = Shadow.Source()
    oe1 = Shadow.OE()
    for attribute, value in source_settings.items():
        setattr(oe0, attribute, value)
    for attribute, value in oe_settings.items():
        setattr(oe1, attribute, value)

    # run SHADOW to create the source; generated twice from the same seeded
    # description: 'source' is kept pristine, 'beam' gets traced through oe1
    beam = Shadow.Beam()
    source = Shadow.Beam()
    if iwrite:
        oe0.write("start.00")
    source.genSource(oe0)
    beam.genSource(oe0)
    if iwrite:
        oe0.write("end.00")
        beam.write("begin.dat")

    # run optical element 1
    print(" Running optical element: %d" % (1))
    if iwrite:
        oe1.write("start.01")
    beam.traceOE(oe1, 1)
    if iwrite:
        oe1.write("end.01")
        beam.write("star.01")

    return source, beam, oe1
if __name__ == "__main__":
    from srxraylib.plot.gol import set_qt
    set_qt()
    # toggles: interactive plots off, numeric regression checks on
    do_plot = False
    do_assert = True
    OASYS_HOME = "/Users/srio/Oasys/"
    # reference run with shadow3; beam0 is the shadow4 copy of the source rays
    source3, beam3, oe1 = run_shadow3()
    beam0 = Beam.initialize_from_array(source3.rays)
    #
    # syned definitions
    #
    # surface shape
    surface_shape = Plane()
    # boundaries (mirror half-lengths/half-widths, matching oe1.RLEN*/RWIDX*)
    rlen1 = 5e-05
    rlen2 = 5e-05
    rwidx1 = 2e-05
    rwidx2 = 2e-05
    boundary_shape = Rectangle(x_left=-rwidx2,x_right=rwidx1,y_bottom=-rlen2,y_top=rlen1)
    symirror1 = SyMirror(
        name="M1",
        surface_shape=surface_shape,
        boundary_shape=boundary_shape,
        coating=None, #"%s/SiC.dat" % OASYS_HOME,
        coating_thickness=None)
    # p/q/angle match oe1 (T_SOURCE=10 implied, T_IMAGE=6, T_INCIDENCE=88.8 deg)
    coordinates_syned = ElementCoordinates(p = 10.0,
                                           q = 6.0,
                                           angle_radial = 88.8 * numpy.pi / 180,)
    beamline_element_syned = BeamlineElement(optical_element=symirror1, coordinates=coordinates_syned)
    #
    # shadow definitions
    #
    mirror1 = Mirror(beamline_element_syned=beamline_element_syned)
    print(mirror1.info())
    #
    # run
    #
    # flag_lost_value marks rays lost on this element with -11000
    beam1, mirr1 = mirror1.trace_beam(beam0, flag_lost_value=-11000.0)
    print(mirr1.info())
    #
    # check
    #
    if do_plot:
        plotxy(beam3, 1, 3, title="Image 1 shadow3", nbins=101, nolost=1)
        beam1s3 = Beam3.initialize_from_shadow4_beam(beam1)
        plotxy(beam1s3, 1, 3, title="Image 1 shadow4", nbins=101, nolost=1)
    # (removed stale commented-out beamstopper/slit experiment that used
    # undefined names SyBeamStopper/patches)
    #
    # compare all 18 ray columns of the mirror footprints, shadow4 vs shadow3
    # (shadow3 footprint is read from mirr.01 -- assumes it exists from a
    # previous run with FWRITE enabled)
    mirr3 = Beam3(N=beam0.rays.shape[0])
    mirr3.load("mirr.01")
    print("\ncol# m-shadow4 m-shadow3 source")
    for i in range(18):
        print("col%d %20.10f %20.10f %20.10f " % (i+1, mirr1.rays[10,i], mirr3.rays[10,i], source3.rays[10,i]))
        if do_assert:
            assert_almost_equal (mirr1.rays[:,i], mirr3.rays[:,i], 1)
    # same comparison for the image beams
    print("\ncol# shadow4 shadow3 source")
    for i in range(18):
        print("col%d %20.10f %20.10f %20.10f " % (i+1, beam1.rays[10,i], beam3.rays[10,i], source3.rays[10,i]))
        if do_assert:
            assert_almost_equal (beam1.rays[:,i], beam3.rays[:,i], 1)
# -*- coding: utf-8 -*-
"""
treebeard.ns_tree
-----------------
Nested Sets Tree.
:copyright: 2008-2010 by Gustavo Picon
:license: Apache License 2.0
An implementation of Nested Sets trees for Django 1.0+, as described by
`Joe Celko`_ in `Trees and Hierarchies in SQL for Smarties`_.
Nested sets have very efficient reads at the cost of high maintenance on
write/delete operations.
.. _`Joe Celko`: http://www.celko.com/
.. _`Trees and Hierarchies in SQL for Smarties`:
http://www.elsevier.com/wps/product/cws_home/702605
"""
import operator
from django.db.models import Q
from django.core import serializers
from django.db import models, transaction, connection
from treebeard.models import Node
from treebeard.exceptions import InvalidMoveToDescendant, PathOverflow
class NS_NodeQuerySet(models.query.QuerySet):
    """
    Custom queryset for the tree node manager.

    Needed only for the customized delete method.
    """

    def delete(self, removed_ranges=None):
        """
        Custom delete method, will remove all descendant nodes to ensure a
        consistent tree (no orphans)

        :param removed_ranges: internal recursion argument; a list of
            ``(tree_id, lft, rgt)`` intervals whose rows are being removed.
            Callers should not pass it.
        :returns: ``None``
        """
        if removed_ranges is not None:
            # we already know the children, let's call the default django
            # delete method and let it handle the removal of the user's
            # foreign keys...
            super(NS_NodeQuerySet, self).delete()
            cursor = connection.cursor()
            # Now closing the gap (Celko's trees book, page 62)
            # We do this for every gap that was left in the tree when the nodes
            # were removed. If many nodes were removed, we're going to update
            # the same nodes over and over again. This would be probably
            # cheaper precalculating the gapsize per intervals, or just do a
            # complete reordering of the tree (uses COUNT)...
            # sorted(reverse=True) closes the right-most gaps first so earlier
            # updates don't shift the intervals of gaps yet to be closed
            for tree_id, drop_lft, drop_rgt in sorted(removed_ranges,
                                                      reverse=True):
                sql, params = self.model._get_close_gap_sql(drop_lft, drop_rgt,
                    tree_id)
                cursor.execute(sql, params)
        else:
            # we'll have to manually run through all the nodes that are going
            # to be deleted and remove nodes from the list if an ancestor is
            # already getting removed, since that would be redundant
            removed = {}
            for node in self.order_by('tree_id', 'lft'):
                found = False
                for rid, rnode in removed.items():
                    if node.is_descendant_of(rnode):
                        found = True
                        break
                if not found:
                    removed[node.id] = node
            # ok, got the minimal list of nodes to remove...
            # we must also remove their descendants
            toremove = []
            ranges = []
            for id, node in removed.items():
                toremove.append(Q(lft__range=(node.lft, node.rgt)) &
                                Q(tree_id=node.tree_id))
                ranges.append((node.tree_id, node.lft, node.rgt))
            if toremove:
                # NOTE(review): bare 'reduce' is the Python 2 builtin; needs
                # functools.reduce if this file is ever ported to Python 3
                self.model.objects.filter(
                    reduce(operator.or_, toremove)).delete(
                        removed_ranges=ranges)
        transaction.commit_unless_managed()
class NS_NodeManager(models.Manager):
    """Custom manager for nodes."""

    def get_query_set(self):
        """Return the default queryset: the custom :class:`NS_NodeQuerySet`,
        ordered as a depth-first traversal (tree by tree, left to right).
        """
        qset = NS_NodeQuerySet(self.model)
        return qset.order_by('tree_id', 'lft')
class NS_Node(Node):
"""
Abstract model to create your own Nested Sets Trees.
.. attribute:: node_order_by
Attribute: a list of model fields that will be used for node
ordering. When enabled, all tree operations will assume this ordering.
Example::
node_order_by = ['field1', 'field2', 'field3']
.. attribute:: depth
``PositiveIntegerField``, depth of a node in the tree. A root node
has a depth of *1*.
.. attribute:: lft
``PositiveIntegerField``
.. attribute:: rgt
``PositiveIntegerField``
.. attribute:: tree_id
``PositiveIntegerField``
"""
node_order_by = []
lft = models.PositiveIntegerField(db_index=True)
rgt = models.PositiveIntegerField(db_index=True)
tree_id = models.PositiveIntegerField(db_index=True)
depth = models.PositiveIntegerField(db_index=True)
objects = NS_NodeManager()
    @classmethod
    def add_root(cls, **kwargs):
        """
        Adds a root node to the tree.

        The new root becomes the last tree (highest ``tree_id``) unless
        :attr:`node_order_by` is set, in which case the insertion point is
        delegated to :meth:`add_sibling` in sorted mode.

        :returns: the newly created (and saved) node instance.

        See: :meth:`treebeard.Node.add_root`
        """
        # do we have a root node already?
        last_root = cls.get_last_root_node()
        if last_root and last_root.node_order_by:
            # there are root nodes and node_order_by has been set
            # delegate sorted insertion to add_sibling
            return last_root.add_sibling('sorted-sibling', **kwargs)
        if last_root:
            # adding the new root node as the last one
            newtree_id = last_root.tree_id + 1
        else:
            # adding the first root node
            newtree_id = 1
        # creating the new object
        newobj = cls(**kwargs)
        newobj.depth = 1
        newobj.tree_id = newtree_id
        # a childless node spans exactly two positions: (lft=1, rgt=2)
        newobj.lft = 1
        newobj.rgt = 2
        # saving the instance before returning it
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
@classmethod
def _move_right(cls, tree_id, rgt, lftmove=False, incdec=2):
if lftmove:
lftop = '>='
else:
lftop = '>'
sql = 'UPDATE %(table)s ' \
' SET lft = CASE WHEN lft %(lftop)s %(parent_rgt)d ' \
' THEN lft %(incdec)+d ' \
' ELSE lft END, ' \
' rgt = CASE WHEN rgt >= %(parent_rgt)d ' \
' THEN rgt %(incdec)+d ' \
' ELSE rgt END ' \
' WHERE rgt >= %(parent_rgt)d AND ' \
' tree_id = %(tree_id)s' % {
'table': connection.ops.quote_name(cls._meta.db_table),
'parent_rgt': rgt,
'tree_id': tree_id,
'lftop': lftop,
'incdec': incdec}
return sql, []
@classmethod
def _move_tree_right(cls, tree_id):
sql = 'UPDATE %(table)s ' \
' SET tree_id = tree_id+1 ' \
' WHERE tree_id >= %(tree_id)d' % {
'table': connection.ops.quote_name(cls._meta.db_table),
'tree_id': tree_id}
return sql, []
def add_child(self, **kwargs):
"""
Adds a child to the node.
See: :meth:`treebeard.Node.add_child`
"""
if not self.is_leaf():
# there are child nodes, delegate insertion to add_sibling
if self.node_order_by:
pos = 'sorted-sibling'
else:
pos = 'last-sibling'
last_child = self.get_last_child()
tmp = self.__class__.objects.get(pk=self.id)
last_child._cached_parent_obj = self
return last_child.add_sibling(pos, **kwargs)
# we're adding the first child of this node
sql, params = self.__class__._move_right(self.tree_id,
self.rgt, False, 2)
# creating a new object
newobj = self.__class__(**kwargs)
newobj.tree_id = self.tree_id
newobj.depth = self.depth + 1
newobj.lft = self.lft + 1
newobj.rgt = self.lft + 2
# this is just to update the cache
self.rgt = self.rgt + 2
newobj._cached_parent_obj = self
cursor = connection.cursor()
cursor.execute(sql, params)
# saving the instance before returning it
newobj.save()
transaction.commit_unless_managed()
return newobj
    def add_sibling(self, pos=None, **kwargs):
        """
        Adds a new node as a sibling to the current node object.

        :param pos: insertion position relative to this node; one of
            ``first-sibling``, ``left``, ``right``, ``last-sibling`` or
            ``sorted-sibling`` (normalized by ``_fix_add_sibling_opts``).
        :returns: the newly created (and saved) node instance.

        See: :meth:`treebeard.Node.add_sibling`
        """
        pos = self._fix_add_sibling_opts(pos)
        # creating a new object
        newobj = self.__class__(**kwargs)
        newobj.depth = self.depth
        sql = None
        target = self
        if target.is_root():
            # root sibling: the new node becomes a whole new tree
            newobj.lft = 1
            newobj.rgt = 2
            if pos == 'sorted-sibling':
                # resolve 'sorted-sibling' to a concrete position among roots
                siblings = list(target.get_sorted_pos_queryset(
                    target.get_siblings(), newobj))
                if siblings:
                    pos = 'left'
                    target = siblings[0]
                else:
                    pos = 'last-sibling'
            last_root = target.__class__.get_last_root_node()
            if pos == 'last-sibling' \
                    or (pos == 'right' and target == last_root):
                newobj.tree_id = last_root.tree_id + 1
            else:
                # make room: shift all trees at/after the target position
                newpos = {'first-sibling': 1,
                          'left': target.tree_id,
                          'right': target.tree_id + 1}[pos]
                sql, params = target.__class__._move_tree_right(newpos)
                newobj.tree_id = newpos
        else:
            newobj.tree_id = target.tree_id
            if pos == 'sorted-sibling':
                # resolve 'sorted-sibling' to a concrete position
                siblings = list(target.get_sorted_pos_queryset(
                    target.get_siblings(), newobj))
                if siblings:
                    pos = 'left'
                    target = siblings[0]
                else:
                    pos = 'last-sibling'
            if pos in ('left', 'right', 'first-sibling'):
                # normalize 'right' into 'left'-of-the-next-sibling (or
                # 'last-sibling'), and 'left'-of-the-first into 'first-sibling'
                siblings = list(target.get_siblings())
                if pos == 'right':
                    if target == siblings[-1]:
                        pos = 'last-sibling'
                    else:
                        pos = 'left'
                        found = False
                        for node in siblings:
                            if found:
                                target = node
                                break
                            elif node == target:
                                found = True
                if pos == 'left':
                    if target == siblings[0]:
                        pos = 'first-sibling'
                if pos == 'first-sibling':
                    target = siblings[0]
            move_right = self.__class__._move_right
            # open a 2-wide gap at the resolved position
            if pos == 'last-sibling':
                newpos = target.get_parent().rgt
                sql, params = move_right(target.tree_id, newpos, False, 2)
            elif pos == 'first-sibling':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos - 1, False, 2)
            elif pos == 'left':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos, True, 2)
            newobj.lft = newpos
            newobj.rgt = newpos + 1
        # saving the instance before returning it
        if sql:
            cursor = connection.cursor()
            cursor.execute(sql, params)
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
    def move(self, target, pos=None):
        """
        Moves the current node and all it's descendants to a new position
        relative to another node.

        The move is done in three raw-SQL steps: open a gap at the
        destination, relabel the moved subtree into the gap, then close the
        gap left at the origin.

        :raises InvalidMoveToDescendant: when *target* is a descendant of
            the node being moved.

        See: :meth:`treebeard.Node.move`
        """
        pos = self._fix_move_opts(pos)
        cls = self.__class__
        stmts = []
        parent = None
        if pos in ('first-child', 'last-child', 'sorted-child'):
            # moving to a child: rewrite as a sibling move relative to the
            # target's last child (or remember the parent if it has none)
            if target.is_leaf():
                parent = target
                pos = 'last-child'
            else:
                target = target.get_last_child()
                pos = {'first-child': 'first-sibling',
                       'last-child': 'last-sibling',
                       'sorted-child': 'sorted-sibling'}[pos]
        if target.is_descendant_of(self):
            raise InvalidMoveToDescendant("Can't move node to a descendant.")
        if self == target and (
              (pos == 'left') or \
              (pos in ('right', 'last-sibling') and \
                target == target.get_last_sibling()) or \
              (pos == 'first-sibling' and \
                target == target.get_first_sibling())):
            # special cases, not actually moving the node so no need to UPDATE
            return
        if pos == 'sorted-sibling':
            # resolve 'sorted-sibling' to a concrete position
            siblings = list(target.get_sorted_pos_queryset(
                target.get_siblings(), self))
            if siblings:
                pos = 'left'
                target = siblings[0]
            else:
                pos = 'last-sibling'
        if pos in ('left', 'right', 'first-sibling'):
            # normalize 'right' into 'left'-of-the-next-sibling (or
            # 'last-sibling'), and 'left'-of-the-first into 'first-sibling'
            siblings = list(target.get_siblings())
            if pos == 'right':
                if target == siblings[-1]:
                    pos = 'last-sibling'
                else:
                    pos = 'left'
                    found = False
                    for node in siblings:
                        if found:
                            target = node
                            break
                        elif node == target:
                            found = True
            if pos == 'left':
                if target == siblings[0]:
                    pos = 'first-sibling'
            if pos == 'first-sibling':
                target = siblings[0]
        # ok let's move this
        cursor = connection.cursor()
        move_right = cls._move_right
        # the subtree occupies 'gap' consecutive positions
        gap = self.rgt - self.lft + 1
        sql = None
        target_tree = target.tree_id
        # first make a hole
        if pos == 'last-child':
            newpos = parent.rgt
            sql, params = move_right(target.tree_id, newpos, False, gap)
        elif target.is_root():
            # moving next to a root: the subtree becomes its own tree
            newpos = 1
            if pos == 'last-sibling':
                target_tree = target.get_siblings().reverse()[0].tree_id + 1
            elif pos == 'first-sibling':
                target_tree = 1
                sql, params = cls._move_tree_right(1)
            elif pos == 'left':
                sql, params = cls._move_tree_right(target.tree_id)
        else:
            if pos == 'last-sibling':
                newpos = target.get_parent().rgt
                sql, params = move_right(target.tree_id, newpos, False, gap)
            elif pos == 'first-sibling':
                newpos = target.lft
                sql, params = move_right(target.tree_id,
                                         newpos - 1, False, gap)
            elif pos == 'left':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos, True, gap)
        if sql:
            cursor.execute(sql, params)
        # we reload 'self' because lft/rgt may have changed
        fromobj = cls.objects.get(pk=self.id)
        depthdiff = target.depth - fromobj.depth
        if parent:
            depthdiff += 1
        # move the tree to the hole
        sql = "UPDATE %(table)s " \
              " SET tree_id = %(target_tree)d, " \
              " lft = lft + %(jump)d , " \
              " rgt = rgt + %(jump)d , " \
              " depth = depth + %(depthdiff)d " \
              " WHERE tree_id = %(from_tree)d AND " \
              " lft BETWEEN %(fromlft)d AND %(fromrgt)d" % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'from_tree': fromobj.tree_id,
                  'target_tree': target_tree,
                  'jump': newpos - fromobj.lft,
                  'depthdiff': depthdiff,
                  'fromlft': fromobj.lft,
                  'fromrgt': fromobj.rgt}
        cursor.execute(sql, [])
        # close the gap
        sql, params = cls._get_close_gap_sql(fromobj.lft,
            fromobj.rgt, fromobj.tree_id)
        cursor.execute(sql, params)
        transaction.commit_unless_managed()
    @classmethod
    def _get_close_gap_sql(cls, drop_lft, drop_rgt, tree_id):
        # Build the SQL that closes the gap left by removing (or moving out)
        # the interval [drop_lft, drop_rgt] in tree *tree_id* (Celko's gap
        # closing): every lft/rgt beyond drop_lft is pulled back by the size
        # of the removed interval. Returns (sql, params); params is always
        # empty because the integer values are interpolated directly.
        sql = 'UPDATE %(table)s ' \
              ' SET lft = CASE ' \
              ' WHEN lft > %(drop_lft)d ' \
              ' THEN lft - %(gapsize)d ' \
              ' ELSE lft END, ' \
              ' rgt = CASE ' \
              ' WHEN rgt > %(drop_lft)d ' \
              ' THEN rgt - %(gapsize)d ' \
              ' ELSE rgt END ' \
              ' WHERE (lft > %(drop_lft)d ' \
              ' OR rgt > %(drop_lft)d) AND '\
              ' tree_id=%(tree_id)d' % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'gapsize': drop_rgt - drop_lft + 1,
                  'drop_lft': drop_lft,
                  'tree_id': tree_id}
        return sql, []
    @classmethod
    def load_bulk(cls, bulk_data, parent=None, keep_ids=False):
        """
        Loads a list/dictionary structure to the tree.

        :param bulk_data: list of dicts with a ``data`` key (model fields),
            an optional ``id`` key (honored when *keep_ids* is true) and an
            optional ``children`` list with the same structure.
        :param parent: node under which the structure is inserted; ``None``
            inserts root nodes.
        :returns: list of the ids of the nodes that were added.

        See: :meth:`treebeard.Node.dump_bulk` for the inverse operation.
        """
        # tree, iterative preorder
        added = []
        if parent:
            parent_id = parent.id
        else:
            parent_id = None
        # stack of nodes to analyze; reversed so pops come out in order
        stack = [(parent_id, node) for node in bulk_data[::-1]]
        while stack:
            parent_id, node_struct = stack.pop()
            # shallow copy of the data structure so it doesn't persist...
            node_data = node_struct['data'].copy()
            if keep_ids:
                node_data['id'] = node_struct['id']
            if parent_id:
                # re-fetched so lft/rgt are fresh after earlier insertions
                parent = cls.objects.get(pk=parent_id)
                node_obj = parent.add_child(**node_data)
            else:
                node_obj = cls.add_root(**node_data)
            added.append(node_obj.id)
            if 'children' in node_struct:
                # extending the stack with the current node as the parent of
                # the new nodes
                stack.extend([(node_obj.id, node) \
                    for node in node_struct['children'][::-1]])
        transaction.commit_unless_managed()
        return added
def get_children(self):
"""
:returns: A queryset of all the node's children
See: :meth:`treebeard.Node.get_children`
"""
return self.get_descendants().filter(depth=self.depth + 1)
    def get_depth(self):
        """
        :returns: the depth (level) of the node; stored on the row, not
            computed. Root nodes have depth 1.

        See: :meth:`treebeard.Node.get_depth`
        """
        return self.depth
def is_leaf(self):
"""
:returns: True if the node is a leaf node (else, returns False)
See: :meth:`treebeard.Node.is_leaf`
"""
return self.rgt - self.lft == 1
    def get_root(self):
        """
        :returns: the root node for the current node object.

        See: :meth:`treebeard.Node.get_root`
        """
        # a root always has lft == 1; otherwise fetch the lft==1 row of
        # this node's tree
        if self.lft == 1:
            return self
        return self.__class__.objects.get(tree_id=self.tree_id,
            lft=1)
    def get_siblings(self):
        """
        :returns: A queryset of all the node's siblings, including the node
            itself.

        See: :meth:`treebeard.Node.get_siblings`
        """
        if self.lft == 1:
            # roots are siblings of each other
            return self.get_root_nodes()
        # update=True bypasses the cached parent so the queryset is fresh
        return self.get_parent(True).get_children()
    @classmethod
    def dump_bulk(cls, parent=None, keep_ids=True):
        """
        Dumps a tree branch to a python data structure.

        The structure is the one accepted by :meth:`load_bulk`: a list of
        ``{'data': ..., ['id': ...,] ['children': [...]]}`` dicts.

        See: :meth:`treebeard.Node.dump_bulk`
        """
        qset = cls.get_tree(parent)
        # ret: top-level result list; lnk: id -> serialized node, used to
        # attach children to their (already serialized) parent
        ret, lnk = [], {}
        for pyobj in qset:
            serobj = serializers.serialize('python', [pyobj])[0]
            # django's serializer stores the attributes in 'fields'
            fields = serobj['fields']
            depth = fields['depth']
            # NOTE(review): lft and tree_id are read but never used below
            lft = fields['lft']
            tree_id = fields['tree_id']
            # this will be useless in load_bulk
            del fields['lft']
            del fields['rgt']
            del fields['depth']
            del fields['tree_id']
            if 'id' in fields:
                # this happens immediately after a load_bulk
                del fields['id']
            newobj = {'data': fields}
            if keep_ids:
                newobj['id'] = serobj['pk']
            if (not parent and depth == 1) or \
                    (parent and depth == parent.depth):
                # top of the dumped branch
                ret.append(newobj)
            else:
                parentobj = pyobj.get_parent()
                parentser = lnk[parentobj.id]
                if 'children' not in parentser:
                    parentser['children'] = []
                parentser['children'].append(newobj)
            lnk[pyobj.id] = newobj
        return ret
    @classmethod
    def get_tree(cls, parent=None):
        """
        :returns: A *queryset* of nodes ordered as DFS, including the parent.
            If no parent is given, all trees are returned.

        See: :meth:`treebeard.Node.get_tree`

        .. note::

            This method returns a queryset.
        """
        if parent is None:
            # return the entire tree (default manager ordering is DFS)
            return cls.objects.all()
        if parent.is_leaf():
            return cls.objects.filter(pk=parent.id)
        # the subtree of 'parent' is exactly the lft interval it encloses
        return cls.objects.filter(
            tree_id=parent.tree_id,
            lft__range=(parent.lft, parent.rgt - 1))
def get_descendants(self):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
See: :meth:`treebeard.Node.get_descendants`
"""
if self.is_leaf():
return self.__class__.objects.none()
return self.__class__.get_tree(self).exclude(pk=self.id)
def get_descendant_count(self):
"""
:returns: the number of descendants of a node.
See: :meth:`treebeard.Node.get_descendant_count`
"""
return (self.rgt - self.lft - 1) / 2
    def get_ancestors(self):
        """
        :returns: A queryset containing the current node object's ancestors,
            starting by the root node and descending to the parent.

        See: :meth:`treebeard.Node.get_ancestors`
        """
        if self.is_root():
            return self.__class__.objects.none()
        # ancestors are exactly the nodes of the same tree whose [lft, rgt]
        # interval strictly contains this node's interval
        return self.__class__.objects.filter(
            tree_id=self.tree_id,
            lft__lt=self.lft,
            rgt__gt=self.rgt)
def is_descendant_of(self, node):
"""
:returns: ``True`` if the node if a descendant of another node given
as an argument, else, returns ``False``
See: :meth:`treebeard.Node.is_descendant_of`
"""
return self.tree_id == node.tree_id and \
self.lft > node.lft and \
self.rgt < node.rgt
    def get_parent(self, update=False):
        """
        :returns: the parent node of the current node object.
            Caches the result in the object itself to help in loops.

        :param update: if ``True``, discard the cached parent and query
            the database again.

        See: :meth:`treebeard.Node.get_parent`
        """
        if self.is_root():
            # roots have no parent; returns None implicitly
            return
        try:
            if update:
                del self._cached_parent_obj
            else:
                return self._cached_parent_obj
        except AttributeError:
            # nothing cached yet; fall through to the query below
            pass
        # parent = our most direct ancestor
        self._cached_parent_obj = self.get_ancestors().reverse()[0]
        return self._cached_parent_obj
    @classmethod
    def get_root_nodes(cls):
        """
        :returns: A queryset containing the root nodes in the tree.

        Example::

           MyNodeModel.get_root_nodes()
        """
        # every root node has lft == 1 within its own tree_id
        return cls.objects.filter(lft=1)
    class Meta:
        """
        Abstract model.
        """
        # no table is created for this model; concrete subclasses get one
        abstract = True
Remove unused temporary variables in NS_Node.dump_bulk.
# -*- coding: utf-8 -*-
"""
treebeard.ns_tree
-----------------
Nested Sets Tree.
:copyright: 2008-2010 by Gustavo Picon
:license: Apache License 2.0
An implementation of Nested Sets trees for Django 1.0+, as described by
`Joe Celko`_ in `Trees and Hierarchies in SQL for Smarties`_.
Nested sets have very efficient reads at the cost of high maintenance on
write/delete operations.
.. _`Joe Celko`: http://www.celko.com/
.. _`Trees and Hierarchies in SQL for Smarties`:
http://www.elsevier.com/wps/product/cws_home/702605
"""
import operator
from django.db.models import Q
from django.core import serializers
from django.db import models, transaction, connection
from treebeard.models import Node
from treebeard.exceptions import InvalidMoveToDescendant, PathOverflow
class NS_NodeQuerySet(models.query.QuerySet):
    """
    Custom queryset for the tree node manager.

    Needed only for the customized delete method.
    """

    def delete(self, removed_ranges=None):
        """
        Custom delete method, will remove all descendant nodes to ensure a
        consistent tree (no orphans)

        :param removed_ranges: internal use only; a list of
            ``(tree_id, lft, rgt)`` tuples describing subtrees that are
            already scheduled for removal.  Set when this method recurses
            into itself (see the ``else`` branch below).

        :returns: ``None``
        """
        if removed_ranges is not None:
            # we already know the children, let's call the default django
            # delete method and let it handle the removal of the user's
            # foreign keys...
            super(NS_NodeQuerySet, self).delete()
            cursor = connection.cursor()

            # Now closing the gap (Celko's trees book, page 62)
            # We do this for every gap that was left in the tree when the nodes
            # were removed. If many nodes were removed, we're going to update
            # the same nodes over and over again. This would be probably
            # cheaper precalculating the gapsize per intervals, or just do a
            # complete reordering of the tree (uses COUNT)...
            for tree_id, drop_lft, drop_rgt in sorted(removed_ranges,
                                                      reverse=True):
                sql, params = self.model._get_close_gap_sql(drop_lft, drop_rgt,
                                                            tree_id)
                cursor.execute(sql, params)
        else:
            # we'll have to manually run through all the nodes that are going
            # to be deleted and remove nodes from the list if an ancestor is
            # already getting removed, since that would be redundant
            removed = {}
            for node in self.order_by('tree_id', 'lft'):
                found = False
                for rid, rnode in removed.items():
                    if node.is_descendant_of(rnode):
                        found = True
                        break
                if not found:
                    removed[node.id] = node

            # ok, got the minimal list of nodes to remove...
            # we must also remove their descendants
            toremove = []
            ranges = []
            for id, node in removed.items():
                toremove.append(Q(lft__range=(node.lft, node.rgt)) &
                                Q(tree_id=node.tree_id))
                ranges.append((node.tree_id, node.lft, node.rgt))
            if toremove:
                # recurse once with removed_ranges set, hitting the first
                # branch above for the actual row deletion + gap closing
                self.model.objects.filter(
                    reduce(operator.or_, toremove)).delete(
                        removed_ranges=ranges)
        transaction.commit_unless_managed()
class NS_NodeManager(models.Manager):
    """Custom manager for nodes."""

    def get_query_set(self):
        """Return the custom queryset, ordered as DFS (tree_id, then lft)."""
        qset = NS_NodeQuerySet(self.model)
        return qset.order_by('tree_id', 'lft')
class NS_Node(Node):
    """
    Abstract model to create your own Nested Sets Trees.

    .. attribute:: node_order_by

       Attribute: a list of model fields that will be used for node
       ordering. When enabled, all tree operations will assume this ordering.

       Example::

          node_order_by = ['field1', 'field2', 'field3']

    .. attribute:: depth

       ``PositiveIntegerField``, depth of a node in the tree. A root node
       has a depth of *1*.

    .. attribute:: lft

       ``PositiveIntegerField``

    .. attribute:: rgt

       ``PositiveIntegerField``

    .. attribute:: tree_id

       ``PositiveIntegerField``
    """
    node_order_by = []

    # nested-sets bookkeeping columns; indexed because every tree
    # operation filters or orders on them
    lft = models.PositiveIntegerField(db_index=True)
    rgt = models.PositiveIntegerField(db_index=True)
    tree_id = models.PositiveIntegerField(db_index=True)
    depth = models.PositiveIntegerField(db_index=True)

    objects = NS_NodeManager()
    @classmethod
    def add_root(cls, **kwargs):
        """
        Adds a root node to the tree.

        A new root starts a brand-new tree (fresh ``tree_id``) unless
        :attr:`node_order_by` is set, in which case the sorted insertion
        is delegated to :meth:`add_sibling`.

        See: :meth:`treebeard.Node.add_root`
        """
        # do we have a root node already?
        last_root = cls.get_last_root_node()

        if last_root and last_root.node_order_by:
            # there are root nodes and node_order_by has been set
            # delegate sorted insertion to add_sibling
            return last_root.add_sibling('sorted-sibling', **kwargs)

        if last_root:
            # adding the new root node as the last one
            #newtree_id = last_root.tree_id + 100
            newtree_id = last_root.tree_id + 1
        else:
            # adding the first root node
            #newtree_id = 100
            newtree_id = 1

        # creating the new object
        newobj = cls(**kwargs)
        newobj.depth = 1
        newobj.tree_id = newtree_id
        # a brand-new root encloses nothing: interval is (1, 2)
        newobj.lft = 1
        #newobj.rgt = 400000000
        newobj.rgt = 2

        # saving the instance before returning it
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
    @classmethod
    def _move_right(cls, tree_id, rgt, lftmove=False, incdec=2):
        """
        Build the SQL that opens (or, with a negative ``incdec``, closes)
        a gap of ``incdec`` positions at ``rgt`` inside tree ``tree_id``.

        :param lftmove: if ``True``, nodes whose ``lft`` equals the pivot
            are shifted too (``>=`` instead of ``>``); used when inserting
            *at* an existing node's position.

        :returns: an ``(sql, params)`` tuple for ``cursor.execute``.

        All interpolated values are internally generated ints/identifiers,
        not user input.
        """
        if lftmove:
            lftop = '>='
        else:
            lftop = '>'
        sql = 'UPDATE %(table)s ' \
              ' SET lft = CASE WHEN lft %(lftop)s %(parent_rgt)d ' \
              ' THEN lft %(incdec)+d ' \
              ' ELSE lft END, ' \
              ' rgt = CASE WHEN rgt >= %(parent_rgt)d ' \
              ' THEN rgt %(incdec)+d ' \
              ' ELSE rgt END ' \
              ' WHERE rgt >= %(parent_rgt)d AND ' \
              ' tree_id = %(tree_id)s' % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'parent_rgt': rgt,
                  'tree_id': tree_id,
                  'lftop': lftop,
                  'incdec': incdec}
        return sql, []
    @classmethod
    def _move_tree_right(cls, tree_id):
        """
        Build the SQL that shifts every tree with id >= ``tree_id`` one
        position to the right, making room for a new tree at ``tree_id``.

        :returns: an ``(sql, params)`` tuple for ``cursor.execute``.
        """
        sql = 'UPDATE %(table)s ' \
              ' SET tree_id = tree_id+1 ' \
              ' WHERE tree_id >= %(tree_id)d' % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'tree_id': tree_id}
        return sql, []
    def add_child(self, **kwargs):
        """
        Adds a child to the node.

        See: :meth:`treebeard.Node.add_child`
        """
        if not self.is_leaf():
            # there are child nodes, delegate insertion to add_sibling
            if self.node_order_by:
                pos = 'sorted-sibling'
            else:
                pos = 'last-sibling'
            last_child = self.get_last_child()
            # pre-seed the parent cache so add_sibling doesn't re-query
            last_child._cached_parent_obj = self
            return last_child.add_sibling(pos, **kwargs)

        # we're adding the first child of this node
        # open a 2-wide gap right inside this node's interval
        sql, params = self.__class__._move_right(self.tree_id,
                                                 self.rgt, False, 2)

        # creating a new object
        newobj = self.__class__(**kwargs)
        newobj.tree_id = self.tree_id
        newobj.depth = self.depth + 1
        newobj.lft = self.lft + 1
        newobj.rgt = self.lft + 2

        # this is just to update the cache
        self.rgt = self.rgt + 2

        newobj._cached_parent_obj = self

        cursor = connection.cursor()
        cursor.execute(sql, params)

        # saving the instance before returning it
        newobj.save()
        transaction.commit_unless_managed()
        return newobj
    def add_sibling(self, pos=None, **kwargs):
        """
        Adds a new node as a sibling to the current node object.

        :param pos: one of ``first-sibling``, ``left``, ``right``,
            ``last-sibling`` or ``sorted-sibling`` (normalized by
            ``_fix_add_sibling_opts``).

        See: :meth:`treebeard.Node.add_sibling`
        """
        pos = self._fix_add_sibling_opts(pos)

        # creating a new object
        newobj = self.__class__(**kwargs)
        newobj.depth = self.depth

        sql = None
        target = self

        if target.is_root():
            # sibling of a root is a new root: it gets its own tree
            newobj.lft = 1
            newobj.rgt = 2
            if pos == 'sorted-sibling':
                # reduce 'sorted-sibling' to 'left' of the first bigger
                # sibling, or 'last-sibling' if none is bigger
                siblings = list(target.get_sorted_pos_queryset(
                    target.get_siblings(), newobj))
                if siblings:
                    pos = 'left'
                    target = siblings[0]
                else:
                    pos = 'last-sibling'

            last_root = target.__class__.get_last_root_node()

            if pos == 'last-sibling' \
                    or (pos == 'right' and target == last_root):
                # append after all existing trees
                newobj.tree_id = last_root.tree_id + 1
            else:
                # insert between trees: shift the later tree_ids right
                newpos = {'first-sibling': 1,
                          'left': target.tree_id,
                          'right': target.tree_id + 1}[pos]
                sql, params = target.__class__._move_tree_right(newpos)

                newobj.tree_id = newpos
        else:
            newobj.tree_id = target.tree_id

            if pos == 'sorted-sibling':
                siblings = list(target.get_sorted_pos_queryset(
                    target.get_siblings(), newobj))
                if siblings:
                    pos = 'left'
                    target = siblings[0]
                else:
                    pos = 'last-sibling'

            if pos in ('left', 'right', 'first-sibling'):
                # canonicalize to 'left' of the correct target (or to
                # 'first-sibling'/'last-sibling' at the edges)
                siblings = list(target.get_siblings())

                if pos == 'right':
                    if target == siblings[-1]:
                        pos = 'last-sibling'
                    else:
                        pos = 'left'
                        found = False
                        for node in siblings:
                            if found:
                                target = node
                                break
                            elif node == target:
                                found = True
                if pos == 'left':
                    if target == siblings[0]:
                        pos = 'first-sibling'
                if pos == 'first-sibling':
                    target = siblings[0]

            move_right = self.__class__._move_right

            # open a 2-wide gap at the insertion point
            if pos == 'last-sibling':
                newpos = target.get_parent().rgt
                sql, params = move_right(target.tree_id, newpos, False, 2)
            elif pos == 'first-sibling':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos - 1, False, 2)
            elif pos == 'left':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos, True, 2)

            newobj.lft = newpos
            newobj.rgt = newpos + 1

        # saving the instance before returning it
        if sql:
            cursor = connection.cursor()
            cursor.execute(sql, params)
        newobj.save()

        transaction.commit_unless_managed()

        return newobj
    def move(self, target, pos=None):
        """
        Moves the current node and all it's descendants to a new position
        relative to another node.

        Works in three SQL steps: open a hole at the destination, shift the
        whole subtree into it, then close the gap left behind.

        :raises InvalidMoveToDescendant: when *target* is a descendant of
            the node being moved.

        See: :meth:`treebeard.Node.move`
        """
        pos = self._fix_move_opts(pos)
        cls = self.__class__

        stmts = []
        parent = None

        if pos in ('first-child', 'last-child', 'sorted-child'):
            # moving to a child: reduce to the equivalent sibling move
            if target.is_leaf():
                parent = target
                pos = 'last-child'
            else:
                target = target.get_last_child()
                pos = {'first-child': 'first-sibling',
                       'last-child': 'last-sibling',
                       'sorted-child': 'sorted-sibling'}[pos]

        if target.is_descendant_of(self):
            raise InvalidMoveToDescendant("Can't move node to a descendant.")

        if self == target and (
            (pos == 'left') or \
            (pos in ('right', 'last-sibling') and \
                target == target.get_last_sibling()) or \
            (pos == 'first-sibling' and \
                target == target.get_first_sibling())):
            # special cases, not actually moving the node so no need to UPDATE
            return

        if pos == 'sorted-sibling':
            # reduce 'sorted-sibling' to 'left' or 'last-sibling'
            siblings = list(target.get_sorted_pos_queryset(
                target.get_siblings(), self))
            if siblings:
                pos = 'left'
                target = siblings[0]
            else:
                pos = 'last-sibling'
        if pos in ('left', 'right', 'first-sibling'):
            # canonicalize to 'left' of the proper target (or the edges)
            siblings = list(target.get_siblings())

            if pos == 'right':
                if target == siblings[-1]:
                    pos = 'last-sibling'
                else:
                    pos = 'left'
                    found = False
                    for node in siblings:
                        if found:
                            target = node
                            break
                        elif node == target:
                            found = True
            if pos == 'left':
                if target == siblings[0]:
                    pos = 'first-sibling'
            if pos == 'first-sibling':
                target = siblings[0]

        # ok let's move this
        cursor = connection.cursor()
        move_right = cls._move_right
        # gap = total width of the subtree being moved
        gap = self.rgt - self.lft + 1
        sql = None
        target_tree = target.tree_id

        # first make a hole
        if pos == 'last-child':
            newpos = parent.rgt
            sql, params = move_right(target.tree_id, newpos, False, gap)
        elif target.is_root():
            # moving between whole trees: shift tree_ids, not lft/rgt
            newpos = 1
            if pos == 'last-sibling':
                target_tree = target.get_siblings().reverse()[0].tree_id + 1
            elif pos == 'first-sibling':
                target_tree = 1
                sql, params = cls._move_tree_right(1)
            elif pos == 'left':
                sql, params = cls._move_tree_right(target.tree_id)
        else:
            if pos == 'last-sibling':
                newpos = target.get_parent().rgt
                sql, params = move_right(target.tree_id, newpos, False, gap)
            elif pos == 'first-sibling':
                newpos = target.lft
                sql, params = move_right(target.tree_id,
                                         newpos - 1, False, gap)
            elif pos == 'left':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos, True, gap)

        if sql:
            cursor.execute(sql, params)

        # we reload 'self' because lft/rgt may have changed
        fromobj = cls.objects.get(pk=self.id)

        depthdiff = target.depth - fromobj.depth
        if parent:
            depthdiff += 1

        # move the tree to the hole
        sql = "UPDATE %(table)s " \
              " SET tree_id = %(target_tree)d, " \
              " lft = lft + %(jump)d , " \
              " rgt = rgt + %(jump)d , " \
              " depth = depth + %(depthdiff)d " \
              " WHERE tree_id = %(from_tree)d AND " \
              " lft BETWEEN %(fromlft)d AND %(fromrgt)d" % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'from_tree': fromobj.tree_id,
                  'target_tree': target_tree,
                  'jump': newpos - fromobj.lft,
                  'depthdiff': depthdiff,
                  'fromlft': fromobj.lft,
                  'fromrgt': fromobj.rgt}
        cursor.execute(sql, [])

        # close the gap
        sql, params = cls._get_close_gap_sql(fromobj.lft,
                                             fromobj.rgt, fromobj.tree_id)
        cursor.execute(sql, params)

        transaction.commit_unless_managed()
    @classmethod
    def _get_close_gap_sql(cls, drop_lft, drop_rgt, tree_id):
        """
        Build the SQL that closes the gap left after removing (or moving
        away) the interval ``(drop_lft, drop_rgt)`` in tree ``tree_id``,
        shifting all later lft/rgt values back by the gap size.

        :returns: an ``(sql, params)`` tuple for ``cursor.execute``.
        """
        sql = 'UPDATE %(table)s ' \
              ' SET lft = CASE ' \
              ' WHEN lft > %(drop_lft)d ' \
              ' THEN lft - %(gapsize)d ' \
              ' ELSE lft END, ' \
              ' rgt = CASE ' \
              ' WHEN rgt > %(drop_lft)d ' \
              ' THEN rgt - %(gapsize)d ' \
              ' ELSE rgt END ' \
              ' WHERE (lft > %(drop_lft)d ' \
              ' OR rgt > %(drop_lft)d) AND '\
              ' tree_id=%(tree_id)d' % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'gapsize': drop_rgt - drop_lft + 1,
                  'drop_lft': drop_lft,
                  'tree_id': tree_id}
        return sql, []
    @classmethod
    def load_bulk(cls, bulk_data, parent=None, keep_ids=False):
        """
        Loads a list/dictionary structure to the tree.

        :returns: a list with the ids of the created nodes.

        See: :meth:`treebeard.Node.load_bulk`
        """
        # tree, iterative preorder
        added = []
        if parent:
            parent_id = parent.id
        else:
            parent_id = None
        # stack of (parent_id, node_struct) pairs still to analyze;
        # reversed so pops come out in document order
        stack = [(parent_id, node) for node in bulk_data[::-1]]
        while stack:
            parent_id, node_struct = stack.pop()
            # shallow copy of the data structure so it doesn't persist...
            node_data = node_struct['data'].copy()
            if keep_ids:
                node_data['id'] = node_struct['id']
            if parent_id:
                # 'parent' is rebound on every iteration on purpose
                parent = cls.objects.get(pk=parent_id)
                node_obj = parent.add_child(**node_data)
            else:
                node_obj = cls.add_root(**node_data)
            added.append(node_obj.id)
            if 'children' in node_struct:
                # extending the stack with the current node as the parent of
                # the new nodes
                stack.extend([(node_obj.id, node) \
                    for node in node_struct['children'][::-1]])
        transaction.commit_unless_managed()
        return added
def get_children(self):
"""
:returns: A queryset of all the node's children
See: :meth:`treebeard.Node.get_children`
"""
return self.get_descendants().filter(depth=self.depth + 1)
    def get_depth(self):
        """
        :returns: the depth (level) of the node

        See: :meth:`treebeard.Node.get_depth`
        """
        # depth is stored denormalized on the row; no query needed
        return self.depth
def is_leaf(self):
"""
:returns: True if the node is a leaf node (else, returns False)
See: :meth:`treebeard.Node.is_leaf`
"""
return self.rgt - self.lft == 1
def get_root(self):
"""
:returns: the root node for the current node object.
See: :meth:`treebeard.Node.get_root`
"""
if self.lft == 1:
return self
return self.__class__.objects.get(tree_id=self.tree_id,
lft=1)
def get_siblings(self):
"""
:returns: A queryset of all the node's siblings, including the node
itself.
See: :meth:`treebeard.Node.get_siblings`
"""
if self.lft == 1:
return self.get_root_nodes()
return self.get_parent(True).get_children()
@classmethod
def dump_bulk(cls, parent=None, keep_ids=True):
"""
Dumps a tree branch to a python data structure.
See: :meth:`treebeard.Node.dump_bulk`
"""
qset = cls.get_tree(parent)
ret, lnk = [], {}
for pyobj in qset:
serobj = serializers.serialize('python', [pyobj])[0]
# for serobj in serializers.serialize('python', qset):
# django's serializer stores the attributes in 'fields'
fields = serobj['fields']
depth = fields['depth']
lft = fields['lft']
tree_id = fields['tree_id']
# this will be useless in load_bulk
del fields['lft']
del fields['rgt']
del fields['depth']
del fields['tree_id']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {'data': fields}
if keep_ids:
newobj['id'] = serobj['pk']
if (not parent and depth == 1) or \
(parent and depth == parent.depth):
ret.append(newobj)
else:
parentobj = pyobj.get_parent()
parentser = lnk[parentobj.id]
if 'children' not in parentser:
parentser['children'] = []
parentser['children'].append(newobj)
lnk[pyobj.id] = newobj
return ret
@classmethod
def get_tree(cls, parent=None):
"""
:returns: A *queryset* of nodes ordered as DFS, including the parent.
If no parent is given, all trees are returned.
See: :meth:`treebeard.Node.get_tree`
.. note::
This metod returns a queryset.
"""
if parent is None:
# return the entire tree
return cls.objects.all()
if parent.is_leaf():
return cls.objects.filter(pk=parent.id)
return cls.objects.filter(
tree_id=parent.tree_id,
lft__range=(parent.lft, parent.rgt - 1))
def get_descendants(self):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
See: :meth:`treebeard.Node.get_descendants`
"""
if self.is_leaf():
return self.__class__.objects.none()
return self.__class__.get_tree(self).exclude(pk=self.id)
def get_descendant_count(self):
"""
:returns: the number of descendants of a node.
See: :meth:`treebeard.Node.get_descendant_count`
"""
return (self.rgt - self.lft - 1) / 2
def get_ancestors(self):
"""
:returns: A queryset containing the current node object's ancestors,
starting by the root node and descending to the parent.
See: :meth:`treebeard.Node.get_ancestors`
"""
if self.is_root():
return self.__class__.objects.none()
return self.__class__.objects.filter(
tree_id=self.tree_id,
lft__lt=self.lft,
rgt__gt=self.rgt)
def is_descendant_of(self, node):
"""
:returns: ``True`` if the node if a descendant of another node given
as an argument, else, returns ``False``
See: :meth:`treebeard.Node.is_descendant_of`
"""
return self.tree_id == node.tree_id and \
self.lft > node.lft and \
self.rgt < node.rgt
    def get_parent(self, update=False):
        """
        :returns: the parent node of the current node object.
            Caches the result in the object itself to help in loops.

        :param update: if ``True``, discard the cached parent and query
            the database again.

        See: :meth:`treebeard.Node.get_parent`
        """
        if self.is_root():
            # roots have no parent; returns None implicitly
            return
        try:
            if update:
                del self._cached_parent_obj
            else:
                return self._cached_parent_obj
        except AttributeError:
            # nothing cached yet; fall through to the query below
            pass
        # parent = our most direct ancestor
        self._cached_parent_obj = self.get_ancestors().reverse()[0]
        return self._cached_parent_obj
    @classmethod
    def get_root_nodes(cls):
        """
        :returns: A queryset containing the root nodes in the tree.

        Example::

           MyNodeModel.get_root_nodes()
        """
        # every root node has lft == 1 within its own tree_id
        return cls.objects.filter(lft=1)
    class Meta:
        """
        Abstract model.
        """
        # no table is created for this model; concrete subclasses get one
        abstract = True
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the view for the site menus."""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.api import users
from soc.views.template import Template
from soc.views.base_templates import LoggedInMsg
def siteMenuContext(data):
  """Generates URL links for the hard-coded GCI site menu items.

  Args:
    data: the request data object; provides redirect, program, profile
        and timeline (presumably a RequestData instance -- confirm against
        callers).

  Returns:
    A dict mapping menu link names to URLs.
  """
  redirect = data.redirect
  program = data.program

  from soc.modules.gci.models.program import GCIProgram
  # get_value_for_datastore returns the raw keys without dereferencing
  # the document entities; only the keys are needed to build URLs
  about_page = GCIProgram.about_page.get_value_for_datastore(program)
  connect = GCIProgram.connect_with_us_page.get_value_for_datastore(program)
  help_page = GCIProgram.help_page.get_value_for_datastore(program)
  terms = GCIProgram.terms_and_conditions.get_value_for_datastore(program)

  context = {
      'about_link': redirect.document(about_page).url(),
      'terms_link': redirect.document(terms).url(),
      'events_link': redirect.events().url(),
      'connect_link': redirect.document(connect).url(),
      'help_link': redirect.document(help_page).url(),
  }

  if users.get_current_user():
    context['logout_link'] = redirect.logout().url()
  else:
    context['login_link'] = redirect.login().url()
  # dashboard only shown to users with a profile in this revision
  if data.profile:
    context['dashboard_link'] = redirect.dashboard().url()
  if data.timeline.tasksPubliclyVisible():
    context['tasks_link'] = ''

  return context
class Header(Template):
  """Header template for the GCI site pages."""

  def __init__(self, data):
    self.data = data

  def templatePath(self):
    return "v2/modules/gci/_header.html"

  def context(self):
    homepage_url = self.data.redirect.homepage().url()
    return {
        'home_link': homepage_url,
        # TODO(SRabbelier): make this dynamic somehow
        'gsoc_link': '/gsoc/homepage/google/gsoc2011',
    }
class MainMenu(Template):
  """MainMenu template.

  Renders the site menu links plus, for logged-in users, links that depend
  on their profile status and roles.
  """

  def __init__(self, data):
    self.data = data

  def context(self):
    context = siteMenuContext(self.data)
    context.update({
        'home_link': self.data.redirect.homepage().url(),
    })

    if self.data.profile:
      # point the redirect helper at the current program before urlOf()
      self.data.redirect.program()
      if self.data.profile.status == 'active':
        context['profile_link'] = self.data.redirect.urlOf('edit_gci_profile')

        # Add org admin dashboard link if the user has active
        # org admin profile and is an org admin of some organization
        if self.data.is_org_admin:
          context['org_dashboard_link'] = self.data.redirect.urlOf(
              'gci_org_dashboard')
      else:
        # inactive profiles get a read-only profile view
        context['profile_link'] = self.data.redirect.urlOf('show_gci_profile')

    if self.data.is_host:
      self.data.redirect.program()
      context['admin_link'] = self.data.redirect.urlOf('gci_admin_dashboard')

    return context

  def templatePath(self):
    return "v2/modules/gci/_mainmenu.html"
class Footer(Template):
  """Footer template.

  Adds the program's contact/policy links on top of the shared site menu
  context.
  """

  def __init__(self, data):
    self.data = data

  def context(self):
    # removed unused local 'redirect' (self.data.redirect was never used)
    context = siteMenuContext(self.data)
    program = self.data.program

    context.update({
        'privacy_policy_link': program.privacy_policy_url,
        'blogger_link': program.blogger,
        'email_id': program.email,
        'irc_link': program.irc,
    })

    return context

  def templatePath(self):
    return "v2/modules/gci/_footer.html"
class Status(Template):
  """Template to render the status block."""

  def __init__(self, data):
    self.data = data

  def context(self):
    return {
        'user_email': self.data.user.account.email(),
        'logout_link': self.data.redirect.logout().url(),
        # dashboard link was missing here; the status block shows it for
        # every logged-in user (matches the fixed revision of this view)
        'dashboard_link': self.data.redirect.dashboard().url(),
    }

  def templatePath(self):
    return "v2/modules/gci/_status_block.html"
Fix dashboard link in status block.
The dashboard also exists for users without a profile.
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the view for the site menus."""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.api import users
from soc.views.template import Template
from soc.views.base_templates import LoggedInMsg
def siteMenuContext(data):
  """Generates URL links for the hard-coded GCI site menu items.

  Args:
    data: the request data object; provides redirect, program, user and
        timeline (presumably a RequestData instance -- confirm against
        callers).

  Returns:
    A dict mapping menu link names to URLs.
  """
  redirect = data.redirect
  program = data.program

  from soc.modules.gci.models.program import GCIProgram
  # get_value_for_datastore returns the raw keys without dereferencing
  # the document entities; only the keys are needed to build URLs
  about_page = GCIProgram.about_page.get_value_for_datastore(program)
  connect = GCIProgram.connect_with_us_page.get_value_for_datastore(program)
  help_page = GCIProgram.help_page.get_value_for_datastore(program)
  terms = GCIProgram.terms_and_conditions.get_value_for_datastore(program)

  context = {
      'about_link': redirect.document(about_page).url(),
      'terms_link': redirect.document(terms).url(),
      'events_link': redirect.events().url(),
      'connect_link': redirect.document(connect).url(),
      'help_link': redirect.document(help_page).url(),
  }

  if users.get_current_user():
    context['logout_link'] = redirect.logout().url()
  else:
    context['login_link'] = redirect.login().url()
  # dashboard is available to any user entity, profile or not
  if data.user:
    context['dashboard_link'] = redirect.dashboard().url()
  if data.timeline.tasksPubliclyVisible():
    context['tasks_link'] = ''

  return context
class Header(Template):
  """Header template for the GCI site pages."""

  def __init__(self, data):
    self.data = data

  def templatePath(self):
    return "v2/modules/gci/_header.html"

  def context(self):
    homepage_url = self.data.redirect.homepage().url()
    return {
        'home_link': homepage_url,
        # TODO(SRabbelier): make this dynamic somehow
        'gsoc_link': '/gsoc/homepage/google/gsoc2011',
    }
class MainMenu(Template):
  """MainMenu template.

  Renders the site menu links plus, for logged-in users, links that depend
  on their profile status and roles.
  """

  def __init__(self, data):
    self.data = data

  def context(self):
    context = siteMenuContext(self.data)
    context.update({
        'home_link': self.data.redirect.homepage().url(),
    })

    if self.data.profile:
      # point the redirect helper at the current program before urlOf()
      self.data.redirect.program()
      if self.data.profile.status == 'active':
        context['profile_link'] = self.data.redirect.urlOf('edit_gci_profile')

        # Add org admin dashboard link if the user has active
        # org admin profile and is an org admin of some organization
        if self.data.is_org_admin:
          context['org_dashboard_link'] = self.data.redirect.urlOf(
              'gci_org_dashboard')
      else:
        # inactive profiles get a read-only profile view
        context['profile_link'] = self.data.redirect.urlOf('show_gci_profile')

    if self.data.is_host:
      self.data.redirect.program()
      context['admin_link'] = self.data.redirect.urlOf('gci_admin_dashboard')

    return context

  def templatePath(self):
    return "v2/modules/gci/_mainmenu.html"
class Footer(Template):
  """Footer template.

  Adds the program's contact/policy links on top of the shared site menu
  context.
  """

  def __init__(self, data):
    self.data = data

  def context(self):
    # removed unused local 'redirect' (self.data.redirect was never used)
    context = siteMenuContext(self.data)
    program = self.data.program

    context.update({
        'privacy_policy_link': program.privacy_policy_url,
        'blogger_link': program.blogger,
        'email_id': program.email,
        'irc_link': program.irc,
    })

    return context

  def templatePath(self):
    return "v2/modules/gci/_footer.html"
class Status(Template):
  """Template to render the status block."""

  def __init__(self, data):
    self.data = data

  def context(self):
    redirect = self.data.redirect
    return {
        'user_email': self.data.user.account.email(),
        'logout_link': redirect.logout().url(),
        'dashboard_link': redirect.dashboard().url()
    }

  def templatePath(self):
    return "v2/modules/gci/_status_block.html"
|
import datetime
import json
import logging
import re
from BeautifulSoup import BeautifulSoup as soup
from xml.sax.saxutils import escape as xhtml_escape
from tornado.httpclient import AsyncHTTPClient
import tornado.web
from util.cache import Cache
from util.route import route
@route(r'/atom/(\d+)')
class AtomHandler(tornado.web.RequestHandler):
    """Fetches the public posts for a given G+ user id as an Atom feed."""

    # undocumented endpoint that backs the G+ profile activity stream
    profile_json_url_template = 'https://plus.google.com/_/stream/getactivities/?sp=[1,2,"%s"]&rt=j'
    cache_key_template = 'pluss--gplusid--atom--%s'

    # matches a comma immediately followed by another comma
    # (used to patch elided array entries: ",," -> ",null,")
    comma_fixer_regex = re.compile(r',(?=,)')
    # collapses any whitespace run into a single space
    space_compress_regex = re.compile(r'\s+')

    ATOM_DATEFMT = "%Y-%m-%dT%H:%M:%SZ"
    HTTP_DATEFMT = "%a, %d %b %Y %H:%M:%S GMT"
    @tornado.web.asynchronous
    def get(self, user_id):
        """Serve the Atom feed for *user_id*, from cache when possible."""
        self.gplus_user_id = user_id
        if len(user_id) != 21:
            self.write("Google+ profile IDs are exactly 21 digits long. Please specify a proper profile ID.")
            return self.finish()

        self.cache_key = self.cache_key_template % user_id
        cached_result = Cache.get(self.cache_key)
        # a truthy '?flush=...' query argument bypasses the cache
        if cached_result and not self.request.arguments.get('flush', [None])[0]:
            return self._respond(**cached_result)

        # cache miss: fetch the profile stream asynchronously; the
        # response is rendered in _on_http_response
        http_client = AsyncHTTPClient()
        http_client.fetch(self.profile_json_url_template % user_id, self._on_http_response)
    def _respond(self, headers=(), body='', **kwargs):
        """Write *headers* and *body* to the client and finish the request.

        Extra keyword arguments (e.g. stale fields from a cached dict)
        are accepted and ignored.
        """
        for (header, value) in headers:
            self.set_header(header, value)
        self.write(body)
        return self.finish()
    def _on_http_response(self, response):
        """Parse the G+ stream response and render/cache the Atom feed."""
        if response.error:
            logging.error("AsyncHTTPRequest error: %r" % response.error)
            self.send_error(500)
        else:
            # The stream endpoint returns JavaScript-flavored pseudo-JSON:
            # an anti-XSSI prefix plus elided array entries.  Strip the
            # prefix and patch the holes with explicit nulls so that
            # json.loads accepts it.
            pseudojson = response.body.lstrip(")]}'\n")
            pseudojson = self.comma_fixer_regex.sub(',null', pseudojson)
            pseudojson = pseudojson.replace('[,', '[null,')
            pseudojson = pseudojson.replace(',]', ',null]')
            data = json.loads(pseudojson)
            # position of the posts list reverse-engineered from the
            # payload -- TODO confirm it is stable
            posts = data[0][0][1][0]
            headers = [('Content-Type', 'application/atom+xml')]
            params = {
                'userid': self.gplus_user_id,
                'baseurl': 'http://%s' % self.request.host,
                'requesturi': 'http://%s%s' % (self.request.host, self.request.uri.split('?', 1)[0]),
            }
            if not posts:
                # no public posts: emit the placeholder feed (not cached)
                params['lastupdate'] = datetime.datetime.today().strftime(self.ATOM_DATEFMT)
                return self._respond(headers, self.empty_feed_template.format(**params))

            # Return a maximum of 10 items
            posts = posts[:10]
            # post[5] is a millisecond epoch timestamp
            lastupdate = datetime.datetime.fromtimestamp(float(posts[0][5])/1000)
            params['author'] = posts[0][3]
            #params['authorimg'] = posts[0][18]
            params['lastupdate'] = lastupdate.strftime(self.ATOM_DATEFMT)
            headers.append( ('Last-Modified', lastupdate.strftime(self.HTTP_DATEFMT)) )
            params['entrycontent'] = u''.join(self.entry_template.format(**self.get_post_params(p)) for p in posts)
            body = self.feed_template.format(**params)
            Cache.set(self.cache_key, {'headers': headers, 'body': body}, time=900) # 15 minute cache
            return self._respond(headers, body)
    def get_post_params(self, post):
        """Build the template parameters for one post.

        The numeric indices into *post* are reverse-engineered from the
        G+ stream payload and may break if Google changes the format.
        """
        # post[5] is a millisecond epoch timestamp; post[21] the post id
        post_timestamp = datetime.datetime.fromtimestamp(float(post[5])/1000)
        post_id = post[21]
        permalink = 'https://plus.google.com/%s' % post_id

        # post[4] is the full post text (with HTML).
        # not sure what post[47] is, but plusfeed uses it if it exists
        content = [post[47] or post[4] or '']
        if post[44]: # "originally shared by"
            content.append('<br/><br/>')
            content.append('<a href="https://plus.google.com/%s">%s</a>' % (post[44][1], post[44][0]))
            content.append(' originally shared this post: ')
        if post[66]: # attached content
            attach = post[66]
            if attach[0][1]: # attached link
                content.append('<br/><br/>')
                content.append('<a href="%s">%s</a>' % (attach[0][1], attach[0][3]))
            if attach[0][6]: #attached media
                media = attach[0][6][0]
                if media[1] and media[1].startswith('image'): # attached image
                    content.append('<br/><br/>')
                    content.append('<img src="http:%s" alt="attached image"/>' % media[2])
                elif len(media) >= 9: # some other attached media
                    try:
                        content.append('<br/><br/>')
                        content.append('<a href="%s">%s</a>' % (media[8], media[8]))
                    except:
                        pass

        # If no actual parseable content was found, just link to the post
        post_content = u''.join(content) or permalink

        # Generate the post title out of just text [max: 100 characters]
        post_title = u' '.join(x.string for x in soup(post_content).findAll(text=True))
        post_title = self.space_compress_regex.sub(' ', post_title)
        if len(post_title) > 100:
            if post_title == permalink:
                post_title = u"A public G+ post"
            else:
                candidate_title = post_title[:97]
                if '&' in candidate_title[-5:]: # Don't risk cutting off HTML entities
                    candidate_title = candidate_title.rsplit('&', 1)[0]
                if ' ' in candidate_title[-5:]: # Reasonably avoid cutting off words
                    candidate_title = candidate_title.rsplit(' ', 1)[0]
                post_title = u"%s..." % candidate_title

        return {
            'title': post_title,
            'permalink': xhtml_escape(permalink),
            'postatomdate': post_timestamp.strftime(self.ATOM_DATEFMT),
            'postdate': post_timestamp.strftime('%Y-%m-%d'),
            'id': xhtml_escape(post_id),
            'summary': xhtml_escape(post_content),
        }
entry_template = u"""
<entry>
<title>{title}</title>
<link href="{permalink}" rel="alternate" />
<updated>{postatomdate}</updated>
<id>tag:plus.google.com,{postdate}:/{id}/</id>
<summary type="html">{summary}</summary>
</entry>
"""
feed_template = u"""<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
<title>{author} - Google+ Public Posts</title>
<link href="https://plus.google.com/{userid}" rel="alternate" />
<link href="{requesturi}" rel="self" />
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<author>
<name>{author}</name>
</author>
{entrycontent}
</feed>
"""
empty_feed_template = u"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>No Public Items Found</title>
<link href="https://plus.google.com/{userid}" rel="alternate"></link>
<link href="{requesturi}" rel="self"></link>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<entry>
<title>No Public Items Found</title>
<link href="http://plus.google.com/{userid}"/>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<summary>Google+ user {userid} has not made any posts public.</summary>
</entry>
</feed>
"""
Add userid to title of empty posts feed
import datetime
import json
import logging
import re
from BeautifulSoup import BeautifulSoup as soup
from xml.sax.saxutils import escape as xhtml_escape
from tornado.httpclient import AsyncHTTPClient
import tornado.web
from util.cache import Cache
from util.route import route
@route(r'/atom/(\d+)')
class AtomHandler(tornado.web.RequestHandler):
    """Fetches the public posts for a given G+ user id as an Atom feed."""

    # Undocumented JSON-ish endpoint that backs the G+ stream UI.
    profile_json_url_template = 'https://plus.google.com/_/stream/getactivities/?sp=[1,2,"%s"]&rt=j'
    cache_key_template = 'pluss--gplusid--atom--%s'

    # The endpoint elides repeated values (",,", "[,", ",]"); these
    # regexes/replacements patch the payload into valid JSON.
    comma_fixer_regex = re.compile(r',(?=,)')
    space_compress_regex = re.compile(r'\s+')

    ATOM_DATEFMT = "%Y-%m-%dT%H:%M:%SZ"
    HTTP_DATEFMT = "%a, %d %b %Y %H:%M:%S GMT"

    @tornado.web.asynchronous
    def get(self, user_id):
        """Serve the Atom feed for ``user_id``, from cache when possible."""
        self.gplus_user_id = user_id

        if len(user_id) != 21:
            self.write("Google+ profile IDs are exactly 21 digits long. Please specify a proper profile ID.")
            return self.finish()

        self.cache_key = self.cache_key_template % user_id
        cached_result = Cache.get(self.cache_key)
        # A truthy '?flush=...' query argument bypasses the cache.
        if cached_result and not self.request.arguments.get('flush', [None])[0]:
            return self._respond(**cached_result)

        http_client = AsyncHTTPClient()
        http_client.fetch(self.profile_json_url_template % user_id, self._on_http_response)

    def _respond(self, headers=(), body='', **kwargs):
        """Write ``headers`` and ``body`` to the client and finish the request."""
        for (header, value) in headers:
            self.set_header(header, value)
        self.write(body)
        return self.finish()

    def _on_http_response(self, response):
        """Parse the fetched G+ pseudo-JSON and render the Atom feed."""
        if response.error:
            logging.error("AsyncHTTPRequest error: %r" % response.error)
            self.send_error(500)
        else:
            # Strip the anti-XSSI prefix, then repair elided values so
            # the payload parses as real JSON.
            pseudojson = response.body.lstrip(")]}'\n")
            pseudojson = self.comma_fixer_regex.sub(',null', pseudojson)
            pseudojson = pseudojson.replace('[,', '[null,')
            pseudojson = pseudojson.replace(',]', ',null]')
            data = json.loads(pseudojson)
            # Position of the post list in the (undocumented) structure.
            posts = data[0][0][1][0]

            headers = [('Content-Type', 'application/atom+xml')]
            params = {
                'userid': self.gplus_user_id,
                'baseurl': 'http://%s' % self.request.host,
                'requesturi': 'http://%s%s' % (self.request.host, self.request.uri.split('?', 1)[0]),
            }

            if not posts:
                params['lastupdate'] = datetime.datetime.today().strftime(self.ATOM_DATEFMT)
                return self._respond(headers, self.empty_feed_template.format(**params))

            # Return a maximum of 10 items
            posts = posts[:10]

            # Post timestamps are in milliseconds since the epoch.
            lastupdate = datetime.datetime.fromtimestamp(float(posts[0][5])/1000)
            params['author'] = posts[0][3]
            #params['authorimg'] = posts[0][18]
            params['lastupdate'] = lastupdate.strftime(self.ATOM_DATEFMT)
            headers.append( ('Last-Modified', lastupdate.strftime(self.HTTP_DATEFMT)) )

            params['entrycontent'] = u''.join(self.entry_template.format(**self.get_post_params(p)) for p in posts)
            body = self.feed_template.format(**params)

            Cache.set(self.cache_key, {'headers': headers, 'body': body}, time=900) # 15 minute cache
            return self._respond(headers, body)

    def get_post_params(self, post):
        """Build the per-entry template parameters for a single raw post."""
        post_timestamp = datetime.datetime.fromtimestamp(float(post[5])/1000)
        post_id = post[21]
        permalink = 'https://plus.google.com/%s' % post_id

        # post[4] is the full post text (with HTML).
        # not sure what post[47] is, but plusfeed uses it if it exists
        content = [post[47] or post[4] or '']

        if post[44]: # "originally shared by"
            content.append('<br/><br/>')
            content.append('<a href="https://plus.google.com/%s">%s</a>' % (post[44][1], post[44][0]))
            content.append(' originally shared this post: ')

        if post[66]: # attached content
            attach = post[66]
            if attach[0][1]: # attached link
                content.append('<br/><br/>')
                content.append('<a href="%s">%s</a>' % (attach[0][1], attach[0][3]))
            if attach[0][6]: #attached media
                media = attach[0][6][0]
                if media[1] and media[1].startswith('image'): # attached image
                    content.append('<br/><br/>')
                    content.append('<img src="http:%s" alt="attached image"/>' % media[2])
                elif len(media) >= 9: # some other attached media
                    try:
                        content.append('<br/><br/>')
                        content.append('<a href="%s">%s</a>' % (media[8], media[8]))
                    except:
                        # NOTE(review): bare except silently skips
                        # malformed media (best-effort parsing).
                        pass

        # If no actual parseable content was found, just link to the post
        post_content = u''.join(content) or permalink

        # Generate the post title out of just text [max: 100 characters]
        post_title = u' '.join(x.string for x in soup(post_content).findAll(text=True))
        post_title = self.space_compress_regex.sub(' ', post_title)
        if len(post_title) > 100:
            if post_title == permalink:
                post_title = u"A public G+ post"
            else:
                candidate_title = post_title[:97]
                if '&' in candidate_title[-5:]: # Don't risk cutting off HTML entities
                    candidate_title = candidate_title.rsplit('&', 1)[0]
                if ' ' in candidate_title[-5:]: # Reasonably avoid cutting off words
                    candidate_title = candidate_title.rsplit(' ', 1)[0]
                post_title = u"%s..." % candidate_title

        return {
            'title': post_title,
            'permalink': xhtml_escape(permalink),
            'postatomdate': post_timestamp.strftime(self.ATOM_DATEFMT),
            'postdate': post_timestamp.strftime('%Y-%m-%d'),
            'id': xhtml_escape(post_id),
            'summary': xhtml_escape(post_content),
        }

    # Atom <entry> fragment; filled via str.format with get_post_params().
    entry_template = u"""
<entry>
<title>{title}</title>
<link href="{permalink}" rel="alternate" />
<updated>{postatomdate}</updated>
<id>tag:plus.google.com,{postdate}:/{id}/</id>
<summary type="html">{summary}</summary>
</entry>
"""

    # Feed wrapper; {entrycontent} is the joined entry fragments.
    feed_template = u"""<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
<title>{author} - Google+ Public Posts</title>
<link href="https://plus.google.com/{userid}" rel="alternate" />
<link href="{requesturi}" rel="self" />
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<author>
<name>{author}</name>
</author>
{entrycontent}
</feed>
"""

    # Fallback feed served when the user has no public posts.
    empty_feed_template = u"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>No Public Items Found for {userid}</title>
<link href="https://plus.google.com/{userid}" rel="alternate"></link>
<link href="{requesturi}" rel="self"></link>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<entry>
<title>No Public Items Found</title>
<link href="http://plus.google.com/{userid}"/>
<id>https://plus.google.com/{userid}</id>
<updated>{lastupdate}</updated>
<summary>Google+ user {userid} has not made any posts public.</summary>
</entry>
</feed>
"""
|
from mohawk import Receiver
from rest_framework import permissions
class IsStaffOrReadOnly(permissions.BasePermission):
    """
    The request is authenticated as an admin staff (eg. sheriffs), or is a read-only request.
    """

    def has_permission(self, request, view):
        # `is_authenticated` is redundant here: it is True for every user
        # other than AnonymousUser, and AnonymousUser.is_staff is False,
        # so `is_staff` alone suffices (this matches DRF's own
        # IsAdminUser implementation).
        return (request.method in permissions.SAFE_METHODS or
                request.user and
                request.user.is_staff)
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Object-level permission to only allow owners of an object to edit it.

    Assumes the model instance has an `user` attribute.
    """

    def has_object_permission(self, request, view, obj):
        # Safe (GET/HEAD/OPTIONS) requests are always permitted;
        # writes are restricted to the object's owner.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.user == request.user
class HasHawkPermissionsOrReadOnly(permissions.BasePermission):
    """Allow reads to anyone; writes require a validated Hawk receiver."""

    def has_permission(self, request, view):
        if request.method not in permissions.SAFE_METHODS:
            # Set by the Hawk authentication middleware on success.
            receiver = request.META.get('hawk.receiver')
            return receiver and isinstance(receiver, Receiver)
        return True
Bug 1275405 - Remove unnecessary check for user.is_authenticated
Since:
* It's always true for all users other than `AnonymousUser`.
* django-rest-framework's similar `IsAdminUser` implementation doesn't use it:
https://github.com/tomchristie/django-rest-framework/blob/3.3.3/rest_framework/permissions.py#L49-L55
from mohawk import Receiver
from rest_framework import permissions
class IsStaffOrReadOnly(permissions.BasePermission):
    """
    The request is authenticated as an admin staff (eg. sheriffs), or is a read-only request.
    """

    def has_permission(self, request, view):
        # Reads are open to everyone; writes require a staff user.
        if request.method in permissions.SAFE_METHODS:
            return True
        return request.user and request.user.is_staff
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Object-level permission to only allow owners of an object to edit it.

    Assumes the model instance has an `user` attribute.
    """

    def has_object_permission(self, request, view, obj):
        # Writes are only allowed for the object's owner; reads
        # (GET, HEAD, OPTIONS) are open to any request.
        if request.method not in permissions.SAFE_METHODS:
            return obj.user == request.user
        return True
class HasHawkPermissionsOrReadOnly(permissions.BasePermission):
    """Allow reads to anyone; writes require a validated Hawk receiver."""

    def has_permission(self, request, view):
        # Read-only requests need no Hawk credentials.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Set by the Hawk authentication middleware on success.
        receiver = request.META.get('hawk.receiver')
        return receiver and isinstance(receiver, Receiver)
|
#!/usr/bin/env python
"""Representation of calls in ELAPS:PlayMat."""
from __future__ import division, print_function
from elaps import signature
from elaps.qt import QDataArg
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import pyqtSlot
class QCall(QtGui.QListWidgetItem):

    """Representation of a call in ELAPS:PlayMat."""

    def __init__(self, playmat, callid):
        """Initialize the call representation."""
        QtGui.QListWidgetItem.__init__(self)
        self.playmat = playmat
        self.sig = None

        self.UI_init()

    @property
    def callid(self):
        """Get the callid (this item's row in the calls list)."""
        return self.playmat.UI_calls.row(self)

    @property
    def call(self):
        """Get the associated Call (or BasicCall)."""
        return self.playmat.experiment.calls[self.callid]

    def UI_init(self):
        """Initialize the GUI elements."""
        routines = list(self.playmat.experiment.sampler["kernels"])

        # layout
        self.widget = QtGui.QWidget()
        layout = QtGui.QGridLayout()
        self.widget.setLayout(layout)

        # routine label
        routinelabel = QtGui.QLabel()
        layout.addWidget(routinelabel, 0, 0)

        # routine
        routine = QtGui.QLineEdit(textChanged=self.on_arg_change)
        layout.addWidget(routine, 1, 0)
        routine.argid = 0
        completer = QtGui.QCompleter(routines, routine)
        routine.setCompleter(completer)

        # spaces
        layout.setColumnStretch(100, 1)

        # shortcuts

        # attributes
        self.UI_args = [routine]
        self.UI_arglabels = [routinelabel]

    def update_size(self):
        """Update the size of the call item from its widget."""
        def update():
            """Update the size (asynchronous callback)."""
            try:
                self.setSizeHint(self.widget.sizeHint())
            except:
                # NOTE(review): bare except — presumably guards against
                # the widget having been deleted; confirm.
                pass
        QtCore.QTimer.singleShot(0, update)

    def UI_args_set(self, force=False):
        """Set all arguments."""
        for argid in range(len(self.call)):
            self.UI_arg_set(argid, force)

    def UI_arg_set(self, argid=None, force=False):
        """Set an argument's widget from the experiment value."""
        value = self.call[argid]
        UI_arg = self.UI_args[argid]
        UI_arglabel = self.UI_arglabels[argid]
        # Don't clobber a field the user is editing, unless forced.
        if force or not UI_arg.hasFocus():
            if isinstance(UI_arg, QtGui.QLineEdit):
                if type(value) is list:
                    UI_arg.setText("[%s]" % value[0])
                else:
                    UI_arg.setText(str(value))
            elif isinstance(UI_arg, QtGui.QComboBox):
                UI_arg.setCurrentIndex(UI_arg.findText(str(value)))
        if isinstance(self.call, signature.BasicCall):
            self.playmat.UI_set_invalid(UI_arg, False)
        if isinstance(self.call, signature.Call):
            # apply hideargs
            show = not isinstance(
                self.sig[argid], tuple(self.playmat.hideargs) + (type(None),)
            )
            UI_arg.setVisible(show)
            UI_arglabel.setVisible(show)
            # viz
            if isinstance(self.sig[argid], signature.Data):
                UI_arg.viz()
            # validity
            if argid > 0:
                try:
                    self.playmat.experiment.set_arg(self.callid, argid, value,
                                                    check_only=True)
                    self.playmat.UI_set_invalid(UI_arg, False)
                except:
                    self.playmat.UI_set_invalid(UI_arg)
        self.update_size()

    def setall(self):
        """Set all UI elements."""
        self.playmat.UI_setting += 1
        self.fixcallid = self.callid
        if isinstance(self.call, signature.BasicCall):
            # (Re)build the argument widgets when the signature changed.
            if self.sig != self.call.sig:
                self.args_clear()
                self.args_init()
                self.sig = self.call.sig
        else:
            if self.sig:
                self.args_clear()
                self.sig = None
        self.UI_args_set()
        self.playmat.UI_setting -= 1

    def args_init(self):
        """Initialize the arguments."""
        Qroutine = self.UI_args[0]
        assert(isinstance(self.call, signature.BasicCall))
        sig = self.call.sig
        doc = self.playmat.docs_get(self.call[0])
        if doc:
            Qroutine.setToolTip(doc[0][1])
        for argid, arg in enumerate(sig):
            if argid == 0:
                continue
            tooltip = ""
            if isinstance(sig, signature.Signature):
                argname = sig[argid].name
                if doc and argid < len(doc):
                    tooltip = doc[argid][1]
            else:
                argname = sig[argid].replace("*", " *")
                if doc and argid < len(doc):
                    argname += doc[argid][0]
                    tooltip = doc[argid][1]
                if argname in ("int *", "float *", "double *"):
                    if doc:
                        tooltip += "\n\n*Format*:\n"
                    tooltip += ("Scalar:\tvalue\t\te.g. 1, -0.5\n"
                                "Array:\t[#elements]"
                                "\te.g. [10000] for a 100x100 matrix")
            # arglabel
            UI_arglabel = QtGui.QLabel(
                argname, toolTip=tooltip, alignment=QtCore.Qt.AlignCenter
            )
            self.UI_arglabels.append(UI_arglabel)
            self.widget.layout().addWidget(UI_arglabel, 0, argid)
            # arg
            if isinstance(sig, signature.Signature):
                arg = sig[argid]
                if isinstance(arg, signature.Flag):
                    UI_arg = QtGui.QComboBox()
                    UI_arg.addItems(arg.flags)
                    UI_arg.currentIndexChanged[str].connect(self.on_arg_change)
                elif isinstance(arg, signature.Data):
                    UI_arg = QDataArg(
                        self,
                        editingFinished=self.on_dataarg_focusout
                    )
                else:
                    UI_arg = QtGui.QLineEdit(
                        textChanged=self.on_arg_change,
                        editingFinished=self.on_arg_focusout
                    )
            else:
                UI_arg = QtGui.QLineEdit("", textChanged=self.on_arg_change)
            UI_arg.pyqtConfigure(
                toolTip=tooltip,
                contextMenuPolicy=QtCore.Qt.CustomContextMenu,
                customContextMenuRequested=self.on_arg_rightclick
            )
            UI_arg.argid = argid
            self.UI_args.append(UI_arg)
            self.widget.layout().addWidget(UI_arg, 1, argid,
                                           QtCore.Qt.AlignCenter)

    def args_clear(self):
        """Clear the arguments (all widgets except the routine field)."""
        for UI_arg in self.UI_args[1:]:
            UI_arg.deleteLater()
        self.UI_args = [self.UI_args[0]]
        for UI_arglabel in self.UI_arglabels[1:]:
            UI_arglabel.deleteLater()
        self.UI_arglabels = [self.UI_arglabels[0]]
        self.UI_args[0].setToolTip("")

    def viz(self):
        """Visualize all operands."""
        if not isinstance(self.call, signature.Call):
            return
        for argid in self.call.sig.dataargs():
            self.UI_args[argid].viz()
        self.update_size()

    # event handlers

    # @pyqtSlot(str)  # sender() pyqt bug
    def on_arg_change(self, value):
        """Event: Changed an argument."""
        sender = self.playmat.Qapp.sender()
        value = str(value)
        argid = sender.argid
        if isinstance(sender, QtGui.QLineEdit) and not isinstance(sender,
                                                                  QDataArg):
            # adjust width no matter where the change came from
            width = sender.fontMetrics().width(value) + 4
            width += sender.minimumSizeHint().width()
            margins = sender.getTextMargins()
            width += margins[0] + margins[2]
            width = min(width, sender.sizeHint().width())
            height = sender.sizeHint().height()
            if argid == 0:
                sender.setMinimumSize(max(height, width), height)
            else:
                sender.setFixedSize(max(height, width), height)
        if self.playmat.UI_setting:
            return
        self.playmat.on_arg_set(self.callid, argid, value)

    def on_dataarg_focusout(self):
        """Event: Changed a data argument."""
        sender = self.playmat.Qapp.sender()
        value = sender.text()
        self.on_arg_change(value)
        self.on_arg_focusout()

    # @pyqtSlot(QtCore.QPoint)  # sender() pyqt bug
    def on_arg_rightclick(self, pos):
        """Event: right click on arg."""
        if self.playmat.UI_setting:
            return
        sender = self.playmat.Qapp.sender()
        globalpos = sender.mapToGlobal(pos)
        menu = sender.createStandardContextMenu()
        if isinstance(self.call, signature.Call):
            actions = menu.actions()
            if actions[-1].text() == "Insert Unicode control character":
                actions[-1].setVisible(False)
            argid = sender.argid
            if isinstance(self.call.sig[argid], signature.Ld):
                inferld = QtGui.QAction("Infer leading dimension", sender,
                                        triggered=self.on_inferld)
                inferld.argid = argid
                menu.insertAction(actions[0], inferld)
            elif isinstance(self.call.sig[argid], signature.Lwork):
                inferlwork = QtGui.QAction("Infer workspace size", sender,
                                           triggered=self.on_inferlwork)
                inferlwork.argid = argid
                menu.insertAction(actions[0], inferlwork)
            if isinstance(self.call.sig[argid], signature.Data):
                for action in self.playmat.UI_varyactions(self.call[argid]):
                    if action:
                        menu.insertAction(actions[0], action)
                    else:
                        # None entries act as separators.
                        menu.insertSeparator(actions[0])
            if len(menu.actions()) > len(actions):
                menu.insertSeparator(actions[0])
        menu.exec_(globalpos)

    # @pyqtSlot()  # sender() pyqt bug
    def on_inferld(self):
        """Event: infer ld."""
        if self.playmat.UI_setting:
            return
        argid = self.playmat.Qapp.sender().argid
        self.UI_args[argid].clearFocus()
        self.playmat.on_infer_ld(self.callid, argid)

    # @pyqtSlot()  # sender() pyqt bug
    def on_inferlwork(self):
        """Event: infer lwork."""
        if self.playmat.UI_setting:
            return
        argid = self.playmat.Qapp.sender().argid
        self.UI_args[argid].clearFocus()
        self.playmat.on_infer_lwork(self.callid, argid)

    @pyqtSlot()
    def on_arg_focusout(self):
        """Argument loses focus."""
        if self.playmat.UI_setting:
            return
        if self.playmat.Qapp.activeModalWidget():
            return
        self.playmat.UI_calls_set()
Simplify text-edit resizing in QCall.on_arg_change
#!/usr/bin/env python
"""Representation of calls in ELAPS:PlayMat."""
from __future__ import division, print_function
from elaps import signature
from elaps.qt import QDataArg
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import pyqtSlot
class QCall(QtGui.QListWidgetItem):

    """Representation of a call in ELAPS:PlayMat."""

    def __init__(self, playmat, callid):
        """Initialize the call representation."""
        QtGui.QListWidgetItem.__init__(self)
        self.playmat = playmat
        self.sig = None

        self.UI_init()

    @property
    def callid(self):
        """Get the callid (this item's row in the calls list)."""
        return self.playmat.UI_calls.row(self)

    @property
    def call(self):
        """Get the associated Call (or BasicCall)."""
        return self.playmat.experiment.calls[self.callid]

    def UI_init(self):
        """Initialize the GUI elements."""
        routines = list(self.playmat.experiment.sampler["kernels"])

        # layout
        self.widget = QtGui.QWidget()
        layout = QtGui.QGridLayout()
        self.widget.setLayout(layout)

        # routine label
        routinelabel = QtGui.QLabel()
        layout.addWidget(routinelabel, 0, 0)

        # routine
        routine = QtGui.QLineEdit(textChanged=self.on_arg_change)
        layout.addWidget(routine, 1, 0)
        routine.argid = 0
        completer = QtGui.QCompleter(routines, routine)
        routine.setCompleter(completer)

        # spaces
        layout.setColumnStretch(100, 1)

        # shortcuts

        # attributes
        self.UI_args = [routine]
        self.UI_arglabels = [routinelabel]

    def update_size(self):
        """Update the size of the call item from its widget."""
        def update():
            """Update the size (asynchronous callback)."""
            try:
                self.setSizeHint(self.widget.sizeHint())
            except:
                # NOTE(review): bare except — presumably guards against
                # the widget having been deleted; confirm.
                pass
        QtCore.QTimer.singleShot(0, update)

    def UI_args_set(self, force=False):
        """Set all arguments."""
        for argid in range(len(self.call)):
            self.UI_arg_set(argid, force)

    def UI_arg_set(self, argid=None, force=False):
        """Set an argument's widget from the experiment value."""
        value = self.call[argid]
        UI_arg = self.UI_args[argid]
        UI_arglabel = self.UI_arglabels[argid]
        # Don't clobber a field the user is editing, unless forced.
        if force or not UI_arg.hasFocus():
            if isinstance(UI_arg, QtGui.QLineEdit):
                if type(value) is list:
                    UI_arg.setText("[%s]" % value[0])
                else:
                    UI_arg.setText(str(value))
            elif isinstance(UI_arg, QtGui.QComboBox):
                UI_arg.setCurrentIndex(UI_arg.findText(str(value)))
        if isinstance(self.call, signature.BasicCall):
            self.playmat.UI_set_invalid(UI_arg, False)
        if isinstance(self.call, signature.Call):
            # apply hideargs
            show = not isinstance(
                self.sig[argid], tuple(self.playmat.hideargs) + (type(None),)
            )
            UI_arg.setVisible(show)
            UI_arglabel.setVisible(show)
            # viz
            if isinstance(self.sig[argid], signature.Data):
                UI_arg.viz()
            # validity
            if argid > 0:
                try:
                    self.playmat.experiment.set_arg(self.callid, argid, value,
                                                    check_only=True)
                    self.playmat.UI_set_invalid(UI_arg, False)
                except:
                    self.playmat.UI_set_invalid(UI_arg)
        self.update_size()

    def setall(self):
        """Set all UI elements."""
        self.playmat.UI_setting += 1
        self.fixcallid = self.callid
        if isinstance(self.call, signature.BasicCall):
            # (Re)build the argument widgets when the signature changed.
            if self.sig != self.call.sig:
                self.args_clear()
                self.args_init()
                self.sig = self.call.sig
        else:
            if self.sig:
                self.args_clear()
                self.sig = None
        self.UI_args_set()
        self.playmat.UI_setting -= 1

    def args_init(self):
        """Initialize the arguments."""
        Qroutine = self.UI_args[0]
        assert(isinstance(self.call, signature.BasicCall))
        sig = self.call.sig
        doc = self.playmat.docs_get(self.call[0])
        if doc:
            Qroutine.setToolTip(doc[0][1])
        for argid, arg in enumerate(sig):
            if argid == 0:
                continue
            tooltip = ""
            if isinstance(sig, signature.Signature):
                argname = sig[argid].name
                if doc and argid < len(doc):
                    tooltip = doc[argid][1]
            else:
                argname = sig[argid].replace("*", " *")
                if doc and argid < len(doc):
                    argname += doc[argid][0]
                    tooltip = doc[argid][1]
                if argname in ("int *", "float *", "double *"):
                    if doc:
                        tooltip += "\n\n*Format*:\n"
                    tooltip += ("Scalar:\tvalue\t\te.g. 1, -0.5\n"
                                "Array:\t[#elements]"
                                "\te.g. [10000] for a 100x100 matrix")
            # arglabel
            UI_arglabel = QtGui.QLabel(
                argname, toolTip=tooltip, alignment=QtCore.Qt.AlignCenter
            )
            self.UI_arglabels.append(UI_arglabel)
            self.widget.layout().addWidget(UI_arglabel, 0, argid)
            # arg
            if isinstance(sig, signature.Signature):
                arg = sig[argid]
                if isinstance(arg, signature.Flag):
                    UI_arg = QtGui.QComboBox()
                    UI_arg.addItems(arg.flags)
                    UI_arg.currentIndexChanged[str].connect(self.on_arg_change)
                elif isinstance(arg, signature.Data):
                    UI_arg = QDataArg(
                        self,
                        editingFinished=self.on_dataarg_focusout
                    )
                else:
                    UI_arg = QtGui.QLineEdit(
                        textChanged=self.on_arg_change,
                        editingFinished=self.on_arg_focusout
                    )
            else:
                UI_arg = QtGui.QLineEdit("", textChanged=self.on_arg_change)
            UI_arg.pyqtConfigure(
                toolTip=tooltip,
                contextMenuPolicy=QtCore.Qt.CustomContextMenu,
                customContextMenuRequested=self.on_arg_rightclick
            )
            UI_arg.argid = argid
            self.UI_args.append(UI_arg)
            self.widget.layout().addWidget(UI_arg, 1, argid,
                                           QtCore.Qt.AlignCenter)

    def args_clear(self):
        """Clear the arguments (all widgets except the routine field)."""
        for UI_arg in self.UI_args[1:]:
            UI_arg.deleteLater()
        self.UI_args = [self.UI_args[0]]
        for UI_arglabel in self.UI_arglabels[1:]:
            UI_arglabel.deleteLater()
        self.UI_arglabels = [self.UI_arglabels[0]]
        self.UI_args[0].setToolTip("")

    def viz(self):
        """Visualize all operands."""
        if not isinstance(self.call, signature.Call):
            return
        for argid in self.call.sig.dataargs():
            self.UI_args[argid].viz()
        self.update_size()

    # event handlers

    # @pyqtSlot(str)  # sender() pyqt bug
    def on_arg_change(self, value):
        """Event: Changed an argument."""
        sender = self.playmat.Qapp.sender()
        value = str(value)
        argid = sender.argid
        if isinstance(sender, QtGui.QLineEdit) and not isinstance(sender,
                                                                  QDataArg):
            # adjust width no matter where the change came from
            width = sender.fontMetrics().width(value) + 4
            width += sender.minimumSizeHint().width()
            margins = sender.getTextMargins()
            width += margins[0] + margins[2]
            width = min(width, sender.sizeHint().width())
            width = max(width, sender.sizeHint().height())
            if argid == 0:
                sender.setMinimumWidth(width)
            else:
                sender.setFixedWidth(width)
        if self.playmat.UI_setting:
            return
        self.playmat.on_arg_set(self.callid, argid, value)

    def on_dataarg_focusout(self):
        """Event: Changed a data argument."""
        sender = self.playmat.Qapp.sender()
        value = sender.text()
        self.on_arg_change(value)
        self.on_arg_focusout()

    # @pyqtSlot(QtCore.QPoint)  # sender() pyqt bug
    def on_arg_rightclick(self, pos):
        """Event: right click on arg."""
        if self.playmat.UI_setting:
            return
        sender = self.playmat.Qapp.sender()
        globalpos = sender.mapToGlobal(pos)
        menu = sender.createStandardContextMenu()
        if isinstance(self.call, signature.Call):
            actions = menu.actions()
            if actions[-1].text() == "Insert Unicode control character":
                actions[-1].setVisible(False)
            argid = sender.argid
            if isinstance(self.call.sig[argid], signature.Ld):
                inferld = QtGui.QAction("Infer leading dimension", sender,
                                        triggered=self.on_inferld)
                inferld.argid = argid
                menu.insertAction(actions[0], inferld)
            elif isinstance(self.call.sig[argid], signature.Lwork):
                inferlwork = QtGui.QAction("Infer workspace size", sender,
                                           triggered=self.on_inferlwork)
                inferlwork.argid = argid
                menu.insertAction(actions[0], inferlwork)
            if isinstance(self.call.sig[argid], signature.Data):
                for action in self.playmat.UI_varyactions(self.call[argid]):
                    if action:
                        menu.insertAction(actions[0], action)
                    else:
                        # None entries act as separators.
                        menu.insertSeparator(actions[0])
            if len(menu.actions()) > len(actions):
                menu.insertSeparator(actions[0])
        menu.exec_(globalpos)

    # @pyqtSlot()  # sender() pyqt bug
    def on_inferld(self):
        """Event: infer ld."""
        if self.playmat.UI_setting:
            return
        argid = self.playmat.Qapp.sender().argid
        self.UI_args[argid].clearFocus()
        self.playmat.on_infer_ld(self.callid, argid)

    # @pyqtSlot()  # sender() pyqt bug
    def on_inferlwork(self):
        """Event: infer lwork."""
        if self.playmat.UI_setting:
            return
        argid = self.playmat.Qapp.sender().argid
        self.UI_args[argid].clearFocus()
        self.playmat.on_infer_lwork(self.callid, argid)

    @pyqtSlot()
    def on_arg_focusout(self):
        """Argument loses focus."""
        if self.playmat.UI_setting:
            return
        if self.playmat.Qapp.activeModalWidget():
            return
        self.playmat.UI_calls_set()
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from logging.handlers import TimedRotatingFileHandler
import time
import os
from collections import namedtuple
import ipaddr
import aruba.aruba_pipeline as aruba
from util import mac_addr_is_unicast
from ryu.lib import ofctl_v1_3 as ofctl
from ryu.lib import mac
from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, packet
from ryu.lib.packet import vlan as packet_vlan
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
class LinkNeighbor(object):
    """A neighbor seen on a link, with the time it was cached."""

    def __init__(self, eth_src, now):
        # Source MAC of the neighbor and the timestamp of observation.
        self.eth_src = eth_src
        self.cache_time = now
class HostCacheEntry(object):
    """A learned host: its MAC, whether it is permanent, and when cached."""

    def __init__(self, eth_src, permanent, now):
        # permanent entries are not subject to expiry.
        self.eth_src = eth_src
        self.permanent = permanent
        self.cache_time = now
def valve_factory(dp):
    """Return a Valve object based on dp's hardware configuration field.

    Arguments:
    dp -- a DP object with the configuration for this valve.

    Returns None when the hardware type is not supported.
    """
    SUPPORTED_HARDWARE = {
        'Allied-Telesis': Valve,
        'Aruba': ArubaValve,
        'NoviFlow': Valve,
        'Open vSwitch': Valve,
        'ZodiacFX': Valve,
    }
    valve_cls = SUPPORTED_HARDWARE.get(dp.hardware)
    if valve_cls is None:
        return None
    return valve_cls(dp)
class Valve(object):
"""Generates the messages to configure a datapath as a l2 learning switch.
Vendor specific implementations may require sending configuration flows.
This can be achieved by inheriting from this class and overwriting the
function switch_features.
"""
FAUCET_MAC = '0e:00:00:00:00:01'
    def __init__(self, dp, logname='faucet', *args, **kwargs):
        # dp: datapath configuration object; logname: logger name to use.
        self.dp = dp
        self.logger = logging.getLogger(logname)
        # Created lazily by ofchannel_log() when the DP configures one.
        self.ofchannel_logger = None
    def switch_features(self, dp_id, msg):
        """Send configuration flows necessary for the switch implementation.

        Arguments:
        dp_id -- the Datapath unique ID (64bit int)
        msg -- OFPSwitchFeatures msg sent from switch.

        Vendor specific configuration should be implemented here.
        """
        # Base implementation: no vendor-specific flows.
        return []
def ofchannel_log(self, ofmsgs):
if self.dp is not None:
if self.dp.ofchannel_log is not None:
if self.ofchannel_logger is None:
self.ofchannel_logger = logging.getLogger(
self.dp.ofchannel_log)
logger_handler = TimedRotatingFileHandler(
self.dp.ofchannel_log,
when='midnight')
log_fmt = ('%(asctime)s %(name)-6s '
'%(levelname)-8s %(message)s')
logger_handler.setFormatter(
logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
self.ofchannel_logger.addHandler(logger_handler)
self.ofchannel_logger.propagate = 0
self.ofchannel_logger.setLevel(logging.DEBUG)
for ofmsg in ofmsgs:
self.ofchannel_logger.debug(ofmsgs)
    @staticmethod
    def ignore_port(port_num):
        """Ignore non-physical ports.

        Returns True if the port should be ignored.
        """
        # port numbers > 0xF0000000 indicate a logical port
        return port_num > 0xF0000000
    @staticmethod
    def apply_actions(actions):
        """Wrap actions in an OFPIT_APPLY_ACTIONS instruction."""
        return parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)
    @staticmethod
    def goto_table(table_id):
        """Return a goto-table instruction for table_id."""
        return parser.OFPInstructionGotoTable(table_id)
    @staticmethod
    def set_eth_src(eth_src):
        """Return a set-field action rewriting the Ethernet source."""
        return parser.OFPActionSetField(eth_src=eth_src)
    @staticmethod
    def set_eth_dst(eth_dst):
        """Return a set-field action rewriting the Ethernet destination."""
        return parser.OFPActionSetField(eth_dst=eth_dst)
    @staticmethod
    def dec_ip_ttl():
        """Return an action that decrements the IP TTL."""
        return parser.OFPActionDecNwTtl()
    @staticmethod
    def valve_packetout(out_port, data):
        """Build a packet-out message sending data out of out_port."""
        return parser.OFPPacketOut(
            # NOTE(review): datapath=None — presumably filled in by the
            # sending code; confirm against the caller.
            datapath=None,
            buffer_id=ofp.OFP_NO_BUFFER,
            in_port=ofp.OFPP_CONTROLLER,
            actions=[parser.OFPActionOutput(out_port, 0)],
            data=data)
    @staticmethod
    def valve_in_match(in_port=None, vlan=None,
                       eth_type=None, eth_src=None,
                       eth_dst=None, eth_dst_mask=None,
                       ipv6_nd_target=None, icmpv6_type=None,
                       nw_proto=None,
                       nw_src=None, nw_dst=None):
        """Build an OFPMatch from the given (all-optional) match fields."""
        match_dict = {}
        if in_port is not None:
            match_dict['in_port'] = in_port
        if vlan is not None:
            if vlan.vid == ofp.OFPVID_NONE:
                # Explicitly match untagged packets.
                match_dict['vlan_vid'] = ofp.OFPVID_NONE
            else:
                match_dict['vlan_vid'] = (vlan.vid | ofp.OFPVID_PRESENT)
        if eth_src is not None:
            match_dict['eth_src'] = eth_src
        if eth_dst is not None:
            if eth_dst_mask is not None:
                match_dict['eth_dst'] = (eth_dst, eth_dst_mask)
            else:
                match_dict['eth_dst'] = eth_dst
        if nw_proto is not None:
            match_dict['ip_proto'] = nw_proto
        if nw_src is not None:
            match_dict['ipv4_src'] = (str(nw_src.ip), str(nw_src.netmask))
        if icmpv6_type is not None:
            match_dict['icmpv6_type'] = icmpv6_type
        if ipv6_nd_target is not None:
            match_dict['ipv6_nd_target'] = str(ipv6_nd_target.ip)
        if nw_dst is not None:
            # Which destination field to match depends on eth_type
            # (ARP target, IPv4 destination, else IPv6 destination).
            nw_dst_masked = (str(nw_dst.ip), str(nw_dst.netmask))
            if eth_type == ether.ETH_TYPE_ARP:
                match_dict['arp_tpa'] = nw_dst_masked
            elif eth_type == ether.ETH_TYPE_IP:
                match_dict['ipv4_dst'] = nw_dst_masked
            else:
                match_dict['ipv6_dst'] = nw_dst_masked
        if eth_type is not None:
            match_dict['eth_type'] = eth_type
        match = parser.OFPMatch(**match_dict)
        return match
def ignore_dpid(self, dp_id):
"""Ignore all DPIDs except the DPID configured."""
if dp_id != self.dp.dp_id:
self.logger.error('Unknown dpid:%s', dp_id)
return True
return False
    def all_valve_tables(self):
        """Return the IDs of all FAUCET tables on this datapath."""
        return (
            self.dp.vlan_table,
            self.dp.acl_table,
            self.dp.eth_src_table,
            self.dp.ipv4_fib_table,
            self.dp.ipv6_fib_table,
            self.dp.eth_dst_table,
            self.dp.flood_table)
    def valve_flowmod(self, table_id, match=None, priority=None,
                      inst=None, command=ofp.OFPFC_ADD, out_port=0,
                      out_group=0, hard_timeout=0, idle_timeout=0):
        """Helper function to construct a flow mod message with cookie."""
        # Default to a match-all match and the lowest configured priority.
        if match is None:
            match = self.valve_in_match()
        if priority is None:
            priority = self.dp.lowest_priority
        if inst is None:
            inst = []
        return parser.OFPFlowMod(
            datapath=None,
            cookie=self.dp.cookie,
            command=command,
            table_id=table_id,
            priority=priority,
            out_port=out_port,
            out_group=out_group,
            match=match,
            instructions=inst,
            hard_timeout=hard_timeout,
            idle_timeout=idle_timeout)
    def valve_flowdel(self, table_id, match=None, priority=None,
                      out_port=ofp.OFPP_ANY):
        """Delete matching flows from a table."""
        return self.valve_flowmod(
            table_id,
            match=match,
            priority=priority,
            command=ofp.OFPFC_DELETE,
            out_port=out_port,
            out_group=ofp.OFPG_ANY)
    def valve_flowdrop(self, table_id, match=None, priority=None,
                      hard_timeout=0):
        """Add drop matching flow to a table."""
        # An empty instruction list means matching packets are dropped.
        return self.valve_flowmod(
            table_id,
            match=match,
            priority=priority,
            hard_timeout=hard_timeout,
            inst=[])
    def valve_flowcontroller(self, table_id, match=None, priority=None,
                             inst=None):
        """Add a flow sending matching packets to the controller.

        At most 256 bytes of each packet are sent; any extra
        instructions in inst are appended after the output action.
        """
        if inst is None:
            inst = []
        return self.valve_flowmod(
            table_id,
            match=match,
            priority=priority,
            inst=[self.apply_actions([parser.OFPActionOutput(
                ofp.OFPP_CONTROLLER, max_len=256)])] + inst)
def delete_all_valve_flows(self):
"""Delete all flows from all FAUCET tables."""
ofmsgs = []
for table_id in self.all_valve_tables():
ofmsgs.append(self.valve_flowdel(table_id))
return ofmsgs
def add_default_drop_flows(self):
    """Add default drop rules on all FAUCET tables.

    Returns a list of flow mods: a lowest-priority drop in every table,
    plus highest-priority drops in the VLAN table for spoofed and
    reserved source/destination addresses.
    """
    # default drop on all tables.
    ofmsgs = []
    for table in self.all_valve_tables():
        ofmsgs.append(self.valve_flowdrop(
            table,
            priority=self.dp.lowest_priority))
    # antispoof for FAUCET's MAC address
    # TODO: antispoof for controller IPs on this VLAN, too.
    ofmsgs.append(self.valve_flowdrop(
        self.dp.vlan_table,
        self.valve_in_match(eth_src=self.FAUCET_MAC),
        priority=self.dp.high_priority))
    # drop STDP BPDU
    for bpdu_mac in ('01:80:C2:00:00:00', '01:00:0C:CC:CC:CD'):
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            self.valve_in_match(eth_dst=bpdu_mac),
            priority=self.dp.highest_priority))
    # drop LLDP
    ofmsgs.append(self.valve_flowdrop(
        self.dp.vlan_table,
        self.valve_in_match(eth_type=ether.ETH_TYPE_LLDP),
        priority=self.dp.highest_priority))
    # drop broadcast sources
    ofmsgs.append(self.valve_flowdrop(
        self.dp.vlan_table,
        self.valve_in_match(eth_src=mac.BROADCAST_STR),
        priority=self.dp.highest_priority))
    return ofmsgs
def add_vlan_flood_flow(self):
    """Add a flow sending unknown-destination packets to the flood table."""
    flood_inst = [self.goto_table(self.dp.flood_table)]
    return [self.valve_flowmod(
        self.dp.eth_dst_table,
        priority=self.dp.low_priority,
        inst=flood_inst)]
def add_controller_learn_flow(self):
    """Add a flow punting unknown sources to the controller for learning."""
    learn_inst = [self.goto_table(self.dp.eth_dst_table)]
    return [self.valve_flowcontroller(
        self.dp.eth_src_table,
        priority=self.dp.low_priority,
        inst=learn_inst)]
def add_default_flows(self):
    """Configure datapath with necessary default tables and rules.

    Order matters: wipe everything, install drops, then flood and
    learning rules.
    """
    ofmsgs = []
    for builder in (self.delete_all_valve_flows,
                    self.add_default_drop_flows,
                    self.add_vlan_flood_flow,
                    self.add_controller_learn_flow):
        ofmsgs.extend(builder())
    return ofmsgs
def add_ports_and_vlans(self, discovered_port_nums):
    """Add all configured and discovered ports and VLANs.

    Collects the union of VLAN member ports, ACL mirror destination
    ports and discovered (but unconfigured) physical ports, then emits
    the port_add flows for each.
    """
    ofmsgs = []
    all_port_nums = set()
    # add vlan ports
    for vlan in self.dp.vlans.itervalues():
        self.logger.info('Configuring VLAN %s', vlan)
        vlan_ports = vlan.tagged + vlan.untagged
        for port in vlan_ports:
            all_port_nums.add(port.number)
        # install eth_dst_table flood ofmsgs
        ofmsgs.extend(self.build_flood_rules(vlan))
    # add mirror ports.
    for port_num in self.dp.mirror_from_port.itervalues():
        all_port_nums.add(port_num)
    # add any ports discovered but not configured
    for port_num in discovered_port_nums:
        if self.ignore_port(port_num):
            continue
        if port_num not in all_port_nums:
            all_port_nums.add(port_num)
    # now configure all ports
    for port_num in all_port_nums:
        ofmsgs.extend(self.port_add(self.dp.dp_id, port_num))
    return ofmsgs
@staticmethod
def build_flood_ports_for_vlan(vlan_ports, eth_dst):
    """Return the running ports that should receive this flood.

    Ports with unicast flooding disabled are skipped when the flood is
    for an unknown (None) or unicast destination.
    """
    flood_ports = []
    for port in vlan_ports:
        if not port.running():
            continue
        needs_unicast_flood = (
            eth_dst is None or mac_addr_is_unicast(eth_dst))
        if needs_unicast_flood and not port.unicast_flood:
            continue
        flood_ports.append(port)
    return flood_ports
def build_flood_rule_actions(self, vlan, eth_dst):
    """Build the output action list flooding eth_dst on a VLAN.

    Tagged ports are output first; a single pop-VLAN action precedes
    all untagged outputs.
    """
    actions = [
        parser.OFPActionOutput(port.number)
        for port in self.build_flood_ports_for_vlan(vlan.tagged, eth_dst)]
    untagged = self.build_flood_ports_for_vlan(vlan.untagged, eth_dst)
    if untagged:
        actions.append(parser.OFPActionPopVlan())
        actions.extend(
            parser.OFPActionOutput(port.number) for port in untagged)
    return actions
def build_flood_rules(self, vlan, modify=False):
    """Add flows to flood packets to unknown destinations on a VLAN.

    One rule per destination match (unicast/unknown, 802.x, IPv4/IPv6
    multicast, broadcast), at ascending priorities, plus per-port
    variants that prepend a mirror output for mirrored ports.

    Arguments:
    vlan -- the VLAN to build flood rules for.
    modify -- use OFPFC_MODIFY_STRICT instead of OFPFC_ADD (used when
              ports change state).
    """
    command = ofp.OFPFC_ADD
    if modify:
        command = ofp.OFPFC_MODIFY_STRICT
    flood_priority = self.dp.low_priority
    flood_eth_dst_matches = []
    if vlan.unicast_flood:
        flood_eth_dst_matches.extend([(None, None)])
    flood_eth_dst_matches.extend([
        ('01:80:C2:00:00:00', '01:80:C2:00:00:00'), # 802.x
        ('01:00:5E:00:00:00', 'ff:ff:ff:00:00:00'), # IPv4 multicast
        ('33:33:00:00:00:00', 'ff:ff:00:00:00:00'), # IPv6 multicast
        (mac.BROADCAST_STR, None), # flood on ethernet broadcasts
    ])
    ofmsgs = []
    for eth_dst, eth_dst_mask in flood_eth_dst_matches:
        flood_acts = self.build_flood_rule_actions(vlan, eth_dst)
        ofmsgs.append(self.valve_flowmod(
            self.dp.flood_table,
            match=self.valve_in_match(
                vlan=vlan, eth_dst=eth_dst, eth_dst_mask=eth_dst_mask),
            command=command,
            inst=[self.apply_actions(flood_acts)],
            priority=flood_priority))
        flood_priority += 1
    for port in vlan.tagged + vlan.untagged:
        if port.number in self.dp.mirror_from_port:
            mirror_port = self.dp.mirror_from_port[port.number]
            for eth_dst, eth_dst_mask in flood_eth_dst_matches:
                flood_acts = self.build_flood_rule_actions(vlan, eth_dst)
                # bugfix: build the mirror action list from the flood
                # actions for *this* eth_dst match. Previously
                # mirror_acts was computed once per port from the stale
                # flood_acts of the last match in the loop above, so
                # every mirror rule carried the wrong flood actions.
                mirror_acts = [
                    parser.OFPActionOutput(mirror_port)] + flood_acts
                ofmsgs.append(self.valve_flowmod(
                    self.dp.flood_table,
                    match=self.valve_in_match(
                        in_port=port.number, vlan=vlan,
                        eth_dst=eth_dst, eth_dst_mask=eth_dst_mask),
                    command=command,
                    inst=[self.apply_actions(mirror_acts)],
                    priority=flood_priority))
                flood_priority += 1
    return ofmsgs
def datapath_connect(self, dp_id, discovered_port_nums):
    """Generate the default openflow msgs for a datapath upon connection.

    Depending on the implementation, a network state database may be
    updated.

    Arguments:
    dp_id -- the Datapath unique ID (64bit int)
    discovered_port_nums -- port numbers reported by the datapath, or
        None.
    Returns:
    A list of flow mod messages to send, in order, to configure the
    datapath.
    """
    if self.ignore_dpid(dp_id):
        return []
    if discovered_port_nums is None:
        discovered_port_nums = []
    self.logger.info('Configuring datapath')
    ofmsgs = list(self.add_default_flows())
    ofmsgs.extend(self.add_ports_and_vlans(discovered_port_nums))
    self.dp.running = True
    return ofmsgs
def datapath_disconnect(self, dp_id):
    """Update n/w state db upon disconnection of datapath with id dp_id."""
    known_dpid = not self.ignore_dpid(dp_id)
    if known_dpid:
        self.logger.critical('Datapath disconnected')
    return []
def datapath_down(self, dp_id):
    """Mark the configured datapath as no longer running; no flows sent."""
    if self.ignore_dpid(dp_id):
        return []
    self.dp.running = False
    self.logger.warning('Datapath %s down', dp_id)
    return []
def port_add_acl(self, port_num):
    """Build ACL table flows for a port.

    Returns an (ofmsgs, forwarding_table) tuple: the ACL flow mods
    (empty when the port has no input ACL) and the table that the
    port's VLAN rules should goto next (the ACL table when an ACL
    applies, otherwise the eth_src table).
    """
    ofmsgs = []
    forwarding_table = self.dp.eth_src_table
    if port_num in self.dp.acl_in:
        acl_num = self.dp.acl_in[port_num]
        forwarding_table = self.dp.acl_table
        # rules are installed in config order at descending priority,
        # so earlier rules win.
        acl_rule_priority = self.dp.highest_priority
        acl_allow_inst = self.goto_table(self.dp.eth_src_table)
        for rule_conf in self.dp.acls[acl_num]:
            acl_inst = []
            match_dict = {}
            for attrib, attrib_value in rule_conf.iteritems():
                if attrib == 'actions':
                    if 'mirror' in attrib_value:
                        port_no = attrib_value['mirror']
                        acl_inst.append(
                            self.apply_actions([
                                parser.OFPActionOutput(port_no)]))
                    # if output selected, output packet now
                    # and exit pipeline.
                    if 'output' in attrib_value:
                        output_dict = attrib_value['output']
                        output_actions = []
                        # if destination rewriting selected, rewrite it.
                        if 'dl_dst' in output_dict:
                            output_actions.append(
                                parser.OFPActionSetField(
                                    eth_dst=output_dict['dl_dst']))
                        # output to port
                        port_no = output_dict['port']
                        output_actions.append(
                            parser.OFPActionOutput(port_no))
                        acl_inst.append(
                            self.apply_actions(output_actions))
                        continue
                    # allow == 1 continues the pipeline; anything else
                    # leaves acl_inst without a goto, i.e. a drop.
                    if attrib_value['allow'] == 1:
                        acl_inst.append(acl_allow_inst)
                    continue
                if attrib == 'in_port':
                    continue
                match_dict[attrib] = attrib_value
            # override in_port always
            match_dict['in_port'] = port_num
            # to_match() needs to access parser via dp
            # this uses the old API, which is oh so convenient
            # (transparently handling masks for example).
            null_dp = namedtuple('null_dp', 'ofproto_parser')
            null_dp.ofproto_parser = parser
            acl_match = ofctl.to_match(null_dp, match_dict)
            ofmsgs.append(self.valve_flowmod(
                self.dp.acl_table,
                acl_match,
                priority=acl_rule_priority,
                inst=acl_inst))
            acl_rule_priority -= 1
    return ofmsgs, forwarding_table
def add_controller_ips(self, controller_ips, vlan):
    """Add flows to answer ARP/ND and ICMP echo for controller IPs,
    and to send routed traffic for FAUCET's MAC into the FIB tables.

    Host-route priorities are boosted by the prefix length so more
    specific matches win.
    """
    ofmsgs = []
    for controller_ip in controller_ips:
        # a /32 (or /128) host route for the controller's own address.
        controller_ip_host = ipaddr.IPNetwork(
            '/'.join((str(controller_ip.ip),
                      str(controller_ip.max_prefixlen))))
        max_prefixlen = controller_ip_host.prefixlen
        if controller_ip_host.version == 4:
            # punt ARP for the controller's IP to the controller.
            ofmsgs.append(self.valve_flowcontroller(
                self.dp.eth_src_table,
                self.valve_in_match(
                    eth_type=ether.ETH_TYPE_ARP,
                    nw_dst=controller_ip_host,
                    vlan=vlan),
                priority=self.dp.highest_priority + max_prefixlen))
            # Initialize IPv4 FIB
            ofmsgs.append(self.valve_flowmod(
                self.dp.eth_src_table,
                self.valve_in_match(
                    eth_type=ether.ETH_TYPE_IP,
                    eth_dst=self.FAUCET_MAC,
                    vlan=vlan),
                priority=self.dp.highest_priority,
                inst=[self.goto_table(self.dp.ipv4_fib_table)]))
            # punt ICMP echo for the controller's IP to the controller.
            ofmsgs.append(self.valve_flowcontroller(
                self.dp.ipv4_fib_table,
                self.valve_in_match(
                    vlan=vlan,
                    eth_type=ether.ETH_TYPE_IP,
                    nw_proto=inet.IPPROTO_ICMP,
                    nw_src=controller_ip,
                    nw_dst=controller_ip_host),
                priority=self.dp.highest_priority + max_prefixlen))
        else:
            # punt neighbor solicitations for the controller's IP.
            ofmsgs.append(self.valve_flowcontroller(
                self.dp.eth_src_table,
                self.valve_in_match(
                    eth_type=ether.ETH_TYPE_IPV6,
                    vlan=vlan,
                    nw_proto=inet.IPPROTO_ICMPV6,
                    ipv6_nd_target=controller_ip_host,
                    icmpv6_type=icmpv6.ND_NEIGHBOR_SOLICIT),
                priority=self.dp.highest_priority + max_prefixlen))
            # punt neighbor advertisements addressed to FAUCET.
            ofmsgs.append(self.valve_flowcontroller(
                self.dp.eth_src_table,
                self.valve_in_match(
                    eth_type=ether.ETH_TYPE_IPV6,
                    eth_dst=self.FAUCET_MAC,
                    vlan=vlan,
                    nw_proto=inet.IPPROTO_ICMPV6,
                    icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT),
                priority=self.dp.highest_priority + max_prefixlen))
            # Initialize IPv6 FIB
            ofmsgs.append(self.valve_flowmod(
                self.dp.eth_src_table,
                self.valve_in_match(
                    eth_type=ether.ETH_TYPE_IPV6,
                    eth_dst=self.FAUCET_MAC,
                    vlan=vlan),
                priority=self.dp.highest_priority,
                inst=[self.goto_table(self.dp.ipv6_fib_table)]))
            # punt ICMPv6 echo for the controller's IP.
            ofmsgs.append(self.valve_flowcontroller(
                self.dp.ipv6_fib_table,
                self.valve_in_match(
                    eth_type=ether.ETH_TYPE_IPV6,
                    vlan=vlan,
                    nw_proto=inet.IPPROTO_ICMPV6,
                    nw_dst=controller_ip_host,
                    icmpv6_type=icmpv6.ICMPV6_ECHO_REQUEST),
                priority=self.dp.highest_priority + max_prefixlen))
    return ofmsgs
def port_add_vlan_untagged(self, port, vlan, forwarding_table, mirror_act):
    """Install untagged VLAN rules for a port: push the VLAN tag onto
    untagged ingress traffic, then goto forwarding_table."""
    ofmsgs = list(self.add_controller_ips(vlan.controller_ips, vlan))
    push_vlan_act = mirror_act + [
        parser.OFPActionPushVlan(ether.ETH_TYPE_8021Q),
        parser.OFPActionSetField(vlan_vid=(vlan.vid | ofp.OFPVID_PRESENT))]
    push_vlan_inst = [
        self.apply_actions(push_vlan_act),
        self.goto_table(forwarding_table)]
    # valve_in_match only needs a vid attribute; OFPVID_NONE matches
    # packets arriving without a VLAN tag.
    null_vlan = namedtuple('null_vlan', 'vid')
    null_vlan.vid = ofp.OFPVID_NONE
    ofmsgs.append(self.valve_flowmod(
        self.dp.vlan_table,
        self.valve_in_match(in_port=port.number, vlan=null_vlan),
        priority=self.dp.low_priority,
        inst=push_vlan_inst))
    ofmsgs.extend(self.build_flood_rules(vlan))
    return ofmsgs
def port_add_vlan_tagged(self, port, vlan, forwarding_table, mirror_act):
    """Install tagged VLAN rules for a port: optionally mirror, then
    goto forwarding_table."""
    ofmsgs = list(self.add_controller_ips(vlan.controller_ips, vlan))
    vlan_inst = [self.goto_table(forwarding_table)]
    if mirror_act:
        vlan_inst.insert(0, self.apply_actions(mirror_act))
    ofmsgs.append(self.valve_flowmod(
        self.dp.vlan_table,
        self.valve_in_match(in_port=port.number, vlan=vlan),
        priority=self.dp.low_priority,
        inst=vlan_inst))
    ofmsgs.extend(self.build_flood_rules(vlan))
    return ofmsgs
def port_add_vlans(self, port, forwarding_table, mirror_act):
    """Install VLAN rules for every VLAN this port is a member of.

    Tagged memberships are installed before untagged ones.
    """
    ofmsgs = []
    vlans = self.dp.vlans.values()
    for vlan in [v for v in vlans if port in v.tagged]:
        ofmsgs.extend(self.port_add_vlan_tagged(
            port, vlan, forwarding_table, mirror_act))
    for vlan in [v for v in vlans if port in v.untagged]:
        ofmsgs.extend(self.port_add_vlan_untagged(
            port, vlan, forwarding_table, mirror_act))
    return ofmsgs
def port_add(self, dp_id, port_num):
    """Generate openflow msgs to update the datapath upon addition of port.

    Arguments:
    dp_id -- the unique id of the datapath
    port_num -- the port number of the new port
    Returns
    A list of flow mod messages to be sent to the datapath."""
    if self.ignore_dpid(dp_id) or self.ignore_port(port_num):
        return []
    if port_num not in self.dp.ports:
        self.logger.info(
            'Autoconfiguring port:%u based on default config', port_num)
        self.dp.add_port(port_num)
    port = self.dp.ports[port_num]
    self.logger.info('Port %s added', port)
    port.phys_up = True
    if not port.running():
        return []
    in_port_match = self.valve_in_match(in_port=port_num)
    ofmsgs = []
    self.logger.info('Sending config for port %s', port)
    # clear any stale flows matching this port in every table.
    for table in self.all_valve_tables():
        ofmsgs.append(self.valve_flowdel(table, in_port_match))
    # if this port is used as mirror port in any acl - drop input packets
    for acl in self.dp.acls.values():
        for rule_conf in acl:
            for attrib, attrib_value in rule_conf.iteritems():
                if attrib == 'actions':
                    if 'mirror' in attrib_value:
                        port_no = attrib_value['mirror']
                        ofmsgs.append(self.valve_flowdrop(
                            self.dp.vlan_table,
                            self.valve_in_match(in_port=port_no)))
    if port_num in self.dp.mirror_from_port.values():
        # this is a mirror port - drop all input packets
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            in_port_match))
        return ofmsgs
    mirror_act = []
    # this port is mirrored to another port
    if port_num in self.dp.mirror_from_port:
        mirror_port_num = self.dp.mirror_from_port[port_num]
        mirror_act = [parser.OFPActionOutput(mirror_port_num)]
    acl_ofmsgs, forwarding_table = self.port_add_acl(port_num)
    ofmsgs.extend(acl_ofmsgs)
    ofmsgs.extend(self.port_add_vlans(port, forwarding_table, mirror_act))
    return ofmsgs
def port_delete(self, dp_id, port_num):
    """Generate openflow msgs to update the datapath upon deletion of port.

    Arguments:
    dp_id -- the unique id of the datapath
    port_num -- the port number of the deleted port
    Returns
    A list of flow mod messages to be sent to the datapath."""
    if self.ignore_dpid(dp_id) or self.ignore_port(port_num):
        return []
    if port_num not in self.dp.ports:
        return []
    port = self.dp.ports[port_num]
    port.phys_up = False
    self.logger.warning('Port %s down', port)
    ofmsgs = []
    if not port.permanent_learn:
        # delete all rules matching this port in all tables.
        for table in self.all_valve_tables():
            ofmsgs.append(self.valve_flowdel(
                table,
                self.valve_in_match(in_port=port_num)))
        # delete eth_dst rules
        ofmsgs.append(self.valve_flowdel(
            self.dp.eth_dst_table,
            out_port=port_num))
        ofmsgs.append(parser.OFPBarrierRequest(None))
    for vlan in self.dp.vlans.values():
        # NOTE(review): this compares port_num (an int) against the
        # Port objects in tagged/untagged -- assumes Port supports
        # equality with ints; confirm against the Port class.
        if port_num in vlan.tagged or port_num in vlan.untagged:
            # bugfix: modify=True is an argument to build_flood_rules,
            # not to list.extend (extend() takes no keyword arguments
            # and would raise TypeError here).
            ofmsgs.extend(self.build_flood_rules(vlan, modify=True))
    return ofmsgs
def delete_host_from_vlan(self, eth_src, vlan):
    """Delete src and dst table flows for eth_src on a VLAN, followed
    by a barrier so deletions land before any re-adds."""
    return [
        # delete any existing flows for this vlan/mac on the src table
        self.valve_flowdel(
            self.dp.eth_src_table,
            self.valve_in_match(vlan=vlan, eth_src=eth_src)),
        # delete any existing flows for this vlan/mac on the dst table
        self.valve_flowdel(
            self.dp.eth_dst_table,
            self.valve_in_match(vlan=vlan, eth_dst=eth_src)),
        parser.OFPBarrierRequest(None)]
def build_ethernet_pkt(self, eth_dst, in_port, vlan, ethertype):
    """Return a Ryu packet from FAUCET's MAC to eth_dst, adding an
    802.1Q header when the output port is tagged on the VLAN."""
    pkt = packet.Packet()
    if not vlan.port_is_tagged(in_port):
        pkt.add_protocol(ethernet.ethernet(
            eth_dst, self.FAUCET_MAC, ethertype))
    else:
        pkt.add_protocol(ethernet.ethernet(
            eth_dst, self.FAUCET_MAC, ether.ETH_TYPE_8021Q))
        pkt.add_protocol(packet_vlan.vlan(
            vid=vlan.vid, ethertype=ethertype))
    return pkt
def add_route(self, vlan, ip_gw, ip_dst):
    """Record a route to ip_dst via ip_gw on a VLAN.

    If the gateway is already in the neighbor cache the resolved FIB
    flow is emitted immediately; otherwise resolution happens later.
    """
    if ip_dst.version == 6:
        routes = vlan.ipv6_routes
        neighbor_cache = vlan.nd_cache
        eth_type = ether.ETH_TYPE_IPV6
        fib_table = self.dp.ipv6_fib_table
    else:
        routes = vlan.ipv4_routes
        neighbor_cache = vlan.arp_cache
        eth_type = ether.ETH_TYPE_IP
        fib_table = self.dp.ipv4_fib_table
    routes[ip_dst] = ip_gw
    ofmsgs = []
    if ip_gw in neighbor_cache:
        ofmsgs.extend(self.add_resolved_route(
            eth_type=eth_type,
            fib_table=fib_table,
            vlan=vlan,
            neighbor_cache=neighbor_cache,
            ip_gw=ip_gw,
            ip_dst=ip_dst,
            eth_dst=neighbor_cache[ip_gw].eth_src,
            is_updated=False))
    return ofmsgs
def del_route(self, vlan, ip_dst):
    """Remove the route to ip_dst from the VLAN's route table and FIB."""
    ofmsgs = []
    if ip_dst.version == 6:
        if ip_dst in vlan.ipv6_routes:
            del vlan.ipv6_routes[ip_dst]
            ofmsgs.append(self.valve_flowdel(
                self.dp.ipv6_fib_table,
                self.valve_in_match(
                    vlan=vlan, eth_type=ether.ETH_TYPE_IPV6,
                    nw_dst=ip_dst)))
    elif ip_dst in vlan.ipv4_routes:
        del vlan.ipv4_routes[ip_dst]
        ofmsgs.append(self.valve_flowdel(
            self.dp.ipv4_fib_table,
            self.valve_in_match(
                vlan=vlan, eth_type=ether.ETH_TYPE_IP,
                nw_dst=ip_dst)))
    return ofmsgs
def add_resolved_route(self, eth_type, fib_table, vlan, neighbor_cache,
                       ip_gw, ip_dst, eth_dst, is_updated=None):
    """Install (or refresh) the FIB flow for a route whose gateway MAC
    is known, and update the neighbor cache.

    is_updated -- None: only refresh the cache entry, send no flows;
                  True: next hop changed, delete the old flow first;
                  False: new route, just add the flow.
    Priority is boosted by the prefix length so longer prefixes win.
    """
    ofmsgs = []
    if is_updated is not None:
        in_match = self.valve_in_match(
            vlan=vlan, eth_type=eth_type, nw_dst=ip_dst)
        prefixlen = ipaddr.IPNetwork(ip_dst).prefixlen
        priority = self.dp.highest_priority + prefixlen
        if is_updated:
            self.logger.info(
                'Updating next hop for route %s via %s (%s)',
                ip_dst, ip_gw, eth_dst)
            # remove the flow pointing at the old next hop before
            # installing the new one.
            ofmsgs.append(self.valve_flowdel(
                fib_table,
                in_match,
                priority=priority))
        else:
            self.logger.info(
                'Adding new route %s via %s (%s)',
                ip_dst, ip_gw, eth_dst)
        # rewrite src/dst MACs, decrement TTL, then switch on eth_dst.
        ofmsgs.append(self.valve_flowmod(
            fib_table,
            in_match,
            priority=priority,
            inst=[self.apply_actions(
                [self.set_eth_src(self.FAUCET_MAC),
                 self.set_eth_dst(eth_dst),
                 self.dec_ip_ttl()])] +
            [self.goto_table(self.dp.eth_dst_table)]))
    now = time.time()
    link_neighbor = LinkNeighbor(eth_dst, now)
    neighbor_cache[ip_gw] = link_neighbor
    return ofmsgs
def control_plane_arp_handler(self, in_port, vlan, eth_src, arp_pkt):
    """Handle ARP addressed to the controller.

    Requests are answered with FAUCET's MAC; replies resolve pending
    gateways and (re)install FIB flows for routes via that gateway.
    """
    ofmsgs = []
    if arp_pkt.opcode == arp.ARP_REQUEST:
        pkt = self.build_ethernet_pkt(
            eth_src, in_port, vlan, ether.ETH_TYPE_ARP)
        # NOTE: arp_pkt is rebound to the reply here; below, src_ip is
        # the IP we answered for and dst_ip is the requester's IP.
        arp_pkt = arp.arp(
            opcode=arp.ARP_REPLY, src_mac=self.FAUCET_MAC,
            src_ip=arp_pkt.dst_ip, dst_mac=eth_src, dst_ip=arp_pkt.src_ip)
        pkt.add_protocol(arp_pkt)
        pkt.serialize()
        ofmsgs.append(self.valve_packetout(in_port, pkt.data))
        self.logger.info(
            'Responded to ARP request for %s from %s',
            arp_pkt.src_ip, arp_pkt.dst_ip)
    elif arp_pkt.opcode == arp.ARP_REPLY:
        resolved_ip_gw = ipaddr.IPv4Address(arp_pkt.src_ip)
        self.logger.info('ARP response %s for %s', eth_src, resolved_ip_gw)
        # is_updated: True if the cached MAC changed, False if this is
        # a fresh resolution (see add_resolved_route).
        is_updated = None
        if resolved_ip_gw in vlan.arp_cache:
            cached_eth_dst = vlan.arp_cache[resolved_ip_gw].eth_src
            if cached_eth_dst != eth_src:
                is_updated = True
        else:
            is_updated = False
        for ip_dst, ip_gw in vlan.ipv4_routes.iteritems():
            if ip_gw == resolved_ip_gw:
                ofmsgs.extend(
                    self.add_resolved_route(
                        ether.ETH_TYPE_IP, self.dp.ipv4_fib_table,
                        vlan, vlan.arp_cache,
                        ip_gw, ip_dst, eth_src, is_updated))
    return ofmsgs
def control_plane_icmp_handler(self, in_port, vlan, eth_src,
                               ipv4_pkt, icmp_pkt):
    """Answer an ICMP echo request to a controller IP with an echo
    reply sent back out of the ingress port."""
    ofmsgs = []
    if icmp_pkt is not None:
        pkt = self.build_ethernet_pkt(
            eth_src, in_port, vlan, ether.ETH_TYPE_IP)
        # swap src/dst of the request to address the reply.
        ipv4_pkt = ipv4.ipv4(
            dst=ipv4_pkt.src, src=ipv4_pkt.dst, proto=ipv4_pkt.proto)
        icmp_pkt = icmp.icmp(
            type_=icmp.ICMP_ECHO_REPLY, code=icmp.ICMP_ECHO_REPLY_CODE,
            data=icmp_pkt.data)
        pkt.add_protocol(ipv4_pkt)
        pkt.add_protocol(icmp_pkt)
        pkt.serialize()
        ofmsgs.append(self.valve_packetout(in_port, pkt.data))
    return ofmsgs
def control_plane_icmpv6_handler(self, in_port, vlan, eth_src,
                                 ipv6_pkt, icmpv6_pkt):
    """Handle ICMPv6 addressed to the controller: answer neighbor
    solicitations and echo requests, and learn from neighbor
    advertisements (installing FIB flows for resolved gateways)."""
    ofmsgs = []
    pkt = self.build_ethernet_pkt(
        eth_src, in_port, vlan, ether.ETH_TYPE_IPV6)
    if icmpv6_pkt.type_ == icmpv6.ND_NEIGHBOR_SOLICIT:
        # reply with a neighbor advertisement carrying FAUCET's MAC.
        dst = icmpv6_pkt.data.dst
        ipv6_reply = ipv6.ipv6(
            src=dst,
            dst=ipv6_pkt.src,
            nxt=inet.IPPROTO_ICMPV6,
            hop_limit=ipv6_pkt.hop_limit)
        pkt.add_protocol(ipv6_reply)
        icmpv6_reply = icmpv6.icmpv6(
            type_=icmpv6.ND_NEIGHBOR_ADVERT,
            data=icmpv6.nd_neighbor(
                dst=dst,
                option=icmpv6.nd_option_tla(
                    hw_src=self.FAUCET_MAC), res=7))
        pkt.add_protocol(icmpv6_reply)
        pkt.serialize()
        ofmsgs.extend([self.valve_packetout(in_port, pkt.data)])
    elif icmpv6_pkt.type_ == icmpv6.ND_NEIGHBOR_ADVERT:
        resolved_ip_gw = ipaddr.IPv6Address(icmpv6_pkt.data.dst)
        self.logger.info('ND response %s for %s', eth_src, resolved_ip_gw)
        # is_updated: True if the cached MAC changed, False if this is
        # a fresh resolution (see add_resolved_route).
        is_updated = None
        if resolved_ip_gw in vlan.nd_cache:
            cached_eth_dst = vlan.nd_cache[resolved_ip_gw].eth_src
            if cached_eth_dst != eth_src:
                is_updated = True
        else:
            is_updated = False
        for ip_dst, ip_gw in vlan.ipv6_routes.iteritems():
            if ip_gw == resolved_ip_gw:
                ofmsgs.extend(
                    self.add_resolved_route(
                        ether.ETH_TYPE_IPV6, self.dp.ipv6_fib_table,
                        vlan, vlan.nd_cache,
                        ip_gw, ip_dst, eth_src, is_updated))
    elif icmpv6_pkt.type_ == icmpv6.ICMPV6_ECHO_REQUEST:
        # reply with an echo reply mirroring id/seq/data.
        dst = ipv6_pkt.dst
        ipv6_reply = ipv6.ipv6(
            src=dst,
            dst=ipv6_pkt.src,
            nxt=inet.IPPROTO_ICMPV6,
            hop_limit=ipv6_pkt.hop_limit)
        pkt.add_protocol(ipv6_reply)
        icmpv6_reply = icmpv6.icmpv6(
            type_=icmpv6.ICMPV6_ECHO_REPLY,
            data=icmpv6.echo(
                id_=icmpv6_pkt.data.id,
                seq=icmpv6_pkt.data.seq,
                data=icmpv6_pkt.data.data))
        pkt.add_protocol(icmpv6_reply)
        pkt.serialize()
        ofmsgs.extend([self.valve_packetout(in_port, pkt.data)])
    return ofmsgs
@staticmethod
def to_faucet_ip(vlan, src_ip, dst_ip):
    """Return True if either address is within any controller IP
    network configured on the VLAN."""
    return any(
        src_ip in controller_ip or dst_ip in controller_ip
        for controller_ip in vlan.controller_ips)
def learn_host_on_vlan_port(self, port, vlan, eth_src):
    """Learn a source MAC on a port: install src-table and dst-table
    flows so its traffic stops being punted to the controller.

    Returns the flow mods; permanently-learned hosts never expire and
    get an antispoof drop for their MAC on other ports.
    """
    ofmsgs = []
    in_port = port.number
    # hosts learned on this port never relearned
    if port.permanent_learn:
        learn_timeout = 0
        # antispoof this host
        ofmsgs.append(self.valve_flowdrop(
            self.dp.eth_src_table,
            self.valve_in_match(vlan=vlan, eth_src=eth_src),
            priority=(self.dp.highest_priority - 2)))
    else:
        learn_timeout = self.dp.timeout
        ofmsgs.extend(self.delete_host_from_vlan(eth_src, vlan))
    mirror_acts = []
    if in_port in self.dp.mirror_from_port:
        mirror_port_num = self.dp.mirror_from_port[in_port]
        mirror_acts = [parser.OFPActionOutput(mirror_port_num)]
    # Update datapath to no longer send packets from this mac to controller
    # note the use of hard_timeout here and idle_timeout for the dst table
    # this is to ensure that the source rules will always be deleted before
    # any rules on the dst table. Otherwise if the dst table rule expires
    # but the src table rule is still being hit intermittantly the switch
    # will flood packets to that dst and not realise it needs to relearn
    # the rule
    # NB: Must be lower than highest priority otherwise it can match
    # flows destined to controller
    ofmsgs.append(self.valve_flowmod(
        self.dp.eth_src_table,
        self.valve_in_match(in_port=in_port, vlan=vlan, eth_src=eth_src),
        priority=(self.dp.highest_priority - 1),
        inst=[self.goto_table(self.dp.eth_dst_table)],
        hard_timeout=learn_timeout))
    # update datapath to output packets to this mac via the associated port
    if vlan.port_is_tagged(in_port):
        dst_act = [parser.OFPActionOutput(in_port)]
    else:
        dst_act = [
            parser.OFPActionPopVlan(),
            parser.OFPActionOutput(in_port)]
    if mirror_acts:
        dst_act.extend(mirror_acts)
    inst = [self.apply_actions(dst_act)]
    ofmsgs.append(self.valve_flowmod(
        self.dp.eth_dst_table,
        self.valve_in_match(vlan=vlan, eth_dst=eth_src),
        priority=self.dp.high_priority,
        inst=inst,
        idle_timeout=learn_timeout))
    return ofmsgs
def handle_control_plane(self, in_port, vlan, eth_src, eth_dst, pkt):
    """Dispatch packets addressed to the controller (FAUCET's MAC or
    non-unicast) to the ARP/ICMP/ICMPv6 control plane handlers.

    Returns the flow mods/packet-outs produced, possibly empty.
    """
    ofmsgs = []
    if eth_dst == self.FAUCET_MAC or not mac_addr_is_unicast(eth_dst):
        arp_pkt = pkt.get_protocol(arp.arp)
        ipv4_pkt = pkt.get_protocol(ipv4.ipv4)
        ipv6_pkt = pkt.get_protocol(ipv6.ipv6)
        if arp_pkt is not None:
            src_ip = ipaddr.IPv4Address(arp_pkt.src_ip)
            dst_ip = ipaddr.IPv4Address(arp_pkt.dst_ip)
            # answer requests for controller IPs; accept replies only
            # if addressed directly to FAUCET's MAC.
            if (arp_pkt.opcode == arp.ARP_REQUEST and
                    self.to_faucet_ip(vlan, src_ip, dst_ip)):
                ofmsgs.extend(self.control_plane_arp_handler(
                    in_port, vlan, eth_src, arp_pkt))
            elif (arp_pkt.opcode == arp.ARP_REPLY and
                    eth_dst == self.FAUCET_MAC):
                ofmsgs.extend(self.control_plane_arp_handler(
                    in_port, vlan, eth_src, arp_pkt))
        elif ipv4_pkt is not None:
            icmp_pkt = pkt.get_protocol(icmp.icmp)
            if icmp_pkt is not None:
                src_ip = ipaddr.IPv4Address(ipv4_pkt.src)
                dst_ip = ipaddr.IPv4Address(ipv4_pkt.dst)
                if self.to_faucet_ip(vlan, src_ip, dst_ip):
                    ofmsgs.extend(self.control_plane_icmp_handler(
                        in_port, vlan, eth_src, ipv4_pkt, icmp_pkt))
        elif ipv6_pkt is not None:
            icmpv6_pkt = pkt.get_protocol(icmpv6.icmpv6)
            if icmpv6_pkt is not None:
                src_ip = ipaddr.IPv6Address(ipv6_pkt.src)
                dst_ip = ipaddr.IPv6Address(ipv6_pkt.dst)
                if self.to_faucet_ip(vlan, src_ip, dst_ip):
                    ofmsgs.extend(self.control_plane_icmpv6_handler(
                        in_port, vlan, eth_src, ipv6_pkt, icmpv6_pkt))
    return ofmsgs
def known_up_dpid_and_port(self, dp_id, in_port):
    """Return True when this is our running DP and a configured,
    physical port."""
    return bool(
        not self.ignore_dpid(dp_id) and
        not self.ignore_port(in_port) and
        self.dp.running and
        in_port in self.dp.ports)
def rcv_packet(self, dp_id, in_port, vlan_vid, pkt):
    """Generate openflow msgs to update datapath upon receipt of packet.
    This involves asssociating the ethernet source address of the packet
    with the given in_port (ethernet switching) ideally so that no packets
    from this address are sent to the controller, and packets to this
    address are output to in_port. This may not be fully possible depending
    on the limitations of the datapath.
    Depending on implementation this may involve updating a nw state db.
    Arguments:
    dp_id -- the unique id of the datapath that received the packet (64bit
    int)
    in_port -- the port number of the port that received the packet
    vlan_vid -- the vlan_vid tagged to the packet.
    pkt -- the packet send to us (Ryu ethernet object).
    Returns
    A list of flow mod messages to be sent to the datpath."""
    if not self.known_up_dpid_and_port(dp_id, in_port):
        return []
    ofmsgs = []
    eth_pkt = pkt.get_protocol(ethernet.ethernet)
    eth_src = eth_pkt.src
    eth_dst = eth_pkt.dst
    vlan = self.dp.vlans[vlan_vid]
    port = self.dp.ports[in_port]
    # only learn unicast sources (multicast/broadcast sources are
    # dropped by the default rules).
    if mac_addr_is_unicast(eth_src):
        self.logger.debug(
            'Packet_in dp_id: %x src:%s in_port:%d vid:%s',
            dp_id, eth_src, in_port, vlan_vid)
        ofmsgs.extend(self.handle_control_plane(
            in_port, vlan, eth_src, eth_dst, pkt))
    # ban learning new hosts if max_hosts reached on a VLAN.
    if (vlan.max_hosts is not None and
            len(vlan.host_cache) == vlan.max_hosts and
            eth_src not in vlan.host_cache):
        self.logger.info(
            'max hosts %u reached on vlan %u, ' +
            'temporarily banning learning on this vlan',
            vlan.max_hosts, vlan.vid)
        # temporary drop (hard_timeout) rather than permanent ban.
        ofmsgs.extend([self.valve_flowdrop(
            self.dp.eth_src_table,
            self.valve_in_match(vlan=vlan),
            priority=(self.dp.low_priority + 1),
            hard_timeout=self.dp.timeout)])
    else:
        ofmsgs.extend(self.learn_host_on_vlan_port(
            port, vlan, eth_src))
        host_cache_entry = HostCacheEntry(
            eth_src,
            port.permanent_learn,
            time.time())
        vlan.host_cache[eth_src] = host_cache_entry
        self.logger.info(
            'learned %u hosts on vlan %u',
            len(vlan.host_cache), vlan.vid)
    return ofmsgs
def reload_config(self, new_dp):
    """Reload the config from new_dp

    KW Arguments:
    new_dp -- A new DP object containing the updated config.

    Only takes effect while the datapath is running; the DP is
    reconfigured as if it had just connected.
    """
    if not self.dp.running:
        return []
    self.dp = new_dp
    return self.datapath_connect(self.dp.dp_id, self.dp.ports.keys())
def arp_for_ip_gw(self, ip_gw, controller_ip, vlan, ports):
    """Broadcast an ARP request for ip_gw out of each of ports."""
    if not ports:
        return []
    self.logger.info('Resolving %s', ip_gw)
    arp_req = arp.arp(
        opcode=arp.ARP_REQUEST, src_mac=self.FAUCET_MAC,
        src_ip=str(controller_ip.ip), dst_mac=mac.DONTCARE_STR,
        dst_ip=str(ip_gw))
    # first port's number decides tagged vs untagged framing.
    pkt = self.build_ethernet_pkt(
        mac.BROADCAST_STR, ports[0].number, vlan, ether.ETH_TYPE_ARP)
    pkt.add_protocol(arp_req)
    pkt.serialize()
    return [self.valve_packetout(port.number, pkt.data)
            for port in ports]
@staticmethod
def ipv6_link_eth_mcast(ucast):
    """Return the Ethernet multicast MAC (33:33:...) derived from the
    last four bytes of an IPv6 unicast address."""
    mcast_mac_bytes = ipaddr.Bytes('\x33\x33') + ucast.packed[-4:]
    return ':'.join(['%02X' % ord(octet) for octet in mcast_mac_bytes])
@staticmethod
def ipv6_link_mcast_from_ucast(ucast):
    """Return the solicited-node multicast address (ff02::1:ffXX:XXXX)
    for an IPv6 unicast address."""
    link_mcast_prefix = ipaddr.IPv6Network('ff02::1:ff00:0/104')
    # keep the 13-byte prefix, splice in the host's last 3 bytes.
    mcast_bytes = ipaddr.Bytes(
        link_mcast_prefix.packed[:13] + ucast.packed[-3:])
    return ipaddr.IPv6Address(mcast_bytes)
def nd_solicit_ip_gw(self, ip_gw, controller_ip, vlan, ports):
    """Send an IPv6 neighbor solicitation for ip_gw out of each of
    ports, addressed to the solicited-node multicast group."""
    if not ports:
        return []
    self.logger.info('Resolving %s', ip_gw)
    nd_mac = self.ipv6_link_eth_mcast(ip_gw)
    ip_gw_mcast = self.ipv6_link_mcast_from_ucast(ip_gw)
    # first port's number decides tagged vs untagged framing.
    pkt = self.build_ethernet_pkt(
        nd_mac, ports[0].number, vlan, ether.ETH_TYPE_IPV6)
    pkt.add_protocol(ipv6.ipv6(
        src=controller_ip.ip, dst=ip_gw_mcast, nxt=inet.IPPROTO_ICMPV6))
    pkt.add_protocol(icmpv6.icmpv6(
        type_=icmpv6.ND_NEIGHBOR_SOLICIT,
        data=icmpv6.nd_neighbor(
            dst=ip_gw,
            option=icmpv6.nd_option_sla(hw_src=self.FAUCET_MAC))))
    pkt.serialize()
    return [self.valve_packetout(port.number, pkt.data)
            for port in ports]
def resolve_gateways(self):
    """Re-ARP/ND for route gateways whose neighbor cache entries are
    missing or older than arp_neighbor_timeout.

    Returns the packet-out messages to send; only gateways inside a
    controller IP network on their VLAN are resolved.
    """
    if not self.dp.running:
        return []
    ofmsgs = []
    now = time.time()
    for vlan in self.dp.vlans.itervalues():
        untagged_ports = self.build_flood_ports_for_vlan(
            vlan.untagged, None)
        tagged_ports = self.build_flood_ports_for_vlan(
            vlan.tagged, None)
        # same logic for IPv4 (ARP) and IPv6 (ND), different caches.
        for routes, neighbor_cache, neighbor_resolver in (
                (vlan.ipv4_routes, vlan.arp_cache, self.arp_for_ip_gw),
                (vlan.ipv6_routes, vlan.nd_cache, self.nd_solicit_ip_gw)):
            for ip_gw in set(routes.values()):
                for controller_ip in vlan.controller_ips:
                    if ip_gw in controller_ip:
                        cache_age = None
                        if ip_gw in neighbor_cache:
                            cache_time = neighbor_cache[ip_gw].cache_time
                            cache_age = now - cache_time
                        if (cache_age is None or
                                cache_age > self.dp.arp_neighbor_timeout):
                            for ports in untagged_ports, tagged_ports:
                                ofmsgs.extend(neighbor_resolver(
                                    ip_gw, controller_ip, vlan, ports))
    return ofmsgs
def host_expire(self):
    """Expire non-permanent hosts from each VLAN's host cache once
    their entries are older than the DP timeout.

    Only updates controller state; the corresponding switch flows age
    out via their own timeouts. Returns None.
    """
    if not self.dp.running:
        return
    now = time.time()
    for vlan in self.dp.vlans.itervalues():
        expired_hosts = []
        # collect first, then delete, to avoid mutating the dict
        # while iterating it.
        for eth_src, host_cache_entry in vlan.host_cache.iteritems():
            if not host_cache_entry.permanent:
                host_cache_entry_age = now - host_cache_entry.cache_time
                if host_cache_entry_age > self.dp.timeout:
                    expired_hosts.append(eth_src)
        if expired_hosts:
            for eth_src in expired_hosts:
                del vlan.host_cache[eth_src]
                self.logger.info(
                    'expiring host %s from vlan %u',
                    eth_src, vlan.vid)
            self.logger.info(
                '%u recently active hosts on vlan %u',
                len(vlan.host_cache), vlan.vid)
class ArubaValve(Valve):
    """Valve variant that programs Aruba's table pipeline on connect."""

    def switch_features(self, dp_id, msg):
        """Request the table features defined in aruba_pipeline.json."""
        table_loader = aruba.LoadRyuTables()
        table_loader.load_tables(
            os.path.join(aruba.CFG_PATH, 'aruba_pipeline.json'), parser)
        return [parser.OFPTableFeaturesStatsRequest(
            datapath=None,
            body=table_loader.ryu_tables)]
Fix duplicate ofmsg logging.
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from logging.handlers import TimedRotatingFileHandler
import time
import os
from collections import namedtuple
import ipaddr
import aruba.aruba_pipeline as aruba
from util import mac_addr_is_unicast
from ryu.lib import ofctl_v1_3 as ofctl
from ryu.lib import mac
from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, packet
from ryu.lib.packet import vlan as packet_vlan
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
class LinkNeighbor(object):
    """A resolved L2 neighbor: its MAC and when the resolution was cached."""

    def __init__(self, eth_src, now):
        # MAC address the neighbor resolved to.
        self.eth_src = eth_src
        # timestamp the resolution was cached at.
        self.cache_time = now
class HostCacheEntry(object):
    """A learned host: MAC, whether it is permanently learned, and the
    time it was learned."""

    def __init__(self, eth_src, permanent, now):
        # learned source MAC address.
        self.eth_src = eth_src
        # permanently-learned hosts are never expired.
        self.permanent = permanent
        # timestamp the host was learned at.
        self.cache_time = now
def valve_factory(dp):
    """Return a Valve object based dp's hardware configuration field.

    Arguments:
    dp -- a DP object with the configuration for this valve.

    Returns the matching Valve subclass instantiated with dp, or None
    when dp.hardware is unsupported.
    """
    supported_hardware = {
        'Allied-Telesis': Valve,
        'Aruba': ArubaValve,
        'NoviFlow': Valve,
        'Open vSwitch': Valve,
        'ZodiacFX': Valve,
    }
    valve_cls = supported_hardware.get(dp.hardware)
    if valve_cls is None:
        return None
    return valve_cls(dp)
class Valve(object):
"""Generates the messages to configure a datapath as a l2 learning switch.
Vendor specific implementations may require sending configuration flows.
This can be achieved by inheriting from this class and overwriting the
function switch_features.
"""
FAUCET_MAC = '0e:00:00:00:00:01'
def __init__(self, dp, logname='faucet', *args, **kwargs):
self.dp = dp
self.logger = logging.getLogger(logname)
self.ofchannel_logger = None
def switch_features(self, dp_id, msg):
"""Send configuration flows necessary for the switch implementation.
Arguments:
dp_id -- the Datapath unique ID (64bit int)
msg -- OFPSwitchFeatures msg sent from switch.
Vendor specific configuration should be implemented here.
"""
return []
def ofchannel_log(self, ofmsgs):
if self.dp is not None:
if self.dp.ofchannel_log is not None:
if self.ofchannel_logger is None:
self.ofchannel_logger = logging.getLogger(
self.dp.ofchannel_log)
logger_handler = TimedRotatingFileHandler(
self.dp.ofchannel_log,
when='midnight')
log_fmt = ('%(asctime)s %(name)-6s '
'%(levelname)-8s %(message)s')
logger_handler.setFormatter(
logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
self.ofchannel_logger.addHandler(logger_handler)
self.ofchannel_logger.propagate = 0
self.ofchannel_logger.setLevel(logging.DEBUG)
for ofmsg in ofmsgs:
self.ofchannel_logger.debug(ofmsg)
    @staticmethod
    def ignore_port(port_num):
        """Ignore non-physical ports.

        Returns True for logical ports (controller, flood, etc), which
        FAUCET does not configure.
        """
        # port numbers > 0xF0000000 indicate a logical port
        return port_num > 0xF0000000
    @staticmethod
    def apply_actions(actions):
        """Wrap a list of OpenFlow actions in an apply-actions instruction."""
        return parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)
    @staticmethod
    def goto_table(table_id):
        """Return a goto-table instruction for table_id."""
        return parser.OFPInstructionGotoTable(table_id)
    @staticmethod
    def set_eth_src(eth_src):
        """Return a set-field action rewriting the ethernet source."""
        return parser.OFPActionSetField(eth_src=eth_src)
    @staticmethod
    def set_eth_dst(eth_dst):
        """Return a set-field action rewriting the ethernet destination."""
        return parser.OFPActionSetField(eth_dst=eth_dst)
    @staticmethod
    def dec_ip_ttl():
        """Return an action decrementing the IP TTL (for routed packets)."""
        return parser.OFPActionDecNwTtl()
@staticmethod
def valve_packetout(out_port, data):
return parser.OFPPacketOut(
datapath=None,
buffer_id=ofp.OFP_NO_BUFFER,
in_port=ofp.OFPP_CONTROLLER,
actions=[parser.OFPActionOutput(out_port, 0)],
data=data)
    @staticmethod
    def valve_in_match(in_port=None, vlan=None,
                       eth_type=None, eth_src=None,
                       eth_dst=None, eth_dst_mask=None,
                       ipv6_nd_target=None, icmpv6_type=None,
                       nw_proto=None,
                       nw_src=None, nw_dst=None):
        """Build an OFPMatch from the given fields; all are optional.

        Only fields that are not None are added to the match. The field
        used for nw_dst depends on eth_type (ARP/IPv4/IPv6).
        """
        match_dict = {}
        if in_port is not None:
            match_dict['in_port'] = in_port
        if vlan is not None:
            # OFPVID_NONE matches untagged packets; otherwise the VID is
            # OR'd with OFPVID_PRESENT per the OF 1.3 spec.
            if vlan.vid == ofp.OFPVID_NONE:
                match_dict['vlan_vid'] = ofp.OFPVID_NONE
            else:
                match_dict['vlan_vid'] = (vlan.vid | ofp.OFPVID_PRESENT)
        if eth_src is not None:
            match_dict['eth_src'] = eth_src
        if eth_dst is not None:
            if eth_dst_mask is not None:
                match_dict['eth_dst'] = (eth_dst, eth_dst_mask)
            else:
                match_dict['eth_dst'] = eth_dst
        if nw_proto is not None:
            match_dict['ip_proto'] = nw_proto
        if nw_src is not None:
            match_dict['ipv4_src'] = (str(nw_src.ip), str(nw_src.netmask))
        if icmpv6_type is not None:
            match_dict['icmpv6_type'] = icmpv6_type
        if ipv6_nd_target is not None:
            match_dict['ipv6_nd_target'] = str(ipv6_nd_target.ip)
        if nw_dst is not None:
            # The destination match field depends on the ethertype.
            nw_dst_masked = (str(nw_dst.ip), str(nw_dst.netmask))
            if eth_type == ether.ETH_TYPE_ARP:
                match_dict['arp_tpa'] = nw_dst_masked
            elif eth_type == ether.ETH_TYPE_IP:
                match_dict['ipv4_dst'] = nw_dst_masked
            else:
                match_dict['ipv6_dst'] = nw_dst_masked
        if eth_type is not None:
            match_dict['eth_type'] = eth_type
        match = parser.OFPMatch(**match_dict)
        return match
def ignore_dpid(self, dp_id):
"""Ignore all DPIDs except the DPID configured."""
if dp_id != self.dp.dp_id:
self.logger.error('Unknown dpid:%s', dp_id)
return True
return False
def all_valve_tables(self):
return (
self.dp.vlan_table,
self.dp.acl_table,
self.dp.eth_src_table,
self.dp.ipv4_fib_table,
self.dp.ipv6_fib_table,
self.dp.eth_dst_table,
self.dp.flood_table)
    def valve_flowmod(self, table_id, match=None, priority=None,
                      inst=None, command=ofp.OFPFC_ADD, out_port=0,
                      out_group=0, hard_timeout=0, idle_timeout=0):
        """Helper function to construct a flow mod message with cookie.

        match defaults to match-all, priority to the DP's lowest priority,
        and inst to no instructions (which drops matching packets).
        """
        if match is None:
            match = self.valve_in_match()
        if priority is None:
            priority = self.dp.lowest_priority
        if inst is None:
            inst = []
        return parser.OFPFlowMod(
            datapath=None,
            cookie=self.dp.cookie,
            command=command,
            table_id=table_id,
            priority=priority,
            out_port=out_port,
            out_group=out_group,
            match=match,
            instructions=inst,
            hard_timeout=hard_timeout,
            idle_timeout=idle_timeout)
    def valve_flowdel(self, table_id, match=None, priority=None,
                      out_port=ofp.OFPP_ANY):
        """Delete matching flows from a table.

        out_port defaults to OFPP_ANY so deletion is not restricted by
        output port unless one is given.
        """
        return self.valve_flowmod(
            table_id,
            match=match,
            priority=priority,
            command=ofp.OFPFC_DELETE,
            out_port=out_port,
            out_group=ofp.OFPG_ANY)
    def valve_flowdrop(self, table_id, match=None, priority=None,
                       hard_timeout=0):
        """Add drop matching flow to a table.

        An empty instruction list causes matching packets to be dropped.
        """
        return self.valve_flowmod(
            table_id,
            match=match,
            priority=priority,
            hard_timeout=hard_timeout,
            inst=[])
    def valve_flowcontroller(self, table_id, match=None, priority=None,
                             inst=None):
        """Add a flow that punts matching packets to the controller.

        The controller output action (max 256 bytes of the packet) is
        prepended to any further instructions.
        """
        if inst is None:
            inst = []
        return self.valve_flowmod(
            table_id,
            match=match,
            priority=priority,
            inst=[self.apply_actions([parser.OFPActionOutput(
                ofp.OFPP_CONTROLLER, max_len=256)])] + inst)
def delete_all_valve_flows(self):
"""Delete all flows from all FAUCET tables."""
ofmsgs = []
for table_id in self.all_valve_tables():
ofmsgs.append(self.valve_flowdel(table_id))
return ofmsgs
    def add_default_drop_flows(self):
        """Add default drop rules on all FAUCET tables.

        Also installs antispoofing and control-protocol drop rules in the
        VLAN (first) table.
        """
        # default drop on all tables.
        ofmsgs = []
        for table in self.all_valve_tables():
            ofmsgs.append(self.valve_flowdrop(
                table,
                priority=self.dp.lowest_priority))
        # antispoof for FAUCET's MAC address
        # TODO: antispoof for controller IPs on this VLAN, too.
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            self.valve_in_match(eth_src=self.FAUCET_MAC),
            priority=self.dp.high_priority))
        # drop STP BPDU (802.1D and Cisco PVST destination MACs)
        for bpdu_mac in ('01:80:C2:00:00:00', '01:00:0C:CC:CC:CD'):
            ofmsgs.append(self.valve_flowdrop(
                self.dp.vlan_table,
                self.valve_in_match(eth_dst=bpdu_mac),
                priority=self.dp.highest_priority))
        # drop LLDP
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            self.valve_in_match(eth_type=ether.ETH_TYPE_LLDP),
            priority=self.dp.highest_priority))
        # drop broadcast sources
        ofmsgs.append(self.valve_flowdrop(
            self.dp.vlan_table,
            self.valve_in_match(eth_src=mac.BROADCAST_STR),
            priority=self.dp.highest_priority))
        return ofmsgs
def add_vlan_flood_flow(self):
"""Add a flow to flood packets for unknown destinations."""
return [self.valve_flowmod(
self.dp.eth_dst_table,
priority=self.dp.low_priority,
inst=[self.goto_table(self.dp.flood_table)])]
def add_controller_learn_flow(self):
"""Add a flow for controller to learn/add flows for destinations."""
return [self.valve_flowcontroller(
self.dp.eth_src_table,
priority=self.dp.low_priority,
inst=[self.goto_table(self.dp.eth_dst_table)])]
def add_default_flows(self):
"""Configure datapath with necessary default tables and rules."""
ofmsgs = []
ofmsgs.extend(self.delete_all_valve_flows())
ofmsgs.extend(self.add_default_drop_flows())
ofmsgs.extend(self.add_vlan_flood_flow())
ofmsgs.extend(self.add_controller_learn_flow())
return ofmsgs
    def add_ports_and_vlans(self, discovered_port_nums):
        """Add all configured and discovered ports and VLANs.

        Arguments:
        discovered_port_nums -- port numbers reported by the datapath.

        Returns a list of flow mods configuring every port.
        """
        ofmsgs = []
        all_port_nums = set()
        # add vlan ports
        for vlan in self.dp.vlans.itervalues():
            self.logger.info('Configuring VLAN %s', vlan)
            vlan_ports = vlan.tagged + vlan.untagged
            for port in vlan_ports:
                all_port_nums.add(port.number)
            # install eth_dst_table flood ofmsgs
            ofmsgs.extend(self.build_flood_rules(vlan))
        # add mirror ports.
        for port_num in self.dp.mirror_from_port.itervalues():
            all_port_nums.add(port_num)
        # add any ports discovered but not configured
        for port_num in discovered_port_nums:
            if self.ignore_port(port_num):
                continue
            if port_num not in all_port_nums:
                all_port_nums.add(port_num)
        # now configure all ports
        for port_num in all_port_nums:
            ofmsgs.extend(self.port_add(self.dp.dp_id, port_num))
        return ofmsgs
@staticmethod
def build_flood_ports_for_vlan(vlan_ports, eth_dst):
ports = []
for port in vlan_ports:
if not port.running():
continue
if eth_dst is None or mac_addr_is_unicast(eth_dst):
if not port.unicast_flood:
continue
ports.append(port)
return ports
def build_flood_rule_actions(self, vlan, eth_dst):
flood_acts = []
tagged_ports = self.build_flood_ports_for_vlan(vlan.tagged, eth_dst)
for port in tagged_ports:
flood_acts.append(parser.OFPActionOutput(port.number))
untagged_ports = self.build_flood_ports_for_vlan(
vlan.untagged, eth_dst)
if untagged_ports:
flood_acts.append(parser.OFPActionPopVlan())
for port in untagged_ports:
flood_acts.append(parser.OFPActionOutput(port.number))
return flood_acts
def build_flood_rules(self, vlan, modify=False):
"""Add a flow to flood packets to unknown destinations on a VLAN."""
command = ofp.OFPFC_ADD
if modify:
command = ofp.OFPFC_MODIFY_STRICT
flood_priority = self.dp.low_priority
flood_eth_dst_matches = []
if vlan.unicast_flood:
flood_eth_dst_matches.extend([(None, None)])
flood_eth_dst_matches.extend([
('01:80:C2:00:00:00', '01:80:C2:00:00:00'), # 802.x
('01:00:5E:00:00:00', 'ff:ff:ff:00:00:00'), # IPv4 multicast
('33:33:00:00:00:00', 'ff:ff:00:00:00:00'), # IPv6 multicast
(mac.BROADCAST_STR, None), # flood on ethernet broadcasts
])
ofmsgs = []
for eth_dst, eth_dst_mask in flood_eth_dst_matches:
flood_acts = self.build_flood_rule_actions(vlan, eth_dst)
ofmsgs.append(self.valve_flowmod(
self.dp.flood_table,
match=self.valve_in_match(
vlan=vlan, eth_dst=eth_dst, eth_dst_mask=eth_dst_mask),
command=command,
inst=[self.apply_actions(flood_acts)],
priority=flood_priority))
flood_priority += 1
for port in vlan.tagged + vlan.untagged:
if port.number in self.dp.mirror_from_port:
mirror_port = self.dp.mirror_from_port[port.number]
mirror_acts = [
parser.OFPActionOutput(mirror_port)] + flood_acts
for eth_dst, eth_dst_mask in flood_eth_dst_matches:
flood_acts = self.build_flood_rule_actions(vlan, eth_dst)
ofmsgs.append(self.valve_flowmod(
self.dp.flood_table,
match=self.valve_in_match(
in_port=port.number, vlan=vlan,
eth_dst=eth_dst, eth_dst_mask=eth_dst_mask),
command=command,
inst=[self.apply_actions(mirror_acts)],
priority=flood_priority))
flood_priority += 1
return ofmsgs
    def datapath_connect(self, dp_id, discovered_port_nums):
        """Generate the default openflow msgs for a datapath upon connection.

        Depending on the implementation, a network state database may be
        updated.

        Arguments:
        dp_id -- the Datapath unique ID (64bit int)
        discovered_port_nums -- a list containing the port numbers of each
            port on the datapath (may be None).

        Returns:
        A list of flow mod messages that will be sent in order to the datapath
        in order to configure it."""
        if self.ignore_dpid(dp_id):
            return []
        if discovered_port_nums is None:
            discovered_port_nums = []
        self.logger.info('Configuring datapath')
        ofmsgs = []
        ofmsgs.extend(self.add_default_flows())
        ofmsgs.extend(self.add_ports_and_vlans(discovered_port_nums))
        # Mark running only after the configuration messages are built.
        self.dp.running = True
        return ofmsgs
def datapath_disconnect(self, dp_id):
"""Update n/w state db upon disconnection of datapath with id dp_id."""
if not self.ignore_dpid(dp_id):
self.logger.critical('Datapath disconnected')
return []
def datapath_down(self, dp_id):
if not self.ignore_dpid(dp_id):
self.dp.running = False
self.logger.warning('Datapath %s down', dp_id)
return []
    def port_add_acl(self, port_num):
        """Install the ACL configured for a port, if any.

        Arguments:
        port_num -- the port number to install ACL rules for.

        Returns (ofmsgs, forwarding_table): the flow mods for the ACL
        table, and the table the VLAN table should send packets to next
        (the ACL table when an ACL is configured, else the eth_src table).
        """
        ofmsgs = []
        forwarding_table = self.dp.eth_src_table
        if port_num in self.dp.acl_in:
            acl_num = self.dp.acl_in[port_num]
            forwarding_table = self.dp.acl_table
            # Rules are installed in config order, at descending priority.
            acl_rule_priority = self.dp.highest_priority
            acl_allow_inst = self.goto_table(self.dp.eth_src_table)
            for rule_conf in self.dp.acls[acl_num]:
                acl_inst = []
                match_dict = {}
                for attrib, attrib_value in rule_conf.iteritems():
                    if attrib == 'actions':
                        if 'mirror' in attrib_value:
                            port_no = attrib_value['mirror']
                            acl_inst.append(
                                self.apply_actions([
                                    parser.OFPActionOutput(port_no)]))
                        # if output selected, output packet now
                        # and exit pipeline.
                        if 'output' in attrib_value:
                            output_dict = attrib_value['output']
                            output_actions = []
                            # if destination rewriting selected, rewrite it.
                            if 'dl_dst' in output_dict:
                                output_actions.append(
                                    parser.OFPActionSetField(
                                        eth_dst=output_dict['dl_dst']))
                            # output to port
                            port_no = output_dict['port']
                            output_actions.append(
                                parser.OFPActionOutput(port_no))
                            acl_inst.append(
                                self.apply_actions(output_actions))
                            continue
                        if attrib_value['allow'] == 1:
                            acl_inst.append(acl_allow_inst)
                        continue
                    if attrib == 'in_port':
                        continue
                    match_dict[attrib] = attrib_value
                # override in_port always
                match_dict['in_port'] = port_num
                # to_match() needs to access parser via dp
                # this uses the old API, which is oh so convenient
                # (transparently handling masks for example).
                null_dp = namedtuple('null_dp', 'ofproto_parser')
                null_dp.ofproto_parser = parser
                acl_match = ofctl.to_match(null_dp, match_dict)
                ofmsgs.append(self.valve_flowmod(
                    self.dp.acl_table,
                    acl_match,
                    priority=acl_rule_priority,
                    inst=acl_inst))
                acl_rule_priority -= 1
        return ofmsgs, forwarding_table
    def add_controller_ips(self, controller_ips, vlan):
        """Add flows to punt control-plane traffic for controller IPs.

        For each controller IP: punt ARP (IPv4) or ND (IPv6) for the
        address to the controller, punt ICMP echo to the address, and
        initialize the corresponding FIB table for traffic addressed to
        FAUCET's MAC.

        Arguments:
        controller_ips -- iterable of controller IPNetwork objects.
        vlan -- the VLAN these addresses are configured on.
        """
        ofmsgs = []
        for controller_ip in controller_ips:
            # Host (max-prefixlen) network for the controller address itself.
            controller_ip_host = ipaddr.IPNetwork(
                '/'.join((str(controller_ip.ip),
                          str(controller_ip.max_prefixlen))))
            max_prefixlen = controller_ip_host.prefixlen
            if controller_ip_host.version == 4:
                # Punt ARP for the controller address to the controller.
                ofmsgs.append(self.valve_flowcontroller(
                    self.dp.eth_src_table,
                    self.valve_in_match(
                        eth_type=ether.ETH_TYPE_ARP,
                        nw_dst=controller_ip_host,
                        vlan=vlan),
                    priority=self.dp.highest_priority + max_prefixlen))
                # Initialize IPv4 FIB
                ofmsgs.append(self.valve_flowmod(
                    self.dp.eth_src_table,
                    self.valve_in_match(
                        eth_type=ether.ETH_TYPE_IP,
                        eth_dst=self.FAUCET_MAC,
                        vlan=vlan),
                    priority=self.dp.highest_priority,
                    inst=[self.goto_table(self.dp.ipv4_fib_table)]))
                # Punt ICMP echo for the controller address.
                ofmsgs.append(self.valve_flowcontroller(
                    self.dp.ipv4_fib_table,
                    self.valve_in_match(
                        vlan=vlan,
                        eth_type=ether.ETH_TYPE_IP,
                        nw_proto=inet.IPPROTO_ICMP,
                        nw_src=controller_ip,
                        nw_dst=controller_ip_host),
                    priority=self.dp.highest_priority + max_prefixlen))
            else:
                # Punt IPv6 neighbor solicits for the controller address.
                ofmsgs.append(self.valve_flowcontroller(
                    self.dp.eth_src_table,
                    self.valve_in_match(
                        eth_type=ether.ETH_TYPE_IPV6,
                        vlan=vlan,
                        nw_proto=inet.IPPROTO_ICMPV6,
                        ipv6_nd_target=controller_ip_host,
                        icmpv6_type=icmpv6.ND_NEIGHBOR_SOLICIT),
                    priority=self.dp.highest_priority + max_prefixlen))
                # Punt neighbor adverts addressed to FAUCET's MAC.
                ofmsgs.append(self.valve_flowcontroller(
                    self.dp.eth_src_table,
                    self.valve_in_match(
                        eth_type=ether.ETH_TYPE_IPV6,
                        eth_dst=self.FAUCET_MAC,
                        vlan=vlan,
                        nw_proto=inet.IPPROTO_ICMPV6,
                        icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT),
                    priority=self.dp.highest_priority + max_prefixlen))
                # Initialize IPv6 FIB
                ofmsgs.append(self.valve_flowmod(
                    self.dp.eth_src_table,
                    self.valve_in_match(
                        eth_type=ether.ETH_TYPE_IPV6,
                        eth_dst=self.FAUCET_MAC,
                        vlan=vlan),
                    priority=self.dp.highest_priority,
                    inst=[self.goto_table(self.dp.ipv6_fib_table)]))
                # Punt ICMPv6 echo requests for the controller address.
                ofmsgs.append(self.valve_flowcontroller(
                    self.dp.ipv6_fib_table,
                    self.valve_in_match(
                        eth_type=ether.ETH_TYPE_IPV6,
                        vlan=vlan,
                        nw_proto=inet.IPPROTO_ICMPV6,
                        nw_dst=controller_ip_host,
                        icmpv6_type=icmpv6.ICMPV6_ECHO_REQUEST),
                    priority=self.dp.highest_priority + max_prefixlen))
        return ofmsgs
    def port_add_vlan_untagged(self, port, vlan, forwarding_table, mirror_act):
        """Install VLAN-table rules for an untagged port on a VLAN.

        Packets arriving untagged have the VLAN tag pushed before being
        sent on to forwarding_table.
        """
        ofmsgs = []
        ofmsgs.extend(self.add_controller_ips(vlan.controller_ips, vlan))
        push_vlan_act = mirror_act + [
            parser.OFPActionPushVlan(ether.ETH_TYPE_8021Q),
            parser.OFPActionSetField(vlan_vid=(vlan.vid | ofp.OFPVID_PRESENT))]
        push_vlan_inst = [
            self.apply_actions(push_vlan_act),
            self.goto_table(forwarding_table)
        ]
        # Synthetic VLAN with OFPVID_NONE: matches packets with no tag.
        null_vlan = namedtuple('null_vlan', 'vid')
        null_vlan.vid = ofp.OFPVID_NONE
        ofmsgs.append(self.valve_flowmod(
            self.dp.vlan_table,
            self.valve_in_match(in_port=port.number, vlan=null_vlan),
            priority=self.dp.low_priority,
            inst=push_vlan_inst))
        ofmsgs.extend(self.build_flood_rules(vlan))
        return ofmsgs
    def port_add_vlan_tagged(self, port, vlan, forwarding_table, mirror_act):
        """Install VLAN-table rules for a tagged port on a VLAN.

        Packets arriving with the VLAN's tag are (optionally mirrored and)
        sent on to forwarding_table.
        """
        ofmsgs = []
        ofmsgs.extend(self.add_controller_ips(vlan.controller_ips, vlan))
        vlan_inst = [
            self.goto_table(forwarding_table)
        ]
        if mirror_act:
            vlan_inst = [self.apply_actions(mirror_act)] + vlan_inst
        ofmsgs.append(self.valve_flowmod(
            self.dp.vlan_table,
            self.valve_in_match(in_port=port.number, vlan=vlan),
            priority=self.dp.low_priority,
            inst=vlan_inst))
        ofmsgs.extend(self.build_flood_rules(vlan))
        return ofmsgs
def port_add_vlans(self, port, forwarding_table, mirror_act):
ofmsgs = []
vlans = self.dp.vlans.values()
tagged_vlans_with_port = [
vlan for vlan in vlans if port in vlan.tagged]
untagged_vlans_with_port = [
vlan for vlan in vlans if port in vlan.untagged]
for vlan in tagged_vlans_with_port:
ofmsgs.extend(self.port_add_vlan_tagged(
port, vlan, forwarding_table, mirror_act))
for vlan in untagged_vlans_with_port:
ofmsgs.extend(self.port_add_vlan_untagged(
port, vlan, forwarding_table, mirror_act))
return ofmsgs
    def port_add(self, dp_id, port_num):
        """Generate openflow msgs to update the datapath upon addition of port.

        Arguments:
        dp_id -- the unique id of the datapath
        port_num -- the port number of the new port

        Returns
        A list of flow mod messages to be sent to the datapath."""
        if self.ignore_dpid(dp_id) or self.ignore_port(port_num):
            return []
        if port_num not in self.dp.ports:
            self.logger.info(
                'Autoconfiguring port:%u based on default config', port_num)
            self.dp.add_port(port_num)
        port = self.dp.ports[port_num]
        self.logger.info('Port %s added', port)
        port.phys_up = True
        if not port.running():
            return []
        in_port_match = self.valve_in_match(in_port=port_num)
        ofmsgs = []
        self.logger.info('Sending config for port %s', port)
        # Clear any stale rules for this port from all tables first.
        for table in self.all_valve_tables():
            ofmsgs.append(self.valve_flowdel(table, in_port_match))
        # if this port is used as mirror port in any acl - drop input packets
        for acl in self.dp.acls.values():
            for rule_conf in acl:
                for attrib, attrib_value in rule_conf.iteritems():
                    if attrib == 'actions':
                        if 'mirror' in attrib_value:
                            port_no = attrib_value['mirror']
                            ofmsgs.append(self.valve_flowdrop(
                                self.dp.vlan_table,
                                self.valve_in_match(in_port=port_no)))
        if port_num in self.dp.mirror_from_port.values():
            # this is a mirror port - drop all input packets
            ofmsgs.append(self.valve_flowdrop(
                self.dp.vlan_table,
                in_port_match))
            return ofmsgs
        mirror_act = []
        # this port is mirrored to another port
        if port_num in self.dp.mirror_from_port:
            mirror_port_num = self.dp.mirror_from_port[port_num]
            mirror_act = [parser.OFPActionOutput(mirror_port_num)]
        acl_ofmsgs, forwarding_table = self.port_add_acl(port_num)
        ofmsgs.extend(acl_ofmsgs)
        ofmsgs.extend(self.port_add_vlans(port, forwarding_table, mirror_act))
        return ofmsgs
def port_delete(self, dp_id, port_num):
"""Generate openflow msgs to update the datapath upon deletion of port.
Returns
A list of flow mod messages to be sent to the datapath."""
if self.ignore_dpid(dp_id) or self.ignore_port(port_num):
return []
if port_num not in self.dp.ports:
return []
port = self.dp.ports[port_num]
port.phys_up = False
self.logger.warning('Port %s down', port)
ofmsgs = []
if not port.permanent_learn:
# delete all rules matching this port in all tables.
for table in self.all_valve_tables():
ofmsgs.append(self.valve_flowdel(
table,
self.valve_in_match(in_port=port_num)))
# delete eth_dst rules
ofmsgs.append(self.valve_flowdel(
self.dp.eth_dst_table,
out_port=port_num))
ofmsgs.append(parser.OFPBarrierRequest(None))
for vlan in self.dp.vlans.values():
if port_num in vlan.tagged or port_num in vlan.untagged:
ofmsgs.extend(self.build_flood_rules(vlan), modify=True)
return ofmsgs
def delete_host_from_vlan(self, eth_src, vlan):
ofmsgs = []
# delete any existing ofmsgs for this vlan/mac combination on the
# src mac table
ofmsgs.append(self.valve_flowdel(
self.dp.eth_src_table,
self.valve_in_match(vlan=vlan, eth_src=eth_src)))
# delete any existing ofmsgs for this vlan/mac combination on the dst
# mac table
ofmsgs.append(self.valve_flowdel(
self.dp.eth_dst_table,
self.valve_in_match(vlan=vlan, eth_dst=eth_src)))
ofmsgs.append(parser.OFPBarrierRequest(None))
return ofmsgs
def build_ethernet_pkt(self, eth_dst, in_port, vlan, ethertype):
pkt = packet.Packet()
if vlan.port_is_tagged(in_port):
eth_pkt = ethernet.ethernet(
eth_dst, self.FAUCET_MAC, ether.ETH_TYPE_8021Q)
vlan_pkt = packet_vlan.vlan(
vid=vlan.vid, ethertype=ethertype)
pkt.add_protocol(eth_pkt)
pkt.add_protocol(vlan_pkt)
else:
eth_pkt = ethernet.ethernet(
eth_dst, self.FAUCET_MAC, ethertype)
pkt.add_protocol(eth_pkt)
return pkt
    def add_route(self, vlan, ip_gw, ip_dst):
        """Add a route to the VLAN's FIB (IPv4 or IPv6 by ip_dst version).

        The FIB flow is only installed immediately if the gateway is
        already resolved in the neighbor cache; otherwise it is installed
        when the ARP/ND reply arrives.

        Arguments:
        vlan -- the VLAN to route on.
        ip_gw -- next-hop gateway address.
        ip_dst -- destination network.
        """
        ofmsgs = []
        if ip_dst.version == 6:
            routes = vlan.ipv6_routes
            neighbor_cache = vlan.nd_cache
            eth_type = ether.ETH_TYPE_IPV6
            fib_table = self.dp.ipv6_fib_table
        else:
            routes = vlan.ipv4_routes
            neighbor_cache = vlan.arp_cache
            eth_type = ether.ETH_TYPE_IP
            fib_table = self.dp.ipv4_fib_table
        routes[ip_dst] = ip_gw
        if ip_gw in neighbor_cache:
            eth_dst = neighbor_cache[ip_gw].eth_src
            ofmsgs.extend(
                self.add_resolved_route(
                    eth_type=eth_type,
                    fib_table=fib_table,
                    vlan=vlan,
                    neighbor_cache=neighbor_cache,
                    ip_gw=ip_gw,
                    ip_dst=ip_dst,
                    eth_dst=eth_dst,
                    is_updated=False))
        return ofmsgs
def del_route(self, vlan, ip_dst):
ofmsgs = []
if ip_dst.version == 6:
if ip_dst in vlan.ipv6_routes:
del vlan.ipv6_routes[ip_dst]
route_match = self.valve_in_match(
vlan=vlan, eth_type=ether.ETH_TYPE_IPV6, nw_dst=ip_dst)
ofmsgs.append(self.valve_flowdel(
self.dp.ipv6_fib_table, route_match))
else:
if ip_dst in vlan.ipv4_routes:
del vlan.ipv4_routes[ip_dst]
route_match = self.valve_in_match(
vlan=vlan, eth_type=ether.ETH_TYPE_IP, nw_dst=ip_dst)
ofmsgs.append(self.valve_flowdel(
self.dp.ipv4_fib_table, route_match))
return ofmsgs
    def add_resolved_route(self, eth_type, fib_table, vlan, neighbor_cache,
                           ip_gw, ip_dst, eth_dst, is_updated=None):
        """Install a FIB flow for a route whose gateway MAC is known.

        is_updated is tri-state:
          None  -- only refresh the neighbor cache, no FIB change.
          False -- add a new route flow.
          True  -- next hop changed: delete the old flow, then add.

        The route flow rewrites src/dst MACs, decrements TTL, and
        continues to the eth_dst table. The neighbor cache entry for
        ip_gw is refreshed in every case.
        """
        ofmsgs = []
        if is_updated is not None:
            in_match = self.valve_in_match(
                vlan=vlan, eth_type=eth_type, nw_dst=ip_dst)
            # Longer prefixes get higher priority (longest-prefix match).
            prefixlen = ipaddr.IPNetwork(ip_dst).prefixlen
            priority = self.dp.highest_priority + prefixlen
            if is_updated:
                self.logger.info(
                    'Updating next hop for route %s via %s (%s)',
                    ip_dst, ip_gw, eth_dst)
                ofmsgs.append(self.valve_flowdel(
                    fib_table,
                    in_match,
                    priority=priority))
            else:
                self.logger.info(
                    'Adding new route %s via %s (%s)',
                    ip_dst, ip_gw, eth_dst)
            ofmsgs.append(self.valve_flowmod(
                fib_table,
                in_match,
                priority=priority,
                inst=[self.apply_actions(
                    [self.set_eth_src(self.FAUCET_MAC),
                     self.set_eth_dst(eth_dst),
                     self.dec_ip_ttl()])] +
                [self.goto_table(self.dp.eth_dst_table)]))
        now = time.time()
        link_neighbor = LinkNeighbor(eth_dst, now)
        neighbor_cache[ip_gw] = link_neighbor
        return ofmsgs
    def control_plane_arp_handler(self, in_port, vlan, eth_src, arp_pkt):
        """Handle an ARP packet punted to the controller.

        Requests for a controller IP are answered directly; replies
        refresh the ARP cache and (re)install any routes via the
        resolved gateway.
        """
        ofmsgs = []
        if arp_pkt.opcode == arp.ARP_REQUEST:
            pkt = self.build_ethernet_pkt(
                eth_src, in_port, vlan, ether.ETH_TYPE_ARP)
            # NB: arp_pkt is rebound to the reply from here on.
            arp_pkt = arp.arp(
                opcode=arp.ARP_REPLY, src_mac=self.FAUCET_MAC,
                src_ip=arp_pkt.dst_ip, dst_mac=eth_src, dst_ip=arp_pkt.src_ip)
            pkt.add_protocol(arp_pkt)
            pkt.serialize()
            ofmsgs.append(self.valve_packetout(in_port, pkt.data))
            self.logger.info(
                'Responded to ARP request for %s from %s',
                arp_pkt.src_ip, arp_pkt.dst_ip)
        elif arp_pkt.opcode == arp.ARP_REPLY:
            resolved_ip_gw = ipaddr.IPv4Address(arp_pkt.src_ip)
            self.logger.info('ARP response %s for %s', eth_src, resolved_ip_gw)
            # is_updated: None if MAC unchanged, True if it moved,
            # False if newly learned (see add_resolved_route).
            is_updated = None
            if resolved_ip_gw in vlan.arp_cache:
                cached_eth_dst = vlan.arp_cache[resolved_ip_gw].eth_src
                if cached_eth_dst != eth_src:
                    is_updated = True
            else:
                is_updated = False
            for ip_dst, ip_gw in vlan.ipv4_routes.iteritems():
                if ip_gw == resolved_ip_gw:
                    ofmsgs.extend(
                        self.add_resolved_route(
                            ether.ETH_TYPE_IP, self.dp.ipv4_fib_table,
                            vlan, vlan.arp_cache,
                            ip_gw, ip_dst, eth_src, is_updated))
        return ofmsgs
    def control_plane_icmp_handler(self, in_port, vlan, eth_src,
                                   ipv4_pkt, icmp_pkt):
        """Reply to an ICMP echo request punted to the controller.

        The reply echoes the original payload back out of in_port with
        the IPv4 src/dst addresses swapped.
        """
        ofmsgs = []
        if icmp_pkt is not None:
            pkt = self.build_ethernet_pkt(
                eth_src, in_port, vlan, ether.ETH_TYPE_IP)
            ipv4_pkt = ipv4.ipv4(
                dst=ipv4_pkt.src, src=ipv4_pkt.dst, proto=ipv4_pkt.proto)
            icmp_pkt = icmp.icmp(
                type_=icmp.ICMP_ECHO_REPLY, code=icmp.ICMP_ECHO_REPLY_CODE,
                data=icmp_pkt.data)
            pkt.add_protocol(ipv4_pkt)
            pkt.add_protocol(icmp_pkt)
            pkt.serialize()
            ofmsgs.append(self.valve_packetout(in_port, pkt.data))
        return ofmsgs
    def control_plane_icmpv6_handler(self, in_port, vlan, eth_src,
                                     ipv6_pkt, icmpv6_pkt):
        """Handle an ICMPv6 packet punted to the controller.

        Neighbor solicits for controller addresses get an advert reply;
        neighbor adverts refresh the ND cache and (re)install routes via
        the resolved gateway; echo requests get an echo reply.
        """
        ofmsgs = []
        pkt = self.build_ethernet_pkt(
            eth_src, in_port, vlan, ether.ETH_TYPE_IPV6)
        if icmpv6_pkt.type_ == icmpv6.ND_NEIGHBOR_SOLICIT:
            dst = icmpv6_pkt.data.dst
            ipv6_reply = ipv6.ipv6(
                src=dst,
                dst=ipv6_pkt.src,
                nxt=inet.IPPROTO_ICMPV6,
                hop_limit=ipv6_pkt.hop_limit)
            pkt.add_protocol(ipv6_reply)
            # res=7 sets all of the R/S/O advert flags.
            icmpv6_reply = icmpv6.icmpv6(
                type_=icmpv6.ND_NEIGHBOR_ADVERT,
                data=icmpv6.nd_neighbor(
                    dst=dst,
                    option=icmpv6.nd_option_tla(
                        hw_src=self.FAUCET_MAC), res=7))
            pkt.add_protocol(icmpv6_reply)
            pkt.serialize()
            ofmsgs.extend([self.valve_packetout(in_port, pkt.data)])
        elif icmpv6_pkt.type_ == icmpv6.ND_NEIGHBOR_ADVERT:
            resolved_ip_gw = ipaddr.IPv6Address(icmpv6_pkt.data.dst)
            self.logger.info('ND response %s for %s', eth_src, resolved_ip_gw)
            # is_updated: None if MAC unchanged, True if it moved,
            # False if newly learned (see add_resolved_route).
            is_updated = None
            if resolved_ip_gw in vlan.nd_cache:
                cached_eth_dst = vlan.nd_cache[resolved_ip_gw].eth_src
                if cached_eth_dst != eth_src:
                    is_updated = True
            else:
                is_updated = False
            for ip_dst, ip_gw in vlan.ipv6_routes.iteritems():
                if ip_gw == resolved_ip_gw:
                    ofmsgs.extend(
                        self.add_resolved_route(
                            ether.ETH_TYPE_IPV6, self.dp.ipv6_fib_table,
                            vlan, vlan.nd_cache,
                            ip_gw, ip_dst, eth_src, is_updated))
        elif icmpv6_pkt.type_ == icmpv6.ICMPV6_ECHO_REQUEST:
            dst = ipv6_pkt.dst
            ipv6_reply = ipv6.ipv6(
                src=dst,
                dst=ipv6_pkt.src,
                nxt=inet.IPPROTO_ICMPV6,
                hop_limit=ipv6_pkt.hop_limit)
            pkt.add_protocol(ipv6_reply)
            icmpv6_reply = icmpv6.icmpv6(
                type_=icmpv6.ICMPV6_ECHO_REPLY,
                data=icmpv6.echo(
                    id_=icmpv6_pkt.data.id,
                    seq=icmpv6_pkt.data.seq,
                    data=icmpv6_pkt.data.data))
            pkt.add_protocol(icmpv6_reply)
            pkt.serialize()
            ofmsgs.extend([self.valve_packetout(in_port, pkt.data)])
        return ofmsgs
@staticmethod
def to_faucet_ip(vlan, src_ip, dst_ip):
for controller_ip in vlan.controller_ips:
if src_ip in controller_ip or dst_ip in controller_ip:
return True
return False
    def learn_host_on_vlan_port(self, port, vlan, eth_src):
        """Learn a host on a port/VLAN: install src and dst table flows.

        Arguments:
        port -- the Port object the host was seen on.
        vlan -- the VLAN the host was seen on.
        eth_src -- the host's MAC address.

        Returns a list of flow mods to install.
        """
        ofmsgs = []
        in_port = port.number
        # hosts learned on this port never relearned
        if port.permanent_learn:
            learn_timeout = 0
            # antispoof this host
            ofmsgs.append(self.valve_flowdrop(
                self.dp.eth_src_table,
                self.valve_in_match(vlan=vlan, eth_src=eth_src),
                priority=(self.dp.highest_priority - 2)))
        else:
            learn_timeout = self.dp.timeout
            ofmsgs.extend(self.delete_host_from_vlan(eth_src, vlan))
        mirror_acts = []
        if in_port in self.dp.mirror_from_port:
            mirror_port_num = self.dp.mirror_from_port[in_port]
            mirror_acts = [parser.OFPActionOutput(mirror_port_num)]
        # Update datapath to no longer send packets from this mac to controller
        # note the use of hard_timeout here and idle_timeout for the dst table
        # this is to ensure that the source rules will always be deleted before
        # any rules on the dst table. Otherwise if the dst table rule expires
        # but the src table rule is still being hit intermittantly the switch
        # will flood packets to that dst and not realise it needs to relearn
        # the rule
        # NB: Must be lower than highest priority otherwise it can match
        # flows destined to controller
        ofmsgs.append(self.valve_flowmod(
            self.dp.eth_src_table,
            self.valve_in_match(in_port=in_port, vlan=vlan, eth_src=eth_src),
            priority=(self.dp.highest_priority - 1),
            inst=[self.goto_table(self.dp.eth_dst_table)],
            hard_timeout=learn_timeout))
        # update datapath to output packets to this mac via the associated port
        if vlan.port_is_tagged(in_port):
            dst_act = [parser.OFPActionOutput(in_port)]
        else:
            # untagged ports get the VLAN header stripped before output.
            dst_act = [
                parser.OFPActionPopVlan(),
                parser.OFPActionOutput(in_port)]
        if mirror_acts:
            dst_act.extend(mirror_acts)
        inst = [self.apply_actions(dst_act)]
        ofmsgs.append(self.valve_flowmod(
            self.dp.eth_dst_table,
            self.valve_in_match(vlan=vlan, eth_dst=eth_src),
            priority=self.dp.high_priority,
            inst=inst,
            idle_timeout=learn_timeout))
        return ofmsgs
    def handle_control_plane(self, in_port, vlan, eth_src, eth_dst, pkt):
        """Dispatch a punted packet to the right control-plane handler.

        Only packets addressed to FAUCET's MAC or to a non-unicast MAC
        are considered; ARP, ICMP and ICMPv6 are handled.
        """
        ofmsgs = []
        if eth_dst == self.FAUCET_MAC or not mac_addr_is_unicast(eth_dst):
            arp_pkt = pkt.get_protocol(arp.arp)
            ipv4_pkt = pkt.get_protocol(ipv4.ipv4)
            ipv6_pkt = pkt.get_protocol(ipv6.ipv6)
            if arp_pkt is not None:
                src_ip = ipaddr.IPv4Address(arp_pkt.src_ip)
                dst_ip = ipaddr.IPv4Address(arp_pkt.dst_ip)
                # Requests only if for a controller IP; replies only if
                # addressed directly to FAUCET.
                if (arp_pkt.opcode == arp.ARP_REQUEST and
                        self.to_faucet_ip(vlan, src_ip, dst_ip)):
                    ofmsgs.extend(self.control_plane_arp_handler(
                        in_port, vlan, eth_src, arp_pkt))
                elif (arp_pkt.opcode == arp.ARP_REPLY and
                      eth_dst == self.FAUCET_MAC):
                    ofmsgs.extend(self.control_plane_arp_handler(
                        in_port, vlan, eth_src, arp_pkt))
            elif ipv4_pkt is not None:
                icmp_pkt = pkt.get_protocol(icmp.icmp)
                if icmp_pkt is not None:
                    src_ip = ipaddr.IPv4Address(ipv4_pkt.src)
                    dst_ip = ipaddr.IPv4Address(ipv4_pkt.dst)
                    if self.to_faucet_ip(vlan, src_ip, dst_ip):
                        ofmsgs.extend(self.control_plane_icmp_handler(
                            in_port, vlan, eth_src, ipv4_pkt, icmp_pkt))
            elif ipv6_pkt is not None:
                icmpv6_pkt = pkt.get_protocol(icmpv6.icmpv6)
                if icmpv6_pkt is not None:
                    src_ip = ipaddr.IPv6Address(ipv6_pkt.src)
                    dst_ip = ipaddr.IPv6Address(ipv6_pkt.dst)
                    if self.to_faucet_ip(vlan, src_ip, dst_ip):
                        ofmsgs.extend(self.control_plane_icmpv6_handler(
                            in_port, vlan, eth_src, ipv6_pkt, icmpv6_pkt))
        return ofmsgs
def known_up_dpid_and_port(self, dp_id, in_port):
if (not self.ignore_dpid(dp_id) and not self.ignore_port(in_port) and
self.dp.running and in_port in self.dp.ports):
return True
return False
    def rcv_packet(self, dp_id, in_port, vlan_vid, pkt):
        """Generate openflow msgs to update datapath upon receipt of packet.

        This involves associating the ethernet source address of the packet
        with the given in_port (ethernet switching) ideally so that no packets
        from this address are sent to the controller, and packets to this
        address are output to in_port. This may not be fully possible depending
        on the limitations of the datapath.

        Depending on implementation this may involve updating a nw state db.

        Arguments:
        dp_id -- the unique id of the datapath that received the packet (64bit
            int)
        in_port -- the port number of the port that received the packet
        vlan_vid -- the vlan_vid tagged to the packet.
        pkt -- the packet send to us (Ryu ethernet object).

        Returns
        A list of flow mod messages to be sent to the datapath."""
        if not self.known_up_dpid_and_port(dp_id, in_port):
            return []
        ofmsgs = []
        eth_pkt = pkt.get_protocol(ethernet.ethernet)
        eth_src = eth_pkt.src
        eth_dst = eth_pkt.dst
        vlan = self.dp.vlans[vlan_vid]
        port = self.dp.ports[in_port]
        # Only learn unicast sources (broadcast sources are dropped earlier).
        if mac_addr_is_unicast(eth_src):
            self.logger.debug(
                'Packet_in dp_id: %x src:%s in_port:%d vid:%s',
                dp_id, eth_src, in_port, vlan_vid)
            ofmsgs.extend(self.handle_control_plane(
                in_port, vlan, eth_src, eth_dst, pkt))
        # ban learning new hosts if max_hosts reached on a VLAN.
        if (vlan.max_hosts is not None and
                len(vlan.host_cache) == vlan.max_hosts and
                eth_src not in vlan.host_cache):
            self.logger.info(
                'max hosts %u reached on vlan %u, ' +
                'temporarily banning learning on this vlan',
                vlan.max_hosts, vlan.vid)
            ofmsgs.extend([self.valve_flowdrop(
                self.dp.eth_src_table,
                self.valve_in_match(vlan=vlan),
                priority=(self.dp.low_priority + 1),
                hard_timeout=self.dp.timeout)])
        else:
            ofmsgs.extend(self.learn_host_on_vlan_port(
                port, vlan, eth_src))
            host_cache_entry = HostCacheEntry(
                eth_src,
                port.permanent_learn,
                time.time())
            vlan.host_cache[eth_src] = host_cache_entry
            self.logger.info(
                'learned %u hosts on vlan %u',
                len(vlan.host_cache), vlan.vid)
        return ofmsgs
def reload_config(self, new_dp):
"""Reload the config from new_dp
KW Arguments:
new_dp -- A new DP object containing the updated config."""
ofmsgs = []
if self.dp.running:
self.dp = new_dp
ofmsgs = self.datapath_connect(
self.dp.dp_id, self.dp.ports.keys())
return ofmsgs
    def arp_for_ip_gw(self, ip_gw, controller_ip, vlan, ports):
        """Broadcast an ARP request for ip_gw out of all given ports.

        Arguments:
        ip_gw -- gateway IP address to resolve.
        controller_ip -- controller IPNetwork used as the ARP source.
        vlan -- VLAN to send the request on.
        ports -- ports to output the request to (may be empty).
        """
        ofmsgs = []
        if ports:
            self.logger.info('Resolving %s', ip_gw)
            arp_pkt = arp.arp(
                opcode=arp.ARP_REQUEST, src_mac=self.FAUCET_MAC,
                src_ip=str(controller_ip.ip), dst_mac=mac.DONTCARE_STR,
                dst_ip=str(ip_gw))
            # NOTE(review): the header is built from ports[0]; this assumes
            # all ports share the same tagged/untagged status - confirm.
            port_num = ports[0].number
            pkt = self.build_ethernet_pkt(
                mac.BROADCAST_STR, port_num, vlan, ether.ETH_TYPE_ARP)
            pkt.add_protocol(arp_pkt)
            pkt.serialize()
            for port in ports:
                ofmsgs.append(self.valve_packetout(port.number, pkt.data))
        return ofmsgs
@staticmethod
def ipv6_link_eth_mcast(ucast):
nd_mac_bytes = ipaddr.Bytes('\x33\x33') + ucast.packed[-4:]
nd_mac = ':'.join(['%02X' % ord(x) for x in nd_mac_bytes])
return nd_mac
@staticmethod
def ipv6_link_mcast_from_ucast(ucast):
    """Return the solicited-node multicast address for an IPv6 unicast
    address: ff02::1:ff00:0/104 with the low 24 bits of *ucast* appended.
    """
    link_mcast_prefix = ipaddr.IPv6Network('ff02::1:ff00:0/104')
    mcast_bytes = ipaddr.Bytes(
        link_mcast_prefix.packed[:13] + ucast.packed[-3:])
    link_mcast = ipaddr.IPv6Address(mcast_bytes)
    return link_mcast
def nd_solicit_ip_gw(self, ip_gw, controller_ip, vlan, ports):
    """Return OpenFlow messages sending an IPv6 neighbor solicitation
    for *ip_gw* out every port in *ports* on *vlan*.

    The solicitation is addressed to the solicited-node multicast group
    (and matching Ethernet multicast MAC) derived from *ip_gw*.
    """
    ofmsgs = []
    if ports:
        self.logger.info('Resolving %s', ip_gw)
        # destination MAC/IP are the solicited-node multicast equivalents
        nd_mac = self.ipv6_link_eth_mcast(ip_gw)
        ip_gw_mcast = self.ipv6_link_mcast_from_ucast(ip_gw)
        # header built against the first port, flooded to all below
        port_num = ports[0].number
        pkt = self.build_ethernet_pkt(
            nd_mac, port_num, vlan, ether.ETH_TYPE_IPV6)
        ipv6_pkt = ipv6.ipv6(
            src=controller_ip.ip, dst=ip_gw_mcast, nxt=inet.IPPROTO_ICMPV6)
        icmpv6_pkt = icmpv6.icmpv6(
            type_=icmpv6.ND_NEIGHBOR_SOLICIT,
            data=icmpv6.nd_neighbor(
                dst=ip_gw,
                option=icmpv6.nd_option_sla(hw_src=self.FAUCET_MAC)))
        pkt.add_protocol(ipv6_pkt)
        pkt.add_protocol(icmpv6_pkt)
        pkt.serialize()
        for port in ports:
            ofmsgs.append(self.valve_packetout(port.number, pkt.data))
    return ofmsgs
def resolve_gateways(self):
    """Re-resolve route gateways whose neighbor cache entries are missing
    or older than the configured arp_neighbor_timeout.

    Uses ARP for IPv4 gateways and neighbor solicitation for IPv6 ones.
    Returns a list of OpenFlow messages (empty when the DP is not running).
    """
    if not self.dp.running:
        return []
    ofmsgs = []
    now = time.time()
    for vlan in self.dp.vlans.itervalues():  # Python 2 dict iteration
        untagged_ports = self.build_flood_ports_for_vlan(
            vlan.untagged, None)
        tagged_ports = self.build_flood_ports_for_vlan(
            vlan.tagged, None)
        # identical resolution logic for IPv4 (ARP) and IPv6 (ND)
        for routes, neighbor_cache, neighbor_resolver in (
                (vlan.ipv4_routes, vlan.arp_cache, self.arp_for_ip_gw),
                (vlan.ipv6_routes, vlan.nd_cache, self.nd_solicit_ip_gw)):
            for ip_gw in set(routes.values()):
                for controller_ip in vlan.controller_ips:
                    # only resolve gateways on a directly attached subnet
                    if ip_gw in controller_ip:
                        cache_age = None
                        if ip_gw in neighbor_cache:
                            cache_time = neighbor_cache[ip_gw].cache_time
                            cache_age = now - cache_time
                        if (cache_age is None or
                                cache_age > self.dp.arp_neighbor_timeout):
                            for ports in untagged_ports, tagged_ports:
                                ofmsgs.extend(neighbor_resolver(
                                    ip_gw, controller_ip, vlan, ports))
    return ofmsgs
def host_expire(self):
    """Drop non-permanent hosts from each VLAN's cache once they have been
    idle longer than the datapath timeout."""
    if not self.dp.running:
        return
    now = time.time()
    for vlan in self.dp.vlans.itervalues():
        # collect first, then delete, to avoid mutating while iterating
        expired_hosts = [
            eth_src for eth_src, entry in vlan.host_cache.iteritems()
            if not entry.permanent and now - entry.cache_time > self.dp.timeout]
        if expired_hosts:
            for eth_src in expired_hosts:
                del vlan.host_cache[eth_src]
                self.logger.info(
                    'expiring host %s from vlan %u',
                    eth_src, vlan.vid)
            self.logger.info(
                '%u recently active hosts on vlan %u',
                len(vlan.host_cache), vlan.vid)
class ArubaValve(Valve):
    """Valve variant for Aruba hardware: installs the switch pipeline from
    a JSON table-features description instead of the default tables."""

    def switch_features(self, dp_id, msg):
        """Return table-features messages loading the Aruba pipeline.

        NOTE(review): `parser` is presumably the Ryu OpenFlow parser module
        bound at file scope (not visible here) -- confirm.
        """
        ryu_table_loader = aruba.LoadRyuTables()
        ryu_table_loader.load_tables(
            os.path.join(aruba.CFG_PATH, 'aruba_pipeline.json'), parser)
        ofmsgs = [parser.OFPTableFeaturesStatsRequest(
            datapath=None,
            body=ryu_table_loader.ryu_tables)]
        return ofmsgs
|
from flask import Flask, jsonify, request, abort, make_response, render_template, url_for, redirect
from models import *
from app import db, app
import datetime, requests, json
from pprint import pprint
@app.route('/', methods = ['GET'])
def index():
    """Root URL: redirect to the blob listing page."""
    return redirect (url_for('get_all_blobs'))
@app.route('/blob/', methods = ['GET'])
def get_all_blobs():
    """Render the HTML listing of every stored blob."""
    blobs = Blob.query.all()
    listing_url = url_for('get_all_blobs', _external=True)
    # Populate display-friendly attributes on each record before rendering.
    for blob in blobs:
        blob.filename = blob.file_name()
        blob.size = blob.file_size()
        blob.extension = blob.icon_img()
    return render_template("filedisplay.html",
                           files=blobs,
                           download_url=listing_url)
@app.route('/blob/', methods =['POST'])
def upload_blob():
    """Store the uploaded 'file' form field as a new Blob row, then
    redirect back to the listing."""
    f = request.files['file']
    # read once; a second read() on the same stream would return b''
    fr = f.read()
    b = Blob(item=fr, filename=f.filename, extension=f.content_type, size=len(fr), created_at=datetime.datetime.utcnow(), last_sync=datetime.datetime.utcnow())
    db.session.add(b)
    db.session.commit()
    return redirect (url_for('get_all_blobs'))
@app.route('/blob/<int:id>', methods = ['PUT'])
def update_blob(id):
    """Replace blob *id* with the uploaded 'file' form field.

    Returns a JSON representation of the updated blob. When no file is
    attached the view falls through and returns None (unchanged from the
    original behavior).
    """
    if request.files['file']:
        b = Blob.query.get(id)
        f = request.files['file']
        # BUG FIX: read the upload exactly once. The original called
        # f.read() a second time for the size, but the stream was already
        # exhausted, so b.size was always recorded as 0.
        fr = f.read()
        b.item = fr
        b.filename = f.filename
        b.extension = f.content_type
        b.size = len(fr)
        b.last_sync = datetime.datetime.utcnow()
        db.session.add(b)
        db.session.commit()
        return jsonify ( { 'Blob': b.to_dict() } )
@app.route('/blob/<int:id>/', methods = ['GET'])
def download_blob(id):
    """Send blob *id* back to the client as a downloadable attachment,
    restoring its stored content type and filename."""
    b = Blob.query.get(id)
    response = make_response(b.item)
    response.headers['Content-Type'] = b.extension
    response.headers['Content-Disposition'] = 'attachment; filename="%s"' % b.filename
    return response
@app.route('/blob/<int:id>/', methods = ['DELETE'])
def delete_blob(id):
    """Delete blob *id* and confirm with a JSON payload.

    NOTE(review): Blob.query.get(id) returns None for an unknown id, which
    would make db.session.delete raise -- confirm callers guarantee the id.
    """
    b = Blob.query.get(id)
    db.session.delete(b)
    db.session.commit()
    return jsonify ( {'Deleted blob':id} )
# Register Node with MasterNode
@app.before_first_request
def initialize():
    """POST this node's address to the MasterNode before the first request
    is served. The response is ignored (best-effort registration).

    NOTE(review): a connection failure here would raise and abort the first
    request -- confirm that is acceptable.
    """
    url = 'http://46.162.89.26:5000/' # IP for MasterNode server
    ipaddr = ip_converter(url_for('index', _external=True))
    payload = {'ip': ipaddr}
    headers = {'content-type': 'application/json'}
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    return ""
def ip_converter(server_url):
    """Extract the host[:port] component from an external URL such as
    'http://host:port/' (the second-to-last '/'-separated segment)."""
    segments = server_url.split("/")
    return str(segments[-2])
Added a node communication method (`network_sync`) for propagating blob operations to peer nodes.
from flask import Flask, jsonify, request, abort, make_response, render_template, url_for, redirect
from models import *
from app import db, app
import datetime, requests, json
from pprint import pprint
MASTER_URL = 'http://46.162.89.26:5000/' # API access point for MasterNode
nodelist = []
@app.route('/', methods = ['GET'])
def index():
    """Root URL: redirect to the blob listing page."""
    return redirect (url_for('get_all_blobs'))
@app.route('/blob/', methods = ['GET'])
def get_all_blobs():
    """Render the HTML listing of every stored blob."""
    bl = Blob.query.all()
    path = url_for('get_all_blobs', _external=True)
    # Iterate over files to fix displayable values
    for b in bl:
        b.filename = b.file_name()
        b.size = b.file_size()
        b.extension = b.icon_img()
    return render_template("filedisplay.html",
                           files = bl,
                           download_url = path
                           )
@app.route('/blob/', methods =['POST'])
def upload_blob(json=0):
    """Store the uploaded 'file' form field as a new Blob row.

    json -- truthy to return a JSON {'Blob': id} response (used by
            node-to-node sync); falsy to redirect to the HTML listing.
            NOTE(review): this parameter shadows the module-level `json`
            import inside this function -- harmless today, but confirm
            before calling json.* here.
    """
    f = request.files['file']
    # read once; a second read() on the same stream would return b''
    fr = f.read()
    b = Blob(item=fr, filename=f.filename, extension=f.content_type, size=len(fr), created_at=datetime.datetime.utcnow(), last_sync=datetime.datetime.utcnow())
    db.session.add(b)
    db.session.commit()
    if json:
        return jsonify ( { 'Blob': b.id} ), 200
    else:
        return redirect (url_for('get_all_blobs'))
@app.route('/blob/<int:id>', methods = ['PUT'])
def update_blob(id):
    """Replace blob *id* with the uploaded 'file' form field.

    Returns a JSON {'Blob': id} response on success. If no blob with that
    id exists, the upload is stored as a new blob instead.
    """
    if request.files['file']:
        b = Blob.query.get(id)
        if b:
            f = request.files['file']
            # BUG FIX: read the upload exactly once. The original called
            # f.read() a second time for the size, but the stream was
            # already exhausted, so b.size was always recorded as 0.
            fr = f.read()
            b.item = fr
            b.filename = f.filename
            b.extension = f.content_type
            b.size = len(fr)
            b.last_sync = datetime.datetime.utcnow()
            db.session.add(b)
            db.session.commit()
            return jsonify ( { 'Blob': b.id } ), 200
        else:
            # BUG FIX: the fallback response was computed but never
            # returned, so unknown ids yielded an empty 200/None response.
            return upload_blob(json=1)
@app.route('/blob/<int:id>/', methods = ['GET'])
def download_blob(id):
    """Stream blob *id* back to the client as a file attachment."""
    blob = Blob.query.get(id)
    response = make_response(blob.item)
    headers = response.headers
    headers['Content-Type'] = blob.extension
    headers['Content-Disposition'] = 'attachment; filename="%s"' % blob.filename
    return response
@app.route('/blob/<int:id>/', methods = ['DELETE'])
def delete_blob(id):
    """Delete blob *id* and confirm with a JSON payload.

    NOTE(review): Blob.query.get(id) returns None for an unknown id, which
    would make db.session.delete raise -- confirm callers guarantee the id.
    """
    b = Blob.query.get(id)
    db.session.delete(b)
    db.session.commit()
    return jsonify ( {'Deleted blob':id} ), 200
# Register Node with MasterNode
@app.before_first_request
def initialize():
    """Register this node's external URL with the MasterNode and populate
    the peer node list before serving the first request.

    The registration response is ignored (best-effort).
    """
    ipaddr = url_for('index', _external=True)
    payload = {'ip': ipaddr}
    headers = {'content-type': 'application/json'}
    requests.post(MASTER_URL, data=json.dumps(payload), headers=headers)
    update_nodelist()
# Fix the list of nodes in the network (excluding self)
def update_nodelist():
    """Fetch the node registry from the MasterNode and append every peer
    address (excluding this node's own URL) to the module-level `nodelist`.

    NOTE(review): `nodelist` is never cleared first, so repeated calls
    accumulate duplicate entries -- confirm this is intended.
    """
    nodeIP = url_for('index', _external=True)
    r = requests.get(MASTER_URL)
    r_json = convert(r.json())
    for i in r_json['Nodes']:
        if not i.get('ipaddr') == nodeIP:
            nodelist.append(i.get('ipaddr'))
# Convert JSON results from unicode to utf-8
# Taken from: http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python
def convert(input):
    """Recursively convert unicode strings in a JSON-decoded structure into
    utf-8 byte strings (Python 2 only: relies on `unicode`/`iteritems`)."""
    if isinstance(input, dict):
        return {convert(key): convert(value) for key, value in input.iteritems()}
    elif isinstance(input, list):
        return [convert(element) for element in input]
    elif isinstance(input, unicode):
        return input.encode('utf-8')
    else:
        return input
# This method handles communication between nodes
# PARAMS: (str , int, str) -> REST method -> Which File -> Destination Node
def network_sync(method, fileID, node):
    """Propagate a blob operation to a peer *node*.

    method -- 'POST', 'PUT' or 'DELETE'
    fileID -- id of the local Blob row to replicate
    node   -- base URL of the destination node (with trailing slash)
    """
    if method == 'POST':
        url = node + 'blob/'
        f = Blob.query.get(fileID)
        files = {'file': (f.filename, f.item)}
        requests.post(url, files=files)
    if method == 'PUT':
        # BUG FIX: the PUT route is registered as '/blob/<int:id>' with no
        # trailing slash, so the old URL ('.../blob/<id>/') could never
        # match on the peer. Drop the trailing slash.
        url = node + 'blob/' + str(fileID)
        f = Blob.query.get(fileID)
        files = {'file': (f.filename, f.item)}
        requests.put(url, files=files)
    if method == 'DELETE':
        url = node + 'blob/' + str(fileID) + '/'
        requests.delete(url)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import boto3
from configparser import ConfigParser, NoSectionError
from datetime import datetime
import logging
from os.path import expanduser, join
from os import environ
import requests
from warrant.aws_srp import AWSSRP
from amaascore.config import ENVIRONMENT, ENDPOINTS, CONFIGURATIONS
from amaascore.exceptions import AMaaSException
class AMaaSSession(object):
    """Borg-style session (all instances share one __dict__) that logs in
    to AWS Cognito via SRP and issues authenticated HTTP requests.

    Every HTTP verb raises AMaaSException('Not Authenticated') when there
    is no login or the login is older than the refresh period.
    """
    # shared state for the Borg pattern; populated by the first instance
    __shared_state = {}

    def __init__(self, username, password, environment_config, logger, session_token=None):
        if not AMaaSSession.__shared_state:
            # First construction: initialise and publish the shared state.
            AMaaSSession.__shared_state = self.__dict__
            self.refresh_period = 45 * 60  # minutes * seconds
            self.username = username
            self.password = password
            self.tokens = None
            self.session_token = session_token
            self.last_authenticated = None
            self.session = requests.Session()
            self.client = boto3.client('cognito-idp', environment_config.cognito_region)
            self.aws = AWSSRP(username=self.username, password=self.password, pool_id=environment_config.cognito_pool,
                              client_id=environment_config.cognito_client_id, client=self.client)
            self.logger = logger
        else:
            # Later constructions adopt the shared state.
            self.__dict__ = AMaaSSession.__shared_state
        if self.needs_refresh():
            self.login()

    def needs_refresh(self):
        """Return True when there is no login or it is older than the
        refresh period.

        NOTE(review): timedelta.seconds ignores whole days, so a session
        older than 24h can wrongly appear fresh -- likely should be
        total_seconds(). Confirm before changing.
        """
        if not (self.last_authenticated and
                (datetime.utcnow() - self.last_authenticated).seconds < self.refresh_period):
            return True
        else:
            return False

    def login(self):
        """Authenticate: reuse a provided session token, or perform a
        Cognito SRP login and install the resulting IdToken header."""
        if self.session_token:
            self.logger.info("Skipping login since session token is provided.")
            self.session.headers.update({'Authorization': self.session_token})
            self.last_authenticated = datetime.utcnow()
        else:
            try:
                self.logger.info("Attempting login for: %s", self.username)
                self.tokens = self.aws.authenticate_user().get('AuthenticationResult')
                self.logger.info("Login successful")
                self.last_authenticated = datetime.utcnow()
                self.session.headers.update({'Authorization': self.tokens.get('IdToken')})
            except self.client.exceptions.NotAuthorizedException as e:
                # failed logins leave the session unauthenticated
                self.logger.info("Login failed")
                self.logger.error(e.response.get('Error'))
                self.last_authenticated = None

    def put(self, url, data=None, **kwargs):
        """Authenticated PUT; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.put(url=url, data=data, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def post(self, url, data=None, **kwargs):
        """Authenticated POST; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.post(url=url, data=data, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def delete(self, url, **kwargs):
        """Authenticated DELETE; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.delete(url=url, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def get(self, url, **kwargs):
        """Authenticated GET; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.get(url=url, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def patch(self, url, data=None, **kwargs):
        """Authenticated PATCH; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.patch(url=url, data=data, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')
class Interface(object):
    """
    Currently this class doesn't do anything - but I anticipate it will be needed in the future.
    """
    def __init__(self, endpoint_type, endpoint=None, environment=ENVIRONMENT, username=None, password=None,
                 config_filename=None, logger=None, session_token=None):
        """Build an API interface for *endpoint_type*.

        Credentials are resolved in order: explicit argument, the
        AMAAS_USERNAME / AMAAS_PASSWORD environment variables, then the
        ~/.amaas.cfg config file.
        """
        self.logger = logger or logging.getLogger(__name__)
        self.config_filename = config_filename
        self.endpoint_type = endpoint_type
        self.environment = environment
        self.environment_config = CONFIGURATIONS.get(environment)
        self.endpoint = endpoint or self.get_endpoint()
        self.json_header = {'Content-Type': 'application/json'}
        username = username or environ.get('AMAAS_USERNAME') or self.read_config('username')
        password = password or environ.get('AMAAS_PASSWORD') or self.read_config('password')
        self.session = AMaaSSession(username, password, self.environment_config, self.logger, session_token)
        self.logger.info('Interface Created')

    def get_endpoint(self):
        """Return the full URL for this interface's endpoint type.

        'local' environments use the base URL directly; otherwise the URL
        is base_url/api_version/endpoint. Raises KeyError for an unknown
        environment or endpoint type.
        """
        if self.environment == 'local':
            return self.environment_config.base_url
        if self.environment not in CONFIGURATIONS:
            raise KeyError('Invalid environment specified.')
        base_url = self.environment_config.base_url
        endpoint = ENDPOINTS.get(self.endpoint_type)
        api_version = self.environment_config.api_version
        if not endpoint:
            raise KeyError('Cannot find endpoint')
        endpoint = '/'.join([base_url, api_version, endpoint])
        self.logger.info("Using Endpoint: %s", endpoint)
        return endpoint

    @staticmethod
    def generate_config_filename():
        """Return the default config file path (~/.amaas.cfg)."""
        home = expanduser("~")
        return join(home, '.amaas.cfg')

    def read_config(self, option):
        """Read *option* from the [auth] section of the config file.

        Raises AMaaSException when the file has no [auth] section.
        """
        if self.config_filename is None:
            self.config_filename = self.generate_config_filename()
        parser = ConfigParser()
        parser.read(self.config_filename)
        try:
            option = parser.get(section='auth', option=option)
        except NoSectionError:
            raise AMaaSException('Invalid AMaaS config file')
        return option
Made `username` and `password` optional in `AMaaSSession`; a pre-issued session token may be supplied instead.
from __future__ import absolute_import, division, print_function, unicode_literals
import boto3
from configparser import ConfigParser, NoSectionError
from datetime import datetime
import logging
from os.path import expanduser, join
from os import environ
import requests
from warrant.aws_srp import AWSSRP
from amaascore.config import ENVIRONMENT, ENDPOINTS, CONFIGURATIONS
from amaascore.exceptions import AMaaSException
class AMaaSSession(object):
    """Borg-style session (all instances share one __dict__) for talking to
    the AMaaS API.

    Authentication is either a pre-issued *session_token* or a Cognito SRP
    login with *username*/*password* (in which case the Cognito client is
    created lazily). Every HTTP verb raises
    AMaaSException('Not Authenticated') when there is no fresh login.
    """
    # shared state for the Borg pattern; populated by the first instance
    __shared_state = {}

    def __init__(self, environment_config, logger, username=None, password=None, session_token=None):
        if not AMaaSSession.__shared_state:
            # First construction: initialise and publish the shared state.
            AMaaSSession.__shared_state = self.__dict__
            self.refresh_period = 45 * 60  # minutes * seconds
            self.username = username
            self.password = password
            self.tokens = None
            self.session_token = session_token
            self.logger = logger
            self.last_authenticated = None
            self.session = requests.Session()
            if not self.session_token:
                # Only set up Cognito SRP when we actually have to log in.
                self.client = boto3.client('cognito-idp', environment_config.cognito_region)
                self.aws = AWSSRP(username=self.username, password=self.password,
                                  pool_id=environment_config.cognito_pool,
                                  client_id=environment_config.cognito_client_id, client=self.client)
        else:
            # Later constructions adopt the shared state.
            self.__dict__ = AMaaSSession.__shared_state
        if self.needs_refresh():
            self.login()

    def needs_refresh(self):
        """Return True when there is no login or it is older than the
        refresh period."""
        # BUG FIX: timedelta.seconds ignores whole days, so a session more
        # than 24 hours old could wrongly look fresh and a stale token
        # would keep being used. total_seconds() measures the real age.
        if not (self.last_authenticated and
                (datetime.utcnow() - self.last_authenticated).total_seconds() < self.refresh_period):
            return True
        else:
            return False

    def login(self):
        """Authenticate: reuse a provided session token, or perform a
        Cognito SRP login and install the resulting IdToken header."""
        if self.session_token:
            self.logger.info("Skipping login since session token is provided.")
            self.session.headers.update({'Authorization': self.session_token})
            self.last_authenticated = datetime.utcnow()
        else:
            try:
                self.logger.info("Attempting login for: %s", self.username)
                self.tokens = self.aws.authenticate_user().get('AuthenticationResult')
                self.logger.info("Login successful")
                self.last_authenticated = datetime.utcnow()
                self.session.headers.update({'Authorization': self.tokens.get('IdToken')})
            except self.client.exceptions.NotAuthorizedException as e:
                # failed logins leave the session unauthenticated
                self.logger.info("Login failed")
                self.logger.error(e.response.get('Error'))
                self.last_authenticated = None

    def put(self, url, data=None, **kwargs):
        """Authenticated PUT; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.put(url=url, data=data, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def post(self, url, data=None, **kwargs):
        """Authenticated POST; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.post(url=url, data=data, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def delete(self, url, **kwargs):
        """Authenticated DELETE; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.delete(url=url, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def get(self, url, **kwargs):
        """Authenticated GET; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.get(url=url, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')

    def patch(self, url, data=None, **kwargs):
        """Authenticated PATCH; raises AMaaSException when not logged in."""
        # Add a refresh
        if self.last_authenticated and not self.needs_refresh():
            return self.session.patch(url=url, data=data, **kwargs)
        else:
            raise AMaaSException('Not Authenticated')
class Interface(object):
    """
    Currently this class doesn't do anything - but I anticipate it will be needed in the future.
    """
    def __init__(self, endpoint_type, endpoint=None, environment=ENVIRONMENT, username=None, password=None,
                 config_filename=None, logger=None, session_token=None):
        """Build an API interface for *endpoint_type*.

        Credentials are resolved in order: explicit argument, the
        AMAAS_USERNAME / AMAAS_PASSWORD environment variables, then the
        ~/.amaas.cfg config file.
        """
        self.logger = logger or logging.getLogger(__name__)
        self.config_filename = config_filename
        self.endpoint_type = endpoint_type
        self.environment = environment
        self.environment_config = CONFIGURATIONS.get(environment)
        self.endpoint = endpoint or self.get_endpoint()
        self.json_header = {'Content-Type': 'application/json'}
        username = username or environ.get('AMAAS_USERNAME') or self.read_config('username')
        password = password or environ.get('AMAAS_PASSWORD') or self.read_config('password')
        self.session = AMaaSSession(username=username, password=password,
                                    environment_config=self.environment_config,
                                    logger=self.logger, session_token=session_token)
        self.logger.info('Interface Created')

    def get_endpoint(self):
        """Return the full URL for this interface's endpoint type.

        'local' environments use the base URL directly; otherwise the URL
        is base_url/api_version/endpoint. Raises KeyError for an unknown
        environment or endpoint type.
        """
        if self.environment == 'local':
            return self.environment_config.base_url
        if self.environment not in CONFIGURATIONS:
            raise KeyError('Invalid environment specified.')
        base_url = self.environment_config.base_url
        endpoint = ENDPOINTS.get(self.endpoint_type)
        api_version = self.environment_config.api_version
        if not endpoint:
            raise KeyError('Cannot find endpoint')
        endpoint = '/'.join([base_url, api_version, endpoint])
        self.logger.info("Using Endpoint: %s", endpoint)
        return endpoint

    @staticmethod
    def generate_config_filename():
        """Return the default config file path (~/.amaas.cfg)."""
        home = expanduser("~")
        return join(home, '.amaas.cfg')

    def read_config(self, option):
        """Read *option* from the [auth] section of the config file.

        Raises AMaaSException when the file has no [auth] section.
        """
        if self.config_filename is None:
            self.config_filename = self.generate_config_filename()
        parser = ConfigParser()
        parser.read(self.config_filename)
        try:
            option = parser.get(section='auth', option=option)
        except NoSectionError:
            raise AMaaSException('Invalid AMaaS config file')
        return option
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Processing for baseline segmenter output
"""
import PIL
import logging
import warnings
import numpy as np
import shapely.geometry as geom
from PIL import Image, ImageDraw
from numpy.polynomial import Polynomial
from scipy.ndimage import label, black_tophat
from scipy.ndimage.filters import gaussian_filter, gaussian_filter1d
from scipy.ndimage.morphology import grey_dilation
from scipy.spatial import distance_matrix, ConvexHull, Delaunay
from scipy.spatial.distance import cdist, pdist, squareform
from shapely.ops import nearest_points, unary_union
from skimage import draw
from skimage.filters import apply_hysteresis_threshold
from skimage.measure import approximate_polygon, find_contours
from skimage.morphology import skeletonize, watershed
from skimage.transform import PiecewiseAffineTransform, warp
from itertools import combinations
from collections import defaultdict, OrderedDict
from typing import List, Tuple, Optional, Generator, Union, Dict, Any, Sequence
from kraken.lib import morph, util
from kraken.binarization import nlbin
logger = logging.getLogger('kraken')
def reading_order(lines: Sequence, text_direction: str = 'lr') -> List:
    """Given the list of lines (a list of 2D slices), computes
    the partial reading order. The output is a binary 2D array
    such that order[i,j] is true if line i comes before line j
    in reading order.

    Each line is a pair (vertical slice, horizontal slice).
    """
    logger.info('Compute reading order on {} lines in {} direction'.format(len(lines), text_direction))
    order = np.zeros((len(lines), len(lines)), 'B')

    def _x_overlaps(u, v):
        # horizontal extents intersect
        return u[1].start < v[1].stop and u[1].stop > v[1].start

    def _above(u, v):
        return u[0].start < v[0].start

    def _left_of(u, v):
        return u[1].stop < v[1].start

    def _separates(w, u, v):
        # true when w lies vertically between u and v and spans the
        # horizontal gap separating them
        if w[0].stop < min(u[0].start, v[0].start):
            return 0
        if w[0].start > max(u[0].stop, v[0].stop):
            return 0
        if w[1].start < u[1].stop and w[1].stop > v[1].start:
            return 1
        return 0

    # for right-to-left scripts column precedence is mirrored
    if text_direction == 'rl':
        def horizontal_order(u, v):
            return not _left_of(u, v)
    else:
        horizontal_order = _left_of

    for i, u in enumerate(lines):
        for j, v in enumerate(lines):
            if _x_overlaps(u, v):
                # same column: vertical position decides
                if _above(u, v):
                    order[i, j] = 1
            else:
                # different columns: use horizontal order unless some line
                # separates the two
                if [w for w in lines if _separates(w, u, v)] == []:
                    if horizontal_order(u, v):
                        order[i, j] = 1
    return order
def topsort(order: np.array) -> np.array:
    """Given a binary array defining a partial order (o[i,j]==True means i<j),
    compute a topological sort. This is a quick and dirty implementation
    that works for up to a few thousand elements."""
    logger.info('Perform topological sort on partially ordered lines')
    count = len(order)
    seen = np.zeros(count)
    result = []

    def _dfs(node):
        # depth-first: emit all predecessors of `node` before `node` itself
        if seen[node]:
            return
        seen[node] = 1
        preds, = np.nonzero(np.ravel(order[:, node]))
        for pred in preds:
            _dfs(pred)
        result.append(node)

    for node in range(count):
        _dfs(node)
    return result
def denoising_hysteresis_thresh(im, low, high, sigma):
    """Gaussian-smooth *im* with *sigma*, then hysteresis-threshold it
    between *low* and *high*. Returns a boolean array."""
    im = gaussian_filter(im, sigma)
    return apply_hysteresis_threshold(im, low, high)
def _find_superpixels(skeleton, heatmap, min_sp_dist):
    """Greedily pick superpixel seed points on the skeletonized baseline map.

    Skeleton pixels are visited in order of descending heatmap confidence;
    a pixel is kept only when it is more than *min_sp_dist* away from every
    previously kept point. Returns an (N, 2) array of (row, col) points, or
    an empty array for a blank page.
    """
    logger.debug('Finding superpixels')
    conf_map = heatmap * skeleton
    # indices of all pixels, most confident first
    sp_idx = np.unravel_index(np.argsort(1.-conf_map, axis=None), conf_map.shape)
    if not sp_idx[0].any():
        logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
        return np.empty(0)
    # position of the first zero-confidence pixel; everything after it is
    # background and is discarded
    zeroes_idx = conf_map[sp_idx].argmin()
    if not zeroes_idx:
        logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
        return np.empty(0)
    sp_idx = sp_idx[0][:zeroes_idx], sp_idx[1][:zeroes_idx]
    sp_can = [(sp_idx[0][0], sp_idx[1][0])]
    for x in range(len(sp_idx[0])):
        loc = np.array([[sp_idx[0][x], sp_idx[1][x]]])
        # keep only candidates far enough from all accepted points
        if min(cdist(sp_can, loc)) > min_sp_dist:
            sp_can.extend(loc.tolist())
    return np.array(sp_can)
def _compute_sp_states(sp_can, bl_map, sep_map):
    """
    Estimates the superpixel state information.

    Delaunay-triangulates the superpixel candidates and rates each edge by
    the baseline/separator map values sampled along it, then drops edges
    that are weak, noisy, or cross a separator. Returns a dict mapping a
    sorted point pair to (bl_mean, bl_var, sep_mean, sep_max).
    """
    logger.debug('Triangulating superpixels')
    tri = Delaunay(sp_can, qhull_options="QJ Pp")
    indices, indptr = tri.vertex_neighbor_vertices
    # dict mapping each edge to its intensity. Needed for subsequent clustering step.
    intensities = {}
    # radius of circular environment around SP for ILD estimation
    logger.debug('Computing superpixel state information')
    for vertex in range(len(sp_can)):
        # look up neighboring indices
        neighbors = tri.points[indptr[indices[vertex]:indices[vertex+1]]]
        # calculate intensity of line segments to neighbors in both bl map and separator map
        intensity = []  # NOTE(review): collected but never used -- confirm before removing
        for nb in neighbors.astype('int'):
            # canonical (sorted) key so each undirected edge appears once
            key = [tuple(sp_can[vertex]), tuple(nb)]
            key.sort()
            key = tuple(key)
            line_locs = draw.line(*(key[0] + key[1]))
            intensities[key] = (bl_map[line_locs].mean(), bl_map[line_locs].var(), sep_map[line_locs].mean(), sep_map[line_locs].max())
            intensity.append(intensities[key][0])
    # filter edges in triangulation
    for k, v in list(intensities.items()):
        # weak mean baseline response
        if v[0] < 0.4:
            del intensities[k]
            continue
        # noisy (high variance) edge
        if v[1] > 5e-02:
            del intensities[k]
            continue
        # filter edges with high separator affinity
        if v[2] > 0.125 or v[3] > 0.25 or v[0] < 0.5:
            del intensities[k]
            continue
    return intensities
def _cluster_lines(intensities):
    """
    Clusters lines according to their intensities.

    Greedily merges the surviving Delaunay edges into connected clusters.
    clusters[0] is the shrinking work list; real clusters start at index 1.
    """
    edge_list = list(intensities.keys())

    def _point_in_cluster(p):
        # index of the cluster containing point p, or 0 if unassigned
        for idx, cluster in enumerate(clusters[1:]):
            if p in [point for edge in cluster for point in edge]:
                return idx+1
        return 0

    # cluster
    logger.debug('Computing clusters')
    n = 0
    clusters = [edge_list]
    # NOTE: edge_list is mutated while being iterated, so a single pass can
    # skip elements; the enclosing while loop reruns until no edge moves.
    while len(edge_list) != n:
        n = len(edge_list)
        for edge in edge_list:
            cl_p0 = _point_in_cluster(edge[0])
            cl_p1 = _point_in_cluster(edge[1])
            # new cluster case
            if not cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters.append([edge])
            # extend case
            elif cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters[cl_p0].append(edge)
            elif cl_p1 and not cl_p0:
                edge_list.remove(edge)
                clusters[cl_p1].append(edge)
            # merge case
            elif cl_p0 != cl_p1 and cl_p0 and cl_p1:
                edge_list.remove(edge)
                clusters[min(cl_p0, cl_p1)].extend(clusters.pop(max(cl_p0, cl_p1)))
                clusters[min(cl_p0, cl_p1)].append(edge)
    return clusters
def _interpolate_lines(clusters):
    """
    Interpolates the baseline clusters and adds polygonal information.

    Fits a low-degree polynomial through each cluster's points and samples
    it, returning one list of (y, x) points per baseline.
    """
    logger.debug('Reticulating splines')
    lines = []
    for cluster in clusters[1:]:
        # unique points of the cluster, sorted by x coordinate
        points = sorted(set(point for edge in cluster for point in edge), key=lambda x: x[1])
        x = [x[1] for x in points]
        y = [x[0] for x in points]
        # very short lines might not have enough superpixels to ensure a well-conditioned regression
        deg = min(len(x)-1, 3)
        poly = Polynomial.fit(x, y, deg=deg)
        deriv = poly.deriv()  # NOTE(review): unused -- confirm before removing
        # sample the fit across its domain (at least 2 points)
        xp, yp = poly.linspace(max(np.diff(poly.domain)//deg, 2))
        xp = xp.astype('int')
        yp = yp.astype('int')
        lines.append(list(zip(yp, xp)))
    return lines
def vectorize_lines(im: np.ndarray, threshold: float = 0.2, min_sp_dist: int = 10):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (3, H, W) with the first dimension
                         being a probability distribution over (background,
                         baseline, separators).
        threshold (float): Binarization threshold applied to the maps.
        min_sp_dist (int): Minimum distance between superpixel seeds.

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the points of all baseline polylines.
    """
    # split into baseline and separator map
    bl_map = im[1]
    sep_map = im[2]
    # binarize (renamed from `bin`, which shadowed the builtin)
    bin_im = im > threshold
    skel = skeletonize(bin_im[1])
    sp_can = _find_superpixels(skel, heatmap=bl_map, min_sp_dist=min_sp_dist)
    if not sp_can.size:
        logger.warning('No superpixel candidates found in network output. Likely empty page.')
        return []
    intensities = _compute_sp_states(sp_can, bl_map, sep_map)
    clusters = _cluster_lines(intensities)
    lines = _interpolate_lines(clusters)
    return lines
def calculate_polygonal_environment(im: PIL.Image.Image, baselines: Sequence[Tuple[int, int]]):
    """
    Given a list of baselines and an input image, calculates a polygonal
    environment around each baseline.

    Args:
        im (PIL.Image): Input image
        baselines (sequence): List of lists containing a single baseline per
                              entry.

    Returns:
        List of lists of (row, col) contour points, one polygon per baseline.
    """
    bounds = np.array(im.size, dtype=np.float)
    im = np.array(im)
    # compute tophat features of input image
    im_feats = black_tophat(im, 3)

    def _ray_intersect_boundaries(ray, direction, aabb):
        """
        Simplified version of [0] for 2d and AABB anchored at (0,0).

        [0] http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
        """
        dir_fraction = np.empty(2, dtype=ray.dtype)
        dir_fraction[direction == 0.0] = np.inf
        dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0])
        t1 = (-ray[0]) * dir_fraction[0]
        t2 = (aabb[0] - ray[0]) * dir_fraction[0]
        t3 = (-ray[1]) * dir_fraction[1]
        t4 = (aabb[1] - ray[1]) * dir_fraction[1]
        tmin = max(min(t1, t2), min(t3, t4))
        tmax = min(max(t1, t2), max(t3, t4))
        # smallest non-negative parameter gives the first boundary hit
        t = min(x for x in [tmin, tmax] if x >= 0)
        return ray + (direction * t)

    def _extract_patch(env_up, env_bottom, baseline):
        """
        Calculate a line image batch from a ROI and the original baseline
        """
        # watershed markers: 2 on the baseline, 1 on both envelopes
        markers = np.zeros(bounds.astype('int')[::-1], dtype=np.int)
        for l in zip(baseline[:-1], baseline[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 2
        for l in zip(env_up[:-1], env_up[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        for l in zip(env_bottom[:-1], env_bottom[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        markers = grey_dilation(markers, size=3)
        # mask restricting the watershed to the envelope polygon
        full_polygon = np.concatenate((env_up, env_bottom[::-1]))
        r, c = draw.polygon(full_polygon[:,0], full_polygon[:,1])
        mask = np.zeros(bounds.astype('int')[::-1], dtype=np.bool)
        mask[c, r] = True
        patch = im_feats.copy()
        patch[mask != True] = 0
        # crop everything to the polygon's bounding box
        coords = np.argwhere(mask)
        r_min, c_min = coords.min(axis=0)
        r_max, c_max = coords.max(axis=0)
        patch = patch[r_min:r_max+1, c_min:c_max+1]
        markers = markers[r_min:r_max+1, c_min:c_max+1]
        mask = mask[r_min:r_max+1, c_min:c_max+1]
        # run watershed
        ws = watershed(patch, markers, 8, mask=mask)
        ws = grey_dilation(ws, size=3)
        # pad output to ensure contour is closed
        ws = np.pad(ws, 1)
        # find contour of central basin
        contours = find_contours(ws, 1.5, fully_connected='high')
        contour = np.array(unary_union([geom.Polygon(contour.tolist()) for contour in contours]).boundary, dtype='uint')
        ## approximate + remove offsets + transpose
        contour = np.transpose((approximate_polygon(contour, 5)-1+(r_min, c_min)), (0, 1)).astype('uint')
        return contour.tolist()

    polygons = []
    for idx, line in enumerate(baselines):
        # find intercepts with image bounds on each side of baseline
        lr = np.array(line[:2], dtype=np.float)
        lr_dir = lr[1] - lr[0]
        lr_dir = (lr_dir.T / np.sqrt(np.sum(lr_dir**2,axis=-1)))
        lr_up_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(-1,1))[::-1], bounds).astype('int')
        lr_bottom_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(1,-1))[::-1], bounds).astype('int')
        rr = np.array(line[-2:], dtype=np.float)
        rr_dir = rr[1] - rr[0]
        rr_dir = (rr_dir.T / np.sqrt(np.sum(rr_dir**2,axis=-1)))
        rr_up_intersect = _ray_intersect_boundaries(rr[0], (rr_dir*(-1,1))[::-1], bounds).astype('int')
        rr_bottom_intersect = _ray_intersect_boundaries(rr[0], (rr_dir*(1,-1))[::-1], bounds).astype('int')
        # build polygon between baseline and bbox intersects
        upper_polygon = geom.Polygon([lr_up_intersect.tolist()] + line + [rr_up_intersect.tolist()])
        bottom_polygon = geom.Polygon([lr_bottom_intersect.tolist()] + line + [rr_bottom_intersect.tolist()])
        # select baselines at least partially in each polygon
        side_a = [geom.LineString([lr_up_intersect.tolist(), rr_up_intersect.tolist()])]
        side_b = [geom.LineString([lr_bottom_intersect.tolist(), rr_bottom_intersect.tolist()])]
        for adj_line in baselines[:idx] + baselines[idx+1:]:
            adj_line = geom.LineString(adj_line)
            if upper_polygon.intersects(adj_line):
                side_a.append(adj_line)
            elif bottom_polygon.intersects(adj_line):
                side_b.append(adj_line)
        side_a = unary_union(side_a)
        side_b = unary_union(side_b)
        env_up = []
        env_bottom = []
        # find nearest points from baseline to previously selected baselines
        for point in line:
            _, upper_limit = nearest_points(geom.Point(point), side_a)
            _, bottom_limit = nearest_points(geom.Point(point), side_b)
            env_up.extend(list(upper_limit.coords))
            env_bottom.extend(list(bottom_limit.coords))
        env_up = np.array(env_up, dtype='uint')
        env_bottom = np.array(env_bottom, dtype='uint')
        polygons.append(_extract_patch(env_up, env_bottom, line))
    return polygons
def polygonal_reading_order(lines: Sequence[Tuple[List, List]], text_direction: str = 'lr') -> Sequence[Tuple[List, List]]:
    """
    Given a list of baselines, calculates the correct reading order and applies
    it to the input.

    Args:
        lines (Sequence): List of tuples containing the baseline and it's
                          polygonization.
        text_direction (str): Set principal text direction for column ordering.
                              Can be 'lr' or 'rl'

    Returns:
        A reordered input.
    """
    bounds = []
    for line in lines:
        # shapely bounds are (minx, miny, maxx, maxy); reading_order expects
        # (vertical slice, horizontal slice) per line.
        # BUG FIX: the original built slice(minx, miny)/slice(maxx, maxy),
        # mixing x and y coordinates instead of pairing them.
        l = geom.LineString(line[0]).bounds
        bounds.append((slice(l[1], l[3]), slice(l[0], l[2])))
    order = reading_order(bounds, text_direction)
    lsort = topsort(order)
    return [lines[i] for i in lsort]
def scale_polygonal_lines(lines: Sequence[Tuple[List, List]], scale: Union[float, Tuple[float, float]]) -> Sequence[Tuple[List, List]]:
    """
    Scales baseline and polygon coordinates by a constant factor.

    Args:
        lines (Sequence): List of tuples containing the baseline and its
                          polygonization.
        scale (float or tuple of floats): Scaling factor; a single float is
                                          applied to both axes.

    Returns:
        List of tuples with the scaled integer coordinates.
    """
    if isinstance(scale, float):
        scale = (scale, scale)
    return [((np.array(baseline) * scale).astype('int').tolist(),
             (np.array(polygon) * scale).astype('int').tolist())
            for baseline, polygon in lines]
def _test_intersect(bp, uv, bs):
    """
    Returns the intersection points of a ray with direction `uv` from
    `bp` with a polygon `bs`.

    Args:
        bp: 2d ray origin.
        uv: unit direction vector of the ray.
        bs: polygon vertices as an (n, 2) array.

    Returns:
        Flat array of intersection coordinates, one intersection per casting
        direction (first along uv*(1,-1), then along uv*(-1,1)).
    """
    # np.roll by 2 on the flattened (n, 2) array shifts by one whole vertex,
    # pairing each vertex with its predecessor: v holds the edge vectors,
    # u the vectors from each edge start to the ray origin.
    u = bp - np.roll(bs, 2)
    v = bs - np.roll(bs, 2)
    points = []
    # cast the ray on both sides of the origin
    for dir in ((1,-1), (-1,1)):
        # vector perpendicular to the (flipped) ray direction
        w = (uv * dir * (1,-1))[::-1]
        z = np.dot(v, w)
        # parametric positions: t1 along the ray, t2 along each edge
        t1 = np.cross(v, u) / z
        t2 = np.dot(u, w) / z
        # keep only intersections lying on an actual edge segment (0 <= t2 <= 1)
        t1 = t1[np.logical_and(t2 >= 0.0, t2 <= 1.0)]
        # pick the first remaining intersection in front of the origin (t1 >= 0)
        points.extend(bp + (t1[np.where(t1 >= 0)[0].min()] * (uv * dir)))
    return np.array(points)
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (dict): Either a dict with 'type': 'baselines' and a 'lines'
                       list of dicts carrying 'baseline' and 'boundary'
                       coordinate lists, or a dict with a 'boxes' list of
                       (x1, y1, x2, y2) tuples plus a 'text_direction' key.

    Yields:
        (PIL.Image, line-or-box) tuples of the extracted subimage and the
        entry it was extracted from.
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        # NOTE(review): numpy error state is silenced here but never restored
        # from old_settings.
        old_settings = np.seterr(all='ignore')
        siz = im.size
        white = Image.new(im.mode, siz)
        for line in bounds['lines']:
            # mask out everything outside the polygonal boundary
            mask = Image.new('1', siz, 0)
            draw = ImageDraw.Draw(mask)
            draw.polygon([tuple(x) for x in line['boundary']], outline=1, fill=1)
            masked_line = Image.composite(im, white, mask)
            bl = np.array(line['baseline'])
            # pair consecutive baseline points into segments; compute segment
            # midpoints and unit normals
            ls = np.dstack((bl[:-1:], bl[1::]))
            bisect_points = np.mean(ls, 2)
            norm_vec = (ls[...,1] - ls[...,0])[:,::-1]
            norm_vec_len = np.sqrt(np.sum(norm_vec**2, axis=1))
            # normals are not yet oriented; _test_intersect multiplies by
            # (1,-1) for the upper and (-1,1) for the lower side
            unit_vec = norm_vec / np.tile(norm_vec_len, (2, 1)).T
            # NOTE(review): reassigning `bounds` shadows the function argument;
            # safe only because the loop iterator was created above.
            bounds = np.array(line['boundary'])
            # intersect each midpoint normal with the boundary polygon
            src_points = np.stack([_test_intersect(bp, uv, bounds) for bp, uv in zip(bisect_points, unit_vec)])
            # distances from midpoints to the upper/lower boundary hits
            upper_dist = np.diag(distance_matrix(src_points[:,:2], bisect_points))
            upper_dist = np.dstack((np.zeros_like(upper_dist), upper_dist)).squeeze(0)
            lower_dist = np.diag(distance_matrix(src_points[:,2:], bisect_points))
            lower_dist = np.dstack((np.zeros_like(lower_dist), lower_dist)).squeeze(0)
            # map baseline points to straight baseline (cumulative arc length
            # from the first point)
            bl_dists = np.cumsum(np.diag(np.roll(squareform(pdist(bl)), 1)))
            bl_dst_pts = bl[0] + np.dstack((bl_dists, np.zeros_like(bl_dists))).squeeze(0)
            rect_bisect_pts = np.mean(np.dstack((bl_dst_pts[:-1:], bl_dst_pts[1::])), 2)
            upper_dst_pts = rect_bisect_pts - upper_dist
            lower_dst_pts = rect_bisect_pts + lower_dist
            src_points = np.concatenate((bl, src_points[:,:2], src_points[:,2:]))
            dst_points = np.concatenate((bl_dst_pts, upper_dst_pts, lower_dst_pts))
            # dewarp the masked line onto the straightened baseline
            tform = PiecewiseAffineTransform()
            tform.estimate(src_points, dst_points)
            i = Image.fromarray((warp(masked_line, tform) * 255).astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            # NOTE(review): these are lexicographic list comparisons, not
            # element-wise bounds checks -- confirm intent.
            if (box < [0, 0, 0, 0] or box[::2] > [im.size[0], im.size[0]] or
                    box[1::2] > [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(box, im.size))
                # NOTE(review): KrakenInputException is not imported in the
                # visible import block -- confirm the import exists upstream.
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
fix off-by-one error in intersect computation
# -*- coding: utf-8 -*-
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Processing for baseline segmenter output
"""
import PIL
import logging
import warnings
import numpy as np
import shapely.geometry as geom
from PIL import Image, ImageDraw
from numpy.polynomial import Polynomial
from scipy.ndimage import label, black_tophat
from scipy.ndimage.filters import gaussian_filter, gaussian_filter1d
from scipy.ndimage.morphology import grey_dilation
from scipy.spatial import distance_matrix, ConvexHull, Delaunay
from scipy.spatial.distance import cdist, pdist, squareform
from shapely.ops import nearest_points, unary_union
from skimage import draw
from skimage.filters import apply_hysteresis_threshold
from skimage.measure import approximate_polygon, find_contours
from skimage.morphology import skeletonize, watershed
from skimage.transform import PiecewiseAffineTransform, warp
from itertools import combinations
from collections import defaultdict, OrderedDict
from typing import List, Tuple, Optional, Generator, Union, Dict, Any, Sequence
from kraken.lib import morph, util
from kraken.binarization import nlbin
logger = logging.getLogger('kraken')
def reading_order(lines: Sequence, text_direction: str = 'lr') -> List:
    """Given the list of lines (a list of 2D slices), computes
    the partial reading order. The output is a binary 2D array
    such that order[i,j] is true if line i comes before line j
    in reading order."""
    logger.info('Compute reading order on {} lines in {} direction'.format(len(lines), text_direction))
    order = np.zeros((len(lines), len(lines)), 'B')

    def _x_overlaps(u, v):
        # horizontal extents of u and v overlap
        return u[1].start < v[1].stop and u[1].stop > v[1].start

    def _above(u, v):
        # u starts above v
        return u[0].start < v[0].start

    def _left_of(u, v):
        # u lies entirely to the left of v
        return u[1].stop < v[1].start

    def _separates(w, u, v):
        # 1 if w lies vertically between u and v and horizontally spans the
        # gap between them, 0 otherwise
        if w[0].stop < min(u[0].start, v[0].start):
            return 0
        if w[0].start > max(u[0].stop, v[0].stop):
            return 0
        if w[1].start < u[1].stop and w[1].stop > v[1].start:
            return 1
        return 0

    # for right-to-left text the horizontal precedence is inverted
    if text_direction == 'rl':
        def horizontal_order(u, v):
            return not _left_of(u, v)
    else:
        horizontal_order = _left_of

    for i, u in enumerate(lines):
        for j, v in enumerate(lines):
            if _x_overlaps(u, v):
                if _above(u, v):
                    order[i, j] = 1
            else:
                # no horizontal overlap: u precedes v when no third line
                # separates them and u is on the reading-direction side
                if [w for w in lines if _separates(w, u, v)] == []:
                    if horizontal_order(u, v):
                        order[i, j] = 1
    return order
def topsort(order: np.array) -> np.array:
    """Given a binary array defining a partial order (o[i,j]==True means i<j),
    compute a topological sort. This is a quick and dirty implementation
    that works for up to a few thousand elements."""
    logger.info('Perform topological sort on partially ordered lines')
    count = len(order)
    seen = np.zeros(count)
    result = []

    def _visit_post_order(node):
        # depth-first post-order: emit all predecessors before the node itself
        if seen[node]:
            return
        seen[node] = 1
        predecessors, = np.nonzero(np.ravel(order[:, node]))
        for pred in predecessors:
            _visit_post_order(pred)
        result.append(node)

    for node in range(count):
        _visit_post_order(node)
    return result
def denoising_hysteresis_thresh(im, low, high, sigma):
    # Gaussian-smooth the input to suppress noise before hysteresis
    # thresholding with the (low, high) thresholds.
    im = gaussian_filter(im, sigma)
    return apply_hysteresis_threshold(im, low, high)
def _find_superpixels(skeleton, heatmap, min_sp_dist):
    """
    Samples superpixel candidates from a skeletonized heatmap.

    Returns an array of (row, col) candidates that are at least `min_sp_dist`
    apart, picked in order of descending heatmap confidence, or an empty
    array when no candidates exist.
    """
    logger.debug('Finding superpixels')
    # restrict confidences to the skeleton pixels
    conf_map = heatmap * skeleton
    # indices of all pixels, sorted by descending confidence
    sp_idx = np.unravel_index(np.argsort(1.-conf_map, axis=None), conf_map.shape)
    if not sp_idx[0].any():
        logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
        return np.empty(0)
    # position of the first minimal-confidence pixel in the sorted order;
    # everything from there on is background
    zeroes_idx = conf_map[sp_idx].argmin()
    if not zeroes_idx:
        logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
        return np.empty(0)
    sp_idx = sp_idx[0][:zeroes_idx], sp_idx[1][:zeroes_idx]
    # greedily keep candidates that lie at least min_sp_dist away from every
    # previously accepted candidate
    sp_can = [(sp_idx[0][0], sp_idx[1][0])]
    for x in range(len(sp_idx[0])):
        loc = np.array([[sp_idx[0][x], sp_idx[1][x]]])
        if min(cdist(sp_can, loc)) > min_sp_dist:
            sp_can.extend(loc.tolist())
    return np.array(sp_can)
def _compute_sp_states(sp_can, bl_map, sep_map):
    """
    Estimates the superpixel state information.

    Triangulates the superpixel candidates and records, for every edge of
    the triangulation, intensity statistics sampled along the connecting
    line in the baseline and separator maps; edges with weak or noisy
    baseline response or high separator affinity are dropped.

    Returns:
        dict mapping sorted edge tuples to
        (bl mean, bl variance, sep mean, sep max).
    """
    logger.debug('Triangulating superpixels')
    tri = Delaunay(sp_can, qhull_options="QJ Pp")
    indices, indptr = tri.vertex_neighbor_vertices
    # dict mapping each edge to its intensity. Needed for subsequent clustering step.
    intensities = {}
    # radius of circular environment around SP for ILD estimation
    logger.debug('Computing superpixel state information')
    for vertex in range(len(sp_can)):
        # look up neighboring indices
        neighbors = tri.points[indptr[indices[vertex]:indices[vertex+1]]]
        # calculate intensity of line segments to neighbors in both bl map and separator map
        # NOTE(review): `intensity` is filled but never used afterwards.
        intensity = []
        for nb in neighbors.astype('int'):
            # sort the endpoints so each undirected edge is stored once
            key = [tuple(sp_can[vertex]), tuple(nb)]
            key.sort()
            key = tuple(key)
            line_locs = draw.line(*(key[0] + key[1]))
            intensities[key] = (bl_map[line_locs].mean(), bl_map[line_locs].var(), sep_map[line_locs].mean(), sep_map[line_locs].max())
            intensity.append(intensities[key][0])
    logger.debug('Filtering triangulation')
    # filter edges in triangulation
    for k, v in list(intensities.items()):
        # weak mean baseline response
        if v[0] < 0.4:
            del intensities[k]
            continue
        # noisy response along the segment
        if v[1] > 5e-02:
            del intensities[k]
            continue
        # filter edges with high separator affinity
        if v[2] > 0.125 or v[3] > 0.25 or v[0] < 0.5:
            del intensities[k]
            continue
    return intensities
def _cluster_lines(intensities):
    """
    Clusters triangulation edges into connected baselines.

    Args:
        intensities (dict): Mapping of edges (tuples of two point tuples) to
                            intensity statistics; only the keys are used.

    Returns:
        A list of clusters. Index 0 holds the (consumed) work list; the
        actual clusters, each a list of edges, start at index 1.
    """
    edge_list = list(intensities.keys())

    def _point_in_cluster(p):
        # index of the cluster containing point p, 0 if unassigned
        for idx, cluster in enumerate(clusters[1:]):
            if p in [point for edge in cluster for point in edge]:
                return idx+1
        return 0

    # cluster
    logger.debug('Computing clusters')
    n = 0
    clusters = [edge_list]
    # repeat until a full pass over the remaining edges assigns nothing new
    while len(edge_list) != n:
        n = len(edge_list)
        # iterate over a snapshot: edge_list is mutated inside the loop and
        # removing from the live list would silently skip elements
        for edge in list(edge_list):
            cl_p0 = _point_in_cluster(edge[0])
            cl_p1 = _point_in_cluster(edge[1])
            # new cluster case
            if not cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters.append([edge])
            # extend case
            elif cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters[cl_p0].append(edge)
            elif cl_p1 and not cl_p0:
                edge_list.remove(edge)
                clusters[cl_p1].append(edge)
            # merge case
            elif cl_p0 != cl_p1 and cl_p0 and cl_p1:
                edge_list.remove(edge)
                clusters[min(cl_p0, cl_p1)].extend(clusters.pop(max(cl_p0, cl_p1)))
                clusters[min(cl_p0, cl_p1)].append(edge)
    return clusters
def _interpolate_lines(clusters):
    """
    Interpolates the baseline clusters into dense polylines.

    Args:
        clusters (list): Cluster list as produced by _cluster_lines; the
                         first (work list) entry is skipped.

    Returns:
        A list of lists of (x, y) integer points, one per cluster.
    """
    logger.debug('Reticulating splines')
    lines = []
    for cluster in clusters[1:]:
        # unique cluster points sorted by x; points are stored as (y, x)
        points = sorted(set(point for edge in cluster for point in edge), key=lambda x: x[1])
        x = [x[1] for x in points]
        y = [x[0] for x in points]
        # very short lines might not have enough superpixels to ensure a well-conditioned regression
        deg = min(len(x)-1, 3)
        poly = Polynomial.fit(x, y, deg=deg)
        # sample count must be an int: np.linspace rejects the float that
        # np.diff(...) // deg produces
        num = int(max(np.diff(poly.domain).item() // deg, 2))
        xp, yp = poly.linspace(num)
        xp = xp.astype('int')
        yp = yp.astype('int')
        lines.append(list(zip(xp, yp)))
    return lines
def vectorize_lines(im: np.ndarray, threshold: float = 0.2, min_sp_dist: int = 10):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (3, H, W) with the first dimension
                         being a probability distribution over (background,
                         baseline, separators).
        threshold (float): Binarization threshold for the probability maps.
        min_sp_dist (int): Minimal distance between superpixel candidates.

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the points of all baseline polylines.
    """
    # split the network output into baseline and separator channels
    bl_map = im[1]
    sep_map = im[2]
    # binarize and skeletonize the baseline channel
    skel = skeletonize(im[1] > threshold)
    sp_can = _find_superpixels(skel, heatmap=bl_map, min_sp_dist=min_sp_dist)
    if not sp_can.size:
        logger.warning('No superpixel candidates found in network output. Likely empty page.')
        return []
    states = _compute_sp_states(sp_can, bl_map, sep_map)
    return _interpolate_lines(_cluster_lines(states))
def calculate_polygonal_environment(im: PIL.Image.Image, baselines: Sequence[Tuple[int, int]]):
    """
    Given a list of baselines and an input image, calculates a polygonal
    environment around each baseline.

    Args:
        im (PIL.Image): Input image
        baselines (sequence): List of lists containing a single baseline per
                              entry.

    Returns:
        List of polygonizations, one coordinate list per baseline.
    """
    bounds = np.array(im.size, dtype=float)
    im = np.array(im)
    # compute tophat features of input image
    im_feats = black_tophat(im, 3)

    def _ray_intersect_boundaries(ray, direction, aabb):
        """
        Simplified version of [0] for 2d and AABB anchored at (0,0).
        [0] http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
        """
        dir_fraction = np.empty(2, dtype=ray.dtype)
        dir_fraction[direction == 0.0] = np.inf
        dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0])
        t1 = (-ray[0]) * dir_fraction[0]
        t2 = (aabb[0] - ray[0]) * dir_fraction[0]
        t3 = (-ray[1]) * dir_fraction[1]
        t4 = (aabb[1] - ray[1]) * dir_fraction[1]
        tmin = max(min(t1, t2), min(t3, t4))
        tmax = min(max(t1, t2), max(t3, t4))
        # nearest non-negative parameter gives the boundary hit
        t = min(x for x in [tmin, tmax] if x >= 0)
        return ray + (direction * t)

    def _extract_patch(env_up, env_bottom, baseline):
        """
        Calculate a line image patch from a ROI and the original baseline.
        """
        # seed the watershed: baseline pixels get label 2, both envelopes 1
        markers = np.zeros(bounds.astype('int')[::-1], dtype=int)
        for l in zip(baseline[:-1], baseline[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 2
        for l in zip(env_up[:-1], env_up[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        for l in zip(env_bottom[:-1], env_bottom[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        markers = grey_dilation(markers, size=3)
        # restrict the watershed to the area enclosed by the two envelopes
        full_polygon = np.concatenate((env_up, env_bottom[::-1]))
        r, c = draw.polygon(full_polygon[:,0], full_polygon[:,1])
        mask = np.zeros(bounds.astype('int')[::-1], dtype=bool)
        mask[c, r] = True
        patch = im_feats.copy()
        patch[mask != True] = 0
        # crop everything to the ROI bounding box to keep the watershed cheap
        coords = np.argwhere(mask)
        r_min, c_min = coords.min(axis=0)
        r_max, c_max = coords.max(axis=0)
        patch = patch[r_min:r_max+1, c_min:c_max+1]
        markers = markers[r_min:r_max+1, c_min:c_max+1]
        mask = mask[r_min:r_max+1, c_min:c_max+1]
        # run watershed
        ws = watershed(patch, markers, 8, mask=mask)
        ws = grey_dilation(ws, size=3)
        # pad output to ensure contour is closed
        ws = np.pad(ws, 1)
        # find contour of central basin
        contours = find_contours(ws, 1.5, fully_connected='high')
        contour = np.array(unary_union([geom.Polygon(contour.tolist()) for contour in contours]).boundary, dtype='uint')
        # approximate + remove offsets + transpose
        contour = np.transpose((approximate_polygon(contour, 5)-1+(r_min, c_min)), (0, 1)).astype('uint')
        return contour.tolist()

    polygons = []
    for idx, line in enumerate(baselines):
        # find intercepts with image bounds on each side of baseline
        lr = np.array(line[:2], dtype=float)
        lr_dir = lr[1] - lr[0]
        lr_dir = (lr_dir.T / np.sqrt(np.sum(lr_dir**2,axis=-1)))
        lr_up_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(-1,1))[::-1], bounds-1).astype('int')
        lr_bottom_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(1,-1))[::-1], bounds-1).astype('int')
        rr = np.array(line[-2:], dtype=float)
        rr_dir = rr[1] - rr[0]
        rr_dir = (rr_dir.T / np.sqrt(np.sum(rr_dir**2,axis=-1)))
        rr_up_intersect = _ray_intersect_boundaries(rr[0], (rr_dir*(-1,1))[::-1], bounds-1).astype('int')
        rr_bottom_intersect = _ray_intersect_boundaries(rr[0], (rr_dir*(1,-1))[::-1], bounds-1).astype('int')
        # build polygon between baseline and bbox intersects
        upper_polygon = geom.Polygon([lr_up_intersect.tolist()] + line + [rr_up_intersect.tolist()])
        bottom_polygon = geom.Polygon([lr_bottom_intersect.tolist()] + line + [rr_bottom_intersect.tolist()])
        # select baselines at least partially in each polygon
        side_a = [geom.LineString([lr_up_intersect.tolist(), rr_up_intersect.tolist()])]
        side_b = [geom.LineString([lr_bottom_intersect.tolist(), rr_bottom_intersect.tolist()])]
        for adj_line in baselines[:idx] + baselines[idx+1:]:
            adj_line = geom.LineString(adj_line)
            if upper_polygon.intersects(adj_line):
                side_a.append(adj_line)
            elif bottom_polygon.intersects(adj_line):
                side_b.append(adj_line)
        side_a = unary_union(side_a)
        side_b = unary_union(side_b)
        env_up = []
        env_bottom = []
        # find nearest points from baseline to previously selected baselines
        for point in line:
            _, upper_limit = nearest_points(geom.Point(point), side_a)
            _, bottom_limit = nearest_points(geom.Point(point), side_b)
            env_up.extend(list(upper_limit.coords))
            env_bottom.extend(list(bottom_limit.coords))
        env_up = np.array(env_up, dtype='uint')
        env_bottom = np.array(env_bottom, dtype='uint')
        polygons.append(_extract_patch(env_up, env_bottom, line))
    return polygons
def polygonal_reading_order(lines: Sequence[Tuple[List, List]], text_direction: str = 'lr') -> Sequence[Tuple[List, List]]:
    """
    Given a list of baselines, calculates the correct reading order and applies
    it to the input.

    Args:
        lines (Sequence): List of tuples containing the baseline and its
                          polygonization.
        text_direction (str): Set principal text direction for column ordering.
                              Can be 'lr' or 'rl'.

    Returns:
        A reordered input.
    """
    boxes = []
    for entry in lines:
        minx, miny, maxx, maxy = geom.LineString(entry[0]).bounds
        # NOTE(review): this packs (minx, miny)/(maxx, maxy) pairs while
        # reading_order expects 2D slices -- preserved verbatim, but the
        # axis pairing looks suspicious and deserves a second look.
        boxes.append((slice(minx, miny), slice(maxx, maxy)))
    return [lines[i] for i in topsort(reading_order(boxes, text_direction))]
def scale_polygonal_lines(lines: Sequence[Tuple[List, List]], scale: Union[float, Tuple[float, float]]) -> Sequence[Tuple[List, List]]:
    """
    Scales baselines/polygon coordinates by a certain factor.

    Args:
        lines (Sequence): List of tuples containing the baseline and its
                          polygonization.
        scale (float or tuple of floats): Scaling factor; a single float is
                                          applied uniformly to both axes.

    Returns:
        List of tuples with scaled integer coordinates.
    """
    factor = (scale, scale) if isinstance(scale, float) else scale
    result = []
    for baseline, polygon in lines:
        scaled_bl = (np.array(baseline) * factor).astype('int').tolist()
        scaled_pl = (np.array(polygon) * factor).astype('int').tolist()
        result.append((scaled_bl, scaled_pl))
    return result
def _test_intersect(bp, uv, bs):
    """
    Returns the intersection points of a ray with direction `uv` from
    `bp` with a polygon `bs`.

    Args:
        bp: 2d ray origin.
        uv: unit direction vector of the ray.
        bs: polygon vertices as an (n, 2) array.

    Returns:
        Flat array of intersection coordinates, one intersection per casting
        direction (first along uv*(1,-1), then along uv*(-1,1)).
    """
    # np.roll by 2 on the flattened (n, 2) array shifts by one whole vertex,
    # pairing each vertex with its predecessor: v holds the edge vectors,
    # u the vectors from each edge start to the ray origin.
    u = bp - np.roll(bs, 2)
    v = bs - np.roll(bs, 2)
    points = []
    # cast the ray on both sides of the origin
    for dir in ((1,-1), (-1,1)):
        # vector perpendicular to the (flipped) ray direction
        w = (uv * dir * (1,-1))[::-1]
        z = np.dot(v, w)
        # parametric positions: t1 along the ray, t2 along each edge
        t1 = np.cross(v, u) / z
        t2 = np.dot(u, w) / z
        # keep only intersections lying on an actual edge segment (0 <= t2 <= 1)
        t1 = t1[np.logical_and(t2 >= 0.0, t2 <= 1.0)]
        # pick the first remaining intersection in front of the origin (t1 >= 0)
        points.extend(bp + (t1[np.where(t1 >= 0)[0].min()] * (uv * dir)))
    return np.array(points)
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (dict): Either a dict with 'type': 'baselines' and a 'lines'
                       list of dicts carrying 'baseline' and 'boundary'
                       coordinate lists, or a dict with a 'boxes' list of
                       (x1, y1, x2, y2) tuples plus a 'text_direction' key.

    Yields:
        (PIL.Image, line-or-box) tuples of the extracted subimage and the
        entry it was extracted from.
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        # NOTE(review): numpy error state is silenced here but never restored
        # from old_settings.
        old_settings = np.seterr(all='ignore')
        siz = im.size
        white = Image.new(im.mode, siz)
        for line in bounds['lines']:
            # mask out everything outside the polygonal boundary
            mask = Image.new('1', siz, 0)
            draw = ImageDraw.Draw(mask)
            draw.polygon([tuple(x) for x in line['boundary']], outline=1, fill=1)
            masked_line = Image.composite(im, white, mask)
            bl = np.array(line['baseline'])
            # pair consecutive baseline points into segments; compute segment
            # midpoints and unit normals
            ls = np.dstack((bl[:-1:], bl[1::]))
            bisect_points = np.mean(ls, 2)
            norm_vec = (ls[...,1] - ls[...,0])[:,::-1]
            norm_vec_len = np.sqrt(np.sum(norm_vec**2, axis=1))
            # normals are not yet oriented; _test_intersect multiplies by
            # (1,-1) for the upper and (-1,1) for the lower side
            unit_vec = norm_vec / np.tile(norm_vec_len, (2, 1)).T
            # NOTE(review): reassigning `bounds` shadows the function argument;
            # safe only because the loop iterator was created above.
            bounds = np.array(line['boundary'])
            # intersect each midpoint normal with the boundary polygon
            src_points = np.stack([_test_intersect(bp, uv, bounds) for bp, uv in zip(bisect_points, unit_vec)])
            # distances from midpoints to the upper/lower boundary hits
            upper_dist = np.diag(distance_matrix(src_points[:,:2], bisect_points))
            upper_dist = np.dstack((np.zeros_like(upper_dist), upper_dist)).squeeze(0)
            lower_dist = np.diag(distance_matrix(src_points[:,2:], bisect_points))
            lower_dist = np.dstack((np.zeros_like(lower_dist), lower_dist)).squeeze(0)
            # map baseline points to straight baseline (cumulative arc length
            # from the first point)
            bl_dists = np.cumsum(np.diag(np.roll(squareform(pdist(bl)), 1)))
            bl_dst_pts = bl[0] + np.dstack((bl_dists, np.zeros_like(bl_dists))).squeeze(0)
            rect_bisect_pts = np.mean(np.dstack((bl_dst_pts[:-1:], bl_dst_pts[1::])), 2)
            upper_dst_pts = rect_bisect_pts - upper_dist
            lower_dst_pts = rect_bisect_pts + lower_dist
            src_points = np.concatenate((bl, src_points[:,:2], src_points[:,2:]))
            dst_points = np.concatenate((bl_dst_pts, upper_dst_pts, lower_dst_pts))
            # dewarp the masked line onto the straightened baseline
            tform = PiecewiseAffineTransform()
            tform.estimate(src_points, dst_points)
            i = Image.fromarray((warp(masked_line, tform) * 255).astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            # NOTE(review): these are lexicographic list comparisons, not
            # element-wise bounds checks -- confirm intent.
            if (box < [0, 0, 0, 0] or box[::2] > [im.size[0], im.size[0]] or
                    box[1::2] > [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(box, im.size))
                # NOTE(review): KrakenInputException is not imported in the
                # visible import block -- confirm the import exists upstream.
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPygobject(PythonPackage):
    """bindings for the GLib, and GObject,
    to be used in Python."""

    homepage = "https://pypi.python.org/pypi/pygobject"

    version('3.28.3', '3bac63c86bb963aa401f97859464aa90')
    version('2.28.6', '9415cb7f2b3a847f2310ccea258b101e')
    # 2.28.3 is fetched as a bzip2 tarball from an explicit GNOME mirror URL
    version('2.28.3', 'aa64900b274c4661a5c32e52922977f9',
            url='http://ftp.gnome.org/pub/GNOME/sources/pygobject/2.28/pygobject-2.28.3.tar.bz2')

    extends('python')
    # NOTE(review): ('build') is just the string 'build' -- the parentheses
    # are redundant, not a tuple.
    depends_on('py-setuptools', type=('build'))
    depends_on("libffi")
    depends_on('glib')
    # the 2.x series only supports Python 2
    depends_on('python@2:2.99', when='@2:2.99', type=('build', 'run'))
    depends_on('py-pycairo', type=('build', 'run'), when='@3:')
    depends_on('py-py2cairo', type=('build', 'run'), when='@2:2.99')
    depends_on('gobject-introspection')
    depends_on('gtkplus', when='@3:')

    patch('pygobject-2.28.6-introspection-1.patch', when='@2.28.3:2.28.6')
    # patch from https://raw.githubusercontent.com/NixOS/nixpkgs/master/pkgs/development/python-modules/pygobject/pygobject-2.28.6-gio-types-2.32.patch
    # for https://bugzilla.gnome.org/show_bug.cgi?id=668522
    patch('pygobject-2.28.6-gio-types-2.32.patch', when='@2.28.6')

    def url_for_version(self, version):
        # release tarballs live under a major.minor subdirectory on the mirror
        url = 'http://ftp.gnome.org/pub/GNOME/sources/pygobject'
        return url + '/%s/pygobject-%s.tar.xz' % (version.up_to(2), version)

    # pygobject version 2 requires an autotools build
    @when('@2:2.99')
    def build(self, spec, prefix):
        configure('--prefix=%s' % spec.prefix)

    @when('@2:2.99')
    def install(self, spec, prefix):
        # NOTE(review): serial install -- presumably the autotools install
        # target is not parallel-safe; confirm before changing.
        make('install', parallel=False)

    @when('^python@3:')
    def patch(self):
        # comment out the Pycairo_IMPORT invocation when building against
        # Python 3
        filter_file(
            r'Pycairo_IMPORT',
            r'//Pycairo_IMPORT',
            'gi/pygi-foreign-cairo.c')
py-pygobject: add missing pkgconfig dependency (#9955)
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPygobject(PythonPackage):
    """bindings for the GLib, and GObject,
    to be used in Python."""

    homepage = "https://pypi.python.org/pypi/pygobject"

    version('3.28.3', '3bac63c86bb963aa401f97859464aa90')
    version('2.28.6', '9415cb7f2b3a847f2310ccea258b101e')
    # 2.28.3 is fetched as a bzip2 tarball from an explicit GNOME mirror URL
    version('2.28.3', 'aa64900b274c4661a5c32e52922977f9',
            url='http://ftp.gnome.org/pub/GNOME/sources/pygobject/2.28/pygobject-2.28.3.tar.bz2')

    extends('python')
    depends_on('py-setuptools', type='build')
    # the build locates glib and friends through pkg-config
    depends_on('pkgconfig', type='build')
    depends_on("libffi")
    depends_on('glib')
    # the 2.x series only supports Python 2
    depends_on('python@2:2.99', when='@2:2.99', type=('build', 'run'))
    depends_on('py-pycairo', type=('build', 'run'), when='@3:')
    depends_on('py-py2cairo', type=('build', 'run'), when='@2:2.99')
    depends_on('gobject-introspection')
    depends_on('gtkplus', when='@3:')

    patch('pygobject-2.28.6-introspection-1.patch', when='@2.28.3:2.28.6')
    # patch from https://raw.githubusercontent.com/NixOS/nixpkgs/master/pkgs/development/python-modules/pygobject/pygobject-2.28.6-gio-types-2.32.patch
    # for https://bugzilla.gnome.org/show_bug.cgi?id=668522
    patch('pygobject-2.28.6-gio-types-2.32.patch', when='@2.28.6')

    def url_for_version(self, version):
        # release tarballs live under a major.minor subdirectory on the mirror
        url = 'http://ftp.gnome.org/pub/GNOME/sources/pygobject'
        return url + '/%s/pygobject-%s.tar.xz' % (version.up_to(2), version)

    # pygobject version 2 requires an autotools build
    @when('@2:2.99')
    def build(self, spec, prefix):
        configure('--prefix=%s' % spec.prefix)

    @when('@2:2.99')
    def install(self, spec, prefix):
        # NOTE(review): serial install -- presumably the autotools install
        # target is not parallel-safe; confirm before changing.
        make('install', parallel=False)

    @when('^python@3:')
    def patch(self):
        # comment out the Pycairo_IMPORT invocation when building against
        # Python 3
        filter_file(
            r'Pycairo_IMPORT',
            r'//Pycairo_IMPORT',
            'gi/pygi-foreign-cairo.c')
|
#!/usr/bin/env python
import sys
import locale
import argparse
import tweepy
def write(data, stream=sys.stdout):
    """
    Write `data` to `stream` and flush it immediately.

    Broken pipes (IOError) are handled silently: the stream is closed and
    any error raised while closing is ignored as well.
    """
    try:
        stream.write(data)
        stream.flush()
    except IOError:
        try:
            stream.close()
        except IOError:
            pass
def readline(stream_in):
    """
    Yield stripped, decoded lines read from `stream_in`.

    Undecodable lines are skipped; iteration stops at the first empty line
    or on Ctrl-C.
    """
    while True:
        try:
            line = stream_in.readline().decode(stream_in.encoding or locale.getpreferredencoding(True)).strip()
        except UnicodeDecodeError:
            # skip undecodable input instead of aborting
            continue
        except KeyboardInterrupt:
            break
        if not line:
            break
        yield line
    # No explicit `raise StopIteration` here: inside a generator PEP 479
    # turns it into a RuntimeError; falling off the end stops iteration.
def setup_adorn(data, asked):
    """
    Append a " i/N" counter to every line, trimming (or leaving untouched,
    when ignoring) lines that would exceed asked.max_len.

    Reads the whole input into memory, since the total count N is needed
    before any line can be formatted.
    """
    assert(asked.adorn)
    # unroll everything
    data = list(data)
    nb = len(data)
    for i, line in enumerate(data):
        adorn = " %i/%i" % (i+1, nb)
        curmax = asked.max_len - len(adorn)
        if len(line) > curmax:
            if asked.ignore:
                # leave over-long lines untouched (the counter is still added)
                data[i] = line
            elif asked.trim:
                data[i] = line[:curmax]
        data[i] = data[i] + adorn
    return data
def setup_hem(data, asked):
    """
    Yield lines, trimming over-long ones to asked.max_len when trimming is
    asked and leaving them untouched when ignoring is asked.
    """
    for item in data:
        too_long = len(item) > asked.max_len
        if too_long and not asked.ignore and asked.trim:
            yield item[:asked.max_len]
        else:
            yield item
def setup(data, asked):
    """
    Dispatch to the configured line-preparation generator.
    """
    chosen = setup_adorn if asked.adorn else setup_hem
    for prepared in chosen(data, asked):
        yield prepared
def on_stdout(data, asked, endline="\n"):
    """
    Yield the prepared lines terminated by `endline`; when chaining is
    asked, each line is indented one extra space.
    """
    # You can do something on the whole set of lines if you want.
    prefix = " " if asked.chain else ""
    for rank, text in enumerate(setup(data, asked)):
        if endline and text[-1] != endline:
            text += endline
        yield prefix * rank + text
def on_twitter( data, api, asked, endline="\n" ):
    """
    Post each prepared line as a tweet through `api`; when chaining is
    asked, every tweet replies to the previous one.
    """
    lines = setup(data, asked)
    prev_status_id = None
    for line in lines:
        # API.update_status(status[, in_reply_to_status_id][, lat][, long][, source][, place_id])
        status = api.update_status(line, prev_status_id)
        if asked.chain:
            prev_status_id = status.id
        yield status.text + endline
def operate( func, *args ):
    # Pipe standard input through `func` and write whatever it yields to
    # standard output.
    for line in func( readline(sys.stdin), *args ):
        write(line)
# Command line interface and API dispatch.
usage = "Post text read on the standard input to a website or an API."
apis =["stdout", "twitter"] # TODO "http_post", "http_get",
parser = argparse.ArgumentParser( description=usage,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-a", "--api", choices=apis, default="stdout",
        help="Name of the API to use.")

# Generic options
parser.add_argument("-m", "--max-len", metavar="MAXLEN", type=int, default=140,
        help="Maximum number of characters in the lines.")
parser.add_argument("-i", "--ignore", action="store_true",
        help="Ignore lines that are longer than MAXLEN")
parser.add_argument("-t", "--trim", action="store_true",
        help="Trim down lines that are longer than MAXLEN.")
parser.add_argument("-d", "--adorn", action="store_true",
        help="Add a counter of the form \" x/N\" at the end of the lines, \
with N being the number of lines read and x the current index of the line. \
NOTE: this necessitate to read all the input before processing it.")
parser.add_argument("-q", "--quiet", action="store_true",
        help="Do not print errors and warnings on the standard error output.")
# TODO option: rate at which to post lines

# API-dependant options
parser.add_argument("-c", "--chain", action="store_true",
        help="Chained actions. Whatever that means depends on the chosen API.")
asked = parser.parse_args()

# Consistency checks
if asked.ignore and asked.trim:
    if not asked.quiet:
        sys.stderr.write("WARNING: asking to trim AND to ignore is not logical, I will ignore.")
    # NOTE(review): the warning announces "I will ignore" but the assert
    # below aborts instead -- confirm which behavior is intended.
    assert( not (asked.ignore and asked.trim) )

# APIs
if asked.api == "stdout":
    operate( on_stdout, asked )
elif asked.api == "twitter":
    # Python 2 module name; on Python 3 this is `configparser`.
    import ConfigParser
    config = ConfigParser.RawConfigParser()
    config.read('twitter.conf')
    consumer_key = config.get("Auth","key")
    consumer_secret = config.get("Auth","key_secret")
    try:
        verifier_code = config.get("Auth","code")
    # NOTE(review): bare except hides real errors; narrowing it to
    # ConfigParser.NoOptionError would be safer. Also, when the "code"
    # option exists, access_token/access_token_secret are never bound and
    # the set_access_token call below raises NameError -- this looks like
    # an unfinished OAuth verifier flow.
    except:
        access_token = config.get("Auth","token")
        access_token_secret = config.get("Auth","token_secret")
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret, "https://api.twitter.com/1.1/")
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    operate( on_twitter, api, asked )
Add an option to upload images on twitter.
#!/usr/bin/env python
import sys
import locale
import argparse
import tweepy
def write(data, stream=sys.stdout):
    """
    Push `data` to `stream`, flushing right away.

    A broken pipe (IOError) is swallowed: the stream is closed and any
    IOError raised by the close is ignored too.
    """
    try:
        stream.write(data)
        stream.flush()
    except IOError:
        try:
            stream.close()
        except IOError:
            pass
def readline(stream_in):
    """
    Yield stripped, decoded lines read from `stream_in`.

    Undecodable lines are skipped; iteration stops at the first empty line
    or on Ctrl-C.
    """
    while True:
        try:
            line = stream_in.readline().decode(stream_in.encoding or locale.getpreferredencoding(True)).strip()
        except UnicodeDecodeError:
            # skip undecodable input instead of aborting
            continue
        except KeyboardInterrupt:
            break
        if not line:
            break
        yield line
    # No explicit `raise StopIteration` here: inside a generator PEP 479
    # turns it into a RuntimeError; falling off the end stops iteration.
def setup_adorn(data, asked):
    """
    Append a " i/N" counter to every line, trimming (or leaving untouched,
    when ignoring) lines that would exceed asked.max_len.

    Reads the whole input into memory, since the total count N is needed
    before any line can be formatted.
    """
    assert(asked.adorn)
    # unroll everything
    data = list(data)
    nb = len(data)
    for i, line in enumerate(data):
        adorn = " %i/%i" % (i+1, nb)
        curmax = asked.max_len - len(adorn)
        if len(line) > curmax:
            if asked.ignore:
                # leave over-long lines untouched (the counter is still added)
                data[i] = line
            elif asked.trim:
                data[i] = line[:curmax]
        data[i] = data[i] + adorn
    return data
def setup_hem(data, asked):
    """
    Yield lines, trimming over-long ones to asked.max_len when trimming is
    asked and leaving them untouched when ignoring is asked.
    """
    for item in data:
        too_long = len(item) > asked.max_len
        if too_long and not asked.ignore and asked.trim:
            yield item[:asked.max_len]
        else:
            yield item
def setup(data, asked):
    """
    Dispatch to the configured line-preparation generator.
    """
    chosen = setup_adorn if asked.adorn else setup_hem
    for prepared in chosen(data, asked):
        yield prepared
def on_stdout(data, asked, endline="\n"):
    """
    Yield the prepared lines terminated by `endline`; when chaining is
    asked, each line is indented one extra space.
    """
    # You can do something on the whole set of lines if you want.
    prefix = " " if asked.chain else ""
    for rank, text in enumerate(setup(data, asked)):
        if endline and text[-1] != endline:
            text += endline
        yield prefix * rank + text
def on_twitter(data, api, asked, endline="\n"):
    """
    Post each prepared line as a tweet through `api`.

    Images given with --twitter-images are attached to the tweets in
    sequence; when chaining is asked, every tweet replies to the previous
    one.
    """
    lines = setup(data, asked)
    prev_status_id = None
    # Work on a copy: asked.twitter_images defaults to None when the option
    # is absent (reverse() would raise AttributeError) and reversing in
    # place would mutate the caller's list.
    images = list(asked.twitter_images or [])
    images.reverse()
    for line in lines:
        img = images.pop() if images else None
        if img:
            # API.update_with_media(filename[, status][, in_reply_to_status_id][, lat][, long][, source][, place_id][, file])
            status = api.update_with_media(img, line, prev_status_id)
        else:
            # API.update_status(status[, in_reply_to_status_id][, lat][, long][, source][, place_id])
            status = api.update_status(line, prev_status_id)
        if asked.chain:
            prev_status_id = status.id
        yield status.text + endline
def operate(func, *args):
    """Pump lines from stdin through *func* and write out everything it yields."""
    source = readline(sys.stdin)
    for produced in func(source, *args):
        write(produced)
# ---- Command-line interface and dispatch ----
usage = "Post text read on the standard input to a website or an API."
apis =["stdout", "twitter"] # TODO "http_post", "http_get",
parser = argparse.ArgumentParser( description=usage,
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-a", "--api", choices=apis, default="stdout",
    help="Name of the API to use.")
# Generic options
parser.add_argument("-m", "--max-len", metavar="MAXLEN", type=int, default=140,
    help="Maximum number of characters in the lines.")
parser.add_argument("-i", "--ignore", action="store_true",
    help="Ignore lines that are longer than MAXLEN")
parser.add_argument("-t", "--trim", action="store_true",
    help="Trim down lines that are longer than MAXLEN.")
parser.add_argument("-d", "--adorn", action="store_true",
    help="Add a counter of the form \" x/N\" at the end of the lines, \
with N being the number of lines read and x the current index of the line. \
NOTE: this necessitate to read all the input before processing it.")
parser.add_argument("-q", "--quiet", action="store_true",
    help="Do not print errors and warnings on the standard error output.")
# TODO option: rate at which to post lines
# API-dependant options
parser.add_argument("-c", "--chain", action="store_true",
    help="Chained actions. Whatever that means depends on the chosen API.")
# Twitter
parser.add_argument("--twitter-images", metavar="FILENAME(S)", nargs="+", type=str,
    help="Upload each given image files along with the corresponding tweets in sequence. If there are more images than tweets, they are silently ignored.")
asked = parser.parse_args()
# Consistency checks
# --trim and --ignore are mutually exclusive: warn (unless --quiet), then abort
# via the assert below.
if asked.ignore and asked.trim:
    if not asked.quiet:
        sys.stderr.write("WARNING: asking to trim AND to ignore is not logical, I will ignore.")
    assert( not (asked.ignore and asked.trim) )
# --twitter-images only makes sense together with the twitter API.
if asked.twitter_images:
    if not asked.api == "twitter":
        if not asked.quiet:
            sys.stderr.write("WARNING: asking to upload images on twitter while not using the twitter API is not logical, I will ignore.")
        assert( not (asked.twitter_images and not asked.api=="twitter") )
    else: # Test readable images
        # Fail early (exit code 5) if any image file cannot be opened and read.
        # NOTE(review): under Python 2, open() raises IOError, which is not an
        # OSError subclass there -- confirm the intended interpreter version.
        cannot=[]
        for img in asked.twitter_images:
            try:
                with open(img) as f:
                    f.read()
            except OSError:
                cannot.append(img)
        if cannot:
            print("Cannot open the following image files, I will not continue: ")
            for img in cannot:
                print(img)
            sys.exit(5) # I/O Error
# APIs
if asked.api == "stdout":
    operate( on_stdout, asked )
elif asked.api == "twitter":
    # Twitter credentials come from the [Auth] section of ./twitter.conf.
    import ConfigParser
    config = ConfigParser.RawConfigParser()
    config.read('twitter.conf')
    consumer_key = config.get("Auth","key")
    consumer_secret = config.get("Auth","key_secret")
    try:
        verifier_code = config.get("Auth","code")
    except:
        # Bare except: any config error (typically a missing "code" option)
        # falls back to a stored access token.
        # NOTE(review): when "code" IS present, access_token is never bound and
        # set_access_token below raises NameError -- confirm the intended
        # OAuth flow.
        access_token = config.get("Auth","token")
        access_token_secret = config.get("Auth","token_secret")
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret, "https://api.twitter.com/1.1/")
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    operate( on_twitter, api, asked )
|
from django.contrib import admin
from .models import Artist, Song
# Branding for the Django admin site (browser title and page header).
admin.AdminSite.site_title = 'Chords administration'
admin.AdminSite.site_header = 'Chords Administration'
class ArtistAdmin(admin.ModelAdmin):
    """Admin options for Artist: hide the auto-generated slug, enable a
    search box, and delete instances one at a time."""
    exclude = ['slug']
    actions = ['delete_selected']
    # Enable the admin search box for artists (match on name).
    search_fields = ['name']

    def delete_selected(self, request, queryset):
        # Delete one-by-one so each instance's delete() method runs --
        # bulk queryset deletion would bypass it.  NOTE(review): assumes
        # Artist.delete is overridden in models.py; confirm.
        for artist in queryset:
            artist.delete()
    delete_selected.short_description = 'Delete selected artists (custom)'
class SongAdmin(admin.ModelAdmin):
    """Admin options for Song: grouped edit form, list columns/filters,
    search, and custom publish/unpublish/delete actions."""
    fieldsets = [
        ('General', {'fields': ['title', 'artist', 'genre']}),
        ('User information', {'fields': ['sender'], 'classes': ['collapse']}),
        ('Content', {'fields': ['content', 'tabs', 'video']}),
        ('Published', {'fields': ['published']}),
    ]
    list_display = ['full_title', 'reg_date', 'pub_date', 'published']
    list_filter = ['pub_date', 'reg_date', 'genre', 'tabs']
    # Search songs by their own title and by artist name.  The original
    # searched sender__username/artist__name, which made the search box
    # useless for finding a song by title.
    search_fields = ['title', 'artist__name']
    actions = ['delete_selected', 'publish_songs', 'unpublish_songs']

    def publish_songs(self, request, queryset):
        for song in queryset:
            if not song.published:
                song.publish()
    publish_songs.short_description = 'Publish all selected songs'

    def unpublish_songs(self, request, queryset):
        for song in queryset:
            if song.published:
                song.unpublish()
    unpublish_songs.short_description = 'Unpublish all selected songs'

    def delete_selected(self, request, queryset):
        # Renamed the loop variable (was "artist"): the queryset holds songs.
        # Deleting one-by-one runs each instance's delete() method.
        for song in queryset:
            song.delete()
    delete_selected.short_description = 'Delete selected songs (custom)'
# Hook the customised model admins into the default admin site.
admin.site.register(Artist, ArtistAdmin)
admin.site.register(Song, SongAdmin)
add search for artists in admin panel and fix search for songs
from django.contrib import admin
from .models import Artist, Song
# Branding for the Django admin site (browser title and page header).
admin.AdminSite.site_title = 'Chords administration'
admin.AdminSite.site_header = 'Chords Administration'
class ArtistAdmin(admin.ModelAdmin):
    """Admin options for Artist: hide the auto-generated slug, enable a
    search box, and delete instances one at a time."""
    # Slug is generated automatically, so keep it out of the edit form.
    exclude = ['slug']
    actions = ['delete_selected']
    # Admin search box: match on artist name.
    search_fields = ['name']
    def delete_selected(self, request, queryset):
        # Delete one-by-one so each instance's delete() method runs --
        # bulk queryset deletion would bypass it.  NOTE(review): assumes
        # Artist.delete is overridden in models.py; confirm.
        for artist in queryset:
            artist.delete()
    delete_selected.short_description = 'Delete selected artists (custom)'
class SongAdmin(admin.ModelAdmin):
    """Admin options for Song: grouped edit form, list columns/filters,
    search, and custom publish/unpublish/delete actions."""
    fieldsets = [
        ('General', {'fields': ['title', 'artist', 'genre']}),
        ('User information', {'fields': ['sender'], 'classes': ['collapse']}),
        ('Content', {'fields': ['content', 'tabs', 'video']}),
        ('Published', {'fields': ['published']}),
    ]
    list_display = ['full_title', 'reg_date', 'pub_date', 'published']
    list_filter = ['pub_date', 'reg_date', 'genre', 'tabs']
    # Search songs by their own title and by artist name.
    search_fields = ['title', 'artist__name']
    actions = ['delete_selected', 'publish_songs', 'unpublish_songs']

    def publish_songs(self, request, queryset):
        for song in queryset:
            if not song.published:
                song.publish()
    publish_songs.short_description = 'Publish all selected songs'

    def unpublish_songs(self, request, queryset):
        for song in queryset:
            if song.published:
                song.unpublish()
    unpublish_songs.short_description = 'Unpublish all selected songs'

    def delete_selected(self, request, queryset):
        # Renamed the loop variable (was "artist"): the queryset holds songs.
        # Deleting one-by-one runs each instance's delete() method.
        for song in queryset:
            song.delete()
    delete_selected.short_description = 'Delete selected songs (custom)'
# Hook the customised model admins into the default admin site.
admin.site.register(Artist, ArtistAdmin)
admin.site.register(Song, SongAdmin)
|
from __future__ import print_function
import os, sys
import subprocess
import time
import tempfile
import threading
def _merge_snapshots(list1, list2):
"""
Given a list of snapshots, return a list of
common snapshots (sorted by creation time).
The return list is simply an array of names.
N.B.: Snapshots are assumed to be the same if
they have the same name!
"""
rv = []
# print("list1 names = {}".format([el["Name"] for el in list1]), file=sys.stderr)
# print("list2 names = {}".format([el["Name"] for el in list2]), file=sys.stderr)
if list2:
dict2 = dict((el["Name"], True) for el in list2)
for snapname in [x["Name"] for x in list1]:
# print("Checking snapname {}".format(snapname), file=sys.stderr)
if snapname in dict2:
rv.append(snapname)
else:
pass; #print("\tNot in list2?")
return rv
def _get_snapshots(ds):
    """
    Return a list of snapshots for the given dataset.
    This only works for local ZFS pools, obviously.
    It relies on /sbin/zfs sorting, rather than sorting itself.

    Each entry is {"Name": <snapshot name>, "CreationTime": <unix seconds>},
    oldest first.  Returns [] when the zfs command fails (assumed to mean
    there are no snapshots).
    """
    command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
               "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
               ds]
    print("get_snapshots: {}".format(" ".join(command)), file=sys.stderr)
    try:
        # universal_newlines=True makes check_output return str on both
        # Python 2 and 3; the original received bytes under Python 3 and
        # bytes.split("\n") raised TypeError.
        output = subprocess.check_output(command,
                                         universal_newlines=True).split("\n")
    except subprocess.CalledProcessError:
        # We'll assume this is because there are no snapshots
        return []
    snapshots = []
    for snapshot in output:
        if not snapshot:
            continue
        (name, ctime) = snapshot.rstrip().split()
        # Strip the dataset part: "pool/ds@snap" -> "snap"
        name = name.split('@')[1]
        snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
    return snapshots
class ZFSBackupError(ValueError):
    """Raised for any failure in the backup/replication machinery."""
    pass
class ZFSBackupFilter(object):
    """
    Base class for ZFS backup filters.

    A filter exposes a few informational properties plus start_backup()
    and start_restore().  The start_* methods take a source, which should
    be a pipe, and return the stream to read filtered data from.  Filters
    should generally run in a subprocess or thread unless they are the
    terminus of the pipeline -- doing otherwise risks deadlock.

    This base class is the identity ("null") filter.
    """
    def __init__(self):
        pass

    @property
    def name(self):
        """Human-readable name of the filter."""
        return "Null Filter"

    @property
    def error_output(self):
        """Destination for stderr; the null filter has none."""
        return None

    @error_output.setter
    def error_output(self, e):
        # The null filter discards any error destination it is given.
        return

    @property
    def backup_command(self):
        """Command run during backup (none for the null filter)."""
        return []

    @property
    def restore_command(self):
        """Command run during restore (none for the null filter)."""
        return []

    def start_backup(self, source):
        """Identity behavior on backup: hand the source straight back."""
        return source

    def start_restore(self, source):
        """Identity behavior on restore: hand the source straight back."""
        return source
class ZFSBackupFilterThread(ZFSBackupFilter, threading.Thread):
    """
    Base class for a thread-based filter.  Either it should be
    subclassed (see ZFSBackupFilterCounter below), or it should
    be called with a callable object as the "process=" parameter.
    The process method may need to check ZFSBackupFilterThread.mode
    to decide if it is backing up or restoring.
    """
    def __init__(self, process=None, name="Thread Filter"):
        super(ZFSBackupFilterThread, self).__init__()
        threading.Thread.__init__(self)
        # os.pipe() returns (read_fd, write_fd): the pump thread writes to
        # output_pipe, consumers read from input_pipe.
        (self.input_pipe, self.output_pipe) = os.pipe()
        self._source = None
        self._done = threading.Event()
        self._done.clear()
        self._process = process
        if self._process is None:
            self._name = "Null Thread Filter"
        else:
            self._name = name

    @property
    def backup_command(self):
        return ["<thread>"]

    @property
    def restore_command(self):
        return ["<thread>"]

    @property
    def input_pipe(self):
        # Read end of the pipe (raw file descriptor).
        return self._input

    @input_pipe.setter
    def input_pipe(self, p):
        self._input = p

    @property
    def output_pipe(self):
        # Write end of the pipe (raw file descriptor).
        return self._output

    @output_pipe.setter
    def output_pipe(self, p):
        self._output = p

    @property
    def source(self):
        return self._source

    @property
    def mode(self):
        # "backup" or "restore"; set by start_backup()/start_restore().
        return self._mode

    def process(self, buf):
        # Subclasses should do any processing here
        if self._process:
            return self._process(buf)
        # Null thread filter: pass the data through unchanged.  (The
        # original returned None here, which made run() call
        # os.write(fd, None) and raise TypeError.)
        return buf

    def run(self):
        # Pump data from the source, through process(), into the pipe.
        while True:
            b = self.source.read(1024*1024)
            if b:
                os.write(self.output_pipe, self.process(b))
            else:
                break
        self._done.set()
        os.close(self.output_pipe)

    def start_backup(self, source):
        self._mode = "backup"
        self._source = source
        self._py_output = os.fdopen(self.input_pipe, "rb")
        self.start()
        return self._py_output

    def start_restore(self, source):
        self._mode = "restore"
        self._source = source
        rv = os.fdopen(self.input_pipe, "rb")
        self.start()
        return rv
class ZFSBackupFilterCommand(ZFSBackupFilter):
    """
    Derived class for backup filters based on commands.
    This adds a couple of properties, and starts the appropriate command
    in a Popen instance.  The error parameter in the constructor is
    used to indicate where stderr should go; by default, it goes to
    /dev/null.
    """
    def __init__(self, backup_command=["/bin/cat"],
                 restore_command=["/bin/cat"], error=None):
        # NOTE: the list defaults are shared across calls but never mutated.
        super(ZFSBackupFilterCommand, self).__init__()
        self._backup_command=backup_command
        self._restore_command=restore_command
        self.error = error
        # Initialise so the error_output getter is safe before the setter
        # has ever run (the original raised AttributeError in that case).
        self._error_output = None

    @property
    def backup_command(self):
        return self._backup_command

    @property
    def restore_command(self):
        return self._restore_command

    @property
    def error_output(self):
        return self._error_output

    @error_output.setter
    def error_output(self, where):
        # Close any previously-opened error destination before replacing it.
        if self.error:
            self.error.close()
        self._error_output = where

    def start_restore(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source as stdin, and
        subprocess.PIPE as stdout, and return popen.stdout.
        If error is None, we open /dev/null for writing and
        use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = subprocess.Popen(self.restore_command,
                             bufsize=1024 * 1024,
                             stdin=source,
                             stdout=subprocess.PIPE,
                             stderr=self.error)
        return p.stdout

    def start_backup(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source up as stdin,
        and subprocess.PIPE as output, and return
        popen.stdout.
        If error is None, we open /dev/null for writing
        and use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = subprocess.Popen(self.backup_command,
                             bufsize=1024 * 1024,
                             stderr=self.error,
                             stdin=source,
                             stdout=subprocess.PIPE)
        return p.stdout
class ZFSBackupFilterCompressed(ZFSBackupFilterCommand):
    """
    A sample command filter, for compressing.
    One optional parameter: pigz (use the parallel pigz/unpigz
    binaries instead of gzip/gunzip).
    """
    def __init__(self, pigz=False):
        if pigz:
            compress, decompress = "/usr/local/bin/pigz", "/usr/local/bin/unpigz"
        else:
            compress, decompress = "/usr/bin/gzip", "/usr/bin/gunzip"
        super(ZFSBackupFilterCompressed, self).__init__(
            backup_command=[compress],
            restore_command=[decompress])
class ZFSBackupFilterCounter(ZFSBackupFilterThread):
    """
    A sample thread filter.  All this does is count the
    bytes that come in to be processed.
    """
    def __init__(self, handler=None):
        super(ZFSBackupFilterCounter, self).__init__()
        self._count = 0
        self.handler = handler

    @property
    def name(self):
        # Declared as a property to match the base-class interface (the
        # original was a plain method shadowing the inherited property).
        return "ZFS Count Filter"

    def process(self, b):
        self._count += len(b)
        return b

    @property
    def handler(self):
        return self._handler

    @handler.setter
    def handler(self, h):
        self._handler = h

    @property
    def count(self):
        # Wait until the pump thread has drained the source so the
        # count is final.
        self._done.wait()
        # `callable` is the builtin; the original used `iscallable`,
        # which does not exist and raised NameError.
        if self.handler and callable(self.handler):
            self.handler(self._count)
        return self._count
class ZFSBackup(object):
    """
    Base class for doing ZFS backups.
    Backups are done using snapshots -- zfs send is used -- not using files.
    Every backup must have a source and a target, although subclasses
    can change how they are interpreted.  Backups can be recursive.
    One ZFSBackup object should be created for each <source, target>, but
    not for each snapshot.  That is, you would use
        backup = ZFSBackup("/tank/Media", "/backup/tank/Media", recursive=True)
        <do backup>
        backup = ZFSBackup("/tank/Documents", "/backup/tank/Documents")
        <do backup>
    instead of creating a ZFSBackup object for each snapshot.
    In general, backups and restores are simply inverses of each other.
    In order to perform backups, it is necessary to get a list of snapshots
    on both the source and target.  An empty list on the target will mean
    a full backup is being done; an empty list on the source is a failure.
    Backups can have filters applied to them.  This is not used in the base
    class (since it only implements ZFS->ZFS), but subclasses may wish to
    add filters for compression, encryption, or accounting.  Some sample
    filter classes are provided.
    Some notes on how replication works:
    * source is the full path to the dataset.  *Or* it can be the entire pool.
    * target is the dataset to which the replication should go.
    * If source is the full pool, then the target will have all of the files
      at the root of the source pool.
    * If source is NOT the full pool, then the target will end up with only the
      dataset(s) being replicated -- but any intervening datasets will be created.
    What this means:
    * tank -> backup/tank means we end up with backup/tank as a copy of tank.
    * tank/usr/home -> backup/home means we end up with backup/home/usr/home.
    * When getting snapshots for the destination, we need to add the path for
      source, *minus* the pool name.
    * UNLESS we are replicating the full pool.
    What *that* means:
    * tank -> backup/tank means getting snapshots from backup/tank
    * tank/usr/home -> backup/home means getting snapshots from backup/home/usr/home
    """
    def __init__(self, source, target, recursive=False):
        """
        Parameters:
        source -- (str) a ZFS pool or dataset to be backed up.
        target -- (str) a ZFS dataset to back up to.
        recursive -- (bool) whether the backup is recursive.
        The only thing the base class does is run some validation tests
        on the source and target.
        """
        self.target = target
        self.source = source
        self.recursive = recursive
        self._source_snapshots = None
        self._target_snapshots = None
        self._filters = []
        self.validate()

    @property
    def target(self):
        return self._dest

    @target.setter
    def target(self, t):
        self._dest = t

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, s):
        self._source = s

    @property
    def recursive(self):
        return self._recursive

    @recursive.setter
    def recursive(self, b):
        self._recursive = b

    def AddFilter(self, filter):
        """
        Add a filter.  The filter is set up during the backup and
        restore methods.  The filter needs to be an instance of
        ZFSFilter -- at least, it needs to have the start_backup and
        start_restore methods.
        """
        if not callable(getattr(filter, "start_backup", None)) and \
           not callable(getattr(filter, "start_restore", None)):
            raise ValueError("Incorrect type passed for filter")
        self._filters.append(filter)

    def _filter_backup(self, source, error=None):
        # Private method, to stitch the backup filters together.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_backup(input)
        return input

    def _filter_restore(self, source, error=None):
        # Private method, to stitch the restore filters together.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_restore(input)
        return input

    def __repr__(self):
        return "{}(source={}, target={})".format(self.__class__.__name__, self.source, self.target)

    @property
    def source_snapshots(self):
        """
        Return a list of snapshots on the source.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if the recursive is true, this _only_ lists the snapshots for the
        source (recursive requires that the same snapshot exist on the descendents,
        or it doesn't get backed up).
        We cache this so we don't have to keep doing a list.
        """
        if not self._source_snapshots:
            self._source_snapshots = _get_snapshots(self.source)
        return self._source_snapshots

    @property
    def target_snapshots(self):
        """
        Return a list of snapshots on the target.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if the recursive is true, this _only_ lists the snapshots for the
        target dataset.
        We cache this so we don't have to keep doing a list.
        """
        if not self._target_snapshots:
            # See the long discussion in the class docstring about snapshots.
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            self._target_snapshots = _get_snapshots(target_path)
        return self._target_snapshots

    def validate(self):
        """
        Ensure the destination exists.  Derived classes will want
        to override this (probably).

        Raises ZFSBackupError when the target dataset is missing or the
        source has no snapshots.
        """
        command = ["/sbin/zfs", "list", "-H", self.target]
        try:
            with open("/dev/null", "w") as devnull:
                subprocess.check_call(command, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("Target {} does not exist".format(self.target))
        if not self.source_snapshots:
            # A source with no snapshots cannot be backed up
            raise ZFSBackupError("Source {} does not have snapshots".format(self.source))
        return

    def backup_handler(self, stream):
        """
        Method called to write the backup to the target.  In the base class,
        this simply creates the necessary datasets on the target, and then
        creates a Popen subprocess for 'zfs recv' with the appropriate arguments,
        and sets its stdin to stream.
        Subclasses will probably want to replace this method.
        """
        # First we create the intervening dataset paths.  That is, the
        # equivalent of 'mkdir -p ${target}/${source}'.
        # We don't care if it fails.
        full_path = self.target
        with open("/dev/null", "w+") as devnull:
            for d in self.source.split("/")[1:]:
                full_path = os.path.join(full_path, d)
                command = ["/sbin/zfs", "create", "-o", "readonly=on", full_path]
                print("Running command {}".format(" ".join(command)), file=sys.stderr)
                try:
                    subprocess.call(command, stdout=devnull, stderr=devnull)
                except Exception:
                    # Best-effort: the dataset may already exist.  (Narrowed
                    # from a bare except so ^C still interrupts.)
                    pass
        # Now we just send the data to zfs recv.
        # Do we need -p too?
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            fobj = stream
            try:
                subprocess.check_call(command, stdin=fobj,
                                      stderr=error_output)
            except subprocess.CalledProcessError:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        return

    def backup(self, snapname=None, force_full=False):
        """
        Back up the source to the target.
        If snapname is given, then that will be the snapshot used for the backup,
        otherwise it will be the most recent snapshot.  If snapname is given and
        does not exist, an exception is raised.
        By default, it will first find a list of snapshots in common with the
        source and target, ordered chronologically (based on the source).
        If force_full is True, then the snapshot chosen will be sent in its entirety,
        rather than trying to find a common ancestor for an incremental snapshot.
        This is the main driver of the backup process, and subclasses should be okay
        with using it.
        """
        # First, if snapname is given, let's make sure that it exists on the source.
        if snapname:
            # If snapname has the dataset in it, let's remove it
            if '@' in snapname:
                (_, snapname) = snapname.split("@")
            snap_index = None
            for indx, d in enumerate(self.source_snapshots):
                if d["Name"] == snapname:
                    snap_index = indx
                    break
            if snap_index is None:
                # (Typo fix: the original called .foramt() here, raising
                # AttributeError instead of the intended ZFSBackupError.)
                raise ZFSBackupError("Specified snapshot {} does not exist".format(snapname))
            # We want to remove everything in source_snapshots up to the given one
            source_snapshots = self.source_snapshots[0:snap_index+1]
        else:
            source_snapshots = self.source_snapshots
        last_snapshot = source_snapshots[-1]
        # (Moved below the assignment: the original printed last_snapshot
        # one line before defining it, which raised NameError.)
        print("last_snapshot = {}".format(last_snapshot), file=sys.stderr)
        last_common_snapshot = None
        if force_full:
            common_snapshots = []
        else:
            common_snapshots = _merge_snapshots(source_snapshots, self.target_snapshots)
        # At this point, common_snapshots has a list of snapshot names on both.
        # If there are no common snapshots, then we back up everything up to last_snapshot
        print("ZFSBackup: last_snapshot = {}, common_snapshots = {}".format(last_snapshot,
                                                                            common_snapshots),
              file=sys.stderr)
        if last_snapshot["Name"] not in common_snapshots:
            print("We have to do some sends/receives", file=sys.stderr)
            # We need to do incremental snapshots from the last common snapshot to
            # last_snapshot.
            if common_snapshots:
                # Don't bother doing this if we have no snapshots in common
                last_common_snapshot = common_snapshots[-1]
                print("Last common snapshot = {}".format(last_common_snapshot), file=sys.stderr)
                for indx, snap in enumerate(source_snapshots):
                    if snap["Name"] == last_common_snapshot:
                        break
                snapshot_list = source_snapshots[indx:]
            else:
                # Either it's been deleted on the remote end, or it's newer than the list.
                # So we start at a full dump from last_snapshot
                snapshot_list = [last_snapshot]
        else:
            snapshot_list = [last_snapshot]
        # There are two approaches that could be done here.
        # One is to do incremental sends for every snapshot; the other
        # is simply to do a send -I.  I'm choosing the latter.
        # If we have a last common snapshot, we can do an incremental from it to
        # the last snapshot; if we don't, we'll need to do a full send.
        command = ["/sbin/zfs", "send"]
        if self.recursive:
            command.append("-R")
        if last_common_snapshot:
            command.extend(["-I", "{}".format(last_common_snapshot)])
        command.append("{}@{}".format(self.source, last_snapshot["Name"]))
        print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                send_proc = subprocess.Popen(command,
                                             bufsize=mByte,
                                             stdin=devnull,
                                             stderr=error_output,
                                             stdout=subprocess.PIPE)
                self.backup_handler(send_proc.stdout)
                # Reap the sender so returncode is populated -- it stays
                # None until the process has been waited on.
                send_proc.wait()
                if send_proc.returncode:
                    error_output.seek(0)
                    raise ZFSBackupError(error_output.read())
        return

    def replicate(self, source, snapname, previous=None, date=None):
        """
        Replicate from source.  source must be an object that supports
        read().  If date is not given, we will use the current time.
        The full snapshot name from the source would be dataset@snapname.
        If previous is set, it indicates this is an incremental snapshot.
        The snapname, previous, and date parameters are for informational
        purposes only; the base class doesn't use them, but derived
        classes may.
        """
        # Default computed at call time; the original used
        # date=int(time.time()) in the signature, which is evaluated once
        # at definition time.
        if date is None:
            date = int(time.time())
        # (Removed an unused `destination` computation that referenced a
        # nonexistent self.dataset attribute and raised AttributeError.)
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            # fobj = self._filter(source, error=error_output)
            fobj = source
            try:
                subprocess.check_call(command, stdin=fobj, stderr=error_output)
            except subprocess.CalledProcessError:
                name = "{}@{}".format(self.source, snapname)
                error_output.seek(0)
                print("`{}` failed: {}".format(" ".join(command), error_output.read()),
                      file=sys.stderr)
                raise ZFSBackupError("Could not replicate {} to target {}".format(name, self.target))
        return

    @property
    def snapshots(self):
        """
        Return an array of snapshots for the destination.
        Each entry in the array is a dictionary with at least
        two keys -- Name and CreationTime.  CreationTime is
        an integer (unix seconds).  The array is sorted by
        creation time (oldest first).  If there are no snapshots,
        an empty array is returned.
        This would be better with libzfs.
        """
        command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
                   "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
                   self.target]
        try:
            output = subprocess.check_output(command).split("\n")
        except subprocess.CalledProcessError:
            # We'll assume this is because there are no snapshots
            return []
        snapshots = []
        for snapshot in output:
            if not snapshot:
                continue
            (name, ctime) = snapshot.rstrip().split()
            snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
        return snapshots
class ZFSBackupCount(ZFSBackup):
    """A backup target that simply counts the bytes it receives instead of
    storing them anywhere."""
    def __init__(self, source, target, recursive=False):
        super(ZFSBackupCount, self).__init__(source, target, recursive)
        self._count = 0

    def __repr__(self):
        return "{}(source={}, target={}, recursive={})".format(
            self.__class__.__name__, self.source, self.target, self.recursive)

    def validate(self):
        # Nothing to validate: there is no real target dataset.
        return

    def backup_handler(self, stream):
        # Drain the (filtered) stream, accumulating its total length.
        fobj = self._filter_backup(stream)
        chunk_size = 1024 * 1024
        total = 0
        while True:
            chunk = fobj.read(chunk_size)
            if not chunk:
                break
            total += len(chunk)
        self._count = total

    @property
    def target_snapshots(self):
        # A counting target never has snapshots in common with the source.
        return []

    @property
    def count(self):
        """Total number of bytes seen by the last backup."""
        return self._count
def main():
    """Command-line driver: parse arguments, pick a replication backend,
    and run a single backup of the given snapshot."""
    import argparse

    def to_bool(s):
        # argparse converter registered as 'bool': plain bool("False")
        # would be True, so parse the string explicitly.
        if s.lower() in ("yes", "1", "true", "t", "y"):
            return True
        return False

    parser = argparse.ArgumentParser(description='ZFS snapshot replicator')
    parser.register('type', 'bool', to_bool)
    parser.add_argument('--recursive', '-R', dest='recursive',
                        # Use the registered 'bool' converter; the original
                        # passed type=bool, so any non-empty string
                        # (including "false") parsed as True.
                        type='bool',
                        default=False,
                        help='Recursively replicate')
    parser.add_argument('--snapshot', '-S', dest='snapshot_name',
                        default=None,
                        help='Snapshot to replicate')
    parser.add_argument("--compressed", "-C", dest='compressed',
                        action='store_true', default=False,
                        help='Compress snapshots')
    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
    # We have a sub parser for each type of replication
    # Currently just ZFS and Counter
    zfs_parser = subparsers.add_parser('zfs',
                                       help='Replicate to local ZFS dataset')
    zfs_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    counter_parser = subparsers.add_parser('counter',
                                           help='Count replication bytes')
    args = parser.parse_args()
    print("args = {}".format(args), file=sys.stderr)
    # NOTE(review): --snapshot is optional but required here; when it is
    # omitted, .split() raises AttributeError (not ValueError) -- confirm
    # whether it should be a required argument.
    try:
        (dataset, snapname) = args.snapshot_name.split('@')
    except ValueError:
        print("Invalid snapshot name {}".format(args.snapshot_name), file=sys.stderr)
        sys.exit(1)
    if args.subcommand is None:
        print("No replication type method. Valid types are zfs, counter", file=sys.stderr)
        sys.exit(1)
    elif args.subcommand == 'counter':
        backup = ZFSBackupCount(dataset, "<none>", recursive=args.recursive)
    elif args.subcommand == 'zfs':
        backup = ZFSBackup(dataset, args.destination, recursive=args.recursive)
    else:
        print("Unknown replicator {}".format(args.subcommand), file=sys.stderr)
        sys.exit(1)
    if args.compressed:
        backup.AddFilter(ZFSBackupFilterCompressed(pigz=True))
    backup.backup(snapname=args.snapshot_name)
    print("Done with backup")
    if isinstance(backup, ZFSBackupCount):
        print("{} bytes".format(backup.count))
    sys.exit(0)
    # NOTE: the original continued past this unconditional exit with a
    # second, unreachable replication pass that referenced an undefined
    # `replicator` variable; that dead code has been removed.
Clean-up, and move options around.
from __future__ import print_function
import os, sys
import subprocess
import time
import tempfile
import threading
def _merge_snapshots(list1, list2):
"""
Given a list of snapshots, return a list of
common snapshots (sorted by creation time).
The return list is simply an array of names.
N.B.: Snapshots are assumed to be the same if
they have the same name!
"""
rv = []
# print("list1 names = {}".format([el["Name"] for el in list1]), file=sys.stderr)
# print("list2 names = {}".format([el["Name"] for el in list2]), file=sys.stderr)
if list2:
dict2 = dict((el["Name"], True) for el in list2)
for snapname in [x["Name"] for x in list1]:
# print("Checking snapname {}".format(snapname), file=sys.stderr)
if snapname in dict2:
rv.append(snapname)
else:
pass; #print("\tNot in list2?")
return rv
def _get_snapshots(ds):
    """
    Return a list of snapshots for the given dataset.
    This only works for local ZFS pools, obviously.
    It relies on /sbin/zfs sorting, rather than sorting itself.

    Each entry is {"Name": <snapshot name>, "CreationTime": <unix seconds>},
    oldest first.  Returns [] when the zfs command fails (assumed to mean
    there are no snapshots).
    """
    command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
               "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
               ds]
    print("get_snapshots: {}".format(" ".join(command)), file=sys.stderr)
    try:
        # universal_newlines=True makes check_output return str on both
        # Python 2 and 3; the original received bytes under Python 3 and
        # bytes.split("\n") raised TypeError.
        output = subprocess.check_output(command,
                                         universal_newlines=True).split("\n")
    except subprocess.CalledProcessError:
        # We'll assume this is because there are no snapshots
        return []
    snapshots = []
    for snapshot in output:
        if not snapshot:
            continue
        (name, ctime) = snapshot.rstrip().split()
        # Strip the dataset part: "pool/ds@snap" -> "snap"
        name = name.split('@')[1]
        snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
    return snapshots
class ZFSBackupError(ValueError):
    """Raised for any failure in the backup/replication machinery."""
    pass
class ZFSBackupFilter(object):
    """
    Base class for ZFS backup filters.

    A filter exposes a few informational properties plus start_backup()
    and start_restore().  The start_* methods take a source, which should
    be a pipe, and return the stream to read filtered data from.  Filters
    should generally run in a subprocess or thread unless they are the
    terminus of the pipeline -- doing otherwise risks deadlock.

    This base class is the identity ("null") filter.
    """
    def __init__(self):
        pass

    @property
    def name(self):
        """Human-readable name of the filter."""
        return "Null Filter"

    @property
    def error_output(self):
        """Destination for stderr; the null filter has none."""
        return None

    @error_output.setter
    def error_output(self, e):
        # The null filter discards any error destination it is given.
        return

    @property
    def backup_command(self):
        """Command run during backup (none for the null filter)."""
        return []

    @property
    def restore_command(self):
        """Command run during restore (none for the null filter)."""
        return []

    def start_backup(self, source):
        """Identity behavior on backup: hand the source straight back."""
        return source

    def start_restore(self, source):
        """Identity behavior on restore: hand the source straight back."""
        return source
class ZFSBackupFilterThread(ZFSBackupFilter, threading.Thread):
    """
    Base class for a thread-based filter.  Either it should be
    subclassed (see ZFSBackupFilterCounter below), or it should
    be called with a callable object as the "process=" parameter.
    The process method may need to check ZFSBackupFilterThread.mode
    to decide if it is backing up or restoring.
    """
    def __init__(self, process=None, name="Thread Filter"):
        super(ZFSBackupFilterThread, self).__init__()
        threading.Thread.__init__(self)
        # os.pipe() returns (read_fd, write_fd): the pump thread writes to
        # output_pipe, consumers read from input_pipe.
        (self.input_pipe, self.output_pipe) = os.pipe()
        self._source = None
        self._done = threading.Event()
        self._done.clear()
        self._process = process
        if self._process is None:
            self._name = "Null Thread Filter"
        else:
            self._name = name

    @property
    def backup_command(self):
        return ["<thread>"]

    @property
    def restore_command(self):
        return ["<thread>"]

    @property
    def input_pipe(self):
        # Read end of the pipe (raw file descriptor).
        return self._input

    @input_pipe.setter
    def input_pipe(self, p):
        self._input = p

    @property
    def output_pipe(self):
        # Write end of the pipe (raw file descriptor).
        return self._output

    @output_pipe.setter
    def output_pipe(self, p):
        self._output = p

    @property
    def source(self):
        return self._source

    @property
    def mode(self):
        # "backup" or "restore"; set by start_backup()/start_restore().
        return self._mode

    def process(self, buf):
        # Subclasses should do any processing here
        if self._process:
            return self._process(buf)
        # Null thread filter: pass the data through unchanged.  (The
        # original returned None here, which made run() call
        # os.write(fd, None) and raise TypeError.)
        return buf

    def run(self):
        # Pump data from the source, through process(), into the pipe.
        while True:
            b = self.source.read(1024*1024)
            if b:
                os.write(self.output_pipe, self.process(b))
            else:
                break
        self._done.set()
        os.close(self.output_pipe)

    def start_backup(self, source):
        self._mode = "backup"
        self._source = source
        self._py_output = os.fdopen(self.input_pipe, "rb")
        self.start()
        return self._py_output

    def start_restore(self, source):
        self._mode = "restore"
        self._source = source
        rv = os.fdopen(self.input_pipe, "rb")
        self.start()
        return rv
class ZFSBackupFilterCommand(ZFSBackupFilter):
    """
    Derived class for backup filters based on commands.
    This adds a couple of properties, and starts the appropriate commands
    in a Popen instance.  The error parameter in the constructor is
    used to indicate where stderr should go; by default, it goes to
    /dev/null.
    """
    def __init__(self, backup_command=["/bin/cat"],
                 restore_command=["/bin/cat"], error=None):
        # Note: the list defaults are never mutated, so sharing them
        # across instances is harmless.
        super(ZFSBackupFilterCommand, self).__init__()
        self._backup_command = backup_command
        self._restore_command = restore_command
        self.error = error
        # BUG FIX: initialize the backing attribute so reading the
        # error_output property before it is assigned does not raise
        # AttributeError.
        self._error_output = error

    @property
    def backup_command(self):
        return self._backup_command

    @property
    def restore_command(self):
        return self._restore_command

    @property
    def error_output(self):
        return self._error_output

    @error_output.setter
    def error_output(self, where):
        # Close any previously opened error stream before redirecting.
        if self.error:
            self.error.close()
        # BUG FIX: the setter used to close self.error without rebinding
        # it, so the next start_backup()/start_restore() passed a closed
        # file as stderr.  Rebind it to the new destination.
        self.error = where
        self._error_output = where

    def start_restore(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source as stdin, and
        subprocess.PIPE as stdout, and return popen.stdout.
        If error is None, we open /dev/null for writing and
        use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = subprocess.Popen(self.restore_command,
                             bufsize=1024 * 1024,
                             stdin=source,
                             stdout=subprocess.PIPE,
                             stderr=self.error)
        return p.stdout

    def start_backup(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, and setting source up as stdin,
        and subprocess.PIPE as output, and return
        popen.stdout.
        If error is None, we open /dev/null for writing
        and use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = subprocess.Popen(self.backup_command,
                             bufsize=1024 * 1024,
                             stderr=self.error,
                             stdin=source,
                             stdout=subprocess.PIPE)
        return p.stdout
class ZFSBackupFilterCompressed(ZFSBackupFilterCommand):
    """
    A sample command filter which compresses on backup and
    decompresses on restore.  One optional parameter: pigz.
    """
    def __init__(self, pigz=False):
        # Pick the (compress, decompress) pair once, then hand both
        # argv lists to the command-filter base class.
        if pigz:
            compressor, decompressor = "/usr/local/bin/pigz", "/usr/local/bin/unpigz"
        else:
            compressor, decompressor = "/usr/bin/gzip", "/usr/bin/gunzip"
        super(ZFSBackupFilterCompressed, self).__init__(
            backup_command=[compressor],
            restore_command=[decompressor])
class ZFSBackupFilterCounter(ZFSBackupFilterThread):
    """
    A sample thread filter.  All this does is count the
    bytes that come in to be processed.
    """
    def __init__(self, handler=None):
        super(ZFSBackupFilterCounter, self).__init__()
        self._count = 0
        self.handler = handler

    @property
    def name(self):
        # BUG FIX: this was a plain method, which shadowed the base-class
        # `name` property with a bound method instead of a string.
        return "ZFS Count Filter"

    def process(self, b):
        # Tally the bytes and pass the buffer through unchanged.
        self._count += len(b)
        return b

    @property
    def handler(self):
        return self._handler

    @handler.setter
    def handler(self, h):
        self._handler = h

    @property
    def count(self):
        # Block until the filter thread has drained its input so the
        # count is final, then invoke the optional completion handler.
        self._done.wait()
        # BUG FIX: `iscallable` is not a Python builtin; the correct
        # predicate is `callable`, so setting a handler used to raise
        # NameError here.
        if self.handler and callable(self.handler):
            self.handler(self._count)
        return self._count
class ZFSBackup(object):
    """
    Base class for doing ZFS backups.
    Backups are done using snapshots -- zfs send is used -- not using files.
    Every backup must have a source and a target, although subclasses
    can change how they are interpreted.  Backups can be recursive.

    One ZFSBackup object should be created for each <source, target>, but
    not for each snapshot.  That is, you would use

        backup = ZFSBackup("/tank/Media", "/backup/tank/Media", recursive=True)
        <do backup>
        backup = ZFSBackup("/tank/Documents", "/backup/tank/Documents")
        <do backup>

    instead of creating a ZFSBackup object for each snapshot.

    In general, backups and restores are simply inverses of each other.

    In order to perform backups, it is necessary to get a list of snapshots
    on both the source and target.  An empty list on the target will mean
    a full backup is being done; an empty list on the source is a failure.

    Backups can have filters applied to them.  This is not used in the base
    class (since it only implements ZFS->ZFS), but subclasses may wish to
    add filters for compression, encryption, or accounting.  Some sample
    filter classes are provided.

    Some notes on how replication works:
    * source is the full path to the dataset.  *Or* it can be the entire pool.
    * target is the dataset to which the replication should go.
    * If source is the full pool, then the target will have all of the files
      at the root of the source pool.
    * If source is NOT the full pool, then the target will end up with only the
      dataset(s) being replicated -- but any intervening datasets will be created.

    What this means:
    * tank -> backup/tank means we end up with backup/tank as a copy of tank.
    * tank/usr/home -> backup/home means we end up with backup/home/usr/home.
    * When getting snapshots for the destination, we need to add the path for
      source, *minus* the pool name.
    * UNLESS we are replicating the full pool.

    What *that* means:
    * tank -> backup/tank means getting snapshots from backup/tank
    * tank/usr/home -> backup/home means getting snapshots from backup/home/usr/home
    """
    def __init__(self, source, target, recursive=False):
        """
        Parameters:
        source - (str) a ZFS pool or dataset to be backed up.
        target - (str) a ZFS dataset to back up to.
        recursive - (bool) whether the backup is to be recursive or not.
        The only thing the base class does is run some validation tests
        on the source and target.
        """
        self.target = target
        self.source = source
        self.recursive = recursive
        # Snapshot lists are fetched lazily and cached.
        self._source_snapshots = None
        self._target_snapshots = None
        self._filters = []
        self.validate()

    @property
    def target(self):
        return self._dest

    @target.setter
    def target(self, t):
        self._dest = t

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, s):
        self._source = s

    @property
    def recursive(self):
        return self._recursive

    @recursive.setter
    def recursive(self, b):
        self._recursive = b

    def AddFilter(self, filter):
        """
        Add a filter.  The filter is set up during the backup and
        restore methods.  The filter needs to be an instance of
        ZFSBackupFilter -- at least, it needs to have the start_backup
        and start_restore methods.
        """
        # BUG FIX: this used `and`, which only rejected objects missing
        # *both* methods; a filter must provide both, so reject if either
        # is missing.
        if not callable(getattr(filter, "start_backup", None)) or \
           not callable(getattr(filter, "start_restore", None)):
            raise ValueError("Incorrect type passed for filter")
        self._filters.append(filter)

    def _filter_backup(self, source, error=None):
        # Private method, to stitch the backup filters together.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_backup(input)
        return input

    def _filter_restore(self, source, error=None):
        # Private method, to stitch the restore filters together.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_restore(input)
        return input

    def __repr__(self):
        return "{}(source={}, target={})".format(self.__class__.__name__, self.source, self.target)

    @property
    def source_snapshots(self):
        """
        Return a list of snapshots on the source.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if recursive is true, this _only_ lists the snapshots for the
        source (recursive requires that the same snapshot exist on the descendents,
        or it doesn't get backed up).
        We cache this so we don't have to keep doing a list.
        """
        if not self._source_snapshots:
            self._source_snapshots = _get_snapshots(self.source)
        return self._source_snapshots

    @property
    def target_snapshots(self):
        """
        Return a list of snapshots on the target.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if recursive is true, this _only_ lists the snapshots for the
        target dataset.
        We cache this so we don't have to keep doing a list.
        """
        if not self._target_snapshots:
            # See the long discussion in the class docstring about how the
            # target snapshot path is derived from the source path.
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            self._target_snapshots = _get_snapshots(target_path)
        return self._target_snapshots

    def validate(self):
        """
        Ensure the destination exists.  Derived classes will want
        to override this (probably).
        Raises ZFSBackupError if the target is missing or the source
        has no snapshots.
        """
        command = ["/sbin/zfs", "list", "-H", self.target]
        try:
            with open("/dev/null", "w") as devnull:
                subprocess.check_call(command, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("Target {} does not exist".format(self.target))
        if not self.source_snapshots:
            # A source with no snapshots cannot be backed up
            raise ZFSBackupError("Source {} does not have snapshots".format(self.source))
        return

    def backup_handler(self, stream):
        """
        Method called to write the backup to the target.  In the base class,
        this simply creates the necessary datasets on the target, and then
        creates a Popen subprocess for 'zfs recv' with the appropriate arguments,
        and sets its stdin to stream.
        Subclasses will probably want to replace this method.
        """
        # First we create the intervening dataset paths.  That is, the
        # equivalent of 'mkdir -p ${target}/${source}'.
        # We don't care if it fails (the datasets may already exist).
        full_path = self.target
        with open("/dev/null", "w+") as devnull:
            for d in self.source.split("/")[1:]:
                full_path = os.path.join(full_path, d)
                command = ["/sbin/zfs", "create", "-o", "readonly=on", full_path]
                print("Running command {}".format(" ".join(command)), file=sys.stderr)
                try:
                    subprocess.call(command, stdout=devnull, stderr=devnull)
                except Exception:
                    # Best-effort creation; failures are deliberately ignored.
                    pass
        # Now we just send the data to zfs recv.
        # Do we need -p too?
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            fobj = stream
            try:
                subprocess.check_call(command, stdin=fobj,
                                      stderr=error_output)
            except subprocess.CalledProcessError:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        return

    def backup(self, snapname=None, force_full=False):
        """
        Back up the source to the target.
        If snapname is given, then that will be the snapshot used for the backup,
        otherwise it will be the most recent snapshot.  If snapname is given and
        does not exist, an exception is raised.
        By default, it will first find a list of snapshots in common with the
        source and target, ordered chronologically (based on the source).
        If force_full is True, then the snapshot chosen will be sent in its entirety,
        rather than trying to find a common ancestor for an incremental snapshot.
        This is the main driver of the backup process, and subclasses should be okay
        with using it.
        """
        # First, if snapname is given, let's make sure that it exists on the source.
        if snapname:
            # If snapname has the dataset in it, let's remove it
            if '@' in snapname:
                (_, snapname) = snapname.split("@")
            snap_index = None
            for indx, d in enumerate(self.source_snapshots):
                if d["Name"] == snapname:
                    snap_index = indx
                    break
            if snap_index is None:
                # BUG FIX: was ".foramt(...)", which raised AttributeError
                # instead of the intended ZFSBackupError.
                raise ZFSBackupError("Specified snapshot {} does not exist".format(snapname))
            # We want to remove everything in source_snapshots up to the given one
            source_snapshots = self.source_snapshots[0:snap_index+1]
        else:
            source_snapshots = self.source_snapshots
        last_snapshot = source_snapshots[-1]
        # BUG FIX: this debug print used to run *before* last_snapshot was
        # assigned, raising NameError on every call.
        print("last_snapshot = {}".format(last_snapshot), file=sys.stderr)
        last_common_snapshot = None
        if force_full:
            common_snapshots = []
        else:
            common_snapshots = _merge_snapshots(source_snapshots, self.target_snapshots)
        # At this point, common_snapshots has a list of snapshot names on both.
        # If there are no common snapshots, then we back up everything up to last_snapshot
        print("ZFSBackup: last_snapshot = {}, common_snapshots = {}".format(last_snapshot,
                                                                            common_snapshots),
              file=sys.stderr)
        if last_snapshot["Name"] not in common_snapshots:
            print("We have to do some sends/receives", file=sys.stderr)
            # We need to do incremental snapshots from the last common snapshot to
            # last_snapshot.
            if common_snapshots:
                # Don't bother doing this if we have no snapshots in common
                last_common_snapshot = common_snapshots[-1]
                print("Last common snapshot = {}".format(last_common_snapshot), file=sys.stderr)
                for indx, snap in enumerate(source_snapshots):
                    if snap["Name"] == last_common_snapshot:
                        break
                snapshot_list = source_snapshots[indx:]
            else:
                # Either it's been deleted on the remote end, or it's newer than the list.
                # So we start at a full dump from last_snapshot
                snapshot_list = [last_snapshot]
        else:
            snapshot_list = [last_snapshot]
        # There are two approaches that could be done here.
        # One is to do incremental sends for every snapshot; the other
        # is simply to do a send -I.  I'm choosing the latter.
        # If we have a last common snapshot, we can do an incremental from it to
        # the last snapshot; if we don't, we'll need to do a full send.
        command = ["/sbin/zfs", "send"]
        if self.recursive:
            command.append("-R")
        if last_common_snapshot:
            # BUG FIX: the incremental source is a bare snapshot name; per
            # zfs(8), the short form must be written as "@name".
            command.extend(["-I", "@{}".format(last_common_snapshot)])
        command.append("{}@{}".format(self.source, last_snapshot["Name"]))
        print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                send_proc = subprocess.Popen(command,
                                             bufsize=mByte,
                                             stdin=devnull,
                                             stderr=error_output,
                                             stdout=subprocess.PIPE)
                self.backup_handler(send_proc.stdout)
                # BUG FIX: returncode is None (falsy) until the process has
                # been reaped, so errors were never detected; wait for it.
                send_proc.wait()
                if send_proc.returncode:
                    error_output.seek(0)
                    raise ZFSBackupError(error_output.read())
        return

    def replicate(self, source, snapname, previous=None, date=None):
        """
        Replicate from source.  source must be an object that supports
        read().  If date is not given, we will use the current time, so
        it should really be set.  The full snapshot name from the source
        would be dataset@snapname.  If previous is set, it indicates this
        is an incremental snapshot.
        The snapname, previous, and date parameters are for informational purposes only;
        the base class doesn't use them, but derived classes may.
        """
        # BUG FIX: the default used to be date=int(time.time()), which is
        # evaluated once at definition time, not per call.
        if date is None:
            date = int(time.time())
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            fobj = source
            try:
                subprocess.check_call(command, stdin=fobj, stderr=error_output)
            except subprocess.CalledProcessError:
                # BUG FIX: this used self.dataset, an attribute that is never
                # defined on ZFSBackup; use the source dataset name instead.
                name = "{}@{}".format(self.source, snapname)
                error_output.seek(0)
                print("`{}` failed: {}".format(" ".join(command), error_output.read()),
                      file=sys.stderr)
                raise ZFSBackupError("Could not replicate {} to target {}".format(name, self.target))
        return

    @property
    def snapshots(self):
        """
        Return an array of snapshots for the destination.
        Each entry in the array is a dictionary with at least
        two keys -- Name and CreationTime.  CreationTime is
        an integer (unix seconds).  The array is sorted by
        creation time (oldest first).  If there are no snapshots,
        an empty array is returned.
        Note: unlike _get_snapshots(), Name here is the *full*
        "dataset@snapshot" name as printed by zfs list.
        This would be better with libzfs.
        """
        command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
                   "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
                   self.target]
        try:
            output = subprocess.check_output(command).split("\n")
        except subprocess.CalledProcessError:
            # We'll assume this is because there are no snapshots
            return []
        snapshots = []
        for snapshot in output:
            if not snapshot:
                continue
            (name, ctime) = snapshot.rstrip().split()
            snapshots.append({"Name": name, "CreationTime": int(ctime)})
        return snapshots
class ZFSBackupCount(ZFSBackup):
    """
    A ZFSBackup subclass whose "target" is simply a byte counter:
    instead of writing the stream anywhere, it tallies how many
    bytes would have been backed up after the filters have run.
    """
    def __init__(self, source, recursive=False):
        super(ZFSBackupCount, self).__init__(source, "", recursive)
        self._count = 0

    def __repr__(self):
        return "{}(source={}, target={}, recursive={})".format(
            self.__class__.__name__, self.source, self.target, self.recursive)

    def validate(self):
        # There is no target dataset to verify.
        return

    def backup_handler(self, stream):
        # Drain the (filtered) stream in 1 MiB chunks, counting bytes.
        filtered = self._filter_backup(stream)
        chunk_size = 1024 * 1024
        total = 0
        while True:
            chunk = filtered.read(chunk_size)
            if not chunk:
                break
            total += len(chunk)
        self._count = total

    @property
    def target_snapshots(self):
        # A counter has no snapshots of its own.
        return []

    @property
    def count(self):
        return self._count
def main():
    """Command-line driver: parse arguments and run a single replication."""
    import argparse

    def to_bool(s):
        # Custom argparse type: common affirmative strings map to True,
        # everything else to False (plain bool() would make "false" True).
        if s.lower() in ("yes", "1", "true", "t", "y"):
            return True
        return False

    parser = argparse.ArgumentParser(description='ZFS snapshot replicator')
    parser.register('type', 'bool', to_bool)
    parser.add_argument('--recursive', '-R', dest='recursive',
                        # BUG FIX: was type=bool, which treats any non-empty
                        # string (including "false") as True; use the
                        # registered 'bool' converter instead.
                        type='bool',
                        default=False,
                        help='Recursively replicate')
    parser.add_argument('--snapshot', '-S', dest='snapshot_name',
                        default=None,
                        help='Snapshot to replicate')
    parser.add_argument("--compressed", "-C", dest='compressed',
                        action='store_true', default=False,
                        help='Compress snapshots')
    parser.add_argument('--pigz', action='store_true',
                        dest='use_pigz', default=False,
                        help='Use pigz to compress')

    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
    # We have a sub parser for each type of replication.
    # Currently just ZFS and Counter.
    zfs_parser = subparsers.add_parser('zfs',
                                       help='Replicate to local ZFS dataset')
    zfs_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    counter_parser = subparsers.add_parser('counter',
                                           help='Count replication bytes')

    args = parser.parse_args()
    print("args = {}".format(args), file=sys.stderr)

    # BUG FIX: a missing --snapshot used to raise AttributeError on
    # None.split() instead of printing the error message below.
    try:
        (dataset, snapname) = args.snapshot_name.split('@')
    except (AttributeError, ValueError):
        print("Invalid snapshot name {}".format(args.snapshot_name), file=sys.stderr)
        sys.exit(1)

    if args.subcommand is None:
        print("No replication type method. Valid types are zfs, counter", file=sys.stderr)
        sys.exit(1)
    elif args.subcommand == 'counter':
        backup = ZFSBackupCount(dataset, recursive=args.recursive)
    elif args.subcommand == 'zfs':
        backup = ZFSBackup(dataset, args.destination, recursive=args.recursive)
    else:
        print("Unknown replicator {}".format(args.subcommand), file=sys.stderr)
        sys.exit(1)

    if args.compressed:
        backup.AddFilter(ZFSBackupFilterCompressed(pigz=args.use_pigz))

    print("Starting backup of {}".format(dataset))
    backup.backup(snapname=args.snapshot_name)
    print("Done with backup")

    if isinstance(backup, ZFSBackupCount):
        print("{} bytes".format(backup.count))


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import json
import argparse
import docker
class SwarmInspect:
    """Dispatch wrapper: runs a discovery function against the local Docker daemon."""
    def __init__(self, func=None):
        # Connect to the Docker daemon using the standard environment settings.
        cli = docker.from_env()
        if func:
            # NOTE(review): func(cli) is *called* here, so the discovery output
            # is printed during construction and self.execute is rebound to its
            # return value (None).  Presumably intentional given the usage at
            # the bottom of the file, but confirm -- calling self.execute()
            # afterwards would raise TypeError.
            self.execute = func(cli)
    def execute(self):
        # Fallback when no discovery function was supplied.
        print("No EndPoint Resource Provided")
def discovery_nodes(cli):
    """Print a Zabbix low-level-discovery JSON document for swarm nodes."""
    discovered = []
    for node in cli.nodes.list():
        discovered.append({
            "{#NODE_NAME}": node.attrs["Description"]["Hostname"],
            "{#NODE_ID}": node.id,
        })
    print(json.dumps({'data': discovered}))
def extrode_multiple_urls(urls):
    """Return the last (rightmost) url value from a list, or the value itself."""
    is_nonempty_list = bool(urls) and isinstance(urls, list)
    return urls[-1] if is_nonempty_list else urls
def discovery_services(cli):
    """Print a Zabbix low-level-discovery JSON document for swarm services."""
    discovered = []
    for svc in cli.services.list():
        domain = svc.attrs['Spec']['Labels'].get('com.df.serviceDomain', False)
        discovered.append({
            "{#SERVICE_ID}": svc.id,
            "{#SERVICE_NAME}": svc.name,
            "{#SERVICE_HTTPSUPPORT}": extrode_multiple_urls(domain),
        })
    print(json.dumps({'data': discovered}))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Only two resource kinds are supported; argparse enforces the choice.
    parser.add_argument('--resource', type=str, choices=['nodes','services'], required=True)
    args = parser.parse_args()
    if 'nodes' in args.resource:
        # Constructing SwarmInspect runs the discovery function immediately.
        sinspect = SwarmInspect(discovery_nodes)
    else:
        sinspect = SwarmInspect(discovery_services)
Still trying to fix the multiple-URLs problem.
#!/usr/bin/env python
import json
import argparse
import docker
class SwarmInspect:
    """Dispatch wrapper: runs a discovery function against the local Docker daemon."""
    def __init__(self, func=None):
        # Connect to the Docker daemon using the standard environment settings.
        cli = docker.from_env()
        if func:
            # NOTE(review): func(cli) is *called* here, so the discovery output
            # is printed during construction and self.execute is rebound to its
            # return value (None).  Presumably intentional given the usage at
            # the bottom of the file, but confirm -- calling self.execute()
            # afterwards would raise TypeError.
            self.execute = func(cli)
    def execute(self):
        # Fallback when no discovery function was supplied.
        print("No EndPoint Resource Provided")
def discovery_nodes(cli):
    """Emit the swarm node macros as a Zabbix low-level-discovery JSON document."""
    entries = []
    for node in cli.nodes.list():
        entry = {}
        entry["{#NODE_NAME}"] = node.attrs["Description"]["Hostname"]
        entry["{#NODE_ID}"] = node.id
        entries.append(entry)
    print(json.dumps({'data': entries}))
def extrode_multiple_urls(urls):
    """Return the last (rightmost) value of a comma-separated url string."""
    if not urls:
        return urls
    # rsplit with maxsplit=1 yields the same final element as split(',')[-1].
    return urls.rsplit(',', 1)[-1]
def discovery_services(cli):
    """Emit the swarm service macros as a Zabbix low-level-discovery JSON document."""
    entries = []
    for svc in cli.services.list():
        labels = svc.attrs['Spec']['Labels']
        entries.append({
            "{#SERVICE_ID}": svc.id,
            "{#SERVICE_NAME}": svc.name,
            "{#SERVICE_HTTPSUPPORT}":
                extrode_multiple_urls(labels.get('com.df.serviceDomain', False)),
        })
    print(json.dumps({'data': entries}))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Only two resource kinds are supported; argparse enforces the choice.
    parser.add_argument('--resource', type=str, choices=['nodes','services'], required=True)
    args = parser.parse_args()
    if 'nodes' in args.resource:
        # Constructing SwarmInspect runs the discovery function immediately.
        sinspect = SwarmInspect(discovery_nodes)
    else:
        sinspect = SwarmInspect(discovery_services)
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import deform.widget
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from dace.util import getSite
from pontus.form import FormView
from pontus.schema import Schema
from novaideo.content.processes.invitation_management.behaviors import (
AcceptInvitation)
from novaideo.content.invitation import Invitation
from novaideo.views.widget import TOUCheckboxWidget
from novaideo import _
@colander.deferred
def conditions_widget(node, kw):
    """Deferred widget factory: build the terms-of-use checkbox from the
    current site root's terms_of_use file at bind time."""
    site_root = getSite()
    return TOUCheckboxWidget(tou_file=site_root.terms_of_use)
class AcceptInvitationSchema(Schema):
    """Form schema used to validate an invitation acceptance."""
    # Double-entry password field; length is validated server-side.
    password = colander.SchemaNode(
        colander.String(),
        widget = deform.widget.CheckedPasswordWidget(),
        validator=colander.Length(min=3, max=100),
        title=_('Password')
        )
    # Terms-of-use checkbox; the widget is deferred so the current site's
    # terms file is looked up at bind time.
    accept_conditions = colander.SchemaNode(
        colander.Boolean(),
        widget=conditions_widget,
        label=_('I have read and accept the terms and conditions'),
        title ='',
        missing=False
        )
@view_config(
    name='accept_invitation',
    context=Invitation,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class AcceptInvitationView(FormView):
    """Form view bound to the AcceptInvitation behavior: lets an invited
    user set a password and accept the terms of use."""
    title = _('Validate the invitation')
    schema = AcceptInvitationSchema()
    behaviors = [AcceptInvitation]
    formid = 'formacceptinvitation'
    name = 'accept_invitation'
    #wrapper_template = 'daceui:templates/simple_view_wrapper.pt'
    # Client-side registration helpers loaded with the form.
    requirements = {'css_links':[],
                    'js_links':['novaideo:static/js/user_registration.js']}

# Register this view as the UI for the AcceptInvitation process behavior.
DEFAULTMAPPING_ACTIONS_VIEWS.update({AcceptInvitation:AcceptInvitationView})
Fix the accept-invitation view.
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import deform.widget
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from dace.util import getSite
from pontus.form import FormView
from pontus.schema import Schema
from novaideo.content.processes.invitation_management.behaviors import (
AcceptInvitation)
from novaideo.content.invitation import Invitation
from novaideo.views.widget import TOUCheckboxWidget
from novaideo import _
@colander.deferred
def conditions_widget(node, kw):
    """Deferred widget factory: build the terms-of-use checkbox from the
    current site root's terms_of_use file at bind time."""
    site_root = getSite()
    return TOUCheckboxWidget(tou_file=site_root.terms_of_use)
class AcceptInvitationSchema(Schema):
    """Form schema used to validate an invitation acceptance."""
    # Double-entry password field; length is validated server-side.
    password = colander.SchemaNode(
        colander.String(),
        widget = deform.widget.CheckedPasswordWidget(),
        validator=colander.Length(min=3, max=100),
        title=_('Password')
        )
    # Terms-of-use checkbox; the widget is deferred so the current site's
    # terms file is looked up at bind time.
    accept_conditions = colander.SchemaNode(
        colander.Boolean(),
        widget=conditions_widget,
        label=_('I have read and accept the terms and conditions'),
        title ='',
        missing=False
        )
@view_config(
    name='accept_invitation',
    context=Invitation,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class AcceptInvitationView(FormView):
    """Form view bound to the AcceptInvitation behavior: lets an invited
    user set a password and accept the terms of use."""
    title = _('Validate the invitation')
    schema = AcceptInvitationSchema()
    behaviors = [AcceptInvitation]
    formid = 'formacceptinvitation'
    name = 'accept_invitation'
    # Render the form inside the simple daceui wrapper.
    wrapper_template = 'daceui:templates/simple_view_wrapper.pt'
    # Client-side registration helpers loaded with the form.
    requirements = {'css_links':[],
                    'js_links':['novaideo:static/js/user_registration.js']}

# Register this view as the UI for the AcceptInvitation process behavior.
DEFAULTMAPPING_ACTIONS_VIEWS.update({AcceptInvitation:AcceptInvitationView})
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI specific views for Student.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>'
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import logging
from google.appengine.ext import blobstore
from django import forms
from django import http
from django.forms import fields as django_fields
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.views.helper import decorators
from soc.views.helper import dynaform
from soc.views.helper import lists
from soc.views.helper import params as params_helper
from soc.views.helper import responses
from soc.views.helper import widgets
from soc.views.models import student
from soc.modules.gci.logic.models import mentor as gci_mentor_logic
from soc.modules.gci.logic.models import org_admin as gci_org_admin_logic
from soc.modules.gci.logic.models import program as gci_program_logic
from soc.modules.gci.logic.models.student import logic as gci_student_logic
from soc.modules.gci.views.helper import access as gci_access
from soc.modules.gci.views.helper import redirects as gci_redirects
from soc.modules.gci.views.models import program as gci_program_view
import soc.modules.gci.logic.models.student
class View(student.View):
  """View methods for the GCI Student model.
  """

  DEF_NO_TASKS_MSG = ugettext(
      'There are no tasks affiliated to you.')

  # Format used to render blobstore upload timestamps.
  DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S' # '2006-10-25 14:30:59'

  def __init__(self, params=None):
    """Defines the fields and methods required for the student View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """

    # Extra URL pattern for the file-upload page handled by submitForms.
    patterns = []
    patterns += [
        (r'^%(url_name)s/(?P<access_type>submit_forms)/%(key_fields)s$',
         '%(module_package)s.%(module_name)s.submit_forms',
         'Submit forms'),
    ]

    rights = gci_access.GCIChecker(params)
    rights['edit'] = [('checkIsMyActiveRole', gci_student_logic)]
    rights['apply'] = [
        'checkIsUser',
        ('checkIsActivePeriod', ['student_signup', 'scope_path',
                                 gci_program_logic.logic]),
        ('checkIsNotParticipatingInProgramInScope',
         [gci_program_logic.logic, gci_student_logic,
          gci_org_admin_logic.logic, gci_mentor_logic.logic]),
        'checkCanApply']
    rights['manage'] = [('checkIsMyActiveRole', gci_student_logic)]
    rights['submit_forms'] = [('checkIsMyActiveRole', gci_student_logic)]

    new_params = {}
    new_params['logic'] = gci_student_logic
    new_params['rights'] = rights

    new_params['group_logic'] = gci_program_logic.logic
    new_params['group_view'] = gci_program_view.view

    new_params['scope_view'] = gci_program_view

    new_params['name'] = "GCI Student"
    new_params['module_name'] = "student"
    new_params['sidebar_grouping'] = 'Students'

    new_params['module_package'] = 'soc.modules.gci.views.models'
    new_params['url_name'] = 'gci/student'

    # These fields are managed by the dedicated upload forms built below,
    # so exclude them from the auto-generated edit form.
    new_params['extra_dynaexclude'] = [
        'agreed_to_tos', 'school', 'parental_form_mail',
        'consent_form', 'student_id_form',
    ]

    new_params['extra_django_patterns'] = patterns

    params = dicts.merge(params, new_params, sub_merge=True)

    super(View, self).__init__(params=params)

    base_form = self._params['dynabase']
    gci_student_model = gci_student_logic.getModel()

    def getUploadForms(name, label, help_text):
      # Builds the (add, edit) dynaform pair for one file-upload field.
      dynafields = [
          {'name': name,
           'base': forms.FileField,
           'label': label,
           'required': False,
           'help_text': help_text,
           }
      ]

      dynaproperties = params_helper.getDynaFields(dynafields)

      add_form = dynaform.newDynaForm(dynabase=base_form,
                                      dynaproperties=dynaproperties)

      # Read-only fields shown once a file has already been uploaded.
      dynaproperties = {
          'name': django_fields.CharField(
              label='Name', required=False,
              widget=widgets.HTMLTextWidget),
          'uploaded': django_fields.CharField(
              label='Uploaded on', required=False,
              widget=widgets.PlainTextWidget),
          'size': django_fields.CharField(
              label='Size', required=False,
              widget=widgets.PlainTextWidget),
      }

      edit_form = dynaform.extendDynaForm(
          add_form, dynaproperties=dynaproperties,
          dynainclude=['name', 'size', 'uploaded', name])

      return add_form, edit_form

    self._params['consent_form_upload_form'] = getUploadForms(
        'consent_form_upload', 'Consent Form',
        gci_student_model.consent_form.help_text)
    self._params['student_id_form_upload_form'] = getUploadForms(
        'student_id_form_upload', 'Student ID Form',
        gci_student_model.student_id_form.help_text)

  @decorators.merge_params
  @decorators.check_access
  def submitForms(self, request, access_type, page_name=None,
                  params=None, **kwargs):
    """Form upload page for a given student.

    See base.View.public() for more details.
    """
    template = 'modules/gci/student/submit_forms.html'

    context = responses.getUniversalContext(request)
    context['page_name'] = page_name

    logic = params['logic']
    entity = logic.getFromKeyFieldsOr404(kwargs)

    # Dispatch on HTTP method: POST stores uploads, GET renders the forms.
    if request.method == 'POST':
      return self.submitFormsPost(request, params, context, entity)
    else:
      return self.submitFormsGet(request, params, template, context, entity)

  def submitFormsGet(self, request, params, template, context, entity):
    # JSON requests only want a fresh blobstore upload URL.
    if lists.isJsonRequest(request):
      url = blobstore.create_upload_url(
          gci_redirects.getSubmitFormsRedirect(entity, params))
      return responses.jsonResponse(request, url)

    def setForm(param_name, blob_info):
      # Choose the edit form (file already uploaded) or the add form.
      add_form, edit_form = params[param_name]

      if blob_info:
        form = edit_form(initial={
            'name': blob_info.filename,
            'size': blob_info.size,
            'uploaded': blob_info.creation.strftime(self.DATETIME_FORMAT),
        })
      else:
        form = add_form()

      context[param_name] = form

    setForm('consent_form_upload_form', entity.consent_form)
    setForm('student_id_form_upload_form', entity.student_id_form)

    return responses.respond(request, template, context)

  def submitFormsPost(self, request, params, context, entity):
    # 'form' tells us which of the two upload fields was submitted.
    form = request.POST.get('form')

    if not (request.file_uploads and form):
      # no file uploaded
      logging.error("No file_uploads valid form value")
      return http.HttpResponseRedirect(
          gci_redirects.getSubmitFormsRedirect(entity, params))

    # TODO(SRabbelier): handle multiple file uploads
    upload = request.file_uploads[0]

    if form == 'consent':
      entity.consent_form = upload
    elif form=='student_id':
      entity.student_id_form = upload
    else:
      logging.warning("Invalid value for form '%s'" % form)

    entity.put()

    return http.HttpResponseRedirect(
        gci_redirects.getSubmitFormsRedirect(entity, params))
view = View()

# Public view entry points exported for the Django URL patterns.
apply = decorators.view(view.apply)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
# NOTE: intentionally shadows the builtin `list` at module level,
# matching the convention used by the other Melange view modules.
list = decorators.view(view.list)
public = decorators.view(view.public)
export = decorators.view(view.export)
submit_forms = decorators.view(view.submitForms)
Use the download_blob view in the student class for forms.
We also make the file name a link. Upon clicking it, the student
is redirected to download the corresponding file.
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI specific views for Student.
"""
# Module authors. NOTE: a comma was missing after the Sverre Rabbelier entry,
# which made Python implicitly concatenate the two adjacent string literals
# into one element and silently drop an author from the list.
__authors__ = [
    '"Madhusudan.C.S" <madhusudancs@gmail.com>',
    '"Daniel Hans" <daniel.m.hans@gmail.com>',
    '"Sverre Rabbelier" <sverre@rabbelier.nl>',
    '"Lennard de Rijk" <ljvderijk@gmail.com>',
  ]
import logging
from google.appengine.ext import blobstore
from django import forms
from django import http
from django.forms import fields as django_fields
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.views.helper import decorators
from soc.views.helper import dynaform
from soc.views.helper import lists
from soc.views.helper import params as params_helper
from soc.views.helper import redirects
from soc.views.helper import responses
from soc.views.helper import widgets
from soc.views.models import student
from soc.modules.gci.logic.models import mentor as gci_mentor_logic
from soc.modules.gci.logic.models import org_admin as gci_org_admin_logic
from soc.modules.gci.logic.models import program as gci_program_logic
from soc.modules.gci.logic.models.student import logic as gci_student_logic
from soc.modules.gci.views.helper import access as gci_access
from soc.modules.gci.views.helper import redirects as gci_redirects
from soc.modules.gci.views.models import program as gci_program_view
import soc.modules.gci.logic.models.student
class View(student.View):
  """View methods for the GCI Student model.
  """

  # Message shown when a student has no affiliated tasks.
  DEF_NO_TASKS_MSG = ugettext(
      'There are no tasks affiliated to you.')

  # strftime format for rendering blob upload timestamps.
  DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S' # '2006-10-25 14:30:59'

  def __init__(self, params=None):
    """Defines the fields and methods required for the student View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """

    # Extra URL patterns served on top of the base student view:
    # form upload and blob download.
    patterns = []
    patterns += [
        (r'^%(url_name)s/(?P<access_type>submit_forms)/%(key_fields)s$',
         '%(module_package)s.%(module_name)s.submit_forms',
         'Submit forms'),
        (r'^%(url_name)s/(?P<access_type>download_blob)/%(key_fields)s$',
         '%(module_package)s.%(module_name)s.download_blob',
         'Download the blob'),
    ]

    # Access-control checks per access type.
    rights = gci_access.GCIChecker(params)
    rights['edit'] = [('checkIsMyActiveRole', gci_student_logic)]
    rights['apply'] = [
        'checkIsUser',
        ('checkIsActivePeriod', ['student_signup', 'scope_path',
                                 gci_program_logic.logic]),
        ('checkIsNotParticipatingInProgramInScope',
         [gci_program_logic.logic, gci_student_logic,
          gci_org_admin_logic.logic, gci_mentor_logic.logic]),
        'checkCanApply']
    rights['manage'] = [('checkIsMyActiveRole', gci_student_logic)]
    rights['submit_forms'] = [('checkIsMyActiveRole', gci_student_logic)]
    rights['download_blob'] = [
        ('checkCanDownloadConsentForms', gci_student_logic)]

    new_params = {}
    new_params['logic'] = gci_student_logic
    new_params['rights'] = rights

    new_params['group_logic'] = gci_program_logic.logic
    new_params['group_view'] = gci_program_view.view
    new_params['scope_view'] = gci_program_view

    new_params['name'] = "GCI Student"
    new_params['module_name'] = "student"
    new_params['sidebar_grouping'] = 'Students'

    new_params['module_package'] = 'soc.modules.gci.views.models'
    new_params['url_name'] = 'gci/student'

    # Fields excluded from the auto-generated forms; the two blob fields
    # are handled by the dedicated upload forms built below.
    new_params['extra_dynaexclude'] = [
        'agreed_to_tos', 'school', 'parental_form_mail',
        'consent_form', 'student_id_form',
    ]

    new_params['extra_django_patterns'] = patterns

    params = dicts.merge(params, new_params, sub_merge=True)

    super(View, self).__init__(params=params)

    base_form = self._params['dynabase']
    gci_student_model = gci_student_logic.getModel()

    def getUploadForms(name, label, help_text):
      # Builds the (add_form, edit_form) pair used by the upload page.
      dynafields = [
          {'name': name,
           'base': forms.FileField,
           'label': label,
           'required': False,
           'help_text': help_text,
           }
      ]
      dynaproperties = params_helper.getDynaFields(dynafields)

      add_form = dynaform.newDynaForm(dynabase=base_form,
                                      dynaproperties=dynaproperties)

      # The edit form additionally shows read-only metadata about the
      # already-uploaded blob.
      dynaproperties = {
          'name': django_fields.CharField(
              label='Name', required=False,
              widget=widgets.HTMLTextWidget),
          'uploaded': django_fields.CharField(
              label='Uploaded on', required=False,
              widget=widgets.PlainTextWidget),
          'size': django_fields.CharField(
              label='Size', required=False,
              widget=widgets.PlainTextWidget),
      }
      edit_form = dynaform.extendDynaForm(
          add_form, dynaproperties=dynaproperties,
          dynainclude=['name', 'size', 'uploaded', name])

      return add_form, edit_form

    self._params['consent_form_upload_form'] = getUploadForms(
        'consent_form_upload', 'Consent Form',
        gci_student_model.consent_form.help_text)
    self._params['student_id_form_upload_form'] = getUploadForms(
        'student_id_form_upload', 'Student ID Form',
        gci_student_model.student_id_form.help_text)

  @decorators.merge_params
  @decorators.check_access
  def submitForms(self, request, access_type, page_name=None,
                  params=None, **kwargs):
    """Form upload page for a given student.

    See base.View.public() for more details.
    """

    template = 'modules/gci/student/submit_forms.html'

    context = responses.getUniversalContext(request)
    context['page_name'] = page_name

    logic = params['logic']
    entity = logic.getFromKeyFieldsOr404(kwargs)

    # POST stores an uploaded blob; GET renders the upload forms.
    if request.method == 'POST':
      return self.submitFormsPost(request, params, context, entity)
    else:
      return self.submitFormsGet(request, params, template, context, entity)

  def submitFormsGet(self, request, params, template, context, entity):
    """Renders the upload forms; JSON requests get a Blobstore upload URL."""
    if lists.isJsonRequest(request):
      url = blobstore.create_upload_url(
          gci_redirects.getSubmitFormsRedirect(entity, params))
      return responses.jsonResponse(request, url)

    def setForm(param_name, blob_info):
      # Puts the add form (no blob yet) or the edit form (blob metadata,
      # with the file name rendered as a download link) into the context.
      add_form, edit_form = params[param_name]
      if blob_info:
        form = edit_form(initial={
            'name': '<a href="%(url)s">%(name)s</a>' % {
                'name': blob_info.filename,
                'url': redirects.getDownloadBlobRedirectWithGet(
                    blob_info, params, scope_path=entity.key().id_or_name(), type=param_name)},
            'size': blob_info.size,
            'uploaded': blob_info.creation.strftime(self.DATETIME_FORMAT),
            })
      else:
        form = add_form()
      context[param_name] = form

    setForm('consent_form_upload_form', entity.consent_form)
    setForm('student_id_form_upload_form', entity.student_id_form)

    return responses.respond(request, template, context)

  def submitFormsPost(self, request, params, context, entity):
    """Stores an uploaded form blob on the student entity and redirects."""
    form = request.POST.get('form')

    if not (request.file_uploads and form):
      # no file uploaded
      logging.error("No file_uploads valid form value")
      return http.HttpResponseRedirect(
          gci_redirects.getSubmitFormsRedirect(entity, params))

    # TODO(SRabbelier): handle multiple file uploads
    upload = request.file_uploads[0]

    if form == 'consent':
      entity.consent_form = upload
    elif form=='student_id':
      entity.student_id_form = upload
    else:
      logging.warning("Invalid value for form '%s'" % form)

    entity.put()

    return http.HttpResponseRedirect(
        gci_redirects.getSubmitFormsRedirect(entity, params))
view = View()

# Module-level Django view callables, wrapped with the standard view decorator.
apply = decorators.view(view.apply)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
list = decorators.view(view.list)
public = decorators.view(view.public)
export = decorators.view(view.export)
submit_forms = decorators.view(view.submitForms)
# NOTE(review): downloadBlob is not defined in this class; presumably
# inherited from the base student.View — confirm.
download_blob = decorators.view(view.downloadBlob)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Cyril Bonté
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import cgi
import re
import time
import datetime
from optparse import OptionParser
from mako.template import Template
from mako.lookup import TemplateLookup
from mako.exceptions import TopLevelLookupException
from parser import PContext
from parser import remove_indent
from parser import *
from urllib import quote
VERSION = ""
HAPROXY_GIT_VERSION = False
def main():
    """Parse command-line options and convert each input file to HTML.

    Exits with status 1 when no input files are given or when the version
    cannot be determined from git.
    """
    global VERSION, HAPROXY_GIT_VERSION

    usage="Usage: %prog [options] file..."

    # Fixed typo in user-visible help text ("configuation" -> "configuration").
    optparser = OptionParser(description='Generate HTML Document from HAProxy configuration.txt',
                             version=VERSION,
                             usage=usage)
    optparser.add_option('--git-directory','-g', help='Optional git directory for input files, to determine haproxy details')
    optparser.add_option('--output-directory','-o', default='.', help='Destination directory to store files, instead of the current working directory')
    optparser.add_option('--base','-b', default = '', help='Base directory for relative links')
    (option, files) = optparser.parse_args()

    if not files:
        optparser.print_help()
        exit(1)

    # Resolve paths before chdir below invalidates relative ones.
    option.output_directory = os.path.abspath(option.output_directory)
    if option.git_directory:
        option.git_directory = os.path.abspath(option.git_directory)

    # os.path.dirname(__file__) is "" when the script is invoked from its own
    # directory (e.g. "python haproxy-dconv.py"); os.chdir("") raises OSError,
    # so resolve the script path to an absolute one first.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    VERSION = get_git_version()
    if not VERSION:
        sys.exit(1)

    HAPROXY_GIT_VERSION = get_haproxy_git_version(option.git_directory)

    convert_all(files, option.output_directory, option.base)
# Temporarily determine the version from git to follow which commit generated
# the documentation
def get_git_version():
    """Return this repository's version derived from `git describe`.

    Returns the describe output with its first character and any trailing
    '-g<sha>' suffix stripped, or None when the current directory is not a
    git repository or git cannot be run.
    """
    if not os.path.isdir(".git"):
        print >> sys.stderr, "This does not appear to be a Git repository."
        return
    try:
        p = subprocess.Popen(["git", "describe", "--tags", "--match", "v*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except EnvironmentError:
        print >> sys.stderr, "Unable to run git"
        return
    version = p.communicate()[0]
    if p.returncode != 0:
        print >> sys.stderr, "Unable to run git"
        return
    if len(version) < 2:
        return
    # Drop the leading "v" of the tag and the "-g<sha>" suffix appended by
    # `git describe` when commits exist after the tag.
    version = version[1:].strip()
    version = re.sub(r'-g.*', '', version)
    return version
def get_haproxy_git_version(path):
    """Return the haproxy version from `git describe` run in *path*.

    Returns the version string (leading character and '-g<sha>' suffix
    stripped) or False when *path* is empty or git fails.
    """
    if not path:
        return False

    try:
        proc = subprocess.Popen(["git", "describe", "--tags", "--match", "v*"], cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except EnvironmentError:
        return False

    output = proc.communicate()[0]
    if proc.returncode != 0 or len(output) < 2:
        return False

    return re.sub(r'-g.*', '', output[1:].strip())
def getTitleDetails(string):
    """Split a numbered title line into title, chapter id, level and toplevel.

    e.g. "7.1. Quoting" -> title "Quoting", chapter "7.1", level 2,
    toplevel "7". An unnumbered line yields chapter "" and toplevel False.
    """
    parts = string.split(".")
    # Everything after the last dot is the title; the rest is the chapter id.
    title = parts.pop().strip()
    return {
        "title"   : title,
        "chapter" : ".".join(parts),
        "level"   : max(1, len(parts)),
        "toplevel": parts[0] if parts else False,
    }
# Parse the whole document to insert links on keywords
def createLinks():
    """Replace keyword occurrences in the global document with HTML links.

    Keywords found in a single chapter become plain anchors; keywords in
    keyword_conflicts (defined in several chapters) become dropdown menus
    listing every chapter. Also links the short form of "option ..."
    keywords. Updates keywordsCount and keyword_conflicts as side effects.
    """
    global document, keywords, keywordsCount, keyword_conflicts, chapters

    print >> sys.stderr, "Generating keywords links..."

    # A keyword is only linked when it is wrapped by one of those delimiters.
    delimiters = [
        dict(start='"', end='"', multi=True ),
        dict(start='- ' , end='\n' , multi=False),
    ]
    for keyword in keywords:
        keywordsCount[keyword] = 0
        for delimiter in delimiters:
            keywordsCount[keyword] += document.count(delimiter['start'] + keyword + delimiter['end'])
        if (keyword in keyword_conflicts) and (not keywordsCount[keyword]):
            # The keyword is never used, we can remove it from the conflicts list
            del keyword_conflicts[keyword]

        if keyword in keyword_conflicts:
            chapter_list = ""
            for chapter in keyword_conflicts[keyword]:
                chapter_list += '<li><a href="#%s">%s</a></li>' % (quote("%s (%s)" % (keyword, chapters[chapter]['title'])), chapters[chapter]['title'])
            for delimiter in delimiters:
                if delimiter['multi']:
                    document = document.replace(delimiter['start'] + keyword + delimiter['end'],
                            delimiter['start'] + '<span class="dropdown">' +
                            '<a class="dropdown-toggle" data-toggle="dropdown" href="#">' +
                            keyword +
                            '<span class="caret"></span>' +
                            '</a>' +
                            '<ul class="dropdown-menu">' +
                            '<li class="dropdown-header">This keyword is available in sections :</li>' +
                            chapter_list +
                            '</ul>' +
                            '</span>' + delimiter['end'])
                else:
                    document = document.replace(delimiter['start'] + keyword + delimiter['end'], delimiter['start'] + '<a href="#' + quote(keyword) + '">' + keyword + '</a>' + delimiter['end'])
        else:
            for delimiter in delimiters:
                document = document.replace(delimiter['start'] + keyword + delimiter['end'], delimiter['start'] + '<a href="#' + quote(keyword) + '">' + keyword + '</a>' + delimiter['end'])
        if keyword.startswith("option "):
            shortKeyword = keyword[len("option "):]
            keywordsCount[shortKeyword] = 0
            for delimiter in delimiters:
                # BUG FIX: this previously incremented keywordsCount[keyword],
                # leaving keywordsCount[shortKeyword] stuck at 0 so the
                # conflict-pruning test below always fired.
                keywordsCount[shortKeyword] += document.count(delimiter['start'] + shortKeyword + delimiter['end'])
            if (shortKeyword in keyword_conflicts) and (not keywordsCount[shortKeyword]):
                # The keyword is never used, we can remove it from the conflicts list
                del keyword_conflicts[shortKeyword]
            for delimiter in delimiters:
                # BUG FIX: the search string previously ended with
                # delimiter['start'] instead of delimiter['end'], so the
                # short-form replacement never matched (compare the
                # single-chapter replacement above).
                document = document.replace(delimiter['start'] + shortKeyword + delimiter['end'], delimiter['start'] + '<a href="#' + quote(keyword) + '">' + shortKeyword + '</a>' + delimiter['end'])
def documentAppend(text, retline = True):
    """Append *text* to the global document, followed by a newline unless
    *retline* is False."""
    global document
    suffix = "\n" if retline else ""
    document += text + suffix
def init_parsers(pctxt):
    """Build the ordered list of line parsers applied to each document line.

    NOTE(review): underline.Parser appears twice (before and after the
    middle parsers); presumably intentional so underlines are handled both
    before and after table/example parsing — confirm.
    """
    return [
        underline.Parser(pctxt),
        arguments.Parser(pctxt),
        seealso.Parser(pctxt),
        example.Parser(pctxt),
        table.Parser(pctxt),
        underline.Parser(pctxt),
        keyword.Parser(pctxt),
    ]
# The parser itself
def convert_all(infiles, outdir, base=''):
    """Convert every input text file into an HTML file under *outdir*."""
    for source in infiles:
        # foo.txt -> <outdir>/foo.html
        target = os.path.join(
            outdir,
            os.path.basename(source).replace(".txt", ".html")
        )
        convert(source, target, base)
def convert(infile, outfile, base=''):
    """Parse one haproxy documentation text file and render it to *outfile*.

    Scans the file for underlined chapter titles, builds the chapter index,
    runs every registered line parser over the content, links keywords, and
    finally renders the HTML via the mako templates.
    """
    global document, keywords, keywordsCount, chapters, keyword_conflicts

    # NOTE(review): base[:-1] drops the LAST character, so this condition is
    # true for any multi-character base; presumably base[-1] was intended
    # ("append '/' unless it already ends with one") — confirm.
    if len(base) > 0 and base[:-1] != '/':
        base += '/'

    hasSummary = False

    # Read the whole input file into a list of rstripped lines.
    data = []
    fd = file(infile,"r")
    for line in fd:
        # NOTE(review): the replace() result is discarded (strings are
        # immutable), so tabs are never expanded; likely meant
        # line = line.replace(...) — confirm.
        line.replace("\t", " " * 8)
        line = line.rstrip()
        data.append(line)
    fd.close()

    pctxt = PContext(
        TemplateLookup(
            directories=[
                'templates'
            ]
        )
    )

    parsers = init_parsers(pctxt)

    pctxt.context = {
        'headers': {},
        'document': "",
        'base': base,
    }

    sections = []
    currentSection = {
        "details": getTitleDetails(""),
        "content": "",
    }

    chapters = {}

    keywords = {}
    keywordsCount = {}

    # Chapters with per-section parsing behavior; "default" applies elsewhere.
    specialSections = {
        "default": {
            "hasKeywords": True,
        },
        "4.1": {
            "hasKeywords": True,
        },
    }

    pctxt.keywords = keywords
    pctxt.keywordsCount = keywordsCount
    pctxt.chapters = chapters

    print >> sys.stderr, "Importing %s..." % infile

    # First pass: split the file into sections on underlined title lines.
    nblines = len(data)
    i = j = 0
    while i < nblines:
        line = data[i].rstrip()
        if i < nblines - 1:
            next = data[i + 1].rstrip()
        else:
            next = ""
        if (line == "Summary" or re.match("^[0-9].*", line)) and (len(next) > 0) and (next[0] == '-') \
                and ("-" * len(line)).startswith(next): # Fuzzy underline length detection
            sections.append(currentSection)
            currentSection = {
                "details": getTitleDetails(line),
                "content": "",
            }
            j = 0
            i += 1 # Skip underline
            while not data[i + 1].rstrip():
                i += 1 # Skip empty lines
        else:
            if len(line) > 80:
                print >> sys.stderr, "Line `%i' exceeds 80 columns" % (i + 1)

            currentSection["content"] = currentSection["content"] + line + "\n"
            j += 1
            if currentSection["details"]["title"] == "Summary" and line != "":
                hasSummary = True
                # Learn chapters from the summary
                details = getTitleDetails(line)
                if details["chapter"]:
                    chapters[details["chapter"]] = details
        i += 1
    sections.append(currentSection)

    # Sort chapter ids numerically component by component ("10" after "9").
    chapterIndexes = sorted(chapters.keys(), key=lambda chapter: map(int, chapter.split('.')))

    document = ""

    # Complete the summary
    for section in sections:
        details = section["details"]
        title = details["title"]
        if title:
            fulltitle = title
            if details["chapter"]:
                #documentAppend("<a name=\"%s\"></a>" % details["chapter"])
                fulltitle = details["chapter"] + ". " + title
                if not details["chapter"] in chapters:
                    print >> sys.stderr, "Adding '%s' to the summary" % details["title"]
                    chapters[details["chapter"]] = details
                    chapterIndexes = sorted(chapters.keys())

    # Second pass: render every section through the line parsers.
    for section in sections:
        details = section["details"]
        pctxt.details = details
        level = details["level"]
        title = details["title"]
        content = section["content"].rstrip()

        print >> sys.stderr, "Parsing chapter %s..." % title

        if (title == "Summary") or (title and not hasSummary):
            summaryTemplate = pctxt.templates.get_template('summary.html')
            documentAppend(summaryTemplate.render(
                pctxt = pctxt,
                chapters = chapters,
                chapterIndexes = chapterIndexes,
            ))
            if title and not hasSummary:
                hasSummary = True
            else:
                continue

        if title:
            documentAppend('<a class="anchor" id="%s" name="%s"></a>' % (details["chapter"], details["chapter"]))
            if level == 1:
                documentAppend("<div class=\"page-header\">", False)
            documentAppend('<h%d id="chapter-%s" data-target="%s"><small><a class="small" href="#%s">%s.</a></small> %s</h%d>' % (level, details["chapter"], details["chapter"], details["chapter"], details["chapter"], cgi.escape(title, True), level))
            if level == 1:
                documentAppend("</div>", False)

        if content:
            # Dead code kept as-is (guarded by "if False").
            if False and title:
                # Display a navigation bar
                documentAppend('<ul class="well pager">')
                documentAppend('<li><a href="#top">Top</a></li>', False)
                index = chapterIndexes.index(details["chapter"])
                if index > 0:
                    documentAppend('<li class="previous"><a href="#%s">Previous</a></li>' % chapterIndexes[index - 1], False)
                if index < len(chapterIndexes) - 1:
                    documentAppend('<li class="next"><a href="#%s">Next</a></li>' % chapterIndexes[index + 1], False)
                documentAppend('</ul>', False)
            content = cgi.escape(content, True)
            # Turn "section X.Y" references into internal links.
            content = re.sub(r'section ([0-9]+(.[0-9]+)*)', r'<a href="#\1">section \1</a>', content)

            pctxt.set_content(content)

            if not title:
                # The untitled leading section holds the document header.
                lines = pctxt.get_lines()
                pctxt.context['headers'] = {
                    'title': '',
                    'subtitle': '',
                    'version': '',
                    'author': '',
                    'date': ''
                }
                if re.match("^-+$", pctxt.get_line().strip()):
                    # Try to analyze the header of the file, assuming it follows
                    # those rules :
                    # - it begins with a "separator line" (several '-' chars)
                    # - then the document title
                    # - an optional subtitle
                    # - a new separator line
                    # - the version
                    # - the author
                    # - the date
                    pctxt.next()
                    pctxt.context['headers']['title'] = pctxt.get_line().strip()
                    pctxt.next()
                    subtitle = ""
                    while not re.match("^-+$", pctxt.get_line().strip()):
                        subtitle += " " + pctxt.get_line().strip()
                        pctxt.next()
                    pctxt.context['headers']['subtitle'] += subtitle.strip()
                    if not pctxt.context['headers']['subtitle']:
                        # No subtitle, try to guess one from the title if it
                        # starts with the word "HAProxy"
                        if pctxt.context['headers']['title'].startswith('HAProxy '):
                            pctxt.context['headers']['subtitle'] = pctxt.context['headers']['title'][8:]
                            pctxt.context['headers']['title'] = 'HAProxy'
                    pctxt.next()
                    pctxt.context['headers']['version'] = pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['author'] = pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['date'] = pctxt.get_line().strip()
                    pctxt.next()
                    if HAPROXY_GIT_VERSION:
                        pctxt.context['headers']['version'] = 'version ' + HAPROXY_GIT_VERSION

                    # Skip header lines
                    pctxt.eat_lines()
                    pctxt.eat_empty_lines()

            documentAppend('<div>', False)

            # Unparsed lines are buffered in `delay` and flushed as one
            # <pre> block whenever a parser produces output.
            delay = []
            while pctxt.has_more_lines():
                try:
                    specialSection = specialSections[details["chapter"]]
                except:
                    specialSection = specialSections["default"]

                line = pctxt.get_line()
                # NOTE(review): `i` is the leftover index of the earlier scan
                # loop (== nblines here), so this branch never runs and
                # nextline is always "" — looks vestigial; confirm.
                if i < nblines - 1:
                    nextline = pctxt.get_line(1)
                else:
                    nextline = ""

                oldline = line
                pctxt.stop = False
                for parser in parsers:
                    line = parser.parse(line)
                    if pctxt.stop:
                        break
                if oldline == line:
                    # nothing has changed,
                    # delays the rendering
                    if delay or line != "":
                        delay.append(line)
                    pctxt.next()
                elif pctxt.stop:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        remove_indent(delay)
                        documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False)
                    delay = []
                    documentAppend(line, False)
                else:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        remove_indent(delay)
                        documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False)
                    delay = []
                    documentAppend(line, True)
                    pctxt.next()

            # Flush any remaining buffered lines.
            while delay and delay[-1].strip() == "":
                del delay[-1]
            if delay:
                remove_indent(delay)
                documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False)
            delay = []
            documentAppend('</div>')

    if not hasSummary:
        summaryTemplate = pctxt.templates.get_template('summary.html')
        # NOTE(review): bare `print chapters` looks like a debug leftover.
        print chapters
        document = summaryTemplate.render(
            pctxt = pctxt,
            chapters = chapters,
            chapterIndexes = chapterIndexes,
        ) + document

    # Log warnings for keywords defined in several chapters
    keyword_conflicts = {}
    for keyword in keywords:
        keyword_chapters = list(keywords[keyword])
        keyword_chapters.sort()
        if len(keyword_chapters) > 1:
            print >> sys.stderr, 'Multi section keyword : "%s" in chapters %s' % (keyword, list(keyword_chapters))
            keyword_conflicts[keyword] = keyword_chapters

    keywords = list(keywords)
    keywords.sort()

    createLinks()

    # Add the keywords conflicts to the keywords list to make them available in the search form
    # And remove the original keyword which is now useless
    for keyword in keyword_conflicts:
        sections = keyword_conflicts[keyword]
        offset = keywords.index(keyword)
        for section in sections:
            keywords.insert(offset, "%s (%s)" % (keyword, chapters[section]['title']))
            offset += 1
        keywords.remove(keyword)

    print >> sys.stderr, "Exporting to %s..." % outfile

    template = pctxt.templates.get_template('template.html')
    try:
        footerTemplate = pctxt.templates.get_template('footer.html')
        footer = footerTemplate.render(
            pctxt = pctxt,
            headers = pctxt.context['headers'],
            document = document,
            chapters = chapters,
            chapterIndexes = chapterIndexes,
            keywords = keywords,
            keywordsCount = keywordsCount,
            keyword_conflicts = keyword_conflicts,
            version = VERSION,
            date = datetime.datetime.now().strftime("%Y/%m/%d"),
        )
    except TopLevelLookupException:
        # No footer template available: render without one.
        footer = ""

    fd = open(outfile,'w')
    print >> fd, template.render(
        pctxt = pctxt,
        headers = pctxt.context['headers'],
        base = base,
        document = document,
        chapters = chapters,
        chapterIndexes = chapterIndexes,
        keywords = keywords,
        keywordsCount = keywordsCount,
        keyword_conflicts = keyword_conflicts,
        version = VERSION,
        date = datetime.datetime.now().strftime("%Y/%m/%d"),
        footer = footer
    )
    fd.close()
# Script entry point.
if __name__ == '__main__':
    main()
Delay the rendering of files until everything is parsed.
Delaying the rendering makes it possible to generate a navigation menu for
switching from one file to another, with titles guessed from the parsed data.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Cyril Bonté
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import cgi
import re
import time
import datetime
from optparse import OptionParser
from mako.template import Template
from mako.lookup import TemplateLookup
from mako.exceptions import TopLevelLookupException
from parser import PContext
from parser import remove_indent
from parser import *
from urllib import quote
VERSION = ""
HAPROXY_GIT_VERSION = False
def main():
    """Parse command-line options and convert each input file to HTML.

    Exits with status 1 when no input files are given or when the version
    cannot be determined from git.
    """
    global VERSION, HAPROXY_GIT_VERSION

    usage="Usage: %prog [options] file..."

    # Fixed typo in user-visible help text ("configuation" -> "configuration").
    optparser = OptionParser(description='Generate HTML Document from HAProxy configuration.txt',
                             version=VERSION,
                             usage=usage)
    optparser.add_option('--git-directory','-g', help='Optional git directory for input files, to determine haproxy details')
    optparser.add_option('--output-directory','-o', default='.', help='Destination directory to store files, instead of the current working directory')
    optparser.add_option('--base','-b', default = '', help='Base directory for relative links')
    (option, files) = optparser.parse_args()

    if not files:
        optparser.print_help()
        exit(1)

    # Resolve paths before chdir below invalidates relative ones.
    option.output_directory = os.path.abspath(option.output_directory)
    if option.git_directory:
        option.git_directory = os.path.abspath(option.git_directory)

    # os.path.dirname(__file__) is "" when the script is invoked from its own
    # directory (e.g. "python haproxy-dconv.py"); os.chdir("") raises OSError,
    # so resolve the script path to an absolute one first.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    VERSION = get_git_version()
    if not VERSION:
        sys.exit(1)

    HAPROXY_GIT_VERSION = get_haproxy_git_version(option.git_directory)

    convert_all(files, option.output_directory, option.base)
# Temporarily determine the version from git to follow which commit generated
# the documentation
def get_git_version():
    """Return this repository's version derived from `git describe`.

    Returns the describe output with its first character and any trailing
    '-g<sha>' suffix stripped, or None when the current directory is not a
    git repository or git cannot be run.
    """
    if not os.path.isdir(".git"):
        print >> sys.stderr, "This does not appear to be a Git repository."
        return
    try:
        p = subprocess.Popen(["git", "describe", "--tags", "--match", "v*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except EnvironmentError:
        print >> sys.stderr, "Unable to run git"
        return
    version = p.communicate()[0]
    if p.returncode != 0:
        print >> sys.stderr, "Unable to run git"
        return
    if len(version) < 2:
        return
    # Drop the leading "v" of the tag and the "-g<sha>" suffix appended by
    # `git describe` when commits exist after the tag.
    version = version[1:].strip()
    version = re.sub(r'-g.*', '', version)
    return version
def get_haproxy_git_version(path):
    """Return the haproxy version from `git describe` run in *path*.

    Returns the version string (leading character and '-g<sha>' suffix
    stripped) or False when *path* is empty or git fails.
    """
    if not path:
        return False

    try:
        proc = subprocess.Popen(["git", "describe", "--tags", "--match", "v*"], cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except EnvironmentError:
        return False

    output = proc.communicate()[0]
    if proc.returncode != 0 or len(output) < 2:
        return False

    return re.sub(r'-g.*', '', output[1:].strip())
def getTitleDetails(string):
    """Split a numbered title line into title, chapter id, level and toplevel.

    e.g. "7.1. Quoting" -> title "Quoting", chapter "7.1", level 2,
    toplevel "7". An unnumbered line yields chapter "" and toplevel False.
    """
    parts = string.split(".")
    # Everything after the last dot is the title; the rest is the chapter id.
    title = parts.pop().strip()
    return {
        "title"   : title,
        "chapter" : ".".join(parts),
        "level"   : max(1, len(parts)),
        "toplevel": parts[0] if parts else False,
    }
# Parse the whole document to insert links on keywords
def createLinks():
    """Replace keyword occurrences in the global document with HTML links.

    Keywords found in a single chapter become plain anchors; keywords in
    keyword_conflicts (defined in several chapters) become dropdown menus
    listing every chapter. Also links the short form of "option ..."
    keywords. Updates keywordsCount and keyword_conflicts as side effects.
    """
    global document, keywords, keywordsCount, keyword_conflicts, chapters

    print >> sys.stderr, "Generating keywords links..."

    # A keyword is only linked when it is wrapped by one of those delimiters.
    delimiters = [
        dict(start='"', end='"', multi=True ),
        dict(start='- ' , end='\n' , multi=False),
    ]
    for keyword in keywords:
        keywordsCount[keyword] = 0
        for delimiter in delimiters:
            keywordsCount[keyword] += document.count(delimiter['start'] + keyword + delimiter['end'])
        if (keyword in keyword_conflicts) and (not keywordsCount[keyword]):
            # The keyword is never used, we can remove it from the conflicts list
            del keyword_conflicts[keyword]

        if keyword in keyword_conflicts:
            chapter_list = ""
            for chapter in keyword_conflicts[keyword]:
                chapter_list += '<li><a href="#%s">%s</a></li>' % (quote("%s (%s)" % (keyword, chapters[chapter]['title'])), chapters[chapter]['title'])
            for delimiter in delimiters:
                if delimiter['multi']:
                    document = document.replace(delimiter['start'] + keyword + delimiter['end'],
                            delimiter['start'] + '<span class="dropdown">' +
                            '<a class="dropdown-toggle" data-toggle="dropdown" href="#">' +
                            keyword +
                            '<span class="caret"></span>' +
                            '</a>' +
                            '<ul class="dropdown-menu">' +
                            '<li class="dropdown-header">This keyword is available in sections :</li>' +
                            chapter_list +
                            '</ul>' +
                            '</span>' + delimiter['end'])
                else:
                    document = document.replace(delimiter['start'] + keyword + delimiter['end'], delimiter['start'] + '<a href="#' + quote(keyword) + '">' + keyword + '</a>' + delimiter['end'])
        else:
            for delimiter in delimiters:
                document = document.replace(delimiter['start'] + keyword + delimiter['end'], delimiter['start'] + '<a href="#' + quote(keyword) + '">' + keyword + '</a>' + delimiter['end'])
        if keyword.startswith("option "):
            shortKeyword = keyword[len("option "):]
            keywordsCount[shortKeyword] = 0
            for delimiter in delimiters:
                # BUG FIX: this previously incremented keywordsCount[keyword],
                # leaving keywordsCount[shortKeyword] stuck at 0 so the
                # conflict-pruning test below always fired.
                keywordsCount[shortKeyword] += document.count(delimiter['start'] + shortKeyword + delimiter['end'])
            if (shortKeyword in keyword_conflicts) and (not keywordsCount[shortKeyword]):
                # The keyword is never used, we can remove it from the conflicts list
                del keyword_conflicts[shortKeyword]
            for delimiter in delimiters:
                # BUG FIX: the search string previously ended with
                # delimiter['start'] instead of delimiter['end'], so the
                # short-form replacement never matched (compare the
                # single-chapter replacement above).
                document = document.replace(delimiter['start'] + shortKeyword + delimiter['end'], delimiter['start'] + '<a href="#' + quote(keyword) + '">' + shortKeyword + '</a>' + delimiter['end'])
def documentAppend(text, retline = True):
    """Append *text* to the global document, followed by a newline unless
    *retline* is False."""
    global document
    suffix = "\n" if retline else ""
    document += text + suffix
def init_parsers(pctxt):
    """Build the ordered list of line parsers applied to each document line.

    NOTE(review): underline.Parser appears twice (before and after the
    middle parsers); presumably intentional so underlines are handled both
    before and after table/example parsing — confirm.
    """
    return [
        underline.Parser(pctxt),
        arguments.Parser(pctxt),
        seealso.Parser(pctxt),
        example.Parser(pctxt),
        table.Parser(pctxt),
        underline.Parser(pctxt),
        keyword.Parser(pctxt),
    ]
# The parser itself
def convert_all(infiles, outdir, base=''):
    """Parse every input file first, then render all results to HTML.

    Parsing and rendering are two separate passes so that rendering can use
    information gathered from all files (e.g. a cross-file navigation menu).
    """
    converted = []
    for infile in infiles:
        # foo.txt -> <outdir>/foo.html
        outfile = os.path.join(
            outdir,
            os.path.basename(infile).replace(".txt", ".html")
        )
        # A fresh parse context per input file.
        pctxt = PContext(
            TemplateLookup(
                directories=[
                    'templates'
                ]
            )
        )
        converted.append((outfile, convert(pctxt, infile, outfile, base)))

    for item in converted:
        outfile, data = item
        print >> sys.stderr, "Exporting to %s..." % outfile
        # NOTE(review): this uses the pctxt of the *last* parsed file for the
        # template lookup (and would raise NameError if infiles were empty,
        # though main() guards against that) — confirm the reuse is intended.
        template = pctxt.templates.get_template('template.html')
        with open(outfile,'w') as fd:
            print >> fd, template.render(**data)
def convert(pctxt, infile, outfile, base=''):
    """Convert an HAProxy plain-text documentation file into an HTML document.

    The file is split into sections delimited by underlined titles, each
    section's content is pushed through the registered parsers, and the
    result is assembled with the summary and footer templates.

    :param pctxt: parser context carrying templates, cursor and keyword state
    :param infile: path of the text documentation to import
    :param outfile: unused in this function (kept for interface compatibility)
    :param base: base URL prefix; a trailing '/' is ensured when non-empty
    :returns: dict with everything a page template needs (document, chapters,
        keywords, headers, footer, ...)
    """
    global document, keywords, keywordsCount, chapters, keyword_conflicts

    # BUG FIX: the original tested base[:-1] (everything but the last char),
    # so a base already ending in '/' got a second one appended.
    if len(base) > 0 and base[-1] != '/':
        base += '/'

    hasSummary = False

    # Load the file, expanding tabs and stripping trailing whitespace.
    data = []
    fd = open(infile, "r")
    for line in fd:
        # BUG FIX: str.replace returns a new string; the original call
        # discarded its result, so tabs were never actually expanded.
        line = line.replace("\t", " " * 8)
        line = line.rstrip()
        data.append(line)
    fd.close()

    parsers = init_parsers(pctxt)

    pctxt.context = {
        'headers': {},
        'document': "",
        'base': base,
    }

    sections = []
    currentSection = {
        "details": getTitleDetails(""),
        "content": "",
    }

    chapters = {}

    keywords = {}
    keywordsCount = {}

    # Per-chapter parsing rules; chapters not listed use "default".
    specialSections = {
        "default": {
            "hasKeywords": True,
        },
        "4.1": {
            "hasKeywords": True,
        },
    }

    pctxt.keywords = keywords
    pctxt.keywordsCount = keywordsCount
    pctxt.chapters = chapters

    print >> sys.stderr, "Importing %s..." % infile

    nblines = len(data)
    i = j = 0
    while i < nblines:
        line = data[i].rstrip()
        if i < nblines - 1:
            next = data[i + 1].rstrip()
        else:
            next = ""
        if (line == "Summary" or re.match("^[0-9].*", line)) and (len(next) > 0) and (next[0] == '-') \
                and ("-" * len(line)).startswith(next):  # Fuzzy underline length detection
            sections.append(currentSection)
            currentSection = {
                "details": getTitleDetails(line),
                "content": "",
            }
            j = 0
            i += 1  # Skip underline
            # BUG FIX: bounds check so a title/underline pair at the very end
            # of the file no longer raises IndexError on the lookahead.
            while i + 1 < nblines and not data[i + 1].rstrip():
                i += 1  # Skip empty lines
        else:
            if len(line) > 80:
                print >> sys.stderr, "Line `%i' exceeds 80 columns" % (i + 1)

            currentSection["content"] = currentSection["content"] + line + "\n"
            j += 1
            if currentSection["details"]["title"] == "Summary" and line != "":
                hasSummary = True
                # Learn chapters from the summary
                details = getTitleDetails(line)
                if details["chapter"]:
                    chapters[details["chapter"]] = details
        i += 1
    sections.append(currentSection)

    # Numeric sort so "1.10" sorts after "1.9", not lexicographically.
    chapterIndexes = sorted(chapters.keys(), key=lambda chapter: map(int, chapter.split('.')))

    document = ""

    # Complete the summary with chapters that were not listed in it.
    for section in sections:
        details = section["details"]
        title = details["title"]
        if title:
            fulltitle = title
            if details["chapter"]:
                #documentAppend("<a name=\"%s\"></a>" % details["chapter"])
                fulltitle = details["chapter"] + ". " + title
                if not details["chapter"] in chapters:
                    print >> sys.stderr, "Adding '%s' to the summary" % details["title"]
                    chapters[details["chapter"]] = details
                    # NOTE(review): this re-sort is plain lexicographic,
                    # unlike the numeric sort above -- confirm intent.
                    chapterIndexes = sorted(chapters.keys())

    for section in sections:
        details = section["details"]
        pctxt.details = details
        level = details["level"]
        title = details["title"]
        content = section["content"].rstrip()

        print >> sys.stderr, "Parsing chapter %s..." % title

        if (title == "Summary") or (title and not hasSummary):
            # Render the summary before the first titled chapter when the
            # document has no explicit "Summary" section.
            summaryTemplate = pctxt.templates.get_template('summary.html')
            documentAppend(summaryTemplate.render(
                pctxt=pctxt,
                chapters=chapters,
                chapterIndexes=chapterIndexes,
            ))
            if title and not hasSummary:
                hasSummary = True
            else:
                continue

        if title:
            documentAppend('<a class="anchor" id="%s" name="%s"></a>' % (details["chapter"], details["chapter"]))
            if level == 1:
                documentAppend("<div class=\"page-header\">", False)
            documentAppend('<h%d id="chapter-%s" data-target="%s"><small><a class="small" href="#%s">%s.</a></small> %s</h%d>' % (level, details["chapter"], details["chapter"], details["chapter"], details["chapter"], cgi.escape(title, True), level))
            if level == 1:
                documentAppend("</div>", False)

        if content:
            if False and title:
                # Display a navigation bar
                documentAppend('<ul class="well pager">')
                documentAppend('<li><a href="#top">Top</a></li>', False)
                index = chapterIndexes.index(details["chapter"])
                if index > 0:
                    documentAppend('<li class="previous"><a href="#%s">Previous</a></li>' % chapterIndexes[index - 1], False)
                if index < len(chapterIndexes) - 1:
                    documentAppend('<li class="next"><a href="#%s">Next</a></li>' % chapterIndexes[index + 1], False)
                documentAppend('</ul>', False)
            content = cgi.escape(content, True)
            content = re.sub(r'section ([0-9]+(.[0-9]+)*)', r'<a href="#\1">section \1</a>', content)

            pctxt.set_content(content)

            if not title:
                # Untitled leading chunk: parse the document header from it.
                lines = pctxt.get_lines()
                pctxt.context['headers'] = {
                    'title': '',
                    'subtitle': '',
                    'version': '',
                    'author': '',
                    'date': ''
                }
                if re.match("^-+$", pctxt.get_line().strip()):
                    # Try to analyze the header of the file, assuming it follows
                    # those rules :
                    # - it begins with a "separator line" (several '-' chars)
                    # - then the document title
                    # - an optional subtitle
                    # - a new separator line
                    # - the version
                    # - the author
                    # - the date
                    pctxt.next()
                    pctxt.context['headers']['title'] = pctxt.get_line().strip()
                    pctxt.next()
                    subtitle = ""
                    while not re.match("^-+$", pctxt.get_line().strip()):
                        subtitle += " " + pctxt.get_line().strip()
                        pctxt.next()
                    pctxt.context['headers']['subtitle'] += subtitle.strip()
                    if not pctxt.context['headers']['subtitle']:
                        # No subtitle, try to guess one from the title if it
                        # starts with the word "HAProxy"
                        if pctxt.context['headers']['title'].startswith('HAProxy '):
                            pctxt.context['headers']['subtitle'] = pctxt.context['headers']['title'][8:]
                            pctxt.context['headers']['title'] = 'HAProxy'
                    pctxt.next()
                    pctxt.context['headers']['version'] = pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['author'] = pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['date'] = pctxt.get_line().strip()
                    pctxt.next()
                    if HAPROXY_GIT_VERSION:
                        pctxt.context['headers']['version'] = 'version ' + HAPROXY_GIT_VERSION

                # Skip header lines
                pctxt.eat_lines()
                pctxt.eat_empty_lines()

            documentAppend('<div>', False)

            delay = []
            while pctxt.has_more_lines():
                try:
                    specialSection = specialSections[details["chapter"]]
                # BUG FIX: narrowed a bare "except:" -- only a missing chapter
                # key should fall back to the default rules.
                except KeyError:
                    specialSection = specialSections["default"]

                line = pctxt.get_line()
                if i < nblines - 1:
                    nextline = pctxt.get_line(1)
                else:
                    nextline = ""
                oldline = line
                pctxt.stop = False
                for parser in parsers:
                    line = parser.parse(line)
                    if pctxt.stop:
                        break
                if oldline == line:
                    # nothing has changed,
                    # delays the rendering
                    if delay or line != "":
                        delay.append(line)
                    pctxt.next()
                elif pctxt.stop:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        remove_indent(delay)
                        documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False)
                    delay = []
                    documentAppend(line, False)
                else:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        remove_indent(delay)
                        documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False)
                    delay = []
                    documentAppend(line, True)
                    pctxt.next()

            # Flush any delayed preformatted block at the end of the section.
            while delay and delay[-1].strip() == "":
                del delay[-1]
            if delay:
                remove_indent(delay)
                documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False)
            delay = []
            documentAppend('</div>')

    if not hasSummary:
        summaryTemplate = pctxt.templates.get_template('summary.html')
        # BUG FIX: removed a stray debug "print chapters" that dumped the raw
        # chapter dict to stdout.
        document = summaryTemplate.render(
            pctxt=pctxt,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
        ) + document

    # Log warnings for keywords defined in several chapters
    keyword_conflicts = {}
    for keyword in keywords:
        keyword_chapters = list(keywords[keyword])
        keyword_chapters.sort()
        if len(keyword_chapters) > 1:
            print >> sys.stderr, 'Multi section keyword : "%s" in chapters %s' % (keyword, list(keyword_chapters))
            keyword_conflicts[keyword] = keyword_chapters

    keywords = list(keywords)
    keywords.sort()

    createLinks()

    # Add the keywords conflicts to the keywords list to make them available in the search form
    # And remove the original keyword which is now useless
    for keyword in keyword_conflicts:
        sections = keyword_conflicts[keyword]
        offset = keywords.index(keyword)
        for section in sections:
            keywords.insert(offset, "%s (%s)" % (keyword, chapters[section]['title']))
            offset += 1
        keywords.remove(keyword)

    try:
        footerTemplate = pctxt.templates.get_template('footer.html')
        footer = footerTemplate.render(
            pctxt=pctxt,
            headers=pctxt.context['headers'],
            document=document,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
            keywords=keywords,
            keywordsCount=keywordsCount,
            keyword_conflicts=keyword_conflicts,
            version=VERSION,
            date=datetime.datetime.now().strftime("%Y/%m/%d"),
        )
    except TopLevelLookupException:
        # No footer template provided: render the page without one.
        footer = ""

    return {
        'pctxt': pctxt,
        'headers': pctxt.context['headers'],
        'base': base,
        'document': document,
        'chapters': chapters,
        'chapterIndexes': chapterIndexes,
        'keywords': keywords,
        'keywordsCount': keywordsCount,
        'keyword_conflicts': keyword_conflicts,
        'version': VERSION,
        'date': datetime.datetime.now().strftime("%Y/%m/%d"),
        'footer': footer
    }
# Entry point: run the converter CLI only when executed directly,
# so the module stays importable as a library without side effects.
if __name__ == '__main__':
    main()
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
import glob
import logging
import os
import os.path
import re
import shutil
import six
import tempfile
import time
import yaml
from heatclient.common import template_utils
from heatclient import exc as hc_exc
from keystoneclient import exceptions as kscexc
from os_cloud_config import keystone
from os_cloud_config import keystone_pki
from os_cloud_config.utils import clients as occ_clients
from osc_lib.command import command
from osc_lib import exceptions as oscexc
from osc_lib.i18n import _
from osc_lib import utils as osc_utils
from swiftclient.exceptions import ClientException
from tripleo_common import update
from tripleoclient import constants
from tripleoclient import exceptions
from tripleoclient import utils
from tripleoclient.workflows import deployment
from tripleoclient.workflows import parameters as workflow_params
from tripleoclient.workflows import plan_management
class DeployOvercloud(command.Command):
    """Deploy Overcloud"""

    # Module-scoped logger for this command.
    log = logging.getLogger(__name__ + ".DeployOvercloud")

    # Running totals of problems found by the pre-deployment validation
    # helpers; reset and accumulated by _predeploy_verify_capabilities().
    predeploy_errors = 0
    predeploy_warnings = 0
def _update_parameters(self, args, network_client, stack):
parameters = {}
stack_is_new = stack is None
timestamp = int(time.time())
parameters['DeployIdentifier'] = timestamp
parameters['UpdateIdentifier'] = ''
parameters['StackAction'] = 'CREATE' if stack_is_new else 'UPDATE'
# Update parameters from answers file:
if args.answers_file is not None:
with open(args.answers_file, 'r') as answers_file:
answers = yaml.load(answers_file)
if args.templates is None:
args.templates = answers['templates']
if 'environments' in answers:
if args.environment_files is not None:
answers['environments'].extend(args.environment_files)
args.environment_files = answers['environments']
param_args = (
('NtpServer', 'ntp_server'),
('ControllerCount', 'control_scale'),
('ComputeCount', 'compute_scale'),
('ObjectStorageCount', 'swift_storage_scale'),
('BlockStorageCount', 'block_storage_scale'),
('CephStorageCount', 'ceph_storage_scale'),
('OvercloudControlFlavor', 'control_flavor'),
('OvercloudComputeFlavor', 'compute_flavor'),
('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
)
if stack_is_new:
new_stack_args = (
('NovaComputeLibvirtType', 'libvirt_type'),
)
param_args = param_args + new_stack_args
# Update parameters from commandline
for param, arg in param_args:
if getattr(args, arg, None) is not None:
parameters[param] = getattr(args, arg)
# Scaling needs extra parameters
number_controllers = int(parameters.get('ControllerCount', 0))
dhcp_agents_per_network = (min(number_controllers, 3) if
number_controllers else 1)
parameters.update({
'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
})
return parameters
def _create_registration_env(self, args):
tht_root = args.templates
env_file = os.path.join(
tht_root,
constants.RHEL_REGISTRATION_EXTRACONFIG_NAME,
'environment-rhel-registration.yaml')
registry = os.path.join(
tht_root,
constants.RHEL_REGISTRATION_EXTRACONFIG_NAME,
'rhel-registration-resource-registry.yaml')
user_env = {'rhel_reg_method': args.reg_method,
'rhel_reg_org': args.reg_org,
'rhel_reg_force': args.reg_force,
'rhel_reg_sat_url': args.reg_sat_url,
'rhel_reg_activation_key': args.reg_activation_key}
return [registry, env_file], {"parameter_defaults": user_env}
def _create_parameters_env(self, parameters):
parameter_defaults = {"parameter_defaults": parameters}
return parameter_defaults
    def _process_multiple_environments(self, created_env_files, added_files,
                                       tht_root, user_tht_root, cleanup=True):
        """Process and merge a list of Heat environment files.

        Paths under the user's template tree are redirected into the working
        (temporary) tree first. When heatclient fails to process a file --
        typically because it references j2-rendered files that only exist in
        the working tree -- a rewritten copy of the environment is generated
        and processed instead.

        :param created_env_files: environment file paths, in merge order
        :param added_files: files downloaded from the plan (unused here)
        :param tht_root: working copy of the templates tree
        :param user_tht_root: the user's original templates tree
        :param cleanup: when True, rewritten temp environments are deleted
        :returns: (env_files, merged_env) tuple
        """
        env_files = {}
        localenv = {}

        for env_path in created_env_files:
            self.log.debug("Processing environment files %s" % env_path)
            abs_env_path = os.path.abspath(env_path)
            if abs_env_path.startswith(user_tht_root):
                # Point the file at the working tree, not the user's tree.
                new_env_path = abs_env_path.replace(user_tht_root, tht_root)
                self.log.debug("Redirecting env file %s to %s"
                               % (abs_env_path, new_env_path))
                env_path = new_env_path
            try:
                files, env = template_utils.process_environment_and_files(
                    env_path=env_path)
            except hc_exc.CommandError as ex:
                # This provides fallback logic so that we can reference files
                # inside the resource_registry values that may be rendered via
                # j2.yaml templates, where the above will fail because the
                # file doesn't exist in user_tht_root, but it is in tht_root
                # See bug https://bugs.launchpad.net/tripleo/+bug/1625783
                # for details on why this is needed (backwards-compatibility)
                self.log.debug("Error %s processing environment file %s"
                               % (six.text_type(ex), env_path))
                with open(abs_env_path, 'r') as f:
                    env_map = yaml.safe_load(f)
                env_registry = env_map.get('resource_registry', {})
                env_dirname = os.path.dirname(os.path.abspath(env_path))
                for rsrc, rsrc_path in six.iteritems(env_registry):
                    # We need to calculate the absolute path relative to
                    # env_path not cwd (which is what abspath uses).
                    abs_rsrc_path = os.path.normpath(
                        os.path.join(env_dirname, rsrc_path))
                    # If the absolute path matches user_tht_root, rewrite
                    # a temporary environment pointing at tht_root instead
                    if abs_rsrc_path.startswith(user_tht_root):
                        new_rsrc_path = abs_rsrc_path.replace(user_tht_root,
                                                              tht_root)
                        self.log.debug("Rewriting %s %s path to %s"
                                       % (env_path, rsrc, new_rsrc_path))
                        env_registry[rsrc] = new_rsrc_path
                    else:
                        env_registry[rsrc] = rsrc_path
                env_map['resource_registry'] = env_registry
                f_name = os.path.basename(os.path.splitext(abs_env_path)[0])
                # Write the rewritten environment next to the working tree so
                # relative references resolve; deleted on close when cleanup.
                with tempfile.NamedTemporaryFile(dir=tht_root,
                                                 prefix="env-%s-" % f_name,
                                                 suffix=".yaml",
                                                 mode="w",
                                                 delete=cleanup) as f:
                    self.log.debug("Rewriting %s environment to %s"
                                   % (env_path, f.name))
                    f.write(yaml.safe_dump(env_map, default_flow_style=False))
                    f.flush()
                    files, env = template_utils.process_environment_and_files(
                        env_path=f.name)
            if files:
                self.log.debug("Adding files %s for %s" % (files, env_path))
                env_files.update(files)

            # 'env' can be a deeply nested dictionary, so a simple update is
            # not enough
            localenv = template_utils.deep_update(localenv, env)
        return env_files, localenv
def _heat_deploy(self, stack, stack_name, template_path, parameters,
env_files, timeout, tht_root, env, update_plan_only):
"""Verify the Baremetal nodes are available and do a stack update"""
clients = self.app.client_manager
workflow_client = clients.workflow_engine
if stack:
update.add_breakpoints_cleanup_into_env(env)
self.log.debug("Getting template contents from plan %s" % stack_name)
# We need to reference the plan here, not the local
# tht root, as we need template_object to refer to
# the rendered overcloud.yaml, not the tht_root overcloud.j2.yaml
# FIXME(shardy) we need to move more of this into mistral actions
plan_yaml_path = os.path.relpath(template_path, tht_root)
# heatclient template_utils needs a function that can
# retrieve objects from a container by name/path
objectclient = clients.tripleoclient.object_store
def do_object_request(method='GET', object_path=None):
obj = objectclient.get_object(stack_name, object_path)
return obj and obj[1]
template_files, template = template_utils.get_template_contents(
template_object=plan_yaml_path,
object_request=do_object_request)
files = dict(list(template_files.items()) + list(env_files.items()))
number_controllers = int(parameters.get('ControllerCount', 0))
if number_controllers > 1:
if not env.get('parameter_defaults').get('NtpServer'):
raise exceptions.InvalidConfiguration(
'Specify --ntp-server as parameter or NtpServer in '
'environments when using multiple controllers '
'(with HA).')
clients = self.app.client_manager
moved_files = self._upload_missing_files(
stack_name, objectclient, files, tht_root)
self._process_and_upload_environment(
stack_name, objectclient, env, moved_files, tht_root,
workflow_client)
if not update_plan_only:
deployment.deploy_and_wait(self.log, clients, stack, stack_name,
self.app_args.verbose_level, timeout)
def _load_environment_directories(self, directories):
if os.environ.get('TRIPLEO_ENVIRONMENT_DIRECTORY'):
directories.append(os.environ.get('TRIPLEO_ENVIRONMENT_DIRECTORY'))
environments = []
for d in directories:
if os.path.exists(d) and d != '.':
self.log.debug("Environment directory: %s" % d)
for f in sorted(glob.glob(os.path.join(d, '*.yaml'))):
self.log.debug("Environment directory file: %s" % f)
if os.path.isfile(f):
environments.append(f)
return environments
    def _process_and_upload_environment(self, container_name, swift_client,
                                        env, moved_files, tht_root, mistral):
        """Process the environment and upload to Swift

        The environment at this point should be the result of the merged
        custom user environments. We need to look at the paths in the
        environment and update any that changed when they were uploaded to
        swift.
        """
        file_prefix = "file://"

        if 'resource_registry' in env:
            for name, path in env['resource_registry'].items():
                if not isinstance(path, six.string_types):
                    # Non-string registry entries are left untouched.
                    continue
                if path in moved_files:
                    # File was relocated by _upload_missing_files: use the
                    # new Swift-relative path.
                    new_path = moved_files[path]
                    env['resource_registry'][name] = new_path
                elif path.startswith(file_prefix):
                    path = path[len(file_prefix):]
                    if path.startswith(tht_root):
                        path = path[len(tht_root):]
                    # We want to make sure all the paths are relative.
                    if path.startswith("/"):
                        path = path[1:]
                    env['resource_registry'][name] = path

        # Parameters are removed from the environment and sent to the update
        # parameters action, this stores them in the Mistral environment and
        # means the UI can find them.
        if 'parameter_defaults' in env:
            params = env.pop('parameter_defaults')
            workflow_params.update_parameters(
                mistral, container=container_name, parameters=params)

        contents = yaml.safe_dump(env)

        # Until we have a well defined plan update workflow in tripleo-common
        # we need to manually add an environment in swift and mistral for users
        # custom environments passed to the deploy command.
        # See bug: https://bugs.launchpad.net/tripleo/+bug/1623431
        swift_path = "user-environment.yaml"
        swift_client.put_object(container_name, swift_path, contents)

        # Register the uploaded environment with Mistral only once.
        mistral_env = mistral.environments.get(container_name)
        user_env = {'path': swift_path}
        if user_env not in mistral_env.variables['environments']:
            mistral_env.variables['environments'].append(user_env)
            mistral.environments.update(
                name=container_name,
                variables=mistral_env.variables
            )
    def _upload_missing_files(self, container_name, swift_client, files_dict,
                              tht_root):
        """Find the files referenced in custom environments and upload them

        Heat environments can be passed to be included in the deployment, these
        files can include references to other files anywhere on the local
        file system. These need to be discovered and uploaded to Swift. When
        they have been uploaded to Swift the path to them will be different,
        the new paths are stored in the file_relocation dict, which is returned
        and used by _process_and_upload_environment which will merge the
        environment and update paths to the relative Swift path.
        """
        file_relocation = {}
        file_prefix = "file://"

        # Select files for relocation & upload
        for fullpath in files_dict.keys():

            if not fullpath.startswith(file_prefix):
                continue

            path = fullpath[len(file_prefix):]

            if path.startswith(tht_root):
                # This should already be uploaded.
                continue

            # path[1:] drops the leading '/' so the Swift object name is
            # relative under "user-files/".
            file_relocation[fullpath] = "user-files/{}".format(path[1:])

        # Make sure links within files point to new locations, and upload them
        for orig_path, reloc_path in file_relocation.items():
            link_replacement = utils.relative_link_replacement(
                file_relocation, os.path.dirname(reloc_path))
            contents = utils.replace_links_in_template_contents(
                files_dict[orig_path], link_replacement)
            swift_client.put_object(container_name, reloc_path, contents)

        return file_relocation
    def _download_missing_files_from_plan(self, tht_dir, plan_name):
        """Fetch plan files that are missing from the local templates tree.

        Typically these are the j2-rendered files that exist only in the
        Swift plan container, not in the on-disk tree.

        :param tht_dir: local templates directory to populate
        :param plan_name: Swift container holding the plan
        :returns: dict mapping plan-relative name -> local file path for
            every file that was downloaded
        """
        # get and download missing files into tmp directory
        clients = self.app.client_manager
        objectclient = clients.tripleoclient.object_store
        plan_list = objectclient.get_container(plan_name)
        # get_container returns (headers, objects); each object dict carries
        # its 'name'.
        plan_filenames = [f['name'] for f in plan_list[1]]
        added_files = {}
        for pf in plan_filenames:
            file_path = os.path.join(tht_dir, pf)
            if not os.path.isfile(file_path):
                self.log.debug("Missing in templates directory, downloading \
                    %s from swift into %s" % (pf, file_path))
                if not os.path.exists(os.path.dirname(file_path)):
                    os.makedirs(os.path.dirname(file_path))
                with open(file_path, 'w') as f:
                    f.write(objectclient.get_object(plan_name, pf)[1])
                added_files[pf] = file_path
        self.log.debug("added_files = %s" % added_files)
        return added_files
def _deploy_tripleo_heat_templates_tmpdir(self, stack, parsed_args):
# copy tht_root to temporary directory because we need to
# download any missing (e.g j2 rendered) files from the plan
tht_root = os.path.abspath(parsed_args.templates)
tht_tmp = tempfile.mkdtemp(prefix='tripleoclient-')
new_tht_root = "%s/tripleo-heat-templates" % tht_tmp
self.log.debug("Creating temporary templates tree in %s"
% new_tht_root)
try:
shutil.copytree(tht_root, new_tht_root, symlinks=True)
self._deploy_tripleo_heat_templates(stack, parsed_args,
new_tht_root, tht_root)
finally:
if parsed_args.no_cleanup:
self.log.warning("Not cleaning temporary directory %s"
% tht_tmp)
else:
shutil.rmtree(tht_tmp)
    def _deploy_tripleo_heat_templates(self, stack, parsed_args,
                                       tht_root, user_tht_root):
        """Deploy the fixed templates in TripleO Heat Templates

        Creates or updates the Swift deployment plan from *tht_root*, pulls
        back any plan-only (j2-rendered) files, assembles the environment
        (parameters, optional certificates, registration, user files) and
        hands off to _try_overcloud_deploy_with_compat_yaml.
        """
        clients = self.app.client_manager
        network_client = clients.network
        workflow_client = clients.workflow_engine

        parameters = self._update_parameters(
            parsed_args, network_client, stack)

        plans = plan_management.list_deployment_plans(workflow_client)

        # TODO(d0ugal): We need to put a more robust strategy in place here to
        # handle updating plans.
        if parsed_args.stack in plans:
            # Upload the new plan templates to swift to replace the existing
            # templates.
            plan_management.update_plan_from_templates(
                clients, parsed_args.stack, tht_root, parsed_args.roles_file)
        else:
            plan_management.create_plan_from_templates(
                clients, parsed_args.stack, tht_root, parsed_args.roles_file)

        # Get any missing (e.g j2 rendered) files from the plan to tht_root
        added_files = self._download_missing_files_from_plan(
            tht_root, parsed_args.stack)

        print("Deploying templates in the directory {0}".format(
            os.path.abspath(tht_root)))

        self.log.debug("Creating Environment file")
        # TODO(jprovazn): env file generated by create_environment_file()
        # is not very usable any more, scale params are included in
        # parameters and keystone cert is generated on create only
        env_path = utils.create_environment_file()
        env = {}
        created_env_files = []

        if stack is None:
            # Fresh deploy only: certificates are generated at create time.
            self.log.debug("Creating Keystone certificates")
            keystone_pki.generate_certs_into_json(env_path, False)
            created_env_files.append(env_path)

        if parsed_args.environment_directories:
            created_env_files.extend(self._load_environment_directories(
                parsed_args.environment_directories))

        env.update(self._create_parameters_env(parameters))

        if parsed_args.rhel_reg:
            reg_env_files, reg_env = self._create_registration_env(parsed_args)
            created_env_files.extend(reg_env_files)
            template_utils.deep_update(env, reg_env)

        # User-supplied -e files are merged last so they win.
        if parsed_args.environment_files:
            created_env_files.extend(parsed_args.environment_files)

        self.log.debug("Processing environment files %s" % created_env_files)
        env_files, localenv = self._process_multiple_environments(
            created_env_files, added_files, tht_root, user_tht_root,
            cleanup=not parsed_args.no_cleanup)
        template_utils.deep_update(env, localenv)

        self._try_overcloud_deploy_with_compat_yaml(
            tht_root, stack, parsed_args.stack, parameters, env_files,
            parsed_args.timeout, env, parsed_args.update_plan_only)
def _try_overcloud_deploy_with_compat_yaml(self, tht_root, stack,
stack_name, parameters,
env_files, timeout,
env, update_plan_only):
overcloud_yaml = os.path.join(tht_root, constants.OVERCLOUD_YAML_NAME)
try:
self._heat_deploy(stack, stack_name, overcloud_yaml,
parameters, env_files, timeout,
tht_root, env, update_plan_only)
except ClientException as e:
messages = 'Failed to deploy: %s' % str(e)
raise ValueError(messages)
def _is_tls_enabled(self, overcloud_endpoint):
return overcloud_endpoint.startswith('https')
    def _keystone_init(self, overcloud_endpoint, overcloud_ip_or_fqdn,
                       parsed_args, stack):
        """Legacy os-cloud-config Keystone initialization (deprecated path).

        Only runs real work when some services from constants.SERVICE_LIST
        are missing in Keystone, i.e. when Puppet did not initialize them.

        :param overcloud_endpoint: overcloud auth URL
        :param overcloud_ip_or_fqdn: host part of that URL
        :param parsed_args: parsed CLI args (overcloud_ssh_user is used)
        :param stack: deployed Heat stack (source of endpoints/passwords)
        """
        keystone_admin_ip = utils.get_endpoint('KeystoneAdmin', stack)
        keystone_admin_ip = utils.unbracket_ipv6(keystone_admin_ip)
        keystone_internal_ip = utils.get_endpoint('KeystoneInternal', stack)
        keystone_internal_ip = utils.unbracket_ipv6(keystone_internal_ip)
        tls_enabled = self._is_tls_enabled(overcloud_endpoint)
        keystone_tls_host = None
        if tls_enabled:
            # NOTE(jaosorior): This triggers set up the keystone endpoint with
            # the https protocol and the required port set in
            # keystone.initialize.
            keystone_tls_host = overcloud_ip_or_fqdn
        keystone_client = occ_clients.get_keystone_client(
            'admin',
            utils.get_password(self.app.client_manager,
                               stack.stack_name,
                               'AdminPassword'),
            'admin',
            overcloud_endpoint)
        # Collect only the services Keystone does not know about yet.
        services = {}
        for service, data in six.iteritems(constants.SERVICE_LIST):
            try:
                keystone_client.services.find(name=service)
            except kscexc.NotFound:
                service_data = self._set_service_data(service, data, stack)
                if service_data:
                    services.update({service: service_data})
        if services:
            # This was deprecated in Newton. The deprecation message and
            # os-cloud-config keystone init should remain until at least the
            # Pike release to ensure users have a chance to update their
            # templates, including ones for the previous release.
            self.log.warning('DEPRECATED: '
                             'It appears Keystone was not initialized by '
                             'Puppet. Will do initialization via '
                             'os-cloud-config, but this behavior is '
                             'deprecated. Please update your templates to a '
                             'version that has Puppet initialization of '
                             'Keystone.'
                             )
            # NOTE(jaosorior): These ports will be None if the templates
            # don't support the EndpointMap as an output yet. And so the
            # default values will be taken.
            public_port = None
            admin_port = None
            internal_port = None

            endpoint_map = utils.get_endpoint_map(stack)
            if endpoint_map:
                public_port = endpoint_map.get('KeystonePublic').get('port')
                admin_port = endpoint_map.get('KeystoneAdmin').get('port')
                internal_port = endpoint_map.get(
                    'KeystoneInternal').get('port')

            # TODO(rbrady): check usages of get_password
            keystone.initialize(
                keystone_admin_ip,
                utils.get_password(self.app.client_manager,
                                   stack.stack_name,
                                   'AdminToken'),
                'admin@example.com',
                utils.get_password(self.app.client_manager,
                                   stack.stack_name,
                                   'AdminPassword'),
                ssl=keystone_tls_host,
                public=overcloud_ip_or_fqdn,
                user=parsed_args.overcloud_ssh_user,
                admin=keystone_admin_ip,
                internal=keystone_internal_ip,
                public_port=public_port,
                admin_port=admin_port,
                internal_port=internal_port)

            if not tls_enabled:
                # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
                # os_cloud_config.SERVICES dictionary
                for service_name, data in keystone.SERVICES.items():
                    data.pop('ssl_port', None)

            keystone.setup_endpoints(
                services,
                client=keystone_client,
                os_auth_url=overcloud_endpoint,
                public_host=overcloud_ip_or_fqdn)
        # End of deprecated Keystone init
def _set_service_data(self, service, data, stack):
self.log.debug("Setting data for service '%s'" % service)
service_data = data.copy()
service_data.pop('password_field', None)
endpoint_map = utils.get_endpoint_map(stack)
try:
service_data.update(
self._get_base_service_data(service, data, stack))
except KeyError:
output_source = "service IPs"
if endpoint_map:
output_source = "endpoint map"
self.log.debug(
("Skipping \"{}\" postconfig because it wasn't found in the "
"{} output").format(service, output_source))
return None
if not endpoint_map:
return service_data
service_data.update(self._get_endpoint_data(service, endpoint_map,
stack))
return service_data
def _get_base_service_data(self, service, data, stack):
service_data = {}
password_field = data.get('password_field')
if password_field:
service_data['password'] = utils.get_password(
self.app.client_manager,
stack.stack_name,
password_field)
# Set internal endpoint
service_name_internal = self._format_endpoint_name(service, 'internal')
service_data['internal_host'] = utils.get_endpoint(
service_name_internal, stack)
return service_data
def _get_endpoint_data(self, service, endpoint_map, stack):
endpoint_data = {}
# Set standard port
service_name_internal = self._format_endpoint_name(service, 'internal')
endpoint_data['port'] = endpoint_map[service_name_internal]['port']
# Set public endpoint
service_name_public = self._format_endpoint_name(service, 'public')
public_endpoint_data = endpoint_map.get(service_name_public)
endpoint_data['public_host'] = public_endpoint_data['host']
# Set SSL port
if public_endpoint_data['uri'].startswith('https'):
endpoint_data['ssl_port'] = public_endpoint_data['port']
return endpoint_data
def _format_endpoint_name(self, service, interface):
return re.sub('v[0-9]+', '',
service.capitalize() + interface.capitalize())
def _endpoints_managed(self, stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'ManagedEndpoints':
# NOTE(jaosorior): We don't really care about the value as
# long as the key is there.
return output['output_value']
return False
def _deploy_postconfig(self, stack, parsed_args):
self.log.debug("_deploy_postconfig(%s)" % parsed_args)
overcloud_endpoint = utils.get_overcloud_endpoint(stack)
# NOTE(jaosorior): The overcloud endpoint can contain an IP address or
# an FQDN depending on how what it's configured to output in the
# tripleo-heat-templates. Such a configuration can be done by
# overriding the EndpointMap through parameter_defaults.
overcloud_ip_or_fqdn = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
keystone_admin_ip = utils.get_endpoint('KeystoneAdmin', stack)
no_proxy = os.environ.get('no_proxy', overcloud_ip_or_fqdn)
no_proxy_list = map(utils.bracket_ipv6,
[no_proxy, overcloud_ip_or_fqdn,
keystone_admin_ip])
os.environ['no_proxy'] = ','.join(
[x for x in no_proxy_list if x is not None])
utils.remove_known_hosts(overcloud_ip_or_fqdn)
if not self._endpoints_managed(stack):
self._keystone_init(overcloud_endpoint, overcloud_ip_or_fqdn,
parsed_args, stack)
else:
self.log.debug("Keystone endpoints and services are managed by "
"puppet. Skipping post-config.")
def _validate_args(self, parsed_args):
if parsed_args.templates is None and parsed_args.answers_file is None:
raise oscexc.CommandError(
"You must specify either --templates or --answers-file")
if parsed_args.environment_files:
nonexisting_envs = []
jinja2_envs = []
for env_file in parsed_args.environment_files:
if env_file.endswith(".j2.yaml"):
jinja2_envs.append(env_file)
elif not os.path.isfile(env_file):
# Tolerate missing file if there's a j2.yaml file that will
# be rendered in the plan but not available locally (yet)
if not os.path.isfile(env_file.replace(".yaml",
".j2.yaml")):
nonexisting_envs.append(env_file)
if jinja2_envs:
rewritten_paths = [e.replace(".j2.yaml", ".yaml")
for e in jinja2_envs]
raise oscexc.CommandError(
"Error: The the following jinja2 files were provided: -e "
"{}. Did you mean -e {}?".format(
' -e '.join(jinja2_envs),
' -e '.join(rewritten_paths)))
if nonexisting_envs:
raise oscexc.CommandError(
"Error: The following files were not found: {0}".format(
", ".join(nonexisting_envs)))
def _get_default_role_counts(self, parsed_args):
if parsed_args.roles_file:
roles_data = yaml.safe_load(open(parsed_args.roles_file).read())
else:
# Assume default role counts
return {
'ControllerCount': 1,
'ComputeCount': 1,
'ObjectStorageCount': 0,
'BlockStorageCount': 0,
'CephStorageCount': 0
}
default_role_counts = {}
for r in roles_data:
count_default = r.get('CountDefault', 0)
default_role_counts.setdefault(
"%sCount" % r['name'],
count_default)
return default_role_counts
    def _predeploy_verify_capabilities(self, stack, parameters, parsed_args):
        """Run pre-deployment validations.

        Resets and then accumulates the class-level predeploy_errors /
        predeploy_warnings counters across the individual checks (boot
        images, flavors, ironic boot config, profiles, hypervisor stats,
        node counts).

        :returns: (errors, warnings) totals
        """
        self.predeploy_errors = 0
        self.predeploy_warnings = 0
        self.log.debug("Starting _pre_verify_capabilities")

        bm_client = self.app.client_manager.baremetal

        self._check_boot_images()

        flavors = self._collect_flavors(parsed_args)

        self._check_ironic_boot_configuration(bm_client)

        errors, warnings = utils.assign_and_verify_profiles(
            bm_client, flavors,
            assign_profiles=False,
            dry_run=parsed_args.dry_run
        )
        self.predeploy_errors += errors
        self.predeploy_warnings += warnings

        compute_client = self.app.client_manager.compute
        self.log.debug("Checking hypervisor stats")
        # None signals the expected hypervisor stats were not met.
        if utils.check_hypervisor_stats(compute_client) is None:
            self.log.error("Expected hypervisor stats not met")
            self.predeploy_errors += 1

        self.log.debug("Checking nodes count")
        default_role_counts = self._get_default_role_counts(parsed_args)
        enough_nodes, count, ironic_nodes_count = utils.check_nodes_count(
            bm_client,
            stack,
            parameters,
            default_role_counts
        )
        if not enough_nodes:
            self.log.error(
                "Not enough nodes - available: {0}, requested: {1}".format(
                    ironic_nodes_count, count))
            self.predeploy_errors += 1

        return self.predeploy_errors, self.predeploy_warnings
    # Lazily-populated cache for the deploy image IDs (see _image_ids).
    __kernel_id = None
    __ramdisk_id = None

    def _image_ids(self):
        """Look up (and cache) the deploy kernel and ramdisk image IDs.

        :returns: tuple (kernel_id, ramdisk_id); either element may be None
            when the corresponding glance image could not be found.
        """
        if self.__kernel_id is not None and self.__ramdisk_id is not None:
            return self.__kernel_id, self.__ramdisk_id

        image_client = self.app.client_manager.image
        kernel_id, ramdisk_id = None, None
        try:
            kernel_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-kernel').id
        except AttributeError:
            # NOTE(review): presumably raised when find_resource returns an
            # unexpected object for an ambiguous name - confirm.
            self.log.exception("Please make sure there is only one image "
                               "named 'bm-deploy-kernel' in glance.")
        except oscexc.CommandError:
            self.log.exception("Error finding 'bm-deploy-kernel' in "
                               "glance.")

        try:
            ramdisk_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-ramdisk').id
        except AttributeError:
            self.log.exception("Please make sure there is only one image "
                               "named 'bm-deploy-ramdisk' in glance.")
        except oscexc.CommandError:
            self.log.exception("Error finding 'bm-deploy-ramdisk' in "
                               "glance.")

        self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
            kernel_id, ramdisk_id))

        # Cache for subsequent calls (only fully effective when both found).
        self.__kernel_id = kernel_id
        self.__ramdisk_id = ramdisk_id
        return kernel_id, ramdisk_id
def _check_boot_images(self):
kernel_id, ramdisk_id = self._image_ids()
message = ("No image with the name '{}' found - make "
"sure you've uploaded boot images")
if kernel_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-kernel'))
if ramdisk_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-ramdisk'))
    def _collect_flavors(self, parsed_args):
        """Validate and collect nova flavors in use.

        Ensure that selected flavors (--ROLE-flavor) are valid in nova.
        Issue a warning if local boot is not set for a flavor.

        :returns: dictionary flavor name -> (flavor object, scale)
        """
        compute_client = self.app.client_manager.compute
        flavors = {f.name: f for f in compute_client.flavors.list()}
        result = {}

        message = "Provided --{}-flavor, '{}', does not exist"

        for target, (flavor_name, scale) in (
            utils.get_roles_info(parsed_args).items()
        ):
            if flavor_name is None or not scale:
                self.log.debug("--{}-flavor not used".format(target))
                continue

            try:
                flavor, old_scale = result[flavor_name]
            except KeyError:
                pass
            else:
                # Same flavor already collected for another role:
                # accumulate the scale and skip re-validation.
                result[flavor_name] = (flavor, old_scale + scale)
                continue

            try:
                flavor = flavors[flavor_name]
            except KeyError:
                self.predeploy_errors += 1
                self.log.error(message.format(target, flavor_name))
                continue

            # Nodes must PXE-boot the deploy image, which requires the
            # flavor's boot_option capability to be 'local'.
            if flavor.get_keys().get('capabilities:boot_option', '') \
                    != 'local':
                self.predeploy_warnings += 1
                self.log.warning(
                    'Flavor %s "capabilities:boot_option" is not set to '
                    '"local". Nodes must have ability to PXE boot from '
                    'deploy image.', flavor_name)
                self.log.warning(
                    'Recommended solution: openstack flavor set --property '
                    '"cpu_arch"="x86_64" --property '
                    '"capabilities:boot_option"="local" ' + flavor_name)

            result[flavor_name] = (flavor, scale)

        return result
def _check_ironic_boot_configuration(self, bm_client):
for node in bm_client.node.list(detail=True, maintenance=False):
self.log.debug("Checking config for Node {0}".format(node.uuid))
self._check_node_boot_configuration(node)
def _check_node_boot_configuration(self, node):
kernel_id, ramdisk_id = self._image_ids()
self.log.debug("Doing boot checks for {}".format(node.uuid))
message = ("Node uuid={uuid} has an incorrectly configured "
"{property}. Expected \"{expected}\" but got "
"\"{actual}\".")
if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_ramdisk',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_ramdisk')
))
if node.driver_info.get('deploy_kernel') != kernel_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_kernel',
expected=kernel_id,
actual=node.driver_info.get('deploy_kernel')
))
if 'boot_option:local' not in node.properties.get('capabilities', ''):
self.predeploy_warnings += 1
self.log.warning(message.format(
uuid=node.uuid,
property='properties/capabilities',
expected='boot_option:local',
actual=node.properties.get('capabilities')
))
    def get_parser(self, prog_name):
        """Build the argument parser for the overcloud deploy command.

        :param prog_name: program name argparse uses in usage output
        :returns: configured argparse.ArgumentParser
        """
        # add_help doesn't work properly, set it to False:
        parser = argparse.ArgumentParser(
            description=self.get_description(),
            prog=prog_name,
            add_help=False
        )
        parser.add_argument(
            '--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES,
            help=_("The directory containing the Heat templates to deploy"),
        )
        parser.add_argument('--stack',
                            help=_("Stack name to create or update"),
                            default='overcloud')
        parser.add_argument('--timeout', '-t', metavar='<TIMEOUT>',
                            type=int, default=240,
                            help=_('Deployment timeout in minutes.'))
        utils.add_deployment_plan_arguments(parser)
        parser.add_argument('--libvirt-type',
                            choices=['kvm', 'qemu'],
                            help=_('Libvirt domain type.'))
        parser.add_argument('--ntp-server',
                            help=_('The NTP for overcloud nodes. '))
        parser.add_argument(
            '--no-proxy',
            default=os.environ.get('no_proxy', ''),
            help=_('A comma separated list of hosts that should not be '
                   'proxied.')
        )
        parser.add_argument(
            '--overcloud-ssh-user',
            default='heat-admin',
            help=_('User for ssh access to overcloud nodes')
        )
        parser.add_argument(
            '--environment-file', '-e', metavar='<HEAT ENVIRONMENT FILE>',
            action='append', dest='environment_files',
            help=_('Environment files to be passed to the heat stack-create '
                   'or heat stack-update command. (Can be specified more than '
                   'once.)')
        )
        parser.add_argument(
            '--environment-directory', metavar='<HEAT ENVIRONMENT DIRECTORY>',
            action='append', dest='environment_directories',
            default=[os.path.join(os.environ.get('HOME', ''), '.tripleo',
                                  'environments')],
            help=_('Environment file directories that are automatically '
                   ' added to the heat stack-create or heat stack-update'
                   ' commands. Can be specified more than once. Files in'
                   ' directories are loaded in ascending sort order.')
        )
        parser.add_argument(
            '--roles-file', '-r', dest='roles_file',
            help=_('Roles file, overrides the default %s in the --templates '
                   'directory') % constants.OVERCLOUD_ROLES_FILE
        )
        parser.add_argument(
            '--no-cleanup', action='store_true',
            help=_('Don\'t cleanup temporary files, just log their location')
        )
        parser.add_argument(
            '--update-plan-only',
            action='store_true',
            help=_('Only update the plan. Do not perform the actual '
                   'deployment. NOTE: Will move to a discrete command in a '
                   'future release.')
        )
        # --validation-errors-nonfatal stores False into the *positive*
        # flag validation_errors_fatal (default True).
        parser.add_argument(
            '--validation-errors-nonfatal',
            dest='validation_errors_fatal',
            action='store_false',
            default=True,
            help=_('Allow the deployment to continue in spite of validation '
                   'errors. Note that attempting deployment while errors '
                   'exist is likely to fail.')
        )
        parser.add_argument(
            '--validation-warnings-fatal',
            action='store_true',
            default=False,
            help=_('Exit if there are warnings from the configuration '
                   'pre-checks.')
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help=_('Only run validations, but do not apply any changes.')
        )
        parser.add_argument(
            '--skip-postconfig',
            action='store_true',
            default=False,
            help=_('Skip the overcloud post-deployment configuration.')
        )
        parser.add_argument(
            '--force-postconfig',
            action='store_true',
            default=False,
            help=_('Force the overcloud post-deployment configuration.')
        )
        # Options controlling RHEL registration of the deployed nodes.
        reg_group = parser.add_argument_group('Registration Parameters')
        reg_group.add_argument(
            '--rhel-reg',
            action='store_true',
            help=_('Register overcloud nodes to the customer portal or a '
                   'satellite.')
        )
        reg_group.add_argument(
            '--reg-method',
            choices=['satellite', 'portal'],
            default='satellite',
            help=_('RHEL registration method to use for the overcloud nodes.')
        )
        reg_group.add_argument(
            '--reg-org',
            default='',
            help=_('Organization key to use for registration.')
        )
        reg_group.add_argument(
            '--reg-force',
            action='store_true',
            help=_('Register the system even if it is already registered.')
        )
        reg_group.add_argument(
            '--reg-sat-url',
            default='',
            help=_('Satellite server to register overcloud nodes.')
        )
        reg_group.add_argument(
            '--reg-activation-key',
            default='',
            help=_('Activation key to use for registration.')
        )
        parser.add_argument(
            '--answers-file',
            help=_('Path to a YAML file with arguments and parameters.')
        )
        return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
# Swiftclient logs things like 404s at error level, which is a problem
# because we use EAFP to check for the existence of files. Turn off
# most swiftclient logging to avoid cluttering up our output with
# pointless tracebacks.
sc_logger = logging.getLogger("swiftclient")
sc_logger.setLevel(logging.CRITICAL)
self._validate_args(parsed_args)
clients = self.app.client_manager
orchestration_client = clients.orchestration
stack = utils.get_stack(orchestration_client, parsed_args.stack)
if stack and stack.stack_status == 'IN_PROGRESS':
raise exceptions.StackInProgress(
"Unable to deploy as the stack '{}' status is '{}'".format(
stack.stack_name, stack.stack_status))
parameters = self._update_parameters(
parsed_args, clients.network, stack)
errors, warnings = self._predeploy_verify_capabilities(
stack, parameters, parsed_args)
if errors > 0:
self.log.error(
"Configuration has %d errors, fix them before proceeding. "
"Ignoring these errors is likely to lead to a failed deploy.",
errors)
if parsed_args.validation_warnings_fatal or \
parsed_args.validation_errors_fatal:
return
if warnings > 0:
self.log.error(
"Configuration has %d warnings, fix them before proceeding. ",
warnings)
if parsed_args.validation_warnings_fatal:
return
else:
self.log.info("SUCCESS: No warnings or errors in deploy "
"configuration, proceeding.")
stack_create = stack is None
if stack_create:
self.log.info("No stack found, will be doing a stack create")
else:
self.log.info("Stack found, will be doing a stack update")
if parsed_args.rhel_reg:
if parsed_args.reg_method == 'satellite':
sat_required_args = (parsed_args.reg_org and
parsed_args.reg_sat_url and
parsed_args.reg_activation_key)
if not sat_required_args:
raise exceptions.DeploymentError(
"ERROR: In order to use satellite registration, "
"you must specify --reg-org, --reg-sat-url, and "
"--reg-activation-key.")
else:
portal_required_args = (parsed_args.reg_org and
parsed_args.reg_activation_key)
if not portal_required_args:
raise exceptions.DeploymentError(
"ERROR: In order to use portal registration, you "
"must specify --reg-org, and "
"--reg-activation-key.")
if parsed_args.dry_run:
print("Validation Finished")
return
self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)
# Get a new copy of the stack after stack update/create. If it was
# a create then the previous stack object would be None.
stack = utils.get_stack(orchestration_client, parsed_args.stack)
# Force fetching of attributes
stack.get()
utils.create_overcloudrc(clients, stack, parsed_args.no_proxy)
utils.create_tempest_deployer_input()
# Run postconfig on create or force. Use force to makes sure endpoints
# are created with deploy reruns and upgrades
if (stack_create or parsed_args.force_postconfig
and not parsed_args.skip_postconfig):
self._deploy_postconfig(stack, parsed_args)
overcloud_endpoint = utils.get_overcloud_endpoint(stack)
print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
print("Overcloud Deployed")
Use the absolute path for temp templates
If the template is not in the user's t-h-t root, we should use the
absolute path instead of the one in the template since we've already
copied these files in the temp dir.
Change-Id: I9a575575fc3c0fc78848239fc7b663ca4508fcfa
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
import glob
import logging
import os
import os.path
import re
import shutil
import six
import tempfile
import time
import yaml
from heatclient.common import template_utils
from heatclient import exc as hc_exc
from keystoneclient import exceptions as kscexc
from os_cloud_config import keystone
from os_cloud_config import keystone_pki
from os_cloud_config.utils import clients as occ_clients
from osc_lib.command import command
from osc_lib import exceptions as oscexc
from osc_lib.i18n import _
from osc_lib import utils as osc_utils
from swiftclient.exceptions import ClientException
from tripleo_common import update
from tripleoclient import constants
from tripleoclient import exceptions
from tripleoclient import utils
from tripleoclient.workflows import deployment
from tripleoclient.workflows import parameters as workflow_params
from tripleoclient.workflows import plan_management
class DeployOvercloud(command.Command):
    """Deploy Overcloud"""

    log = logging.getLogger(__name__ + ".DeployOvercloud")
    # Running tallies of problems found by the pre-deployment validations;
    # reset at the start of _predeploy_verify_capabilities.
    predeploy_errors = 0
    predeploy_warnings = 0
def _update_parameters(self, args, network_client, stack):
parameters = {}
stack_is_new = stack is None
timestamp = int(time.time())
parameters['DeployIdentifier'] = timestamp
parameters['UpdateIdentifier'] = ''
parameters['StackAction'] = 'CREATE' if stack_is_new else 'UPDATE'
# Update parameters from answers file:
if args.answers_file is not None:
with open(args.answers_file, 'r') as answers_file:
answers = yaml.load(answers_file)
if args.templates is None:
args.templates = answers['templates']
if 'environments' in answers:
if args.environment_files is not None:
answers['environments'].extend(args.environment_files)
args.environment_files = answers['environments']
param_args = (
('NtpServer', 'ntp_server'),
('ControllerCount', 'control_scale'),
('ComputeCount', 'compute_scale'),
('ObjectStorageCount', 'swift_storage_scale'),
('BlockStorageCount', 'block_storage_scale'),
('CephStorageCount', 'ceph_storage_scale'),
('OvercloudControlFlavor', 'control_flavor'),
('OvercloudComputeFlavor', 'compute_flavor'),
('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
)
if stack_is_new:
new_stack_args = (
('NovaComputeLibvirtType', 'libvirt_type'),
)
param_args = param_args + new_stack_args
# Update parameters from commandline
for param, arg in param_args:
if getattr(args, arg, None) is not None:
parameters[param] = getattr(args, arg)
# Scaling needs extra parameters
number_controllers = int(parameters.get('ControllerCount', 0))
dhcp_agents_per_network = (min(number_controllers, 3) if
number_controllers else 1)
parameters.update({
'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
})
return parameters
def _create_registration_env(self, args):
tht_root = args.templates
env_file = os.path.join(
tht_root,
constants.RHEL_REGISTRATION_EXTRACONFIG_NAME,
'environment-rhel-registration.yaml')
registry = os.path.join(
tht_root,
constants.RHEL_REGISTRATION_EXTRACONFIG_NAME,
'rhel-registration-resource-registry.yaml')
user_env = {'rhel_reg_method': args.reg_method,
'rhel_reg_org': args.reg_org,
'rhel_reg_force': args.reg_force,
'rhel_reg_sat_url': args.reg_sat_url,
'rhel_reg_activation_key': args.reg_activation_key}
return [registry, env_file], {"parameter_defaults": user_env}
def _create_parameters_env(self, parameters):
parameter_defaults = {"parameter_defaults": parameters}
return parameter_defaults
    def _process_multiple_environments(self, created_env_files, added_files,
                                       tht_root, user_tht_root, cleanup=True):
        """Merge environment files, redirecting paths into the temp tree.

        Env files under the user's templates tree are re-read from the
        temporary copy; when that fails (e.g. j2-rendered references), the
        resource_registry is rewritten into a temporary env file that
        points at the temp tree instead.

        :param created_env_files: ordered list of environment file paths
        :param added_files: files downloaded from the plan
            (NOTE(review): not referenced in this method - confirm intent)
        :param tht_root: temporary templates tree
        :param user_tht_root: the user's original templates tree
        :param cleanup: delete rewritten temp env files when closed
        :returns: tuple (files dict, merged environment dict)
        """
        env_files = {}
        localenv = {}
        for env_path in created_env_files:
            self.log.debug("Processing environment files %s" % env_path)
            abs_env_path = os.path.abspath(env_path)
            if abs_env_path.startswith(user_tht_root):
                new_env_path = abs_env_path.replace(user_tht_root, tht_root)
                self.log.debug("Redirecting env file %s to %s"
                               % (abs_env_path, new_env_path))
                env_path = new_env_path
            try:
                files, env = template_utils.process_environment_and_files(
                    env_path=env_path)
            except hc_exc.CommandError as ex:
                # This provides fallback logic so that we can reference files
                # inside the resource_registry values that may be rendered via
                # j2.yaml templates, where the above will fail because the
                # file doesn't exist in user_tht_root, but it is in tht_root
                # See bug https://bugs.launchpad.net/tripleo/+bug/1625783
                # for details on why this is needed (backwards-compatibility)
                self.log.debug("Error %s processing environment file %s"
                               % (six.text_type(ex), env_path))
                with open(abs_env_path, 'r') as f:
                    env_map = yaml.safe_load(f)
                env_registry = env_map.get('resource_registry', {})
                env_dirname = os.path.dirname(os.path.abspath(env_path))
                for rsrc, rsrc_path in six.iteritems(env_registry):
                    # We need to calculate the absolute path relative to
                    # env_path not cwd (which is what abspath uses).
                    abs_rsrc_path = os.path.normpath(
                        os.path.join(env_dirname, rsrc_path))
                    # If the absolute path matches user_tht_root, rewrite
                    # a temporary environment pointing at tht_root instead
                    if abs_rsrc_path.startswith(user_tht_root):
                        new_rsrc_path = abs_rsrc_path.replace(user_tht_root,
                                                              tht_root)
                        self.log.debug("Rewriting %s %s path to %s"
                                       % (env_path, rsrc, new_rsrc_path))
                        env_registry[rsrc] = new_rsrc_path
                    else:
                        # Use the absolute path so the rewritten temp env
                        # file resolves correctly wherever it lives.
                        env_registry[rsrc] = abs_rsrc_path
                env_map['resource_registry'] = env_registry
                f_name = os.path.basename(os.path.splitext(abs_env_path)[0])
                with tempfile.NamedTemporaryFile(dir=tht_root,
                                                 prefix="env-%s-" % f_name,
                                                 suffix=".yaml",
                                                 mode="w",
                                                 delete=cleanup) as f:
                    self.log.debug("Rewriting %s environment to %s"
                                   % (env_path, f.name))
                    f.write(yaml.safe_dump(env_map, default_flow_style=False))
                    f.flush()
                    files, env = template_utils.process_environment_and_files(
                        env_path=f.name)
            if files:
                self.log.debug("Adding files %s for %s" % (files, env_path))
                env_files.update(files)

            # 'env' can be a deeply nested dictionary, so a simple update is
            # not enough
            localenv = template_utils.deep_update(localenv, env)
        return env_files, localenv
def _heat_deploy(self, stack, stack_name, template_path, parameters,
env_files, timeout, tht_root, env, update_plan_only):
"""Verify the Baremetal nodes are available and do a stack update"""
clients = self.app.client_manager
workflow_client = clients.workflow_engine
if stack:
update.add_breakpoints_cleanup_into_env(env)
self.log.debug("Getting template contents from plan %s" % stack_name)
# We need to reference the plan here, not the local
# tht root, as we need template_object to refer to
# the rendered overcloud.yaml, not the tht_root overcloud.j2.yaml
# FIXME(shardy) we need to move more of this into mistral actions
plan_yaml_path = os.path.relpath(template_path, tht_root)
# heatclient template_utils needs a function that can
# retrieve objects from a container by name/path
objectclient = clients.tripleoclient.object_store
def do_object_request(method='GET', object_path=None):
obj = objectclient.get_object(stack_name, object_path)
return obj and obj[1]
template_files, template = template_utils.get_template_contents(
template_object=plan_yaml_path,
object_request=do_object_request)
files = dict(list(template_files.items()) + list(env_files.items()))
number_controllers = int(parameters.get('ControllerCount', 0))
if number_controllers > 1:
if not env.get('parameter_defaults').get('NtpServer'):
raise exceptions.InvalidConfiguration(
'Specify --ntp-server as parameter or NtpServer in '
'environments when using multiple controllers '
'(with HA).')
clients = self.app.client_manager
moved_files = self._upload_missing_files(
stack_name, objectclient, files, tht_root)
self._process_and_upload_environment(
stack_name, objectclient, env, moved_files, tht_root,
workflow_client)
if not update_plan_only:
deployment.deploy_and_wait(self.log, clients, stack, stack_name,
self.app_args.verbose_level, timeout)
def _load_environment_directories(self, directories):
if os.environ.get('TRIPLEO_ENVIRONMENT_DIRECTORY'):
directories.append(os.environ.get('TRIPLEO_ENVIRONMENT_DIRECTORY'))
environments = []
for d in directories:
if os.path.exists(d) and d != '.':
self.log.debug("Environment directory: %s" % d)
for f in sorted(glob.glob(os.path.join(d, '*.yaml'))):
self.log.debug("Environment directory file: %s" % f)
if os.path.isfile(f):
environments.append(f)
return environments
    def _process_and_upload_environment(self, container_name, swift_client,
                                        env, moved_files, tht_root, mistral):
        """Process the environment and upload to Swift

        The environment at this point should be the result of the merged
        custom user environments. We need to look at the paths in the
        environment and update any that changed when they were uploaded to
        swift.

        :param container_name: swift container / plan name
        :param swift_client: object-store client used for the upload
        :param env: merged environment dict (mutated in place)
        :param moved_files: mapping original path -> relocated swift path
        :param tht_root: local templates root paths are relative to
        :param mistral: workflow client holding the Mistral environment
        """
        file_prefix = "file://"

        if 'resource_registry' in env:
            for name, path in env['resource_registry'].items():
                if not isinstance(path, six.string_types):
                    continue
                if path in moved_files:
                    new_path = moved_files[path]
                    env['resource_registry'][name] = new_path
                elif path.startswith(file_prefix):
                    path = path[len(file_prefix):]
                    if path.startswith(tht_root):
                        path = path[len(tht_root):]
                    # We want to make sure all the paths are relative.
                    if path.startswith("/"):
                        path = path[1:]
                    env['resource_registry'][name] = path

        # Parameters are removed from the environment and sent to the update
        # parameters action, this stores them in the Mistral environment and
        # means the UI can find them.
        if 'parameter_defaults' in env:
            params = env.pop('parameter_defaults')
            workflow_params.update_parameters(
                mistral, container=container_name, parameters=params)

        contents = yaml.safe_dump(env)

        # Until we have a well defined plan update workflow in tripleo-common
        # we need to manually add an environment in swift and mistral for users
        # custom environments passed to the deploy command.
        # See bug: https://bugs.launchpad.net/tripleo/+bug/1623431
        swift_path = "user-environment.yaml"
        swift_client.put_object(container_name, swift_path, contents)

        # Register the uploaded env in the Mistral environment (only once).
        mistral_env = mistral.environments.get(container_name)
        user_env = {'path': swift_path}
        if user_env not in mistral_env.variables['environments']:
            mistral_env.variables['environments'].append(user_env)
            mistral.environments.update(
                name=container_name,
                variables=mistral_env.variables
            )
    def _upload_missing_files(self, container_name, swift_client, files_dict,
                              tht_root):
        """Find the files referenced in custom environments and upload them

        Heat environments can be passed to be included in the deployment,
        these files can include references to other files anywhere on the
        local file system. These need to be discovered and uploaded to
        Swift. When they have been uploaded to Swift the path to them will
        be different, the new paths are stored in the file_relocation dict,
        which is returned and used by _process_and_upload_environment which
        will merge the environment and update paths to the relative Swift
        path.
        """
        file_relocation = {}
        file_prefix = "file://"

        # select files for relocation & upload
        for fullpath in files_dict.keys():

            if not fullpath.startswith(file_prefix):
                continue

            path = fullpath[len(file_prefix):]

            if path.startswith(tht_root):
                # This should already be uploaded.
                continue

            # path[1:] drops the leading '/' so the object path is relative.
            file_relocation[fullpath] = "user-files/{}".format(path[1:])

        # make sure links within files point to new locations, and upload them
        for orig_path, reloc_path in file_relocation.items():
            link_replacement = utils.relative_link_replacement(
                file_relocation, os.path.dirname(reloc_path))
            contents = utils.replace_links_in_template_contents(
                files_dict[orig_path], link_replacement)
            swift_client.put_object(container_name, reloc_path, contents)

        return file_relocation
    def _download_missing_files_from_plan(self, tht_dir, plan_name):
        """Download plan files missing locally into *tht_dir*.

        :param tht_dir: local (temporary) templates directory
        :param plan_name: swift container name holding the plan
        :returns: dict of plan-relative name -> local path for each file
            that was downloaded
        """
        # get and download missing files into tmp directory
        clients = self.app.client_manager
        objectclient = clients.tripleoclient.object_store
        plan_list = objectclient.get_container(plan_name)
        plan_filenames = [f['name'] for f in plan_list[1]]
        added_files = {}
        for pf in plan_filenames:
            file_path = os.path.join(tht_dir, pf)
            if not os.path.isfile(file_path):
                # NOTE(review): the backslash continuation embeds the next
                # line's text directly into the log message - confirm the
                # resulting message formatting is intended.
                self.log.debug("Missing in templates directory, downloading \
%s from swift into %s" % (pf, file_path))
                if not os.path.exists(os.path.dirname(file_path)):
                    os.makedirs(os.path.dirname(file_path))
                with open(file_path, 'w') as f:
                    f.write(objectclient.get_object(plan_name, pf)[1])
                added_files[pf] = file_path
        self.log.debug("added_files = %s" % added_files)
        return added_files
def _deploy_tripleo_heat_templates_tmpdir(self, stack, parsed_args):
# copy tht_root to temporary directory because we need to
# download any missing (e.g j2 rendered) files from the plan
tht_root = os.path.abspath(parsed_args.templates)
tht_tmp = tempfile.mkdtemp(prefix='tripleoclient-')
new_tht_root = "%s/tripleo-heat-templates" % tht_tmp
self.log.debug("Creating temporary templates tree in %s"
% new_tht_root)
try:
shutil.copytree(tht_root, new_tht_root, symlinks=True)
self._deploy_tripleo_heat_templates(stack, parsed_args,
new_tht_root, tht_root)
finally:
if parsed_args.no_cleanup:
self.log.warning("Not cleaning temporary directory %s"
% tht_tmp)
else:
shutil.rmtree(tht_tmp)
    def _deploy_tripleo_heat_templates(self, stack, parsed_args,
                                       tht_root, user_tht_root):
        """Deploy the fixed templates in TripleO Heat Templates

        Creates or updates the deployment plan from *tht_root*, assembles
        the environment, then hands off to the compat-yaml deploy path.

        :param stack: existing stack object or None
        :param parsed_args: parsed CLI arguments
        :param tht_root: temporary templates tree used for this deploy
        :param user_tht_root: the user's original templates tree
        """
        clients = self.app.client_manager
        network_client = clients.network
        workflow_client = clients.workflow_engine

        parameters = self._update_parameters(
            parsed_args, network_client, stack)

        plans = plan_management.list_deployment_plans(workflow_client)

        # TODO(d0ugal): We need to put a more robust strategy in place here to
        # handle updating plans.
        if parsed_args.stack in plans:
            # Upload the new plan templates to swift to replace the existing
            # templates.
            plan_management.update_plan_from_templates(
                clients, parsed_args.stack, tht_root, parsed_args.roles_file)
        else:
            plan_management.create_plan_from_templates(
                clients, parsed_args.stack, tht_root, parsed_args.roles_file)

        # Get any missing (e.g j2 rendered) files from the plan to tht_root
        added_files = self._download_missing_files_from_plan(
            tht_root, parsed_args.stack)

        print("Deploying templates in the directory {0}".format(
            os.path.abspath(tht_root)))

        self.log.debug("Creating Environment file")
        # TODO(jprovazn): env file generated by create_environment_file()
        # is not very usable any more, scale params are included in
        # parameters and keystone cert is generated on create only
        env_path = utils.create_environment_file()
        env = {}
        created_env_files = []

        if stack is None:
            # Keystone certs are only generated on the initial create.
            self.log.debug("Creating Keystone certificates")
            keystone_pki.generate_certs_into_json(env_path, False)
            created_env_files.append(env_path)
        if parsed_args.environment_directories:
            created_env_files.extend(self._load_environment_directories(
                parsed_args.environment_directories))

        env.update(self._create_parameters_env(parameters))

        if parsed_args.rhel_reg:
            reg_env_files, reg_env = self._create_registration_env(parsed_args)
            created_env_files.extend(reg_env_files)
            template_utils.deep_update(env, reg_env)
        if parsed_args.environment_files:
            created_env_files.extend(parsed_args.environment_files)

        self.log.debug("Processing environment files %s" % created_env_files)
        env_files, localenv = self._process_multiple_environments(
            created_env_files, added_files, tht_root, user_tht_root,
            cleanup=not parsed_args.no_cleanup)
        template_utils.deep_update(env, localenv)

        self._try_overcloud_deploy_with_compat_yaml(
            tht_root, stack, parsed_args.stack, parameters, env_files,
            parsed_args.timeout, env, parsed_args.update_plan_only)
def _try_overcloud_deploy_with_compat_yaml(self, tht_root, stack,
stack_name, parameters,
env_files, timeout,
env, update_plan_only):
overcloud_yaml = os.path.join(tht_root, constants.OVERCLOUD_YAML_NAME)
try:
self._heat_deploy(stack, stack_name, overcloud_yaml,
parameters, env_files, timeout,
tht_root, env, update_plan_only)
except ClientException as e:
messages = 'Failed to deploy: %s' % str(e)
raise ValueError(messages)
def _is_tls_enabled(self, overcloud_endpoint):
return overcloud_endpoint.startswith('https')
    def _keystone_init(self, overcloud_endpoint, overcloud_ip_or_fqdn,
                       parsed_args, stack):
        """Initialize Keystone via os-cloud-config (deprecated fallback).

        Only takes effect when services are missing from the Keystone
        catalog, i.e. the templates did not initialize Keystone via
        Puppet. Sets up Keystone itself and the missing service endpoints.

        :param overcloud_endpoint: public auth URL of the overcloud
        :param overcloud_ip_or_fqdn: host portion of that endpoint
        :param parsed_args: parsed CLI arguments (ssh user)
        :param stack: the overcloud Heat stack
        """
        keystone_admin_ip = utils.get_endpoint('KeystoneAdmin', stack)
        keystone_admin_ip = utils.unbracket_ipv6(keystone_admin_ip)
        keystone_internal_ip = utils.get_endpoint('KeystoneInternal', stack)
        keystone_internal_ip = utils.unbracket_ipv6(keystone_internal_ip)
        tls_enabled = self._is_tls_enabled(overcloud_endpoint)
        keystone_tls_host = None
        if tls_enabled:
            # NOTE(jaosorior): This triggers set up the keystone endpoint with
            # the https protocol and the required port set in
            # keystone.initialize.
            keystone_tls_host = overcloud_ip_or_fqdn
        keystone_client = occ_clients.get_keystone_client(
            'admin',
            utils.get_password(self.app.client_manager,
                               stack.stack_name,
                               'AdminPassword'),
            'admin',
            overcloud_endpoint)

        # Collect only the services missing from the Keystone catalog.
        services = {}
        for service, data in six.iteritems(constants.SERVICE_LIST):
            try:
                keystone_client.services.find(name=service)
            except kscexc.NotFound:
                service_data = self._set_service_data(service, data, stack)
                if service_data:
                    services.update({service: service_data})

        if services:
            # This was deprecated in Newton. The deprecation message and
            # os-cloud-config keystone init should remain until at least the
            # Pike release to ensure users have a chance to update their
            # templates, including ones for the previous release.
            self.log.warning('DEPRECATED: '
                             'It appears Keystone was not initialized by '
                             'Puppet. Will do initialization via '
                             'os-cloud-config, but this behavior is '
                             'deprecated. Please update your templates to a '
                             'version that has Puppet initialization of '
                             'Keystone.'
                             )
            # NOTE(jaosorior): These ports will be None if the templates
            # don't support the EndpointMap as an output yet. And so the
            # default values will be taken.
            public_port = None
            admin_port = None
            internal_port = None

            endpoint_map = utils.get_endpoint_map(stack)
            if endpoint_map:
                public_port = endpoint_map.get('KeystonePublic').get('port')
                admin_port = endpoint_map.get('KeystoneAdmin').get('port')
                internal_port = endpoint_map.get(
                    'KeystoneInternal').get('port')

            # TODO(rbrady): check usages of get_password
            keystone.initialize(
                keystone_admin_ip,
                utils.get_password(self.app.client_manager,
                                   stack.stack_name,
                                   'AdminToken'),
                'admin@example.com',
                utils.get_password(self.app.client_manager,
                                   stack.stack_name,
                                   'AdminPassword'),
                ssl=keystone_tls_host,
                public=overcloud_ip_or_fqdn,
                user=parsed_args.overcloud_ssh_user,
                admin=keystone_admin_ip,
                internal=keystone_internal_ip,
                public_port=public_port,
                admin_port=admin_port,
                internal_port=internal_port)

            if not tls_enabled:
                # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
                # os_cloud_config.SERVICES dictionary
                for service_name, data in keystone.SERVICES.items():
                    data.pop('ssl_port', None)

            keystone.setup_endpoints(
                services,
                client=keystone_client,
                os_auth_url=overcloud_endpoint,
                public_host=overcloud_ip_or_fqdn)
        # End of deprecated Keystone init
    def _set_service_data(self, service, data, stack):
        """Assemble endpoint data for one service, or None to skip it.

        :param service: service name from constants.SERVICE_LIST
        :param data: the service's entry from constants.SERVICE_LIST
        :param stack: the overcloud Heat stack
        :returns: dict of service data, or None when the service is absent
            from the stack outputs (its postconfig is skipped)
        """
        self.log.debug("Setting data for service '%s'" % service)
        service_data = data.copy()
        # password_field is only used to look up the password; don't pass
        # it through to the endpoint setup.
        service_data.pop('password_field', None)
        endpoint_map = utils.get_endpoint_map(stack)
        try:
            service_data.update(
                self._get_base_service_data(service, data, stack))
        except KeyError:
            # Raised when the service has no entry in the stack outputs.
            output_source = "service IPs"
            if endpoint_map:
                output_source = "endpoint map"
            self.log.debug(
                ("Skipping \"{}\" postconfig because it wasn't found in the "
                 "{} output").format(service, output_source))
            return None
        if not endpoint_map:
            return service_data
        service_data.update(self._get_endpoint_data(service, endpoint_map,
                                                    stack))
        return service_data
def _get_base_service_data(self, service, data, stack):
service_data = {}
password_field = data.get('password_field')
if password_field:
service_data['password'] = utils.get_password(
self.app.client_manager,
stack.stack_name,
password_field)
# Set internal endpoint
service_name_internal = self._format_endpoint_name(service, 'internal')
service_data['internal_host'] = utils.get_endpoint(
service_name_internal, stack)
return service_data
def _get_endpoint_data(self, service, endpoint_map, stack):
endpoint_data = {}
# Set standard port
service_name_internal = self._format_endpoint_name(service, 'internal')
endpoint_data['port'] = endpoint_map[service_name_internal]['port']
# Set public endpoint
service_name_public = self._format_endpoint_name(service, 'public')
public_endpoint_data = endpoint_map.get(service_name_public)
endpoint_data['public_host'] = public_endpoint_data['host']
# Set SSL port
if public_endpoint_data['uri'].startswith('https'):
endpoint_data['ssl_port'] = public_endpoint_data['port']
return endpoint_data
def _format_endpoint_name(self, service, interface):
return re.sub('v[0-9]+', '',
service.capitalize() + interface.capitalize())
def _endpoints_managed(self, stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'ManagedEndpoints':
# NOTE(jaosorior): We don't really care about the value as
# long as the key is there.
return output['output_value']
return False
def _deploy_postconfig(self, stack, parsed_args):
self.log.debug("_deploy_postconfig(%s)" % parsed_args)
overcloud_endpoint = utils.get_overcloud_endpoint(stack)
# NOTE(jaosorior): The overcloud endpoint can contain an IP address or
# an FQDN depending on how what it's configured to output in the
# tripleo-heat-templates. Such a configuration can be done by
# overriding the EndpointMap through parameter_defaults.
overcloud_ip_or_fqdn = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
keystone_admin_ip = utils.get_endpoint('KeystoneAdmin', stack)
no_proxy = os.environ.get('no_proxy', overcloud_ip_or_fqdn)
no_proxy_list = map(utils.bracket_ipv6,
[no_proxy, overcloud_ip_or_fqdn,
keystone_admin_ip])
os.environ['no_proxy'] = ','.join(
[x for x in no_proxy_list if x is not None])
utils.remove_known_hosts(overcloud_ip_or_fqdn)
if not self._endpoints_managed(stack):
self._keystone_init(overcloud_endpoint, overcloud_ip_or_fqdn,
parsed_args, stack)
else:
self.log.debug("Keystone endpoints and services are managed by "
"puppet. Skipping post-config.")
def _validate_args(self, parsed_args):
if parsed_args.templates is None and parsed_args.answers_file is None:
raise oscexc.CommandError(
"You must specify either --templates or --answers-file")
if parsed_args.environment_files:
nonexisting_envs = []
jinja2_envs = []
for env_file in parsed_args.environment_files:
if env_file.endswith(".j2.yaml"):
jinja2_envs.append(env_file)
elif not os.path.isfile(env_file):
# Tolerate missing file if there's a j2.yaml file that will
# be rendered in the plan but not available locally (yet)
if not os.path.isfile(env_file.replace(".yaml",
".j2.yaml")):
nonexisting_envs.append(env_file)
if jinja2_envs:
rewritten_paths = [e.replace(".j2.yaml", ".yaml")
for e in jinja2_envs]
raise oscexc.CommandError(
"Error: The the following jinja2 files were provided: -e "
"{}. Did you mean -e {}?".format(
' -e '.join(jinja2_envs),
' -e '.join(rewritten_paths)))
if nonexisting_envs:
raise oscexc.CommandError(
"Error: The following files were not found: {0}".format(
", ".join(nonexisting_envs)))
def _get_default_role_counts(self, parsed_args):
if parsed_args.roles_file:
roles_data = yaml.safe_load(open(parsed_args.roles_file).read())
else:
# Assume default role counts
return {
'ControllerCount': 1,
'ComputeCount': 1,
'ObjectStorageCount': 0,
'BlockStorageCount': 0,
'CephStorageCount': 0
}
default_role_counts = {}
for r in roles_data:
count_default = r.get('CountDefault', 0)
default_role_counts.setdefault(
"%sCount" % r['name'],
count_default)
return default_role_counts
    def _predeploy_verify_capabilities(self, stack, parameters, parsed_args):
        """Run all pre-deployment validations.

        Accumulates problem counts in self.predeploy_errors /
        self.predeploy_warnings and returns them as a (errors, warnings)
        tuple.
        """
        self.predeploy_errors = 0
        self.predeploy_warnings = 0
        self.log.debug("Starting _pre_verify_capabilities")
        bm_client = self.app.client_manager.baremetal
        # The deploy kernel/ramdisk images must exist in glance.
        self._check_boot_images()
        flavors = self._collect_flavors(parsed_args)
        # Every ironic node must reference the right deploy images.
        self._check_ironic_boot_configuration(bm_client)
        errors, warnings = utils.assign_and_verify_profiles(
            bm_client, flavors,
            assign_profiles=False,
            dry_run=parsed_args.dry_run
        )
        self.predeploy_errors += errors
        self.predeploy_warnings += warnings
        compute_client = self.app.client_manager.compute
        self.log.debug("Checking hypervisor stats")
        # None here signals the expected hypervisor resources are not
        # available yet.
        if utils.check_hypervisor_stats(compute_client) is None:
            self.log.error("Expected hypervisor stats not met")
            self.predeploy_errors += 1
        self.log.debug("Checking nodes count")
        default_role_counts = self._get_default_role_counts(parsed_args)
        enough_nodes, count, ironic_nodes_count = utils.check_nodes_count(
            bm_client,
            stack,
            parameters,
            default_role_counts
        )
        if not enough_nodes:
            self.log.error(
                "Not enough nodes - available: {0}, requested: {1}".format(
                    ironic_nodes_count, count))
            self.predeploy_errors += 1
        return self.predeploy_errors, self.predeploy_warnings
    # Cached glance IDs of the deploy boot images; populated lazily by
    # _image_ids().
    __kernel_id = None
    __ramdisk_id = None
    def _image_ids(self):
        """Return (kernel_id, ramdisk_id) for the deploy boot images.

        Looks up 'bm-deploy-kernel' and 'bm-deploy-ramdisk' in glance once
        and caches the result on the class.  Either ID may be None when
        the corresponding lookup fails; failures are logged, not raised.
        """
        if self.__kernel_id is not None and self.__ramdisk_id is not None:
            return self.__kernel_id, self.__ramdisk_id
        image_client = self.app.client_manager.image
        kernel_id, ramdisk_id = None, None
        try:
            kernel_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-kernel').id
        except AttributeError:
            # NOTE(review): per the message below this presumably happens
            # when the image name is ambiguous — confirm against osc_utils.
            self.log.exception("Please make sure there is only one image "
                               "named 'bm-deploy-kernel' in glance.")
        except oscexc.CommandError:
            self.log.exception("Error finding 'bm-deploy-kernel' in "
                               "glance.")
        try:
            ramdisk_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-ramdisk').id
        except AttributeError:
            self.log.exception("Please make sure there is only one image "
                               "named 'bm-deploy-ramdisk' in glance.")
        except oscexc.CommandError:
            self.log.exception("Error finding 'bm-deploy-ramdisk' in "
                               "glance.")
        self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
            kernel_id, ramdisk_id))
        self.__kernel_id = kernel_id
        self.__ramdisk_id = ramdisk_id
        return kernel_id, ramdisk_id
def _check_boot_images(self):
kernel_id, ramdisk_id = self._image_ids()
message = ("No image with the name '{}' found - make "
"sure you've uploaded boot images")
if kernel_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-kernel'))
if ramdisk_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-ramdisk'))
    def _collect_flavors(self, parsed_args):
        """Validate and collect nova flavors in use.
        Ensure that selected flavors (--ROLE-flavor) are valid in nova.
        Issue a warning if local boot is not set for a flavor.
        :returns: dictionary flavor name -> (flavor object, scale)
        """
        compute_client = self.app.client_manager.compute
        flavors = {f.name: f for f in compute_client.flavors.list()}
        result = {}
        message = "Provided --{}-flavor, '{}', does not exist"
        for target, (flavor_name, scale) in (
            utils.get_roles_info(parsed_args).items()
        ):
            if flavor_name is None or not scale:
                self.log.debug("--{}-flavor not used".format(target))
                continue
            # Several roles may share one flavor: merge their scales and
            # skip re-validating the flavor.
            try:
                flavor, old_scale = result[flavor_name]
            except KeyError:
                pass
            else:
                result[flavor_name] = (flavor, old_scale + scale)
                continue
            try:
                flavor = flavors[flavor_name]
            except KeyError:
                self.predeploy_errors += 1
                self.log.error(message.format(target, flavor_name))
                continue
            # Nodes can only PXE boot the deploy image when the flavor
            # carries capabilities:boot_option=local.
            if flavor.get_keys().get('capabilities:boot_option', '') \
                    != 'local':
                self.predeploy_warnings += 1
                self.log.warning(
                    'Flavor %s "capabilities:boot_option" is not set to '
                    '"local". Nodes must have ability to PXE boot from '
                    'deploy image.', flavor_name)
                self.log.warning(
                    'Recommended solution: openstack flavor set --property '
                    '"cpu_arch"="x86_64" --property '
                    '"capabilities:boot_option"="local" ' + flavor_name)
            result[flavor_name] = (flavor, scale)
        return result
def _check_ironic_boot_configuration(self, bm_client):
for node in bm_client.node.list(detail=True, maintenance=False):
self.log.debug("Checking config for Node {0}".format(node.uuid))
self._check_node_boot_configuration(node)
def _check_node_boot_configuration(self, node):
kernel_id, ramdisk_id = self._image_ids()
self.log.debug("Doing boot checks for {}".format(node.uuid))
message = ("Node uuid={uuid} has an incorrectly configured "
"{property}. Expected \"{expected}\" but got "
"\"{actual}\".")
if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_ramdisk',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_ramdisk')
))
if node.driver_info.get('deploy_kernel') != kernel_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_kernel',
expected=kernel_id,
actual=node.driver_info.get('deploy_kernel')
))
if 'boot_option:local' not in node.properties.get('capabilities', ''):
self.predeploy_warnings += 1
self.log.warning(message.format(
uuid=node.uuid,
property='properties/capabilities',
expected='boot_option:local',
actual=node.properties.get('capabilities')
))
    def get_parser(self, prog_name):
        """Build and return the argument parser for the deploy command."""
        # add_help doesn't work properly, set it to False:
        parser = argparse.ArgumentParser(
            description=self.get_description(),
            prog=prog_name,
            add_help=False
        )
        parser.add_argument(
            '--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES,
            help=_("The directory containing the Heat templates to deploy"),
        )
        parser.add_argument('--stack',
                            help=_("Stack name to create or update"),
                            default='overcloud')
        parser.add_argument('--timeout', '-t', metavar='<TIMEOUT>',
                            type=int, default=240,
                            help=_('Deployment timeout in minutes.'))
        utils.add_deployment_plan_arguments(parser)
        parser.add_argument('--libvirt-type',
                            choices=['kvm', 'qemu'],
                            help=_('Libvirt domain type.'))
        parser.add_argument('--ntp-server',
                            help=_('The NTP for overcloud nodes. '))
        parser.add_argument(
            '--no-proxy',
            default=os.environ.get('no_proxy', ''),
            help=_('A comma separated list of hosts that should not be '
                   'proxied.')
        )
        parser.add_argument(
            '--overcloud-ssh-user',
            default='heat-admin',
            help=_('User for ssh access to overcloud nodes')
        )
        parser.add_argument(
            '--environment-file', '-e', metavar='<HEAT ENVIRONMENT FILE>',
            action='append', dest='environment_files',
            help=_('Environment files to be passed to the heat stack-create '
                   'or heat stack-update command. (Can be specified more than '
                   'once.)')
        )
        parser.add_argument(
            '--environment-directory', metavar='<HEAT ENVIRONMENT DIRECTORY>',
            action='append', dest='environment_directories',
            default=[os.path.join(os.environ.get('HOME', ''), '.tripleo',
                                  'environments')],
            help=_('Environment file directories that are automatically '
                   ' added to the heat stack-create or heat stack-update'
                   ' commands. Can be specified more than once. Files in'
                   ' directories are loaded in ascending sort order.')
        )
        parser.add_argument(
            '--roles-file', '-r', dest='roles_file',
            help=_('Roles file, overrides the default %s in the --templates '
                   'directory') % constants.OVERCLOUD_ROLES_FILE
        )
        parser.add_argument(
            '--no-cleanup', action='store_true',
            help=_('Don\'t cleanup temporary files, just log their location')
        )
        parser.add_argument(
            '--update-plan-only',
            action='store_true',
            help=_('Only update the plan. Do not perform the actual '
                   'deployment. NOTE: Will move to a discrete command in a '
                   'future release.')
        )
        parser.add_argument(
            '--validation-errors-nonfatal',
            dest='validation_errors_fatal',
            action='store_false',
            default=True,
            help=_('Allow the deployment to continue in spite of validation '
                   'errors. Note that attempting deployment while errors '
                   'exist is likely to fail.')
        )
        parser.add_argument(
            '--validation-warnings-fatal',
            action='store_true',
            default=False,
            help=_('Exit if there are warnings from the configuration '
                   'pre-checks.')
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help=_('Only run validations, but do not apply any changes.')
        )
        parser.add_argument(
            '--skip-postconfig',
            action='store_true',
            default=False,
            help=_('Skip the overcloud post-deployment configuration.')
        )
        parser.add_argument(
            '--force-postconfig',
            action='store_true',
            default=False,
            help=_('Force the overcloud post-deployment configuration.')
        )
        # Options for registering the overcloud nodes with the customer
        # portal or a satellite server.
        reg_group = parser.add_argument_group('Registration Parameters')
        reg_group.add_argument(
            '--rhel-reg',
            action='store_true',
            help=_('Register overcloud nodes to the customer portal or a '
                   'satellite.')
        )
        reg_group.add_argument(
            '--reg-method',
            choices=['satellite', 'portal'],
            default='satellite',
            help=_('RHEL registration method to use for the overcloud nodes.')
        )
        reg_group.add_argument(
            '--reg-org',
            default='',
            help=_('Organization key to use for registration.')
        )
        reg_group.add_argument(
            '--reg-force',
            action='store_true',
            help=_('Register the system even if it is already registered.')
        )
        reg_group.add_argument(
            '--reg-sat-url',
            default='',
            help=_('Satellite server to register overcloud nodes.')
        )
        reg_group.add_argument(
            '--reg-activation-key',
            default='',
            help=_('Activation key to use for registration.')
        )
        parser.add_argument(
            '--answers-file',
            help=_('Path to a YAML file with arguments and parameters.')
        )
        return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
# Swiftclient logs things like 404s at error level, which is a problem
# because we use EAFP to check for the existence of files. Turn off
# most swiftclient logging to avoid cluttering up our output with
# pointless tracebacks.
sc_logger = logging.getLogger("swiftclient")
sc_logger.setLevel(logging.CRITICAL)
self._validate_args(parsed_args)
clients = self.app.client_manager
orchestration_client = clients.orchestration
stack = utils.get_stack(orchestration_client, parsed_args.stack)
if stack and stack.stack_status == 'IN_PROGRESS':
raise exceptions.StackInProgress(
"Unable to deploy as the stack '{}' status is '{}'".format(
stack.stack_name, stack.stack_status))
parameters = self._update_parameters(
parsed_args, clients.network, stack)
errors, warnings = self._predeploy_verify_capabilities(
stack, parameters, parsed_args)
if errors > 0:
self.log.error(
"Configuration has %d errors, fix them before proceeding. "
"Ignoring these errors is likely to lead to a failed deploy.",
errors)
if parsed_args.validation_warnings_fatal or \
parsed_args.validation_errors_fatal:
return
if warnings > 0:
self.log.error(
"Configuration has %d warnings, fix them before proceeding. ",
warnings)
if parsed_args.validation_warnings_fatal:
return
else:
self.log.info("SUCCESS: No warnings or errors in deploy "
"configuration, proceeding.")
stack_create = stack is None
if stack_create:
self.log.info("No stack found, will be doing a stack create")
else:
self.log.info("Stack found, will be doing a stack update")
if parsed_args.rhel_reg:
if parsed_args.reg_method == 'satellite':
sat_required_args = (parsed_args.reg_org and
parsed_args.reg_sat_url and
parsed_args.reg_activation_key)
if not sat_required_args:
raise exceptions.DeploymentError(
"ERROR: In order to use satellite registration, "
"you must specify --reg-org, --reg-sat-url, and "
"--reg-activation-key.")
else:
portal_required_args = (parsed_args.reg_org and
parsed_args.reg_activation_key)
if not portal_required_args:
raise exceptions.DeploymentError(
"ERROR: In order to use portal registration, you "
"must specify --reg-org, and "
"--reg-activation-key.")
if parsed_args.dry_run:
print("Validation Finished")
return
self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)
# Get a new copy of the stack after stack update/create. If it was
# a create then the previous stack object would be None.
stack = utils.get_stack(orchestration_client, parsed_args.stack)
# Force fetching of attributes
stack.get()
utils.create_overcloudrc(clients, stack, parsed_args.no_proxy)
utils.create_tempest_deployer_input()
# Run postconfig on create or force. Use force to makes sure endpoints
# are created with deploy reruns and upgrades
if (stack_create or parsed_args.force_postconfig
and not parsed_args.skip_postconfig):
self._deploy_postconfig(stack, parsed_args)
overcloud_endpoint = utils.get_overcloud_endpoint(stack)
print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
print("Overcloud Deployed")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ingo R.Keck
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import json
import argparse
import os
import re
import sys
VERBOSE = False
def load_config(configfilepath):
    """Load the JSON configuration file.

    The file must contain the keys "ersetzen" (replace rules, each with
    "regexp" and "ziel") and "warnung" (warn rules, each with "regexp"
    and "warnung").

    :param configfilepath: path to the config file (a one-element list is
        also accepted, as produced by argparse with nargs=1)
    :return: two lists — compiled replace patterns and warn patterns —
        where each entry is [compiled_regexp, target_or_warning, regexp_string]
    """
    global VERBOSE
    if isinstance(configfilepath, list):
        configfilepath = configfilepath[0]
    with open(configfilepath) as handle:
        config = json.load(handle)

    def compile_entries(entries, value_key):
        # Compile one pattern at a time so a broken regexp produces a
        # clear error message for that specific pattern.
        compiled = []
        for entry in entries:
            if VERBOSE:
                print("Überstetze Muster %s" % entry["regexp"])
            compiled.append(
                [re.compile(entry["regexp"]), entry[value_key], entry["regexp"]])
        return compiled

    ersetzen = compile_entries(config["ersetzen"], "ziel")
    warnung = compile_entries(config["warnung"], "warnung")
    if VERBOSE:
        print("Ergebnisse: Ersetzten:")
        print(ersetzen)
        print("Warnen:")
        print(warnung)
    return ersetzen, warnung
def _positionsverzeichnis(data):
    """Map each character offset of *data* to its 1-based (line, column)."""
    index = []
    for z, zeile in enumerate(data.splitlines(keepends=True)):
        index.extend((z + 1, p + 1) for p in range(len(zeile)))
    return index


def datei_saeubern(ersetzen, warnung, inpath, outpath=None, simulation=False):
    """Clean one text file: apply replace patterns, then report warn matches.

    :param ersetzen: compiled replace patterns from load_config()
    :param warnung: compiled warn patterns from load_config()
    :param inpath: path of the input text file
    :param outpath: output path; cleaned text goes to stdout when None
    :param simulation: when True, only report match counts, write nothing
    :return: True on success, False when the input file is empty
    """
    global VERBOSE
    if VERBOSE:
        if not outpath:
            print("Säubere Datei %s, Ausgabe zu StdOut" % inpath)
        else:
            print("Säubere Datei %s, Ausgabe zu %s" % (inpath, outpath))
    with open(inpath) as inputfile:
        data = inputfile.read()
    if not data:
        if VERBOSE:
            print("Datei %s ist leer, oder Fehler beim Lesen." % inpath)
        return False
    # Apply the replacements first.
    for pattern in ersetzen:
        newdata, times = pattern[0].subn(pattern[1], data)
        if simulation:
            print("Datei %s, Muster %s %d mal gefunden" % (inpath, pattern[2], times), file=sys.stderr)
        if VERBOSE:
            print("Datei %s, Muster %s %d mal gefunden" % (inpath, pattern[2], times), file=sys.stdout)
        # BUG FIX: the old `if newdata:` guard dropped the result when the
        # substitutions produced an empty string.  subn always returns the
        # new text, so assign unconditionally.
        data = newdata
    # Offset -> (line, column) map, computed AFTER the replacements since
    # they may have shifted the text.  (The original also computed this
    # once before replacing, which was dead code and has been removed.)
    zeilenverzeichnis = _positionsverzeichnis(data)
    for pattern in warnung:
        for match in pattern[0].finditer(data):
            zeile, spalte = zeilenverzeichnis[match.start()]
            # Warnings go to stderr so they don't mix with the output text.
            print("Datei %s, Position Zeile %d, Spalte %d: %s" % (inpath, zeile, spalte, pattern[1]),
                  file=sys.stderr)
    if not simulation:
        if outpath:
            with open(outpath, 'w') as outfile:
                outfile.write(data)
        else:
            print(data, file=sys.stdout)
    return True
class FileTranslateTest(unittest.TestCase):
    # NOTE: both tests expect the fixture files 'test.json' and 'test.txt'
    # to exist in the current working directory.
    def test_file_translate(self):
        """Run the cleaner in simulation mode against the fixture files."""
        global VERBOSE
        VERBOSE = True
        ersetzen, warnung = load_config('test.json')
        ergebnis = datei_saeubern(ersetzen, warnung, 'test.txt', outpath=None, simulation=True)
        self.assertTrue(ergebnis)
    def test_file_write(self):
        """Clean a copy of the fixtures in a temp dir and check the result file."""
        global VERBOSE
        VERBOSE = False
        import tempfile
        import shutil
        temppath = tempfile.mkdtemp()
        configpath = os.path.join(temppath, 'test.json')
        testpath = os.path.join(temppath, 'test.txt')
        outpath = os.path.join(temppath, 'ergebnis.txt')
        shutil.copyfile('test.json', configpath)
        shutil.copyfile('test.txt', testpath)
        ersetzen, warnung = load_config(configpath)
        ergebnis = datei_saeubern(ersetzen, warnung, testpath, outpath, simulation=False)
        with open(outpath) as inputfile:
            data = inputfile.read()
            print(data, file=sys.stderr)
            inputfile.close()
        # delete the temporary directory
        shutil.rmtree(temppath)
        self.assertTrue(ergebnis)
if __name__ == '__main__':
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(
        description="Ein einfaches Skript um textbasiere Dateien "
                    + "schnell zu säubern und zu analysieren. In einer Konfigsdatei werden "
                    + "Ersetzungsregeln und Warnregeln definiert. Ersetzungsregeln werden zuerst "
                    + "ausgeführt, dann wird der so erhaltene Text nach den Warnregeln analysiert. "
                    + "Die Zeilen/Spalten-Angaben der Warnregeln beziehen sich auf den schon ersetzen Text.")
    parser.add_argument('configpath', action='store', type=str, nargs=1,
                        help='Konfigdateipfad. Pfad zur Textdatei die gesäubert werden soll')
    parser.add_argument('infilepath', action='store', type=str, default='', nargs='+',
                        help='Textdateipfad. Pfad zur Textdatei die gesäubert werden soll')
    parser.add_argument('-o', action='store', type=str, dest='outprefix', default='',
                        help='Ausgabe des neuen Textes in eine Datei statt StdOut. Der Prefix wird vor die neue ' +
                             'Datei gehängt. Also z.B. Prefix "neu_" macht aus "eingang.txt" ein "neu_eingang.txt". ')
    parser.add_argument('-s', action='store_true', dest='simulate',
                        help='Simulation. Ändert nichts an den Daten, sondern gibt auf StdOut aus was er tun würde')
    parser.add_argument('-v', action='store_true', dest='verbose',
                        help='Verbose. Zum Debuggen.')
    args = parser.parse_args()
    # Version check: the script relies on Python-3-only syntax
    # (e.g. print(..., file=...)).
    if sys.version_info[0] < 3:
        raise Exception("Bitte Python 3.4 oder neuer verwenden.")
    if args.verbose:
        print("setting VERBOSE = True")
        VERBOSE = True
    # Load the config file with the replace/warn patterns.
    config_ersetzen, config_warnung = load_config(args.configpath)
    outpaths = []
    if args.outprefix:
        # Build output paths by prefixing each input file name.
        for infile in args.infilepath:
            filenpath, filename = os.path.split(infile)
            outpaths.append(os.path.join(filenpath, args.outprefix+filename))
    #
    # Clean every given input file.
    for n, infile in enumerate(args.infilepath):
        if args.outprefix:
            datei_saeubern(config_ersetzen, config_warnung, inpath=infile, outpath=outpaths[n],
                           simulation=args.simulate)
        else:
            datei_saeubern(config_ersetzen, config_warnung, inpath=infile, simulation=args.simulate)
Final cleanup.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ingo R.Keck
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import json
import argparse
import os
import re
import sys
VERBOSE = False
def load_config(configfilepath):
    """Load the JSON configuration file.

    The file must contain the keys "ersetzen" (replace rules, each with
    "regexp" and "ziel") and "warnung" (warn rules, each with "regexp"
    and "warnung").

    :param configfilepath: path to the config file (a one-element list is
        also accepted, as produced by argparse with nargs=1)
    :return: two lists — compiled replace patterns and warn patterns —
        where each entry is [compiled_regexp, target_or_warning, regexp_string]
    """
    global VERBOSE
    if isinstance(configfilepath, list):
        configfilepath = configfilepath[0]
    with open(configfilepath) as handle:
        config = json.load(handle)

    def compile_entries(entries, value_key):
        # Compile one pattern at a time so a broken regexp produces a
        # clear error message for that specific pattern.
        compiled = []
        for entry in entries:
            if VERBOSE:
                print("Überstetze Muster %s" % entry["regexp"])
            compiled.append(
                [re.compile(entry["regexp"]), entry[value_key], entry["regexp"]])
        return compiled

    ersetzen = compile_entries(config["ersetzen"], "ziel")
    warnung = compile_entries(config["warnung"], "warnung")
    if VERBOSE:
        print("Ergebnisse: Ersetzten:")
        print(ersetzen)
        print("Warnen:")
        print(warnung)
    return ersetzen, warnung
def _positionsverzeichnis(data):
    """Map each character offset of *data* to its 1-based (line, column)."""
    index = []
    for z, zeile in enumerate(data.splitlines(keepends=True)):
        index.extend((z + 1, p + 1) for p in range(len(zeile)))
    return index


def datei_saeubern(ersetzen, warnung, inpath, outpath=None, simulation=False):
    """Clean one text file: apply replace patterns, then report warn matches.

    :param ersetzen: compiled replace patterns from load_config()
    :param warnung: compiled warn patterns from load_config()
    :param inpath: path of the input text file
    :param outpath: output path; cleaned text goes to stdout when None
    :param simulation: when True, only report match counts, write nothing
    :return: True on success, False when the input file is empty
    """
    global VERBOSE
    if VERBOSE:
        if not outpath:
            print("Säubere Datei %s, Ausgabe zu StdOut" % inpath)
        else:
            print("Säubere Datei %s, Ausgabe zu %s" % (inpath, outpath))
    with open(inpath) as inputfile:
        data = inputfile.read()
    if not data:
        if VERBOSE:
            print("Datei %s ist leer, oder Fehler beim Lesen." % inpath)
        return False
    # Apply the replacements first.
    for pattern in ersetzen:
        newdata, times = pattern[0].subn(pattern[1], data)
        if simulation:
            print("Datei %s, Muster %s %d mal gefunden" % (inpath, pattern[2], times), file=sys.stderr)
        if VERBOSE:
            print("Datei %s, Muster %s %d mal gefunden" % (inpath, pattern[2], times), file=sys.stdout)
        # BUG FIX: the old `if newdata:` guard dropped the result when the
        # substitutions produced an empty string.  subn always returns the
        # new text, so assign unconditionally.
        data = newdata
    # Offset -> (line, column) map, computed AFTER the replacements since
    # they may have shifted the text.  (The original also computed this
    # once before replacing, which was dead code and has been removed.)
    zeilenverzeichnis = _positionsverzeichnis(data)
    for pattern in warnung:
        for match in pattern[0].finditer(data):
            zeile, spalte = zeilenverzeichnis[match.start()]
            # Warnings go to stderr so they don't mix with the output text.
            print("Datei %s, Position Zeile %d, Spalte %d: %s" % (inpath, zeile, spalte, pattern[1]),
                  file=sys.stderr)
    if not simulation:
        if outpath:
            with open(outpath, 'w') as outfile:
                outfile.write(data)
        else:
            print(data, file=sys.stdout)
    return True
class FileTranslateTest(unittest.TestCase):
    # NOTE: both tests expect the fixture files 'test.json' and 'test.txt'
    # to exist in the current working directory.
    def test_file_translate(self):
        """Run the cleaner in simulation mode against the fixture files."""
        global VERBOSE
        VERBOSE = True
        ersetzen, warnung = load_config('test.json')
        ergebnis = datei_saeubern(ersetzen, warnung, 'test.txt', outpath=None, simulation=True)
        self.assertTrue(ergebnis)
    def test_file_write(self):
        """Clean a copy of the fixtures in a temp dir and check the result file."""
        global VERBOSE
        VERBOSE = False
        import tempfile
        import shutil
        temppath = tempfile.mkdtemp()
        configpath = os.path.join(temppath, 'test.json')
        testpath = os.path.join(temppath, 'test.txt')
        outpath = os.path.join(temppath, 'ergebnis.txt')
        shutil.copyfile('test.json', configpath)
        shutil.copyfile('test.txt', testpath)
        ersetzen, warnung = load_config(configpath)
        ergebnis = datei_saeubern(ersetzen, warnung, testpath, outpath, simulation=False)
        with open(outpath) as inputfile:
            data = inputfile.read()
            print(data, file=sys.stderr)
            inputfile.close()
        # delete the temporary directory
        shutil.rmtree(temppath)
        self.assertTrue(ergebnis)
if __name__ == '__main__':
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(
        description="Ein einfaches Skript um textbasiere Dateien "
                    + "schnell zu säubern und zu analysieren. In einer Konfigsdatei werden "
                    + "Ersetzungsregeln und Warnregeln definiert. Ersetzungsregeln werden zuerst "
                    + "ausgeführt, dann wird der so erhaltene Text nach den Warnregeln analysiert. "
                    + "Die Zeilen/Spalten-Angaben der Warnregeln beziehen sich auf den schon ersetzen Text.")
    parser.add_argument('configpath', action='store', type=str, nargs=1,
                        help='Konfigdateipfad. Pfad zur Textdatei die gesäubert werden soll')
    parser.add_argument('infilepath', action='store', type=str, default='', nargs='+',
                        help='Textdateipfad. Pfad zur Textdatei die gesäubert werden soll')
    parser.add_argument('-o', action='store', type=str, dest='outprefix', default='',
                        help='Ausgabe des neuen Textes in eine Datei statt StdOut. Der Prefix wird vor die neue ' +
                             'Datei gehängt. Also z.B. Prefix "neu_" macht aus "eingang.txt" ein "neu_eingang.txt". ')
    parser.add_argument('-s', action='store_true', dest='simulate',
                        help='Simulation. Ändert nichts an den Daten, sondern gibt auf StdOut aus was er tun würde')
    parser.add_argument('-v', action='store_true', dest='verbose',
                        help='Verbose. Zum Debuggen.')
    args = parser.parse_args()
    # Version check: the script relies on Python-3-only syntax
    # (e.g. print(..., file=...)).
    if sys.version_info[0] < 3:
        raise Exception("Bitte Python 3.4 oder neuer verwenden.")
    if args.verbose:
        print("setting VERBOSE = True")
        VERBOSE = True
    # Load the config file with the replace/warn patterns.
    config_ersetzen, config_warnung = load_config(args.configpath)
    outpaths = []
    if args.outprefix:
        # Build output paths by prefixing each input file name.
        for infile in args.infilepath:
            filenpath, filename = os.path.split(infile)
            outpaths.append(os.path.join(filenpath, args.outprefix + filename))
    #
    # Clean every given input file.
    for n, infile in enumerate(args.infilepath):
        if args.outprefix:
            datei_saeubern(config_ersetzen, config_warnung, inpath=infile, outpath=outpaths[n],
                           simulation=args.simulate)
        else:
            datei_saeubern(config_ersetzen, config_warnung, inpath=infile, simulation=args.simulate)
|
# ! /usr/bin/python
# Copyright 2014 Jeremy Carroll
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collectd
import json
import urllib2
import socket
import collections
from distutils.version import StrictVersion
PREFIX = "elasticsearch"
ES_CLUSTER = "elasticsearch"
ES_HOST = "localhost"
ES_PORT = 9200
VERBOSE_LOGGING = False
Stat = collections.namedtuple('Stat', ('type', 'path'))
STATS_CUR = {}
# DICT: ElasticSearch 1.0.0
STATS_ES1 = {
## STORE
'indices.store.throttle-time': Stat("counter", "nodes.%s.indices.store.throttle_time_in_millis"),
##SEARCH
'indices.search.open-contexts': Stat("gauge", "nodes.%s.indices.search.open_contexts"),
##CACHE
'indices.cache.field.eviction': Stat("counter", "nodes.%s.indices.fielddata.evictions"),
'indices.cache.field.size': Stat("bytes", "nodes.%s.indices.fielddata.memory_size_in_bytes"),
'indices.cache.filter.evictions': Stat("counter", "nodes.%s.indices.filter_cache.evictions"),
'indices.cache.filter.size': Stat("bytes", "nodes.%s.indices.filter_cache.memory_size_in_bytes"),
## FLUSH
'indices.flush.total': Stat("counter", "nodes.%s.indices.flush.total"),
'indices.flush.time': Stat("counter", "nodes.%s.indices.flush.total_time_in_millis"),
## MERGES
'indices.merges.current': Stat("gauge", "nodes.%s.indices.merges.current"),
'indices.merges.current-docs': Stat("gauge", "nodes.%s.indices.merges.current_docs"),
'indices.merges.current-size': Stat("bytes", "nodes.%s.indices.merges.current_size_in_bytes"),
'indices.merges.total': Stat("counter", "nodes.%s.indices.merges.total"),
'indices.merges.total-docs': Stat("gauge", "nodes.%s.indices.merges.total_docs"),
'indices.merges.total-size': Stat("bytes", "nodes.%s.indices.merges.total_size_in_bytes"),
'indices.merges.time': Stat("counter", "nodes.%s.indices.merges.total_time_in_millis"),
## REFRESH
'indices.refresh.total': Stat("counter", "nodes.%s.indices.refresh.total"),
'indices.refresh.time': Stat("counter", "nodes.%s.indices.refresh.total_time_in_millis"),
}
# DICT: ElasticSearch 0.90.x
STATS_ES09 = {
##CPU
'process.cpu.percent': Stat("gauge", "nodes.%s.process.cpu.percent"),
}
# DICT: Common stuff
STATS = {
## DOCS
'indices.docs.count': Stat("gauge", "nodes.%s.indices.docs.count"),
'indices.docs.deleted': Stat("counter", "nodes.%s.indices.docs.deleted"),
## STORE
'indices.store.size': Stat("bytes", "nodes.%s.indices.store.size_in_bytes"),
## INDEXING
'indices.indexing.index-total': Stat("counter", "nodes.%s.indices.indexing.index_total"),
'indices.indexing.index-time': Stat("counter", "nodes.%s.indices.indexing.index_time_in_millis"),
'indices.indexing.delete-total': Stat("counter", "nodes.%s.indices.indexing.delete_total"),
'indices.indexing.delete-time': Stat("counter", "nodes.%s.indices.indexing.delete_time_in_millis"),
'indices.indexing.index-current': Stat("gauge", "nodes.%s.indices.indexing.index_current"),
'indices.indexing.delete-current': Stat("gauge", "nodes.%s.indices.indexing.delete_current"),
# SEGMENTS
'indices.segments.count': Stat('counter', 'nodes.%s.indices.segments.count'),
'indices.segments.memory_in_bytes': Stat('bytes', 'nodes.%s.indices.segments.memory_in_bytes'),
'indices.segments.fixed_bit_set_memory_in_bytes': Stat('bytes',
'nodes.%s.indices.segments.fixed_bit_set_memory_in_bytes'),
## GET
'indices.get.total': Stat("counter", "nodes.%s.indices.get.total"),
'indices.get.time': Stat("counter", "nodes.%s.indices.get.time_in_millis"),
'indices.get.exists-total': Stat("counter", "nodes.%s.indices.get.exists_total"),
'indices.get.exists-time': Stat("counter", "nodes.%s.indices.get.exists_time_in_millis"),
'indices.get.missing-total': Stat("counter", "nodes.%s.indices.get.missing_total"),
'indices.get.missing-time': Stat("counter", "nodes.%s.indices.get.missing_time_in_millis"),
'indices.get.current': Stat("gauge", "nodes.%s.indices.get.current"),
## SEARCH
'indices.search.query-current': Stat("gauge", "nodes.%s.indices.search.query_current"),
'indices.search.query-total': Stat("counter", "nodes.%s.indices.search.query_total"),
'indices.search.query-time': Stat("counter", "nodes.%s.indices.search.query_time_in_millis"),
'indices.search.fetch-current': Stat("gauge", "nodes.%s.indices.search.fetch_current"),
'indices.search.fetch-total': Stat("counter", "nodes.%s.indices.search.fetch_total"),
'indices.search.fetch-time': Stat("counter", "nodes.%s.indices.search.fetch_time_in_millis"),
# JVM METRICS #
##GC
'jvm.gc.time': Stat("counter", "nodes.%s.jvm.gc.collectors.young.collection_time_in_millis"),
'jvm.gc.count': Stat("counter", "nodes.%s.jvm.gc.collectors.young.collection_count"),
'jvm.gc.old-time': Stat("counter", "nodes.%s.jvm.gc.collectors.old.collection_time_in_millis"),
'jvm.gc.old-count': Stat("counter", "nodes.%s.jvm.gc.collectors.old.collection_count"),
## MEM
'jvm.mem.heap-committed': Stat("bytes", "nodes.%s.jvm.mem.heap_committed_in_bytes"),
'jvm.mem.heap-used': Stat("bytes", "nodes.%s.jvm.mem.heap_used_in_bytes"),
'jvm.mem.heap-used-percent': Stat("percent", "nodes.%s.jvm.mem.heap_used_percent"),
'jvm.mem.non-heap-committed': Stat("bytes", "nodes.%s.jvm.mem.non_heap_committed_in_bytes"),
'jvm.mem.non-heap-used': Stat("bytes", "nodes.%s.jvm.mem.non_heap_used_in_bytes"),
## THREADS
'jvm.threads.count': Stat("gauge", "nodes.%s.jvm.threads.count"),
'jvm.threads.peak': Stat("gauge", "nodes.%s.jvm.threads.peak_count"),
# TRANSPORT METRICS #
'transport.server_open': Stat("gauge", "nodes.%s.transport.server_open"),
'transport.rx.count': Stat("counter", "nodes.%s.transport.rx_count"),
'transport.rx.size': Stat("bytes", "nodes.%s.transport.rx_size_in_bytes"),
'transport.tx.count': Stat("counter", "nodes.%s.transport.tx_count"),
'transport.tx.size': Stat("bytes", "nodes.%s.transport.tx_size_in_bytes"),
# HTTP METRICS #
'http.current_open': Stat("gauge", "nodes.%s.http.current_open"),
'http.total_open': Stat("counter", "nodes.%s.http.total_opened"),
# PROCESS METRICS #
'process.open_file_descriptors': Stat("gauge", "nodes.%s.process.open_file_descriptors"),
}
# FUNCTION: Collect stats from JSON result
def lookup_stat(stat, json):
    """Resolve *stat* for every node in the cluster response.

    Returns one entry per node: the integer value when the stat path
    resolved, or None when dig_it_up signalled a miss (it returns
    False -- a bool -- when no match is found).
    """
    path_template = STATS_CUR[stat].path
    values = []
    for node_id in json['nodes'].keys():
        raw = dig_it_up(json, path_template % node_id)
        values.append(None if isinstance(raw, bool) else int(raw))
    return values
def configure_callback(conf):
    """Receive configuration information from collectd.

    Recognized keys: Host, Port, Verbose, Cluster.  Unknown keys are
    logged as warnings and ignored.
    """
    # ES_CLUSTER must be declared global here too: without it the
    # 'Cluster' branch below only bound a function-local name, so the
    # configured cluster name was silently discarded and the module
    # default "elasticsearch" was always used.
    global ES_HOST, ES_PORT, VERBOSE_LOGGING, ES_CLUSTER
    for node in conf.children:
        if node.key == 'Host':
            ES_HOST = node.values[0]
        elif node.key == 'Port':
            ES_PORT = int(node.values[0])
        elif node.key == 'Verbose':
            VERBOSE_LOGGING = bool(node.values[0])
        elif node.key == 'Cluster':
            ES_CLUSTER = node.values[0]
        else:
            collectd.warning('elasticsearch plugin: Unknown config key: %s.'
                             % node.key)
    log_verbose('Configured with host=%s, port=%s' % (ES_HOST, ES_PORT))
def fetch_url(url):
    """GET *url* and decode the body as JSON.

    Returns the parsed object, or None after logging any network error.
    """
    try:
        result = json.load(urllib2.urlopen(url, timeout=10))
    # Use the 'as' form for consistency with the handlers below; the
    # 'except X, e' comma syntax is deprecated since Python 2.6.
    except urllib2.URLError as e:
        collectd.error('elasticsearch plugin: Error connecting to %s - %r' % (url, e))
        return None
    except socket.timeout as e:
        collectd.error('elasticsearch plugin: Timeout connecting to %s - %r' % (url, e))
        return None
    except socket.error as e:  # for example : ECONNRESET
        collectd.error('elasticsearch plugin: Connection error to %s - %r' % (url, e))
        return None
    return result
def fetch_stats():
    """Query ElasticSearch and dispatch all stats via parse_stats().

    Chooses the stats endpoint and the stat table based on the server
    version (>= 1.0.0 vs 0.90.x), extends the table with per-thread-pool
    metrics, then fetches and parses.  Returns None if any HTTP request
    fails (fetch_url already logged the error).
    """
    global ES_CLUSTER, ES_HOST, ES_PORT, STATS_CUR
    base_url = 'http://' + ES_HOST + ':' + str(ES_PORT) + '/'
    server_info = fetch_url(base_url)
    if server_info is None:
        return
    version = server_info['version']['number']
    if StrictVersion(version) >= StrictVersion('1.0.0'):
        ES_URL = base_url + '_nodes/stats/transport,http,process,jvm,indices'
        # dict(a.items() + b.items()) is Python-2 only list concatenation.
        STATS_CUR = dict(STATS.items() + STATS_ES1.items())
    else:
        ES_URL = base_url + '_cluster/nodes/_local/stats?http=true&process=true&jvm=true&transport=true'
        STATS_CUR = dict(STATS.items() + STATS_ES09.items())
    # add info on thread pools
    for pool in ['generic', 'index', 'get', 'snapshot', 'merge', 'optimize', 'bulk', 'warmer', 'flush', 'search', 'refresh']:
        for attr in ['threads', 'queue', 'active', 'largest']:
            path = 'thread_pool.{0}.{1}'.format(pool, attr)
            STATS_CUR[path] = Stat("gauge", 'nodes.%s.{0}'.format(path))
        for attr in ['completed', 'rejected']:
            path = 'thread_pool.{0}.{1}'.format(pool, attr)
            STATS_CUR[path] = Stat("counter", 'nodes.%s.{0}'.format(path))
    result = fetch_url(ES_URL)
    if result is None:
        return
    # The cluster name labels every dispatched value's plugin_instance.
    ES_CLUSTER = result['cluster_name']
    return parse_stats(result)
def parse_stats(json):
    """Parse stats response from ElasticSearch and dispatch every metric.

    Single-node clusters dispatch with an empty instance suffix;
    multi-node clusters dispatch one value per node plus an aggregate
    under the "total" instance (averaged for non-byte types).
    """
    for name, key in STATS_CUR.iteritems():
        result = lookup_stat(name, json)
        count = len(result)
        if count == 1:
            dispatch_stat(result[0], name, key, "")
        else:
            total = 0
            found = 0
            for index, value in enumerate(result):
                dispatch_stat(value, name, key, "node%d" % index)
                # lookup_stat() yields None for nodes that lack this stat;
                # the original `total += value` then raised TypeError and
                # aborted the whole read cycle.  Aggregate real values only.
                if value is not None:
                    total += value
                    found += 1
            if found == 0:
                # Nothing to aggregate; dispatch_stat() logs the miss.
                dispatch_stat(None, name, key, "total")
                continue
            if key.type != "bytes":
                # Non-byte stats report the per-node average, not the sum.
                total = total / found
            dispatch_stat(total, name, key, "total")
def dispatch_stat(result, name, key, node_index):
    """Dispatch one collectd value; skip (with a warning) missing stats."""
    if result is None:
        collectd.warning('elasticsearch plugin: Value not found for %s' % name)
        return
    estype = key.type
    value = int(result)
    log_verbose('Sending value[%s]: %s=%s' % (estype, name, value))
    # Suffix the cluster name with the node instance, when there is one.
    if node_index:
        instance = ES_CLUSTER + "_" + node_index
    else:
        instance = ES_CLUSTER
    val = collectd.Values(plugin='elasticsearch')
    val.plugin_instance = instance
    val.type = estype
    val.type_instance = name
    val.values = [value]
    val.dispatch()
def read_callback():
    """collectd read hook: fetch and dispatch all ElasticSearch stats."""
    log_verbose('Read callback called')
    # fetch_stats() dispatches values as a side effect; its return value
    # was previously bound to an unused local, so drop the assignment.
    fetch_stats()
def dig_it_up(obj, path):
    """Walk nested dicts in *obj* following dotted *path* (e.g. 'a.b.c').

    Returns the value found, or False when any step is missing.
    NOTE: Python 2 only -- relies on the `unicode` and builtin `reduce`
    names, both gone in Python 3.
    """
    try:
        if type(path) in (str, unicode):
            path = path.split('.')
        return reduce(lambda x, y: x[y], path, obj)
    except:
        # Deliberate bare except: any lookup failure means "no match".
        return False
def log_verbose(msg):
    """Forward *msg* to collectd.info() when verbose logging is enabled."""
    if VERBOSE_LOGGING:
        collectd.info('elasticsearch plugin [verbose]: %s' % msg)
# Register the collectd hooks: configuration first, then the periodic reader.
collectd.register_config(configure_callback)
collectd.register_read(read_callback)
disclaimer
# ! /usr/bin/python
# Copyright 2014 Jeremy Carroll
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collectd
import json
import urllib2
import socket
import collections
from distutils.version import StrictVersion
PREFIX = "elasticsearch"
ES_CLUSTER = "elasticsearch"
ES_HOST = "localhost"
ES_PORT = 9200
VERBOSE_LOGGING = False
Stat = collections.namedtuple('Stat', ('type', 'path'))
STATS_CUR = {}
# DICT: ElasticSearch 1.0.0
STATS_ES1 = {
## STORE
'indices.store.throttle-time': Stat("counter", "nodes.%s.indices.store.throttle_time_in_millis"),
##SEARCH
'indices.search.open-contexts': Stat("gauge", "nodes.%s.indices.search.open_contexts"),
##CACHE
'indices.cache.field.eviction': Stat("counter", "nodes.%s.indices.fielddata.evictions"),
'indices.cache.field.size': Stat("bytes", "nodes.%s.indices.fielddata.memory_size_in_bytes"),
'indices.cache.filter.evictions': Stat("counter", "nodes.%s.indices.filter_cache.evictions"),
'indices.cache.filter.size': Stat("bytes", "nodes.%s.indices.filter_cache.memory_size_in_bytes"),
## FLUSH
'indices.flush.total': Stat("counter", "nodes.%s.indices.flush.total"),
'indices.flush.time': Stat("counter", "nodes.%s.indices.flush.total_time_in_millis"),
## MERGES
'indices.merges.current': Stat("gauge", "nodes.%s.indices.merges.current"),
'indices.merges.current-docs': Stat("gauge", "nodes.%s.indices.merges.current_docs"),
'indices.merges.current-size': Stat("bytes", "nodes.%s.indices.merges.current_size_in_bytes"),
'indices.merges.total': Stat("counter", "nodes.%s.indices.merges.total"),
'indices.merges.total-docs': Stat("gauge", "nodes.%s.indices.merges.total_docs"),
'indices.merges.total-size': Stat("bytes", "nodes.%s.indices.merges.total_size_in_bytes"),
'indices.merges.time': Stat("counter", "nodes.%s.indices.merges.total_time_in_millis"),
## REFRESH
'indices.refresh.total': Stat("counter", "nodes.%s.indices.refresh.total"),
'indices.refresh.time': Stat("counter", "nodes.%s.indices.refresh.total_time_in_millis"),
}
# DICT: ElasticSearch 0.90.x
STATS_ES09 = {
##CPU
'process.cpu.percent': Stat("gauge", "nodes.%s.process.cpu.percent"),
}
# DICT: Common stuff
STATS = {
## DOCS
'indices.docs.count': Stat("gauge", "nodes.%s.indices.docs.count"),
'indices.docs.deleted': Stat("counter", "nodes.%s.indices.docs.deleted"),
## STORE
'indices.store.size': Stat("bytes", "nodes.%s.indices.store.size_in_bytes"),
## INDEXING
'indices.indexing.index-total': Stat("counter", "nodes.%s.indices.indexing.index_total"),
'indices.indexing.index-time': Stat("counter", "nodes.%s.indices.indexing.index_time_in_millis"),
'indices.indexing.delete-total': Stat("counter", "nodes.%s.indices.indexing.delete_total"),
'indices.indexing.delete-time': Stat("counter", "nodes.%s.indices.indexing.delete_time_in_millis"),
'indices.indexing.index-current': Stat("gauge", "nodes.%s.indices.indexing.index_current"),
'indices.indexing.delete-current': Stat("gauge", "nodes.%s.indices.indexing.delete_current"),
# SEGMENTS
'indices.segments.count': Stat('counter', 'nodes.%s.indices.segments.count'),
'indices.segments.memory_in_bytes': Stat('bytes', 'nodes.%s.indices.segments.memory_in_bytes'),
'indices.segments.fixed_bit_set_memory_in_bytes': Stat('bytes',
'nodes.%s.indices.segments.fixed_bit_set_memory_in_bytes'),
## GET
'indices.get.total': Stat("counter", "nodes.%s.indices.get.total"),
'indices.get.time': Stat("counter", "nodes.%s.indices.get.time_in_millis"),
'indices.get.exists-total': Stat("counter", "nodes.%s.indices.get.exists_total"),
'indices.get.exists-time': Stat("counter", "nodes.%s.indices.get.exists_time_in_millis"),
'indices.get.missing-total': Stat("counter", "nodes.%s.indices.get.missing_total"),
'indices.get.missing-time': Stat("counter", "nodes.%s.indices.get.missing_time_in_millis"),
'indices.get.current': Stat("gauge", "nodes.%s.indices.get.current"),
## SEARCH
'indices.search.query-current': Stat("gauge", "nodes.%s.indices.search.query_current"),
'indices.search.query-total': Stat("counter", "nodes.%s.indices.search.query_total"),
'indices.search.query-time': Stat("counter", "nodes.%s.indices.search.query_time_in_millis"),
'indices.search.fetch-current': Stat("gauge", "nodes.%s.indices.search.fetch_current"),
'indices.search.fetch-total': Stat("counter", "nodes.%s.indices.search.fetch_total"),
'indices.search.fetch-time': Stat("counter", "nodes.%s.indices.search.fetch_time_in_millis"),
# JVM METRICS #
##GC
'jvm.gc.time': Stat("counter", "nodes.%s.jvm.gc.collectors.young.collection_time_in_millis"),
'jvm.gc.count': Stat("counter", "nodes.%s.jvm.gc.collectors.young.collection_count"),
'jvm.gc.old-time': Stat("counter", "nodes.%s.jvm.gc.collectors.old.collection_time_in_millis"),
'jvm.gc.old-count': Stat("counter", "nodes.%s.jvm.gc.collectors.old.collection_count"),
## MEM
'jvm.mem.heap-committed': Stat("bytes", "nodes.%s.jvm.mem.heap_committed_in_bytes"),
'jvm.mem.heap-used': Stat("bytes", "nodes.%s.jvm.mem.heap_used_in_bytes"),
'jvm.mem.heap-used-percent': Stat("percent", "nodes.%s.jvm.mem.heap_used_percent"),
'jvm.mem.non-heap-committed': Stat("bytes", "nodes.%s.jvm.mem.non_heap_committed_in_bytes"),
'jvm.mem.non-heap-used': Stat("bytes", "nodes.%s.jvm.mem.non_heap_used_in_bytes"),
## THREADS
'jvm.threads.count': Stat("gauge", "nodes.%s.jvm.threads.count"),
'jvm.threads.peak': Stat("gauge", "nodes.%s.jvm.threads.peak_count"),
# TRANSPORT METRICS #
'transport.server_open': Stat("gauge", "nodes.%s.transport.server_open"),
'transport.rx.count': Stat("counter", "nodes.%s.transport.rx_count"),
'transport.rx.size': Stat("bytes", "nodes.%s.transport.rx_size_in_bytes"),
'transport.tx.count': Stat("counter", "nodes.%s.transport.tx_count"),
'transport.tx.size': Stat("bytes", "nodes.%s.transport.tx_size_in_bytes"),
# HTTP METRICS #
'http.current_open': Stat("gauge", "nodes.%s.http.current_open"),
'http.total_open': Stat("counter", "nodes.%s.http.total_opened"),
# PROCESS METRICS #
'process.open_file_descriptors': Stat("gauge", "nodes.%s.process.open_file_descriptors"),
}
# FUNCTION: Collect stats from JSON result
def lookup_stat(stat, json):
    """Return one entry per node for *stat*: the int value, or None if absent."""
    node_names = json['nodes'].keys()
    results = []
    for node in node_names:
        val = dig_it_up(json, STATS_CUR[stat].path % node)
        # Check to make sure we have a valid result
        # dig_it_up returns False if no match found
        if not isinstance(val, bool):
            results.append(int(val))
        else:
            results.append(None)
    return results
def configure_callback(conf):
    """Receive configuration information from collectd.

    Recognized keys: Host, Port, Verbose, Cluster.  Unknown keys are
    logged as warnings and ignored.
    """
    # ES_CLUSTER must be declared global here too: without it the
    # 'Cluster' branch below only bound a function-local name, so the
    # configured cluster name was silently discarded and the module
    # default "elasticsearch" was always used.
    global ES_HOST, ES_PORT, VERBOSE_LOGGING, ES_CLUSTER
    for node in conf.children:
        if node.key == 'Host':
            ES_HOST = node.values[0]
        elif node.key == 'Port':
            ES_PORT = int(node.values[0])
        elif node.key == 'Verbose':
            VERBOSE_LOGGING = bool(node.values[0])
        elif node.key == 'Cluster':
            ES_CLUSTER = node.values[0]
        else:
            collectd.warning('elasticsearch plugin: Unknown config key: %s.'
                             % node.key)
    log_verbose('Configured with host=%s, port=%s' % (ES_HOST, ES_PORT))
def fetch_url(url):
    """GET *url* and decode the body as JSON.

    Returns the parsed object, or None after logging any network error.
    """
    try:
        result = json.load(urllib2.urlopen(url, timeout=10))
    # Use the 'as' form for consistency with the handlers below; the
    # 'except X, e' comma syntax is deprecated since Python 2.6.
    except urllib2.URLError as e:
        collectd.error('elasticsearch plugin: Error connecting to %s - %r' % (url, e))
        return None
    except socket.timeout as e:
        collectd.error('elasticsearch plugin: Timeout connecting to %s - %r' % (url, e))
        return None
    except socket.error as e:  # for example : ECONNRESET
        collectd.error('elasticsearch plugin: Connection error to %s - %r' % (url, e))
        return None
    return result
def fetch_stats():
    """Query ElasticSearch and dispatch all stats via parse_stats().

    Chooses the stats endpoint and the stat table based on the server
    version (>= 1.0.0 vs 0.90.x), extends the table with per-thread-pool
    metrics, then fetches and parses.  Returns None if any HTTP request
    fails (fetch_url already logged the error).
    """
    global ES_CLUSTER, ES_HOST, ES_PORT, STATS_CUR
    base_url = 'http://' + ES_HOST + ':' + str(ES_PORT) + '/'
    server_info = fetch_url(base_url)
    if server_info is None:
        return
    version = server_info['version']['number']
    if StrictVersion(version) >= StrictVersion('1.0.0'):
        ES_URL = base_url + '_nodes/stats/transport,http,process,jvm,indices'
        # dict(a.items() + b.items()) is Python-2 only list concatenation.
        STATS_CUR = dict(STATS.items() + STATS_ES1.items())
    else:
        ES_URL = base_url + '_cluster/nodes/_local/stats?http=true&process=true&jvm=true&transport=true'
        STATS_CUR = dict(STATS.items() + STATS_ES09.items())
    # add info on thread pools
    for pool in ['generic', 'index', 'get', 'snapshot', 'merge', 'optimize', 'bulk', 'warmer', 'flush', 'search', 'refresh']:
        for attr in ['threads', 'queue', 'active', 'largest']:
            path = 'thread_pool.{0}.{1}'.format(pool, attr)
            STATS_CUR[path] = Stat("gauge", 'nodes.%s.{0}'.format(path))
        for attr in ['completed', 'rejected']:
            path = 'thread_pool.{0}.{1}'.format(pool, attr)
            STATS_CUR[path] = Stat("counter", 'nodes.%s.{0}'.format(path))
    result = fetch_url(ES_URL)
    if result is None:
        return
    # The cluster name labels every dispatched value's plugin_instance.
    ES_CLUSTER = result['cluster_name']
    return parse_stats(result)
def parse_stats(json):
    """Parse stats response from ElasticSearch and dispatch every metric.

    Single-node clusters dispatch with an empty instance suffix;
    multi-node clusters dispatch one value per node plus an aggregate
    under the "total" instance (averaged for non-byte types).
    """
    for name, key in STATS_CUR.iteritems():
        result = lookup_stat(name, json)
        count = len(result)
        if count == 1:
            dispatch_stat(result[0], name, key, "")
        else:
            total = 0
            found = 0
            for index, value in enumerate(result):
                dispatch_stat(value, name, key, "node%d" % index)
                # lookup_stat() yields None for nodes that lack this stat;
                # the original `total += value` then raised TypeError and
                # aborted the whole read cycle.  Aggregate real values only.
                if value is not None:
                    total += value
                    found += 1
            if found == 0:
                # Nothing to aggregate; dispatch_stat() logs the miss.
                dispatch_stat(None, name, key, "total")
                continue
            if key.type != "bytes":
                # Non-byte stats report the per-node average, not the sum.
                total = total / found
            dispatch_stat(total, name, key, "total")
def dispatch_stat(result, name, key, node_index):
    """Read a key from info response data and dispatch a value.

    A *result* of None (stat missing on a node) is logged and skipped.
    *node_index* ('' for single-node clusters, 'nodeN'/'total' otherwise)
    is appended to the cluster name to form the plugin instance.
    """
    if result is None:
        collectd.warning('elasticsearch plugin: Value not found for %s' % name)
        return
    estype = key.type
    value = int(result)
    log_verbose('Sending value[%s]: %s=%s' % (estype, name, value))
    val = collectd.Values(plugin='elasticsearch')
    val.plugin_instance = ES_CLUSTER + "_" + node_index
    # Empty suffix: use the bare cluster name instead of "cluster_".
    if len(node_index) == 0:
        val.plugin_instance = ES_CLUSTER
    val.type = estype
    val.type_instance = name
    val.values = [value]
    val.dispatch()
def read_callback():
    """collectd read hook: fetch and dispatch all ElasticSearch stats."""
    log_verbose('Read callback called')
    # fetch_stats() dispatches values as a side effect; its return value
    # was previously bound to an unused local, so drop the assignment.
    fetch_stats()
def dig_it_up(obj, path):
    """Walk nested dicts in *obj* following dotted *path* (e.g. 'a.b.c').

    Returns the value found, or False when any step is missing.
    NOTE: Python 2 only -- relies on the `unicode` and builtin `reduce`
    names, both gone in Python 3.
    """
    try:
        if type(path) in (str, unicode):
            path = path.split('.')
        return reduce(lambda x, y: x[y], path, obj)
    except:
        # Deliberate bare except: any lookup failure means "no match".
        return False
def log_verbose(msg):
    """Emit *msg* via collectd.info() only when Verbose is configured."""
    if not VERBOSE_LOGGING:
        return
    collectd.info('elasticsearch plugin [verbose]: %s' % msg)
# Register the collectd hooks: configuration first, then the periodic reader.
collectd.register_config(configure_callback)
collectd.register_read(read_callback)
|
""" venv_prompt
"""
import os
from threading import Thread
from smashlib.util import get_prompt_t, set_prompt_t
from smashlib.plugins import Plugins, SmashPlugin
def this_venv():
    """Return the active virtualenv as '(parent/name)', abbreviating $HOME to '~'."""
    venv = os.environ.get('VIRTUAL_ENV', '').replace(os.environ['HOME'], '~')
    tail = venv.split(os.path.sep)[-2:]
    return '({0})'.format(os.path.sep.join(tail))
class Plugin(SmashPlugin):
    """Smash plugin that prefixes the shell prompt with the active virtualenv."""
    def install(self):
        # Defer the prompt mutation to a background thread because, per the
        # inner docstring, ipython is not ready at plugin-install time.
        def delayed():
            """ there's probably a better way to do this, but
                ipython is not fully initialized when this
                plugin is installed.
            """
            import time; time.sleep(2)
            # Expose the helper on the ipython singleton so the template
            # below can look it up lazily (defaulting to '' when absent).
            __IPYTHON__._this_venv = this_venv
            t = '''${getattr(__IPYTHON__, '_this_venv', lambda: "")()}''' + get_prompt_t()
            set_prompt_t(t)
        Thread(target=delayed).start()
use nicer prompt abstraction
""" venv_prompt
"""
import os
from threading import Thread
from smashlib.util import prompt #get_prompt_t, set_prompt_t
from smashlib.plugins import Plugins, SmashPlugin
def this_venv():
    """Return '(parent/name)' for $VIRTUAL_ENV, with $HOME shown as '~'."""
    path = os.environ.get('VIRTUAL_ENV', '')
    path = path.replace(os.environ['HOME'], '~')
    parts = path.split(os.path.sep)
    return '({0})'.format(os.path.sep.join(parts[-2:]))
class Plugin(SmashPlugin):
    """Smash plugin that prefixes the shell prompt with the active virtualenv."""
    def install(self):
        def delayed():
            """ there's probably a better way to do this, but
                ipython is not fully initialized when this
                plugin is installed.
            """
            import time; time.sleep(2)
            __IPYTHON__._this_venv = this_venv
            # Prepend the venv lookup to the existing template through the
            # `prompt` abstraction (replaces get_prompt_t/set_prompt_t).
            t = '''${getattr(__IPYTHON__, '_this_venv', lambda: "")()}''' + prompt.template
            prompt.template = t
        Thread(target=delayed).start()
|
from PyQt5.QtWidgets import QFrame, QAction, QMenu, QSizePolicy, QInputDialog, QDialog, \
QVBoxLayout, QTabWidget, QWidget, QLabel
from sakia.gui.widgets.dialogs import dialog_async_exec
from PyQt5.QtCore import QObject, QT_TRANSLATE_NOOP, Qt, QLocale
from .toolbar_uic import Ui_SakiaToolbar
from .about_uic import Ui_AboutPopup
from .about_money_uic import Ui_AboutMoney
from .about_wot_uic import Ui_AboutWot
from sakia.helpers import timestamp_to_dhms, dpi_ratio
class ToolbarView(QFrame, Ui_SakiaToolbar):
"""
The model of Navigation component
"""
_action_revoke_uid_text = QT_TRANSLATE_NOOP("ToolbarView", "Publish a revocation document")
def __init__(self, parent):
super().__init__(parent)
self.setupUi(self)
tool_menu = QMenu(self.tr("Tools"), self.toolbutton_menu)
self.toolbutton_menu.setMenu(tool_menu)
self.action_add_connection = QAction(self.tr("Add a connection"), tool_menu)
tool_menu.addAction(self.action_add_connection)
self.action_revoke_uid = QAction(self.tr(ToolbarView._action_revoke_uid_text), self)
tool_menu.addAction(self.action_revoke_uid)
self.action_parameters = QAction(self.tr("Settings"), tool_menu)
tool_menu.addAction(self.action_parameters)
self.action_plugins = QAction(self.tr("Plugins manager"), tool_menu)
tool_menu.addAction(self.action_plugins)
tool_menu.addSeparator()
about_menu = QMenu(self.tr("About"), tool_menu)
tool_menu.addMenu(about_menu)
self.action_about_money = QAction(self.tr("About Money"), about_menu)
about_menu.addAction(self.action_about_money)
self.action_about_referentials = QAction(self.tr("About Referentials"), about_menu)
about_menu.addAction(self.action_about_referentials)
self.action_about_wot = QAction(self.tr("About Web of Trust"), about_menu)
about_menu.addAction(self.action_about_wot)
self.action_about = QAction(self.tr("About Sakia"), about_menu)
about_menu.addAction(self.action_about)
self.action_exit = QAction(self.tr("Exit"), tool_menu)
tool_menu.addAction(self.action_exit)
self.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Minimum)
self.setMaximumHeight(60)
self.button_network.setIconSize(self.button_network.iconSize()*dpi_ratio())
self.button_contacts.setIconSize(self.button_contacts.iconSize()*dpi_ratio())
self.button_identity.setIconSize(self.button_identity.iconSize()*dpi_ratio())
self.button_explore.setIconSize(self.button_explore.iconSize()*dpi_ratio())
self.toolbutton_menu.setIconSize(self.toolbutton_menu.iconSize()*dpi_ratio())
self.button_network.setMaximumHeight(self.button_network.maximumHeight()*dpi_ratio())
self.button_contacts.setMaximumHeight(self.button_contacts.maximumHeight()*dpi_ratio())
self.button_identity.setMaximumHeight(self.button_identity.maximumHeight()*dpi_ratio())
self.button_explore.setMaximumHeight(self.button_explore.maximumHeight()*dpi_ratio())
self.toolbutton_menu.setMaximumHeight(self.toolbutton_menu.maximumHeight()*dpi_ratio())
async def ask_for_connection(self, connections):
connections_titles = [c.title() for c in connections]
input_dialog = QInputDialog()
input_dialog.setComboBoxItems(connections_titles)
input_dialog.setWindowTitle(self.tr("Membership"))
input_dialog.setLabelText(self.tr("Select a connection"))
await dialog_async_exec(input_dialog)
result = input_dialog.textValue()
if input_dialog.result() == QDialog.Accepted:
for c in connections:
if c.title() == result:
return c
def show_about_wot(self, params):
"""
Set wot text from currency parameters
:param sakia.data.entities.BlockchainParameters params: Parameters of the currency
:return:
"""
dialog = QDialog(self)
about_dialog = Ui_AboutWot()
about_dialog.setupUi(dialog)
# set infos in label
about_dialog.label_wot.setText(
self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
</table>
""").format(
QLocale().toString(params.sig_period / 86400, 'f', 2),
self.tr('Minimum delay between 2 certifications (in days)'),
QLocale().toString(params.sig_validity / 86400, 'f', 2),
self.tr('Maximum age of a valid signature (in days)'),
params.sig_qty,
self.tr('Minimum quantity of signatures to be part of the WoT'),
params.sig_stock,
self.tr('Maximum quantity of active certifications made by member.'),
params.sig_window,
self.tr('Maximum delay a certification can wait before being expired for non-writing.'),
params.xpercent,
self.tr('Minimum percent of sentries to reach to match the distance rule'),
params.ms_validity / 86400,
self.tr('Maximum age of a valid membership (in days)'),
params.step_max,
self.tr('Maximum distance between each WoT member and a newcomer'),
)
)
dialog.setWindowTitle(self.tr("Web of Trust rules"))
dialog.exec()
def show_about_money(self, params, currency, localized_data):
dialog = QDialog(self)
about_dialog = Ui_AboutMoney()
about_dialog.setupUi(dialog)
about_dialog.label_general.setText(self.general_text(localized_data))
about_dialog.label_rules.setText(self.rules_text(localized_data))
about_dialog.label_money.setText(self.money_text(params, currency))
dialog.setWindowTitle(self.tr("Money rules"))
dialog.exec()
def show_about_referentials(self, referentials):
dialog = QDialog(self)
layout = QVBoxLayout(dialog)
tabwidget = QTabWidget(dialog)
layout.addWidget(tabwidget)
for ref in referentials:
widget = QWidget()
layout = QVBoxLayout(widget)
label = QLabel()
label.setText(self.text_referential(ref))
layout.addWidget(label)
tabwidget.addTab(widget, ref.translated_name())
dialog.setWindowTitle(self.tr("Referentials"))
dialog.exec()
def general_text(self, localized_data):
"""
Fill the general text with given informations
:return:
"""
# set infos in label
return self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:}</b></div></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:2.2%} / {:} days</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
</table>
""").format(
localized_data.get('ud', '####'),
self.tr('Universal Dividend UD(t) in'),
localized_data['diff_units'],
localized_data.get('mass_minus_1', "###"),
self.tr('Monetary Mass M(t-1) in'),
localized_data['units'],
localized_data.get('members_count', '####'),
self.tr('Members N(t)'),
localized_data.get('mass_minus_1_per_member', '####'),
self.tr('Monetary Mass per member M(t-1)/N(t) in'),
localized_data['diff_units'],
localized_data.get('actual_growth', 0),
localized_data.get('days_per_dividend', '####'),
self.tr('Actual growth c = UD(t)/[M(t-1)/N(t)]'),
localized_data.get('ud_median_time_minus_1', '####'),
self.tr('Penultimate UD date and time (t-1)'),
localized_data.get('ud_median_time', '####'),
self.tr('Last UD date and time (t)'),
localized_data.get('next_ud_median_time', '####'),
self.tr('Next UD date and time (t+1)'),
localized_data.get('next_ud_reeaval', '####'),
self.tr('Next UD reevaluation (t+1)')
)
def rules_text(self, localized_data):
"""
Set text in rules
:param dict localized_data:
:return:
"""
# set infos in label
return self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
</table>
""").format(
self.tr('{:2.0%} / {:} days').format(localized_data['growth'], localized_data['days_per_dividend']),
self.tr('Fundamental growth (c) / Delta time (dt)'),
self.tr('UDĞ(t) = UDĞ(t-1) + c²*M(t-1)/N(t-1)'),
self.tr('Universal Dividend (formula)'),
self.tr('{:} = {:} + {:2.0%}²* {:} / {:}').format(
localized_data.get('ud_plus_1', '####'),
localized_data.get('ud', '####'),
localized_data.get('growth', '####'),
localized_data.get('mass', '####'),
localized_data.get('members_count', '####')
),
self.tr('Universal Dividend (computed)')
)
def text_referential(self, ref):
"""
Set text from referentials
"""
# set infos in label
ref_template = """
<table cellpadding="5">
<tr><th>{:}</th><td>{:}</td></tr>
<tr><th>{:}</th><td>{:}</td></tr>
<tr><th>{:}</th><td>{:}</td></tr>
<tr><th>{:}</th><td>{:}</td></tr>
</table>
"""
return ref_template.format(self.tr('Name'), ref.translated_name(),
self.tr('Units'), ref.units,
self.tr('Formula'), ref.formula,
self.tr('Description'), ref.description
)
def money_text(self, params, currency):
"""
Set text from money parameters
:param sakia.data.entities.BlockchainParameters params: Parameters of the currency
:param str currency: The currency
"""
dt_dhms = timestamp_to_dhms(params.dt)
if dt_dhms[0] > 0:
dt_as_str = self.tr("{:} day(s) {:} hour(s)").format(*dt_dhms)
else:
dt_as_str = self.tr("{:} hour(s)").format(dt_dhms[1])
if dt_dhms[2] > 0 or dt_dhms[3] > 0:
dt_dhms += ", {:} minute(s) and {:} second(s)".format(*dt_dhms[1:])
dt_reeval_dhms = timestamp_to_dhms(params.dt_reeval)
dt_reeval_as_str = self.tr("{:} day(s) {:} hour(s)").format(*dt_reeval_dhms)
# set infos in label
return self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:2.0%} / {:} days</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:2.0%}</b></td><td>{:}</td></tr>
</table>
""").format(
params.c,
QLocale().toString(params.dt / 86400, 'f', 2),
self.tr('Fundamental growth (c)'),
params.ud0,
self.tr('Initial Universal Dividend UD(0) in'),
currency,
dt_as_str,
self.tr('Time period between two UD'),
dt_reeval_as_str,
self.tr('Time period between two UD reevaluation'),
params.median_time_blocks,
self.tr('Number of blocks used for calculating median time'),
params.avg_gen_time,
self.tr('The average time in seconds for writing 1 block (wished time)'),
params.dt_diff_eval,
self.tr('The number of blocks required to evaluate again PoWMin value'),
params.percent_rot,
self.tr('The percent of previous issuers to reach for personalized difficulty')
)
def show_about(self, text):
    """Open a modal "About" popup displaying the given HTML/plain text."""
    popup = QDialog(self)
    popup_ui = Ui_AboutPopup()
    popup_ui.setupUi(popup)
    popup_ui.label.setText(text)
    popup.exec()
Another try #669
from PyQt5.QtWidgets import QFrame, QAction, QMenu, QSizePolicy, QInputDialog, QDialog, \
QVBoxLayout, QTabWidget, QWidget, QLabel
from sakia.gui.widgets.dialogs import dialog_async_exec
from PyQt5.QtCore import QObject, QT_TRANSLATE_NOOP, Qt, QLocale
from .toolbar_uic import Ui_SakiaToolbar
from .about_uic import Ui_AboutPopup
from .about_money_uic import Ui_AboutMoney
from .about_wot_uic import Ui_AboutWot
from sakia.helpers import timestamp_to_dhms, dpi_ratio
class ToolbarView(QFrame, Ui_SakiaToolbar):
    """
    The view of the toolbar component.

    Builds the "Tools" and "About" menus and renders the informational
    dialogs (money parameters, referentials, web of trust, about box).
    """
    _action_revoke_uid_text = QT_TRANSLATE_NOOP("ToolbarView", "Publish a revocation document")

    def __init__(self, parent):
        """
        Build the toolbar menus and scale the buttons with the screen DPI.

        :param parent: parent widget
        """
        super().__init__(parent)
        self.setupUi(self)
        tool_menu = QMenu(self.tr("Tools"), self.toolbutton_menu)
        self.toolbutton_menu.setMenu(tool_menu)
        self.action_add_connection = QAction(self.tr("Add a connection"), tool_menu)
        tool_menu.addAction(self.action_add_connection)
        self.action_revoke_uid = QAction(self.tr(ToolbarView._action_revoke_uid_text), self)
        tool_menu.addAction(self.action_revoke_uid)
        self.action_parameters = QAction(self.tr("Settings"), tool_menu)
        tool_menu.addAction(self.action_parameters)
        self.action_plugins = QAction(self.tr("Plugins manager"), tool_menu)
        tool_menu.addAction(self.action_plugins)
        tool_menu.addSeparator()
        about_menu = QMenu(self.tr("About"), tool_menu)
        tool_menu.addMenu(about_menu)
        self.action_about_money = QAction(self.tr("About Money"), about_menu)
        about_menu.addAction(self.action_about_money)
        self.action_about_referentials = QAction(self.tr("About Referentials"), about_menu)
        about_menu.addAction(self.action_about_referentials)
        self.action_about_wot = QAction(self.tr("About Web of Trust"), about_menu)
        about_menu.addAction(self.action_about_wot)
        self.action_about = QAction(self.tr("About Sakia"), about_menu)
        about_menu.addAction(self.action_about)
        self.action_exit = QAction(self.tr("Exit"), tool_menu)
        tool_menu.addAction(self.action_exit)
        self.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Minimum)
        self.setMaximumHeight(60)
        # Scale icons and widgets with the screen DPI ratio so the toolbar
        # renders at a sensible size on high-DPI displays.
        self.button_network.setIconSize(self.button_network.iconSize()*dpi_ratio())
        self.button_contacts.setIconSize(self.button_contacts.iconSize()*dpi_ratio())
        self.button_identity.setIconSize(self.button_identity.iconSize()*dpi_ratio())
        self.button_explore.setIconSize(self.button_explore.iconSize()*dpi_ratio())
        self.toolbutton_menu.setIconSize(self.toolbutton_menu.iconSize()*dpi_ratio())
        # NOTE(review): setSize is not a stock QWidget method — presumably
        # these buttons are custom widgets from Ui_SakiaToolbar; confirm.
        self.button_network.setSize(self.button_network.size()*dpi_ratio())
        self.button_contacts.setSize(self.button_contacts.size()*dpi_ratio())
        self.button_identity.setSize(self.button_identity.size()*dpi_ratio())
        self.button_explore.setSize(self.button_explore.size()*dpi_ratio())
        self.toolbutton_menu.setSize(self.toolbutton_menu.size()*dpi_ratio())

    async def ask_for_connection(self, connections):
        """
        Ask the user to pick one connection in a combo-box dialog.

        :param connections: the available connections
        :return: the chosen connection, or None if the dialog was rejected
        """
        connections_titles = [c.title() for c in connections]
        input_dialog = QInputDialog()
        input_dialog.setComboBoxItems(connections_titles)
        input_dialog.setWindowTitle(self.tr("Membership"))
        input_dialog.setLabelText(self.tr("Select a connection"))
        await dialog_async_exec(input_dialog)
        result = input_dialog.textValue()
        if input_dialog.result() == QDialog.Accepted:
            for c in connections:
                if c.title() == result:
                    return c

    def show_about_wot(self, params):
        """
        Show the web-of-trust rules dialog from currency parameters.

        :param sakia.data.entities.BlockchainParameters params: Parameters of the currency
        :return:
        """
        dialog = QDialog(self)
        about_dialog = Ui_AboutWot()
        about_dialog.setupUi(dialog)
        # set infos in label
        about_dialog.label_wot.setText(
            self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
</table>
""").format(
                QLocale().toString(params.sig_period / 86400, 'f', 2),
                self.tr('Minimum delay between 2 certifications (in days)'),
                QLocale().toString(params.sig_validity / 86400, 'f', 2),
                self.tr('Maximum age of a valid signature (in days)'),
                params.sig_qty,
                self.tr('Minimum quantity of signatures to be part of the WoT'),
                params.sig_stock,
                self.tr('Maximum quantity of active certifications made by member.'),
                params.sig_window,
                self.tr('Maximum delay a certification can wait before being expired for non-writing.'),
                params.xpercent,
                self.tr('Minimum percent of sentries to reach to match the distance rule'),
                params.ms_validity / 86400,
                self.tr('Maximum age of a valid membership (in days)'),
                params.step_max,
                self.tr('Maximum distance between each WoT member and a newcomer'),
            )
        )
        dialog.setWindowTitle(self.tr("Web of Trust rules"))
        dialog.exec()

    def show_about_money(self, params, currency, localized_data):
        """
        Show the money rules dialog (general, rules and parameters tabs).

        :param sakia.data.entities.BlockchainParameters params: Parameters of the currency
        :param str currency: The currency
        :param dict localized_data: pre-localized values for display
        """
        dialog = QDialog(self)
        about_dialog = Ui_AboutMoney()
        about_dialog.setupUi(dialog)
        about_dialog.label_general.setText(self.general_text(localized_data))
        about_dialog.label_rules.setText(self.rules_text(localized_data))
        about_dialog.label_money.setText(self.money_text(params, currency))
        dialog.setWindowTitle(self.tr("Money rules"))
        dialog.exec()

    def show_about_referentials(self, referentials):
        """
        Show a dialog with one tab per referential.

        :param referentials: iterable of referential objects
        """
        dialog = QDialog(self)
        layout = QVBoxLayout(dialog)
        tabwidget = QTabWidget(dialog)
        layout.addWidget(tabwidget)
        for ref in referentials:
            widget = QWidget()
            # Intentionally rebinds 'layout' for the tab's own layout;
            # the dialog layout is no longer needed after addWidget above.
            layout = QVBoxLayout(widget)
            label = QLabel()
            label.setText(self.text_referential(ref))
            layout.addWidget(label)
            tabwidget.addTab(widget, ref.translated_name())
        dialog.setWindowTitle(self.tr("Referentials"))
        dialog.exec()

    def general_text(self, localized_data):
        """
        Fill the general text with given informations

        :param dict localized_data: pre-localized values for display
        :return: an HTML table as a translated string
        """
        # set infos in label
        return self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:}</b></div></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:2.2%} / {:} days</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
</table>
""").format(
            localized_data.get('ud', '####'),
            self.tr('Universal Dividend UD(t) in'),
            localized_data['diff_units'],
            localized_data.get('mass_minus_1', "###"),
            self.tr('Monetary Mass M(t-1) in'),
            localized_data['units'],
            localized_data.get('members_count', '####'),
            self.tr('Members N(t)'),
            localized_data.get('mass_minus_1_per_member', '####'),
            self.tr('Monetary Mass per member M(t-1)/N(t) in'),
            localized_data['diff_units'],
            localized_data.get('actual_growth', 0),
            localized_data.get('days_per_dividend', '####'),
            self.tr('Actual growth c = UD(t)/[M(t-1)/N(t)]'),
            localized_data.get('ud_median_time_minus_1', '####'),
            self.tr('Penultimate UD date and time (t-1)'),
            localized_data.get('ud_median_time', '####'),
            self.tr('Last UD date and time (t)'),
            localized_data.get('next_ud_median_time', '####'),
            self.tr('Next UD date and time (t+1)'),
            localized_data.get('next_ud_reeaval', '####'),
            self.tr('Next UD reevaluation (t+1)')
        )

    def rules_text(self, localized_data):
        """
        Set text in rules

        :param dict localized_data:
        :return: an HTML table as a translated string
        """
        # set infos in label
        return self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
</table>
""").format(
            self.tr('{:2.0%} / {:} days').format(localized_data['growth'], localized_data['days_per_dividend']),
            self.tr('Fundamental growth (c) / Delta time (dt)'),
            self.tr('UDĞ(t) = UDĞ(t-1) + c²*M(t-1)/N(t-1)'),
            self.tr('Universal Dividend (formula)'),
            self.tr('{:} = {:} + {:2.0%}²* {:} / {:}').format(
                localized_data.get('ud_plus_1', '####'),
                localized_data.get('ud', '####'),
                localized_data.get('growth', '####'),
                localized_data.get('mass', '####'),
                localized_data.get('members_count', '####')
            ),
            self.tr('Universal Dividend (computed)')
        )

    def text_referential(self, ref):
        """
        Set text from referentials

        :param ref: the referential to describe
        :return: an HTML table string
        """
        # set infos in label
        ref_template = """
<table cellpadding="5">
<tr><th>{:}</th><td>{:}</td></tr>
<tr><th>{:}</th><td>{:}</td></tr>
<tr><th>{:}</th><td>{:}</td></tr>
<tr><th>{:}</th><td>{:}</td></tr>
</table>
"""
        return ref_template.format(self.tr('Name'), ref.translated_name(),
                                   self.tr('Units'), ref.units,
                                   self.tr('Formula'), ref.formula,
                                   self.tr('Description'), ref.description
                                   )

    def money_text(self, params, currency):
        """
        Build the HTML text describing the money parameters.

        :param sakia.data.entities.BlockchainParameters params: Parameters of the currency
        :param str currency: The currency
        :return: an HTML table as a translated string
        """
        dt_dhms = timestamp_to_dhms(params.dt)
        if dt_dhms[0] > 0:
            dt_as_str = self.tr("{:} day(s) {:} hour(s)").format(*dt_dhms)
        else:
            dt_as_str = self.tr("{:} hour(s)").format(dt_dhms[1])
        if dt_dhms[2] > 0 or dt_dhms[3] > 0:
            # BUGFIX: the original appended the string to the dt_dhms *tuple*
            # (a TypeError at runtime) instead of extending the display
            # string, and sliced from index 1 instead of 2.
            dt_as_str += ", {:} minute(s) and {:} second(s)".format(*dt_dhms[2:])
        dt_reeval_dhms = timestamp_to_dhms(params.dt_reeval)
        dt_reeval_as_str = self.tr("{:} day(s) {:} hour(s)").format(*dt_reeval_dhms)
        # set infos in label
        return self.tr("""
<table cellpadding="5">
<tr><td align="right"><b>{:2.0%} / {:} days</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:} {:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:}</b></td><td>{:}</td></tr>
<tr><td align="right"><b>{:2.0%}</b></td><td>{:}</td></tr>
</table>
""").format(
            params.c,
            QLocale().toString(params.dt / 86400, 'f', 2),
            self.tr('Fundamental growth (c)'),
            params.ud0,
            self.tr('Initial Universal Dividend UD(0) in'),
            currency,
            dt_as_str,
            self.tr('Time period between two UD'),
            dt_reeval_as_str,
            self.tr('Time period between two UD reevaluation'),
            params.median_time_blocks,
            self.tr('Number of blocks used for calculating median time'),
            params.avg_gen_time,
            self.tr('The average time in seconds for writing 1 block (wished time)'),
            params.dt_diff_eval,
            self.tr('The number of blocks required to evaluate again PoWMin value'),
            params.percent_rot,
            self.tr('The percent of previous issuers to reach for personalized difficulty')
        )

    def show_about(self, text):
        """Open a modal "About" popup displaying the given text."""
        dialog = QDialog(self)
        about_dialog = Ui_AboutPopup()
        about_dialog.setupUi(dialog)
        about_dialog.label.setText(text)
        dialog.exec()
|
from flask import render_template, flash, redirect, request, session
from app import app
import requests
import json
import time
# BUGFIX: 'os' was used here without being imported (NameError at import
# time); the local import keeps this block self-contained.
import os

# VK OAuth application credentials; fail fast at import time if missing.
CLIENT_ID = os.environ['CLIENT_ID']
CLIENT_SECRET = os.environ['CLIENT_SECRET']
def error_healing(error_code):
    """Map a VK API error code to a user-facing (Russian) message.

    Returns None for unknown codes and for code 6 (too many requests),
    where it instead sleeps briefly so the caller can retry.
    """
    if error_code == 6:
        # Rate limited: back off for a moment, then signal "retry".
        time.sleep(2)
        return None
    known_errors = {
        1: 'Произошла неизвестная ошибка',
        2: 'Сорян, админ все повыключал',
        5: 'Авторизация не удалась',
        9: 'Слишком много однотипных действий',
        14: 'Прости, вылезла капча. Попробуй перезайти',
        15: 'Этот юзер спрятался от меня',
        17: 'Так исторически сложилось, что тебе придется войти',
        18: 'Эта страничка удалена, у нее нет друзей',
        113: 'Прости, но ты ввел что-то не так, как я ожидаю',
        1000: 'Нет, сначала положи что-нибудь в форму!',
    }
    return known_errors.get(error_code)
def form_url(redirect_uri):
    """Build the VK OAuth authorization URL pointing back at *redirect_uri*."""
    params = {'client_id': CLIENT_ID,
              'display': 'page',
              'redirect_uri': redirect_uri,
              'scope': 'friends',
              'response_type': 'code',
              'v': '5.62',
              }
    request = requests.Request('GET', 'https://oauth.vk.com/authorize',
                               params=params)
    # Prepare once and use the result (the original called prepare() twice
    # and discarded the first PreparedRequest).
    return request.prepare().url
def get_user_info(token, short_name):
    """Call VK users.get for *short_name* and return the decoded JSON dict."""
    query = {'user_ids': short_name,
             'access_token': token,
             }
    response = requests.get('https://api.vk.com/method/users.get', query)
    return json.loads(response.text)
def get_online_friends_ids(short_name, token):
    """Return the VK friends.getOnline payload for the user behind *short_name*.

    An empty *short_name* yields a synthetic error dict with code 1000;
    errors from users.get are passed through unchanged.
    """
    if not short_name:
        return {'error': {'error_code': 1000}}
    user_info = get_user_info(token, short_name)
    if 'error' in user_info:
        return user_info
    query = {'user_id': user_info['response'][0]['uid'],
             'access_token': token,
             'order': 'hints',
             'count': 5000,  # vk won't return more
             'v': '5.62',
             'online_mobile': 1,
             }
    response = requests.get('https://api.vk.com/method/friends.getOnline', query)
    return json.loads(response.text)
def _collect_friends_info(token, friend_ids):
    """Fetch VK profile info for every id in *friend_ids*.

    On a VK error the code goes through error_healing (which sleeps on
    rate-limit errors) and the request is retried once.
    """
    friends_info = []
    for friend_id in friend_ids:
        friend_info = get_user_info(token, friend_id)
        if 'error' in friend_info:
            error_healing(friend_info['error']['error_code'])
            friend_info = get_user_info(token, friend_id)
        friends_info.append(friend_info)
    return friends_info


@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Main page: show a login link, or the queried user's online friends."""
    params = {'logged_in': False,
              'auth_url': form_url(request.url_root + 'getpas'),
              'logout_url': '/logout'
              }
    short_name = request.args.get('text', '')
    if 'access_token' not in session:
        return render_template('index.html', **params)
    params['logged_in'] = True
    token = session['access_token']
    online_friends_ids = get_online_friends_ids(short_name, token)
    if 'error' in online_friends_ids:
        params['error'] = error_healing(online_friends_ids['error']['error_code'])
        return render_template('index.html', **params)
    online_friends_ids = online_friends_ids['response']
    # The fetch/retry loop was previously duplicated verbatim for both lists.
    params['online_friends_pc'] = _collect_friends_info(token, online_friends_ids['online'])
    params['online_friends_mobile'] = _collect_friends_info(token, online_friends_ids['online_mobile'])
    params.pop('online_friends', None)
    return render_template('index.html', **params)
@app.route('/getpas', methods=['GET', 'POST'])
def getpas():
    """OAuth callback: exchange the VK `code` for an access token."""
    code = request.args.get('code')
    if code is None:
        # No authorization code: nothing to exchange.
        return redirect('/index')
    vk_params = {
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'redirect_uri': request.url_root + 'getpas',
        'code': code,
    }
    response = requests.get('https://oauth.vk.com/access_token', params=vk_params)
    session['access_token'] = response.json().get('access_token', None)
    return redirect('index')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """Drop the stored VK token and send the user back to the index page."""
    if 'access_token' in session:
        del session['access_token']
    return redirect('index')
trying to run on heroku
from flask import render_template, flash, redirect, request, session
from app import app
import requests
import json
import time
import os
# VK OAuth application credentials; the process fails fast at import time
# (KeyError) if they are not configured in the environment.
CLIENT_ID = os.environ['CLIENT_ID']
CLIENT_SECRET = os.environ['CLIENT_SECRET']
def error_healing(error_code):
    """Map a VK API error code to a user-facing (Russian) message.

    Returns None for unknown codes and for code 6 (too many requests),
    where it instead sleeps briefly so the caller can retry.
    """
    if error_code == 6:
        # Rate limited: back off for a moment, then signal "retry".
        time.sleep(2)
        return None
    known_errors = {
        1: 'Произошла неизвестная ошибка',
        2: 'Сорян, админ все повыключал',
        5: 'Авторизация не удалась',
        9: 'Слишком много однотипных действий',
        14: 'Прости, вылезла капча. Попробуй перезайти',
        15: 'Этот юзер спрятался от меня',
        17: 'Так исторически сложилось, что тебе придется войти',
        18: 'Эта страничка удалена, у нее нет друзей',
        113: 'Прости, но ты ввел что-то не так, как я ожидаю',
        1000: 'Нет, сначала положи что-нибудь в форму!',
    }
    return known_errors.get(error_code)
def form_url(redirect_uri):
    """Build the VK OAuth authorization URL pointing back at *redirect_uri*."""
    params = {'client_id': CLIENT_ID,
              'display': 'page',
              'redirect_uri': redirect_uri,
              'scope': 'friends',
              'response_type': 'code',
              'v': '5.62',
              }
    request = requests.Request('GET', 'https://oauth.vk.com/authorize',
                               params=params)
    # Prepare once and use the result (the original called prepare() twice
    # and discarded the first PreparedRequest).
    return request.prepare().url
def get_user_info(token, short_name):
    """Call VK users.get for *short_name* and return the decoded JSON dict."""
    query = {'user_ids': short_name,
             'access_token': token,
             }
    response = requests.get('https://api.vk.com/method/users.get', query)
    return json.loads(response.text)
def get_online_friends_ids(short_name, token):
    """Return the VK friends.getOnline payload for the user behind *short_name*.

    An empty *short_name* yields a synthetic error dict with code 1000;
    errors from users.get are passed through unchanged.
    """
    if not short_name:
        return {'error': {'error_code': 1000}}
    user_info = get_user_info(token, short_name)
    if 'error' in user_info:
        return user_info
    query = {'user_id': user_info['response'][0]['uid'],
             'access_token': token,
             'order': 'hints',
             'count': 5000,  # vk won't return more
             'v': '5.62',
             'online_mobile': 1,
             }
    response = requests.get('https://api.vk.com/method/friends.getOnline', query)
    return json.loads(response.text)
def _collect_friends_info(token, friend_ids):
    """Fetch VK profile info for every id in *friend_ids*.

    On a VK error the code goes through error_healing (which sleeps on
    rate-limit errors) and the request is retried once.
    """
    friends_info = []
    for friend_id in friend_ids:
        friend_info = get_user_info(token, friend_id)
        if 'error' in friend_info:
            error_healing(friend_info['error']['error_code'])
            friend_info = get_user_info(token, friend_id)
        friends_info.append(friend_info)
    return friends_info


@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Main page: show a login link, or the queried user's online friends."""
    params = {'logged_in': False,
              'auth_url': form_url(request.url_root + 'getpas'),
              'logout_url': '/logout'
              }
    short_name = request.args.get('text', '')
    if 'access_token' not in session:
        return render_template('index.html', **params)
    params['logged_in'] = True
    token = session['access_token']
    online_friends_ids = get_online_friends_ids(short_name, token)
    if 'error' in online_friends_ids:
        params['error'] = error_healing(online_friends_ids['error']['error_code'])
        return render_template('index.html', **params)
    online_friends_ids = online_friends_ids['response']
    # The fetch/retry loop was previously duplicated verbatim for both lists.
    params['online_friends_pc'] = _collect_friends_info(token, online_friends_ids['online'])
    params['online_friends_mobile'] = _collect_friends_info(token, online_friends_ids['online_mobile'])
    params.pop('online_friends', None)
    return render_template('index.html', **params)
@app.route('/getpas', methods=['GET', 'POST'])
def getpas():
    """OAuth callback: exchange the VK `code` for an access token."""
    code = request.args.get('code')
    if code is None:
        # No authorization code: nothing to exchange.
        return redirect('/index')
    vk_params = {
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'redirect_uri': request.url_root + 'getpas',
        'code': code,
    }
    response = requests.get('https://oauth.vk.com/access_token', params=vk_params)
    session['access_token'] = response.json().get('access_token', None)
    return redirect('index')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """Drop the stored VK token and send the user back to the index page."""
    if 'access_token' in session:
        del session['access_token']
    return redirect('index')
|
#!/usr/bin/env ccp4-python
__author__ = "Jens Thomas, and Felix Simkovic"
__date__ = "22 Mar 2019"
__version__ = "1.0"
import argparse
import os
import logging
import multiprocessing
from ample.util import argparse_util
from ample.util.sequence_util import Sequence
from ample.util import logging_util
from ample.modelling.rosetta_model import RosettaModel
def process_args(args):
    """Normalise the parsed command-line options in place.

    Makes the Rosetta flags file path absolute and picks a default process
    count: one process when jobs are submitted to a cluster, otherwise all
    local CPUs.
    """
    if args.rosetta_flagsfile:
        args.rosetta_flagsfile = os.path.abspath(args.rosetta_flagsfile)
    if args.nproc is None:
        args.nproc = 1 if args.submit_cluster else multiprocessing.cpu_count()
# Handle the command-line
parser = argparse.ArgumentParser(description="AMPLE Modelling Module")
# Need to add separately here as it is usually part of MR options
parser.add_argument('-fasta', help='protein fasta file.')
argparse_util.add_core_options(parser)
argparse_util.add_rosetta_options(parser)
argparse_util.add_cluster_submit_options(parser)
work_dir = os.path.abspath('rosetta_modelling')
# Defaults: run locally (no cluster submission), 1000 models
parser.set_defaults(submit_cluster = False,
                    submit_qtype = 'SGE',
                    submit_array = True,
                    nmodels = 1000,
                    work_dir = work_dir)
args = parser.parse_args()
process_args(args)
# Start logging to the console
logging_util.setup_console_logging()
logger = logging.getLogger()
logger.info("*** AMPLE ROSETTA modelling package ***")
# Create the working directory if it does not already exist
if not os.path.isdir(args.work_dir):
    os.mkdir(args.work_dir)
# Configure the Rosetta modelling object from the parsed options
rm = RosettaModel()
if args.rosetta_dir and os.path.isdir(args.rosetta_dir):
    rm.set_paths(rosetta_dir=args.rosetta_dir)
if args.fasta:
    fp = Sequence(fasta=args.fasta, canonicalise=False)
    rm.sequence_length = fp.length()
rm.nmodels = args.nmodels
rm.work_dir = args.work_dir
rm.models_dir = os.path.join(args.work_dir, "models")
rm.multimer_modelling = args.multimer_modelling
rm.nproc = args.nproc
rm.submit_cluster = args.submit_cluster
rm.submit_qtype = args.submit_qtype
rm.submit_queue = args.submit_queue
rm.submit_array = args.submit_array
rm.submit_max_array = args.submit_max_array
logger.info("Running binary {} with flagsfile: {}".format(args.rosetta_executable, args.rosetta_flagsfile))
# Dispatch to the requested modelling mode
if args.multimer_modelling:
    rm.do_multimer_modelling()
else:
    rm.model_from_flagsfile(args.rosetta_flagsfile, args.rosetta_executable)
Set args on rosetta model
#!/usr/bin/env ccp4-python
__author__ = "Jens Thomas, and Felix Simkovic"
__date__ = "22 Mar 2019"
__version__ = "1.0"
import argparse
import os
import logging
import multiprocessing
from ample.util import argparse_util
from ample.util.sequence_util import Sequence
from ample.util import logging_util
from ample.modelling.rosetta_model import RosettaModel
def process_args(args):
    """Normalise the parsed command-line options in place.

    Makes the Rosetta flags file path absolute and picks a default process
    count: one process when jobs are submitted to a cluster, otherwise all
    local CPUs.
    """
    if args.rosetta_flagsfile:
        args.rosetta_flagsfile = os.path.abspath(args.rosetta_flagsfile)
    if args.nproc is None:
        args.nproc = 1 if args.submit_cluster else multiprocessing.cpu_count()
# Handle the command-line
parser = argparse.ArgumentParser(description="AMPLE Modelling Module")
# Need to add separately here as it is usually part of MR options
parser.add_argument('-fasta', help='protein fasta file.')
argparse_util.add_core_options(parser)
argparse_util.add_rosetta_options(parser)
argparse_util.add_cluster_submit_options(parser)
work_dir = os.path.abspath('rosetta_modelling')
# Defaults: run locally (no cluster submission), 1000 models
parser.set_defaults(submit_cluster = False,
                    submit_qtype = 'SGE',
                    submit_array = True,
                    nmodels = 1000,
                    work_dir = work_dir)
args = parser.parse_args()
process_args(args)
# Start logging to the console
logging_util.setup_console_logging()
logger = logging.getLogger()
logger.info("*** AMPLE ROSETTA modelling package ***")
# Create the working directory if it does not already exist
if not os.path.isdir(args.work_dir):
    os.mkdir(args.work_dir)
# Configure the Rosetta modelling object from the parsed options
rm = RosettaModel()
if args.rosetta_dir and os.path.isdir(args.rosetta_dir):
    rm.set_paths(rosetta_dir=args.rosetta_dir)
if args.fasta:
    rm.fasta = args.fasta
    fp = Sequence(fasta=args.fasta, canonicalise=False)
    rm.sequence_length = fp.length()
rm.nmodels = args.nmodels
rm.work_dir = args.work_dir
rm.models_dir = os.path.join(args.work_dir, "models")
rm.frags_3mers = args.frags_3mers
rm.frags_9mers = args.frags_9mers
rm.multimer_modelling = args.multimer_modelling
rm.nproc = args.nproc
rm.submit_cluster = args.submit_cluster
rm.submit_qtype = args.submit_qtype
rm.submit_queue = args.submit_queue
rm.submit_array = args.submit_array
rm.submit_max_array = args.submit_max_array
logger.info("Running binary {} with flagsfile: {}".format(args.rosetta_executable, args.rosetta_flagsfile))
# Dispatch to the requested modelling mode
if args.multimer_modelling:
    rm.do_multimer_modelling()
else:
    rm.model_from_flagsfile(args.rosetta_flagsfile, args.rosetta_executable)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReticulate(RPackage):
    """Interface to 'Python' modules, classes, and functions. When calling into
    'Python', R data types are automatically converted to their equivalent
    'Python' types. When values are returned from 'Python' to R they are
    converted back to R types. Compatible with all versions of 'Python' >=
    2.7."""

    # Upstream project and CRAN source locations
    homepage = "https://github.com/rstudio/reticulate"
    url = "https://cloud.r-project.org/src/contrib/reticulate_1.13.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/reticulate"

    version('1.13', sha256='adbe41d556b667c4419d563680f8608a56b0f792b8bc427b3bf4c584ff819de3')

    # R package dependencies, needed at both build and run time
    depends_on('r@3.0:', type=('build', 'run'))
    depends_on('r-jsonlite', type=('build', 'run'))
    depends_on('r-rcpp@0.12.7:', type=('build', 'run'))
    depends_on('r-matrix', type=('build', 'run'))
    # The Python interpreter the package embeds
    depends_on('python@2.7.0:')
r-reticulate: add version; dependencies (#16747)
* Add version 1.15
* Add link dependency on r-rcpp
* Add dependency on r-rappdirs
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReticulate(RPackage):
    """Interface to 'Python' modules, classes, and functions. When calling into
    'Python', R data types are automatically converted to their equivalent
    'Python' types. When values are returned from 'Python' to R they are
    converted back to R types. Compatible with all versions of 'Python' >=
    2.7."""

    # Upstream project and CRAN source locations
    homepage = "https://github.com/rstudio/reticulate"
    url = "https://cloud.r-project.org/src/contrib/reticulate_1.13.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/reticulate"

    version('1.15', sha256='47db3e9c9424263ade15287da8e74f6ba261a936b644b197dba6772853b7b50d')
    version('1.13', sha256='adbe41d556b667c4419d563680f8608a56b0f792b8bc427b3bf4c584ff819de3')

    # R package dependencies, needed at both build and run time
    depends_on('r@3.0:', type=('build', 'run'))
    depends_on('r-jsonlite', type=('build', 'run'))
    depends_on('r-rcpp@0.12.7:', type=('build', 'run', 'link'))
    depends_on('r-matrix', type=('build', 'run'))
    # rappdirs is only required from the 1.15 release onwards
    depends_on('r-rappdirs', when='@1.15:', type=('build', 'run'))
    # The Python interpreter the package embeds
    depends_on('python@2.7.0:')
|
import re
import itertools
from django.template.defaultfilters import slugify
def greek_to_english(string):
    """
    Converts all greek letters to the corresponding english letters.
    Useful for creating song slugs from greek song titles.
    """
    GREEK_MAP = {
        'α':'a', 'β':'b', 'γ':'g', 'δ':'d', 'ε':'e', 'ζ':'z', 'η':'h', 'θ':'th',
        'ι':'i', 'κ':'k', 'λ':'l', 'μ':'m', 'ν':'n', 'ξ':'ks', 'ο':'o', 'π':'p',
        'ρ':'r', 'σ':'s', 'τ':'t', 'υ':'y', 'φ':'f', 'χ':'x', 'ψ':'ps', 'ω':'w',
        'ά':'a', 'έ':'e', 'ί':'i', 'ό':'o', 'ύ':'y', 'ή':'h', 'ώ':'w', 'ς':'s',
        'ϊ':'i', 'ΰ':'y', 'ϋ':'y', 'ΐ':'i',
        'Α':'A', 'Β':'B', 'Γ':'G', 'Δ':'D', 'Ε':'E', 'Ζ':'Z', 'Η':'H', 'Θ':'Th',
        'Ι':'I', 'Κ':'K', 'Λ':'L', 'Μ':'M', 'Ν':'N', 'Ξ':'Ks', 'Ο':'O', 'Π':'P',
        'Ρ':'R', 'Σ':'S', 'Τ':'T', 'Υ':'Y', 'Φ':'F', 'Χ':'X', 'Ψ':'Ps', 'Ω':'W',
        'Ά':'A', 'Έ':'E', 'Ί':'I', 'Ό':'O', 'Ύ':'Y', 'Ή':'H', 'Ώ':'W', 'Ϊ':'I',
        'Ϋ':'Y'
    }
    alphabet = "".join(GREEK_MAP)
    # Tokenise into single greek letters and runs of non-greek characters;
    # only the former are looked up, everything else passes through as-is.
    tokenizer = re.compile('[%s]|[^%s]+' % (alphabet, alphabet))
    pieces = [GREEK_MAP.get(piece, piece) for piece in tokenizer.findall(string)]
    transliterated = ''.join(pieces)
    # Post-process the 'ου' digraph so it reads naturally in latin script.
    for digraph, replacement in (('oy', 'ou'), ('OY', 'OU'), ('Oy', 'Ou')):
        transliterated = transliterated.replace(digraph, replacement)
    return transliterated
def slugify_greek(string):
    """Slugify *string* after transliterating greek letters to latin."""
    return slugify(greek_to_english(string))
def generate_unique_slug(cls, string, max_length=-1):
    """
    Creates a slug with the appropriate maximum length.
    To ensure uniqueness, it adds a integer suffix to the slug if the
    "appropriate" slug is already used by some other object.
    Keyword arguments:
    cls -- the class of the model
    string -- the string to slugify
    max_length -- maximum length of the resulting slug. if negative, we will
    use the max_length of the slug field of the cls
    """
    if max_length < 0:
        max_length = cls._meta.get_field('slug').max_length
    slug = orig = slugify_greek(string)[:max_length]
    for x in itertools.count(1):
        # Stop at the first suffix (or the bare slug) no other object uses.
        if not cls.objects.filter(slug=slug).exists():
            break
        # truncate the original slug dynamically, minus 1 for the hyphen
        slug = "{0}-{1}".format(orig[:max_length - len(str(x)) - 1], x)
    return slug
def strip_whitespace_lines(string):
    """
    Remove whitespace lines from the beginning and the end of the string,
    and collapse runs of adjacent whitespace lines inside to a single
    blank line. Non-blank lines are never deduplicated.
    """
    # Normalise whitespace-only lines to truly empty lines.
    lines = ['' if not line.strip() else line for line in string.splitlines()]
    # remove whitespace lines from beginning and end
    while lines and not lines[0]:
        lines.pop(0)
    while lines and not lines[-1]:
        lines.pop()
    # Collapse runs of blank lines only. The original used
    # itertools.groupby over *all* lines, which also removed adjacent
    # identical non-blank lines -- a bug.
    result = []
    for line in lines:
        if line or (result and result[-1]):
            result.append(line)
    return '\n'.join(result)
don't strip adjacent no-whitespace lines
import re
import itertools
from django.template.defaultfilters import slugify
def greek_to_english(string):
    """
    Converts all greek letters to the corresponding english letters.
    Useful for creating song slugs from greek song titles.
    """
    GREEK_MAP = {
        'α':'a', 'β':'b', 'γ':'g', 'δ':'d', 'ε':'e', 'ζ':'z', 'η':'h', 'θ':'th',
        'ι':'i', 'κ':'k', 'λ':'l', 'μ':'m', 'ν':'n', 'ξ':'ks', 'ο':'o', 'π':'p',
        'ρ':'r', 'σ':'s', 'τ':'t', 'υ':'y', 'φ':'f', 'χ':'x', 'ψ':'ps', 'ω':'w',
        'ά':'a', 'έ':'e', 'ί':'i', 'ό':'o', 'ύ':'y', 'ή':'h', 'ώ':'w', 'ς':'s',
        'ϊ':'i', 'ΰ':'y', 'ϋ':'y', 'ΐ':'i',
        'Α':'A', 'Β':'B', 'Γ':'G', 'Δ':'D', 'Ε':'E', 'Ζ':'Z', 'Η':'H', 'Θ':'Th',
        'Ι':'I', 'Κ':'K', 'Λ':'L', 'Μ':'M', 'Ν':'N', 'Ξ':'Ks', 'Ο':'O', 'Π':'P',
        'Ρ':'R', 'Σ':'S', 'Τ':'T', 'Υ':'Y', 'Φ':'F', 'Χ':'X', 'Ψ':'Ps', 'Ω':'W',
        'Ά':'A', 'Έ':'E', 'Ί':'I', 'Ό':'O', 'Ύ':'Y', 'Ή':'H', 'Ώ':'W', 'Ϊ':'I',
        'Ϋ':'Y'
    }
    alphabet = "".join(GREEK_MAP)
    # Tokenise into single greek letters and runs of non-greek characters;
    # only the former are looked up, everything else passes through as-is.
    tokenizer = re.compile('[%s]|[^%s]+' % (alphabet, alphabet))
    pieces = [GREEK_MAP.get(piece, piece) for piece in tokenizer.findall(string)]
    transliterated = ''.join(pieces)
    # Post-process the 'ου' digraph so it reads naturally in latin script.
    for digraph, replacement in (('oy', 'ou'), ('OY', 'OU'), ('Oy', 'Ou')):
        transliterated = transliterated.replace(digraph, replacement)
    return transliterated
def slugify_greek(string):
    """Slugify *string* after transliterating greek letters to latin."""
    return slugify(greek_to_english(string))
def generate_unique_slug(cls, string, max_length=-1):
    """
    Creates a slug with the appropriate maximum length.
    To ensure uniqueness, it adds a integer suffix to the slug if the
    "appropriate" slug is already used by some other object.
    Keyword arguments:
    cls -- the class of the model
    string -- the string to slugify
    max_length -- maximum length of the resulting slug. if negative, we will
    use the max_length of the slug field of the cls
    """
    if max_length < 0:
        max_length = cls._meta.get_field('slug').max_length
    slug = orig = slugify_greek(string)[:max_length]
    for x in itertools.count(1):
        # Stop at the first suffix (or the bare slug) no other object uses.
        if not cls.objects.filter(slug=slug).exists():
            break
        # truncate the original slug dynamically, minus 1 for the hyphen
        slug = "{0}-{1}".format(orig[:max_length - len(str(x)) - 1], x)
    return slug
def strip_whitespace_lines(string):
    """
    Remove whitespace lines from the beginning and the end of the string,
    and collapse runs of adjacent whitespace lines inside to a single
    blank line.
    """
    # Normalise whitespace-only lines to truly empty lines FIRST. The
    # original ran re.sub('\n\n+', '\n\n') before blanking, so runs of
    # lines containing only spaces/tabs were never collapsed.
    lines = ['' if not line.strip() else line for line in string.splitlines()]
    # remove whitespace lines from beginning and end
    while lines and not lines[0]:
        lines.pop(0)
    while lines and not lines[-1]:
        lines.pop()
    # Collapse every run of blank lines to a single blank line.
    result = []
    for line in lines:
        if line or (result and result[-1]):
            result.append(line)
    return '\n'.join(result)
|
# stdlib, alphabetical
import os
# Django core, alphabetical
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
# External dependencies, alphabetical
from annoying.functions import get_object_or_None
# This project, alphabetical
from fpr import forms as fprforms
from fpr import models as fprmodels
from fpr import utils
def home(request):
    """Redirect to the format list, flashing the welcome text once per session."""
    # once per session, display the welcome text
    if 'welcome_message_shown' not in request.session:
        file_path = os.path.join(os.path.dirname(__file__), 'templates/welcome.html')
        # Context manager guarantees the file handle is closed (the
        # original opened it and never closed it).
        with open(file_path, 'r') as welcome_file:
            messages.info(request, welcome_file.read())
        request.session['welcome_message_shown'] = True
    return redirect('format_list')
############ FORMATS ############
def format_list(request):
    """List all Formats; the template reads this view's locals()."""
    formats = fprmodels.Format.objects.filter()
    # TODO Formats grouped by FormatGroup for better display in template
    return render(request, 'fpr/format/list.html', locals())
def format_detail(request, slug):
    """Show one Format and its active versions; template reads locals()."""
    format = get_object_or_404(fprmodels.Format, slug=slug)
    format_versions = fprmodels.FormatVersion.active.filter(format=format)
    return render(request, 'fpr/format/detail.html', locals())
def format_edit(request, slug=None):
if slug:
action = "Edit"
format = get_object_or_404(fprmodels.Format, slug=slug)
group = format.group
else:
action = "Create"
format = None
group = None
form = fprforms.FormatForm(request.POST or None, instance=format, prefix='f')
format_group_form = fprforms.FormatGroupForm(request.POST or None, instance=group, prefix='fg')
if form.is_valid():
if form.cleaned_data['group'] == 'new' and format_group_form.is_valid():
format = form.save(commit=False)
group = format_group_form.save()
format.group = group
format.save()
messages.info(request, 'Saved.')
return redirect('format_detail', format.slug)
elif form.cleaned_data['group'] != 'new':
format = form.save(commit=False)
group = fprmodels.FormatGroup.objects.get(uuid=form.cleaned_data['group'])
format.group = group
format = form.save()
messages.info(request, 'Saved.')
return redirect('format_detail', format.slug)
return render(request, 'fpr/format/form.html', locals())
############ FORMAT VERSIONS ############
def formatversion_detail(request, format_slug, slug=None):
    """Show a single FormatVersion; its parent Format is also looked up so
    it appears in the locals()-based template context."""
    format = get_object_or_404(fprmodels.Format, slug=format_slug)
    version = get_object_or_404(fprmodels.FormatVersion, slug=slug)
    return render(request, 'fpr/format/version/detail.html', locals())
def formatversion_edit(request, format_slug, slug=None):
    """Create a FormatVersion or replace an existing one (revisioning):
    saving with replacing=<old> supersedes the old revision and references
    to it are rewritten to the new instance."""
    format = get_object_or_404(fprmodels.Format, slug=format_slug)
    if slug:
        action = "Replace"
        version = get_object_or_404(fprmodels.FormatVersion, slug=slug, format=format)
    else:
        action = "Create"
        version = None
    form = fprforms.FormatVersionForm(request.POST or None, instance=version)
    if form.is_valid():
        # If replacing, disable old one and set replaces info for new one
        new_version = form.save(commit=False)
        new_version.format = format
        replaces = utils.determine_what_replaces_model_instance(fprmodels.FormatVersion, version)
        new_version.save(replacing=replaces)
        utils.update_references_to_object(fprmodels.FormatVersion, 'uuid', replaces, new_version)
        messages.info(request, 'Saved.')
        return redirect('format_detail', format.slug)
    else:
        utils.warn_if_replacing_with_old_revision(request, version)
    return render(request, 'fpr/format/version/form.html', locals())
def formatversion_delete(request, format_slug, slug):
    """Soft-delete a FormatVersion: sets enabled=False on it and on every
    dependent object, rather than removing rows."""
    format = get_object_or_404(fprmodels.Format, slug=format_slug)
    version = get_object_or_404(fprmodels.FormatVersion, slug=slug, format=format)
    dependent_objects = utils.dependent_objects(version)
    if request.method == 'POST':
        if 'delete' in request.POST:
            version.enabled = False
            version.save()
            messages.info(request, 'Disabled.')
            for obj in dependent_objects:
                obj['value'].enabled = False
                obj['value'].save()
            return redirect('format_detail', format.slug)
    return render(request, 'fpr/format/version/delete.html', locals())
############ FORMAT GROUPS ############
def formatgroup_list(request):
    """List all FormatGroups."""
    groups = fprmodels.FormatGroup.objects.all()
    return render(request, 'fpr/format/group/list.html', locals())
def formatgroup_edit(request, slug=None):
    """Create a FormatGroup or edit the one identified by `slug`.
    Unlike the revisioned entities, groups are edited in place."""
    if slug:
        action = "Edit"
        group = get_object_or_404(fprmodels.FormatGroup, slug=slug)
        group_formats = fprmodels.Format.objects.filter(group=group.uuid)
    else:
        action = "Create"
        group = None
    form = fprforms.FormatGroupForm(request.POST or None, instance=group)
    if form.is_valid():
        group = form.save()
        messages.info(request, 'Saved.')
        return redirect('formatgroup_list')
    return render(request, 'fpr/format/group/form.html', locals())
def formatgroup_delete(request, slug):
    """Hard-delete a FormatGroup.

    If the group still has member Formats, the user must pick a substitute
    group; each member is re-pointed at it before the delete. Fixes over
    the original: the user-facing message typo ('subtitutions'), and a POST
    without 'delete' (e.g. cancel) now falls through to the confirmation
    template instead of returning None, matching the other *_delete views.
    """
    group = get_object_or_404(fprmodels.FormatGroup, slug=slug)
    format_count = fprmodels.Format.objects.filter(group=group.uuid).count()
    other_groups = fprmodels.FormatGroup.objects.exclude(uuid=group.uuid)
    other_group_count = len(other_groups)
    if request.method == 'POST':
        if 'delete' in request.POST:
            # if formats exist that are a member of this group, perform group substitution
            formats = fprmodels.Format.objects.filter(group=group.uuid)
            if formats:
                # .get() returns '' when 'substitute' is absent, so one check covers both.
                substitute_group_uuid = request.POST.get('substitute', '')
                if substitute_group_uuid != '':
                    substitute_group = fprmodels.FormatGroup.objects.get(uuid=substitute_group_uuid)
                    substitution_count = 0
                    for format in formats:
                        format.group = substitute_group
                        format.save()
                        substitution_count += 1
                    messages.info(request, str(substitution_count) + ' substitutions were performed.')
                else:
                    messages.warning(request, 'Please select a group to substitute for this group in member formats.')
                    return redirect('formatgroup_delete', slug)
            group.delete()
            messages.info(request, 'Deleted.')
            return redirect('formatgroup_list')
    return render(request, 'fpr/format/group/delete.html', locals())
############ ID TOOLS ############
def idtool_list(request):
    """List enabled IDTools."""
    idtools = fprmodels.IDTool.objects.filter(enabled=True)
    # TODO Add IDToolConfig info??
    return render(request, 'fpr/idtool/list.html', locals())
def idtool_detail(request, slug):
    """Show one enabled IDTool and its active configurations."""
    idtool = get_object_or_404(fprmodels.IDTool, slug=slug, enabled=True)
    idtool_config = fprmodels.IDToolConfig.active.filter(tool=idtool)
    return render(request, 'fpr/idtool/detail.html', locals())
def idtool_edit(request, slug=None):
    """Create an IDTool or edit ('Replace') the one identified by `slug`."""
    if slug:
        action = "Replace"
        idtool = get_object_or_404(fprmodels.IDTool, slug=slug, enabled=True)
    else:
        action = "Create"
        idtool = None
    form = fprforms.IDToolForm(request.POST or None, instance=idtool)
    if form.is_valid():
        idtool = form.save()
        messages.info(request, 'Saved.')
        return redirect('idtool_detail', idtool.slug)
    return render(request, 'fpr/idtool/form.html', locals())
############ ID TOOL CONFIGURATIONS ############
def idtoolconfig_detail(request, idtool_slug, slug):
    """Show one IDToolConfig belonging to an enabled IDTool."""
    idtool = get_object_or_404(fprmodels.IDTool, slug=idtool_slug, enabled=True)
    config = get_object_or_404(fprmodels.IDToolConfig, slug=slug, tool=idtool)
    return render(request, 'fpr/idtool/config/detail.html', locals())
def idtoolconfig_edit(request, idtool_slug, slug=None):
    """Create an IDToolConfig or replace an existing one (revisioning).

    A nested IDCommandForm (prefix 'c') lets the user create the command
    inline when the config form's 'command' field is the literal 'new'.
    """
    idtool = get_object_or_404(fprmodels.IDTool, slug=idtool_slug, enabled=True)
    if slug:
        action = "Replace"
        config = get_object_or_404(fprmodels.IDToolConfig, slug=slug, tool=idtool)
        command = config.command
    else:
        action = "Create"
        config = None
        command = None
    form = fprforms.IDToolConfigForm(request.POST or None, instance=config)
    config_command_form = fprforms.IDCommandForm(request.POST or None, instance=command, prefix='c')
    if form.is_valid():
        replaces = utils.determine_what_replaces_model_instance(fprmodels.IDToolConfig, config)
        if form.cleaned_data['command'] == 'new' and config_command_form.is_valid():
            config = form.save(commit=False)
            command = config_command_form.save()
            config.tool = idtool
            config.command = command
            config.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('idtool_detail', idtool.slug)
        elif form.cleaned_data['command'] != 'new':
            config = form.save(commit=False)
            config.tool = idtool
            command = fprmodels.IDCommand.objects.get(uuid=form.cleaned_data['command'])
            config.command = command
            # NOTE(review): form.save() writes the row, then save(replacing=...)
            # saves it a second time -- confirm the double save is intentional.
            config = form.save()
            config.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('idtool_detail', idtool.slug)
    else:
        utils.warn_if_replacing_with_old_revision(request, config)
    return render(request, 'fpr/idtool/config/form.html', locals())
def idtoolconfig_delete(request, idtool_slug, slug):
    """Soft-delete an IDToolConfig and its dependent objects (enabled=False)."""
    idtool = get_object_or_404(fprmodels.IDTool, slug=idtool_slug)
    config = get_object_or_404(fprmodels.IDToolConfig, slug=slug, tool=idtool)
    dependent_objects = utils.dependent_objects(config)
    if request.method == 'POST':
        if 'delete' in request.POST:
            config.enabled = False
            config.save()
            messages.info(request, 'Disabled.')
            for obj in dependent_objects:
                obj['value'].enabled = False
                obj['value'].save()
            return redirect('idtool_detail', idtool.slug)
    return render(request, 'fpr/idtool/config/delete.html', locals())
############ ID RULES ############
def idrule_list(request):
    """List active IDRules."""
    idrules = fprmodels.IDRule.active.all()
    return render(request, 'fpr/idrule/list.html', locals())
def idrule_detail(request, uuid=None):
    """Show one IDRule by uuid."""
    idrule = get_object_or_404(fprmodels.IDRule, uuid=uuid)
    return render(request, 'fpr/idrule/detail.html', locals())
def idrule_edit(request, uuid=None):
    """Create an IDRule or edit one via the revisioning save(replacing=...)."""
    if uuid:
        action = "Edit"
        idrule = get_object_or_404(fprmodels.IDRule, uuid=uuid)
    else:
        action = "Create"
        idrule = None
    form = fprforms.IDRuleForm(request.POST or None, instance=idrule)
    if form.is_valid():
        new_idrule = form.save(commit=False)
        replaces = utils.determine_what_replaces_model_instance(fprmodels.IDRule, idrule)
        new_idrule.save(replacing=replaces)
        messages.info(request, 'Saved.')
        return redirect('idrule_list')
    else:
        utils.warn_if_replacing_with_old_revision(request, idrule)
    return render(request, 'fpr/idrule/form.html', locals())
def idrule_delete(request, uuid):
    """Soft-delete an IDRule (enabled=False)."""
    idrule = get_object_or_404(fprmodels.IDRule, uuid=uuid)
    if request.method == 'POST':
        if 'delete' in request.POST:
            idrule.enabled = False
            idrule.save()
            messages.info(request, 'Disabled.')
            return redirect('idrule_detail', idrule.uuid)
    return render(request, 'fpr/idrule/delete.html', locals())
############ ID COMMANDS ############
def idcommand_list(request):
    """List active IDCommands."""
    idcommands = fprmodels.IDCommand.active.all()
    return render(request, 'fpr/idcommand/list.html', locals())
def idcommand_detail(request, uuid):
    """Show one IDCommand and the active IDToolConfigs that use it."""
    idcommand = get_object_or_404(fprmodels.IDCommand, uuid=uuid)
    idtoolconfigs = fprmodels.IDToolConfig.active.filter(command=idcommand)
    return render(request, 'fpr/idcommand/detail.html', locals())
def idcommand_edit(request, uuid=None):
    """Create an IDCommand or edit one via revisioning; references to the
    replaced revision are rewritten to the new instance."""
    if uuid:
        action = "Edit"
        idcommand = get_object_or_404(fprmodels.IDCommand, uuid=uuid)
    else:
        action = "Create"
        idcommand = None
    form = fprforms.IDCommandForm(request.POST or None, instance=idcommand)
    if form.is_valid():
        new_idcommand = form.save(commit=False)
        replaces = utils.determine_what_replaces_model_instance(fprmodels.IDCommand, idcommand)
        new_idcommand.save(replacing=replaces)
        utils.update_references_to_object(fprmodels.IDCommand, 'uuid', replaces, new_idcommand)
        messages.info(request, 'Saved.')
        return redirect('idcommand_list')
    else:
        utils.warn_if_replacing_with_old_revision(request, idcommand)
    return render(request, 'fpr/idcommand/form.html', locals())
############ FP RULES ############
def fprule_list(request):
    """List enabled FPRules, excluding purpose='Characterize'."""
    fprules = fprmodels.FPRule.objects.filter(enabled=True).filter(~Q(purpose='Characterize'))
    return render(request, 'fpr/fprule/list.html', locals())
def fprule_detail(request, uuid):
    """Show one FPRule by uuid."""
    fprule = get_object_or_404(fprmodels.FPRule, uuid=uuid)
    return render(request, 'fpr/fprule/detail.html', locals())
def fprule_edit(request, uuid=None):
    """Create an FPRule or edit one via revisioning.

    A nested FPCommandForm (prefix 'fc') creates the command inline when the
    rule form's 'command' field is the literal 'new'.
    """
    if uuid:
        action = "Edit"
        fprule = get_object_or_404(fprmodels.FPRule, uuid=uuid)
        command = fprule.command
    else:
        action = "Create"
        fprule = None
        command = None
    form = fprforms.FPRuleForm(request.POST or None, instance=fprule, prefix='f')
    fprule_command_form = fprforms.FPCommandForm(request.POST or None, instance=command, prefix='fc')
    if form.is_valid():
        replaces = utils.determine_what_replaces_model_instance(fprmodels.FPRule, fprule)
        if form.cleaned_data['command'] == 'new' and fprule_command_form.is_valid():
            fprule = form.save(commit=False)
            command = fprule_command_form.save()
            fprule.command = command
            fprule.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('fprule_detail', fprule.uuid)
        elif form.cleaned_data['command'] != 'new':
            fprule = form.save(commit=False)
            command = fprmodels.FPCommand.objects.get(uuid=form.cleaned_data['command'])
            fprule.command = command
            # NOTE(review): form.save() writes the row, then save(replacing=...)
            # saves it again -- confirm the double save is intentional.
            fprule = form.save()
            fprule.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('fprule_list')
    else:
        utils.warn_if_replacing_with_old_revision(request, fprule)
    return render(request, 'fpr/fprule/form.html', locals())
############ FP TOOLS ############
def fptool_list(request):
    """List enabled FPTools."""
    fptools = fprmodels.FPTool.objects.filter(enabled=True)
    return render(request, 'fpr/fptool/list.html', locals())
def fptool_detail(request, slug):
    """Show one enabled FPTool and the FPCommands that reference it."""
    fptool = get_object_or_404(fprmodels.FPTool, slug=slug, enabled=True)
    fpcommands = fprmodels.FPCommand.objects.filter(tool__uuid=fptool.uuid)
    return render(request, 'fpr/fptool/detail.html', locals())
def fptool_edit(request, slug=None):
    """Create an FPTool or edit ('Replace') the one identified by `slug`."""
    if slug:
        action = "Replace"
        fptool = get_object_or_404(fprmodels.FPTool, slug=slug, enabled=True)
    else:
        action = "Create"
        fptool = None
    form = fprforms.FPToolForm(request.POST or None, instance=fptool)
    if form.is_valid():
        fptool = form.save()
        messages.info(request, 'Saved.')
        return redirect('fptool_detail', fptool.slug)
    return render(request, 'fpr/fptool/form.html', locals())
############ FP COMMANDS ############
def fpcommand_list(request):
    """List enabled FPCommands, excluding command_usage='Characterization'."""
    fpcommands = fprmodels.FPCommand.objects.filter(enabled=True).filter(~Q(command_usage='Characterization'))
    return render(request, 'fpr/fpcommand/list.html', locals())
def fpcommand_detail(request, uuid):
    """Show one enabled FPCommand by uuid."""
    fpcommand = get_object_or_404(fprmodels.FPCommand, uuid=uuid, enabled=True)
    return render(request, 'fpr/fpcommand/detail.html', locals())
def fpcommand_edit(request, uuid=None):
    """Create an FPCommand or replace one (revisioning).

    On POST the existing FPCommandTool rows are deleted and rebuilt from the
    submitted 'tool' list; on GET a '?parent=<uuid>' query param pre-selects
    that FPTool in the form's initial data.
    """
    if uuid:
        action = "Replace"
        fpcommand = get_object_or_404(fprmodels.FPCommand, uuid=uuid)
    else:
        action = "Create"
        fpcommand = None
    if request.method == 'POST':
        form = fprforms.FPCommandForm(request.POST, instance=fpcommand)
        if form.is_valid():
            # remove existing relations
            commandtools = fprmodels.FPCommandTool.objects.filter(command=fpcommand)
            for commandtool in commandtools:
                commandtool.delete()
            new_fpcommand = form.save(commit=False)
            replaces = utils.determine_what_replaces_model_instance(fprmodels.FPCommand, fpcommand)
            new_fpcommand.save(replacing=replaces)
            utils.update_references_to_object(fprmodels.FPCommand, 'uuid', replaces, new_fpcommand)
            # TODO: add many to many reference updating
            for tool_id in request.POST.getlist('tool'):
                tool = fprmodels.FPCommandTool(
                    command=new_fpcommand,
                    tool=fprmodels.FPTool.objects.get(pk=tool_id)
                )
                tool.save()
            messages.info(request, 'Saved.')
            return redirect('fpcommand_list')
    else:
        if 'parent' in request.GET:
            fptool = get_object_or_None(fprmodels.FPTool, uuid=request.GET.get('parent', ''), enabled=True)
            initial = {'tool': [fptool]}
        else:
            initial = None
        form = fprforms.FPCommandForm(instance=fpcommand, initial=initial)
    utils.warn_if_replacing_with_old_revision(request, fpcommand)
    return render(request, 'fpr/fpcommand/form.html', locals())
############ REVISIONS ############
def revision_list(request, entity_name, uuid):
    """Show the revision chain (ancestors + descendants) for one record.

    `entity_name` selects the model whose db_table is 'fpr_<entity_name>';
    only models with a 'replaces' attribute (the revisioned ones) are served,
    anything else raises Http404. Fix over the original: the old loop
    (`for model in available_models`) left `model` bound to the *last* model
    when nothing matched, so the `model == None` guard never fired and an
    unknown entity_name slipped past the 404 check.
    """
    # get model using entity name
    available_models = models.get_models()
    model = None
    for candidate in available_models:
        if candidate._meta.db_table == 'fpr_' + entity_name:
            model = candidate
            break
    if model is None:
        raise Http404
    # human-readable names
    revision_type = entity_name
    human_readable_names = {
        'formatversion': 'Format Version',
        'idtoolconfig': 'ID Tool Configuration',
        'idrule': 'ID Rule',
        'idcommand': 'Identification Command',
        'fpcommand': 'FP Command',
        'fprule': 'FP Rule'
    }
    if entity_name in human_readable_names:
        revision_type = human_readable_names[entity_name]
    # restrict to models that are intended to have revisions
    try:
        getattr(model, 'replaces')
        # get specific revision's data and augment with detail URL
        revision = model.objects.get(uuid=uuid)
        _augment_revisions_with_detail_url(entity_name, model, [revision])
        # get revision ancestor data and augment with detail URLs
        ancestors = utils.get_revision_ancestors(model, uuid, [])
        _augment_revisions_with_detail_url(entity_name, model, ancestors)
        # get revision descendant data and augment with detail URLs
        descendants = utils.get_revision_descendants(model, uuid, [])
        _augment_revisions_with_detail_url(entity_name, model, descendants)
        descendants.reverse()
        return render(request, 'fpr/revisions/list.html', locals())
    except AttributeError:
        raise Http404
def _augment_revisions_with_detail_url(entity_name, model, revisions):
    """Attach a `.detail_url` to each revision, pointing at its edit view.

    formatversion/idtoolconfig URLs need the parent slug as a first arg;
    everything else reverses on the revision's own slug. If slug-based
    reversing fails for any reason, fall back to the uuid-based URL.
    """
    for revision in revisions:
        detail_view_name = entity_name + '_edit'
        try:
            parent_key_value = None
            if entity_name == 'formatversion':
                parent_key_value = revision.format.slug
            if entity_name == 'idtoolconfig':
                parent_key_value = revision.tool.slug
            if parent_key_value:
                revision.detail_url = reverse(detail_view_name, args=[parent_key_value, revision.slug])
            else:
                revision.detail_url = reverse(detail_view_name, args=[revision.slug])
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; the fallback intent is the same.
            revision.detail_url = reverse(detail_view_name, args=[revision.uuid])
Minor commenting.
# stdlib, alphabetical
import os
# Django core, alphabetical
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
# External dependencies, alphabetical
from annoying.functions import get_object_or_None
# This project, alphabetical
from fpr import forms as fprforms
from fpr import models as fprmodels
from fpr import utils
def home(request):
    """Landing view: show the welcome text once per session, then redirect
    to the format list."""
    # once per session, display the welcome text
    if 'welcome_message_shown' not in request.session:
        welcome_path = os.path.join(os.path.dirname(__file__), 'templates/welcome.html')
        # 'with' guarantees the handle is closed (the original leaked it and
        # shadowed the builtin name 'file').
        with open(welcome_path, 'r') as welcome_file:
            messages.info(request, welcome_file.read())
        request.session['welcome_message_shown'] = True
    return redirect('format_list')
############ FORMATS ############
def format_list(request):
    """List every Format; template context is this view's locals()."""
    formats = fprmodels.Format.objects.filter()
    # TODO Formats grouped by FormatGroup for better display in template
    return render(request, 'fpr/format/list.html', locals())
def format_detail(request, slug):
    """Show one Format (by slug) and its active FormatVersions."""
    format = get_object_or_404(fprmodels.Format, slug=slug)
    format_versions = fprmodels.FormatVersion.active.filter(format=format)
    return render(request, 'fpr/format/detail.html', locals())
def format_edit(request, slug=None):
    """Create a Format, or edit the one identified by `slug`.

    A nested FormatGroupForm (prefix 'fg') lets the user create a group
    inline when the FormatForm's 'group' field is the literal string 'new'.
    Template context is locals(), so variable names here are part of the
    template contract.
    """
    if slug:
        action = "Edit"
        format = get_object_or_404(fprmodels.Format, slug=slug)
        group = format.group
    else:
        action = "Create"
        format = None
        group = None
    form = fprforms.FormatForm(request.POST or None, instance=format, prefix='f')
    format_group_form = fprforms.FormatGroupForm(request.POST or None, instance=group, prefix='fg')
    if form.is_valid():
        if form.cleaned_data['group'] == 'new' and format_group_form.is_valid():
            format = form.save(commit=False)
            group = format_group_form.save()
            format.group = group
            format.save()
            messages.info(request, 'Saved.')
            return redirect('format_detail', format.slug)
        elif form.cleaned_data['group'] != 'new':
            format = form.save(commit=False)
            group = fprmodels.FormatGroup.objects.get(uuid=form.cleaned_data['group'])
            format.group = group
            # form.save() persists the instance mutated above (the
            # commit=False call did not write it).
            format = form.save()
            messages.info(request, 'Saved.')
            return redirect('format_detail', format.slug)
    return render(request, 'fpr/format/form.html', locals())
############ FORMAT VERSIONS ############
def formatversion_detail(request, format_slug, slug=None):
    """Show a single FormatVersion; its parent Format is also looked up so
    it appears in the locals()-based template context."""
    format = get_object_or_404(fprmodels.Format, slug=format_slug)
    version = get_object_or_404(fprmodels.FormatVersion, slug=slug)
    return render(request, 'fpr/format/version/detail.html', locals())
def formatversion_edit(request, format_slug, slug=None):
    """Create a FormatVersion or replace an existing one (revisioning):
    saving with replacing=<old> supersedes the old revision and references
    to it are rewritten to the new instance."""
    format = get_object_or_404(fprmodels.Format, slug=format_slug)
    if slug:
        action = "Replace"
        version = get_object_or_404(fprmodels.FormatVersion, slug=slug, format=format)
    else:
        action = "Create"
        version = None
    form = fprforms.FormatVersionForm(request.POST or None, instance=version)
    if form.is_valid():
        # If replacing, disable old one and set replaces info for new one
        new_version = form.save(commit=False)
        new_version.format = format
        replaces = utils.determine_what_replaces_model_instance(fprmodels.FormatVersion, version)
        new_version.save(replacing=replaces)
        utils.update_references_to_object(fprmodels.FormatVersion, 'uuid', replaces, new_version)
        messages.info(request, 'Saved.')
        return redirect('format_detail', format.slug)
    else:
        utils.warn_if_replacing_with_old_revision(request, version)
    return render(request, 'fpr/format/version/form.html', locals())
def formatversion_delete(request, format_slug, slug):
    """Soft-delete a FormatVersion: sets enabled=False on it and on every
    dependent object, rather than removing rows."""
    format = get_object_or_404(fprmodels.Format, slug=format_slug)
    version = get_object_or_404(fprmodels.FormatVersion, slug=slug, format=format)
    dependent_objects = utils.dependent_objects(version)
    if request.method == 'POST':
        if 'delete' in request.POST:
            version.enabled = False
            version.save()
            messages.info(request, 'Disabled.')
            for obj in dependent_objects:
                obj['value'].enabled = False
                obj['value'].save()
            return redirect('format_detail', format.slug)
    return render(request, 'fpr/format/version/delete.html', locals())
############ FORMAT GROUPS ############
def formatgroup_list(request):
    """List all FormatGroups."""
    groups = fprmodels.FormatGroup.objects.all()
    return render(request, 'fpr/format/group/list.html', locals())
def formatgroup_edit(request, slug=None):
    """Create a FormatGroup or edit the one identified by `slug`.
    Unlike the revisioned entities, groups are edited in place."""
    if slug:
        action = "Edit"
        group = get_object_or_404(fprmodels.FormatGroup, slug=slug)
        group_formats = fprmodels.Format.objects.filter(group=group.uuid)
    else:
        action = "Create"
        group = None
    form = fprforms.FormatGroupForm(request.POST or None, instance=group)
    if form.is_valid():
        group = form.save()
        messages.info(request, 'Saved.')
        return redirect('formatgroup_list')
    return render(request, 'fpr/format/group/form.html', locals())
def formatgroup_delete(request, slug):
    """Hard-delete a FormatGroup.

    If the group still has member Formats, the user must pick a substitute
    group; each member is re-pointed at it before the delete. Fixes over
    the original: the user-facing message typo ('subtitutions'), and a POST
    without 'delete' (e.g. cancel) now falls through to the confirmation
    template instead of returning None, matching the other *_delete views.
    """
    group = get_object_or_404(fprmodels.FormatGroup, slug=slug)
    format_count = fprmodels.Format.objects.filter(group=group.uuid).count()
    other_groups = fprmodels.FormatGroup.objects.exclude(uuid=group.uuid)
    other_group_count = len(other_groups)
    if request.method == 'POST':
        if 'delete' in request.POST:
            # if formats exist that are a member of this group, perform group substitution
            formats = fprmodels.Format.objects.filter(group=group.uuid)
            if formats:
                # .get() returns '' when 'substitute' is absent, so one check covers both.
                substitute_group_uuid = request.POST.get('substitute', '')
                if substitute_group_uuid != '':
                    substitute_group = fprmodels.FormatGroup.objects.get(uuid=substitute_group_uuid)
                    substitution_count = 0
                    for format in formats:
                        format.group = substitute_group
                        format.save()
                        substitution_count += 1
                    messages.info(request, str(substitution_count) + ' substitutions were performed.')
                else:
                    messages.warning(request, 'Please select a group to substitute for this group in member formats.')
                    return redirect('formatgroup_delete', slug)
            group.delete()
            messages.info(request, 'Deleted.')
            return redirect('formatgroup_list')
    return render(request, 'fpr/format/group/delete.html', locals())
############ ID TOOLS ############
def idtool_list(request):
    """List enabled IDTools."""
    idtools = fprmodels.IDTool.objects.filter(enabled=True)
    # TODO Add IDToolConfig info??
    return render(request, 'fpr/idtool/list.html', locals())
def idtool_detail(request, slug):
    """Show one enabled IDTool and its active configurations."""
    idtool = get_object_or_404(fprmodels.IDTool, slug=slug, enabled=True)
    idtool_config = fprmodels.IDToolConfig.active.filter(tool=idtool)
    return render(request, 'fpr/idtool/detail.html', locals())
def idtool_edit(request, slug=None):
    """Create an IDTool or edit ('Replace') the one identified by `slug`."""
    if slug:
        action = "Replace"
        idtool = get_object_or_404(fprmodels.IDTool, slug=slug, enabled=True)
    else:
        action = "Create"
        idtool = None
    form = fprforms.IDToolForm(request.POST or None, instance=idtool)
    if form.is_valid():
        idtool = form.save()
        messages.info(request, 'Saved.')
        return redirect('idtool_detail', idtool.slug)
    return render(request, 'fpr/idtool/form.html', locals())
############ ID TOOL CONFIGURATIONS ############
def idtoolconfig_detail(request, idtool_slug, slug):
    """Show one IDToolConfig belonging to an enabled IDTool."""
    idtool = get_object_or_404(fprmodels.IDTool, slug=idtool_slug, enabled=True)
    config = get_object_or_404(fprmodels.IDToolConfig, slug=slug, tool=idtool)
    return render(request, 'fpr/idtool/config/detail.html', locals())
def idtoolconfig_edit(request, idtool_slug, slug=None):
    """Create an IDToolConfig or replace an existing one (revisioning).

    A nested IDCommandForm (prefix 'c') lets the user create the command
    inline when the config form's 'command' field is the literal 'new'.
    """
    idtool = get_object_or_404(fprmodels.IDTool, slug=idtool_slug, enabled=True)
    if slug:
        action = "Replace"
        config = get_object_or_404(fprmodels.IDToolConfig, slug=slug, tool=idtool)
        command = config.command
    else:
        action = "Create"
        config = None
        command = None
    form = fprforms.IDToolConfigForm(request.POST or None, instance=config)
    config_command_form = fprforms.IDCommandForm(request.POST or None, instance=command, prefix='c')
    if form.is_valid():
        replaces = utils.determine_what_replaces_model_instance(fprmodels.IDToolConfig, config)
        if form.cleaned_data['command'] == 'new' and config_command_form.is_valid():
            config = form.save(commit=False)
            command = config_command_form.save()
            config.tool = idtool
            config.command = command
            config.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('idtool_detail', idtool.slug)
        elif form.cleaned_data['command'] != 'new':
            config = form.save(commit=False)
            config.tool = idtool
            command = fprmodels.IDCommand.objects.get(uuid=form.cleaned_data['command'])
            config.command = command
            # NOTE(review): form.save() writes the row, then save(replacing=...)
            # saves it a second time -- confirm the double save is intentional.
            config = form.save()
            config.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('idtool_detail', idtool.slug)
    else:
        utils.warn_if_replacing_with_old_revision(request, config)
    return render(request, 'fpr/idtool/config/form.html', locals())
def idtoolconfig_delete(request, idtool_slug, slug):
    """Soft-delete an IDToolConfig and its dependent objects (enabled=False)."""
    idtool = get_object_or_404(fprmodels.IDTool, slug=idtool_slug)
    config = get_object_or_404(fprmodels.IDToolConfig, slug=slug, tool=idtool)
    dependent_objects = utils.dependent_objects(config)
    if request.method == 'POST':
        if 'delete' in request.POST:
            config.enabled = False
            config.save()
            messages.info(request, 'Disabled.')
            for obj in dependent_objects:
                obj['value'].enabled = False
                obj['value'].save()
            return redirect('idtool_detail', idtool.slug)
    return render(request, 'fpr/idtool/config/delete.html', locals())
############ ID RULES ############
def idrule_list(request):
    """List active IDRules."""
    idrules = fprmodels.IDRule.active.all()
    return render(request, 'fpr/idrule/list.html', locals())
def idrule_detail(request, uuid=None):
    """Show one IDRule by uuid."""
    idrule = get_object_or_404(fprmodels.IDRule, uuid=uuid)
    return render(request, 'fpr/idrule/detail.html', locals())
def idrule_edit(request, uuid=None):
    """Create an IDRule or edit one via the revisioning save(replacing=...)."""
    if uuid:
        action = "Edit"
        idrule = get_object_or_404(fprmodels.IDRule, uuid=uuid)
    else:
        action = "Create"
        idrule = None
    form = fprforms.IDRuleForm(request.POST or None, instance=idrule)
    if form.is_valid():
        new_idrule = form.save(commit=False)
        replaces = utils.determine_what_replaces_model_instance(fprmodels.IDRule, idrule)
        new_idrule.save(replacing=replaces)
        messages.info(request, 'Saved.')
        return redirect('idrule_list')
    else:
        utils.warn_if_replacing_with_old_revision(request, idrule)
    return render(request, 'fpr/idrule/form.html', locals())
def idrule_delete(request, uuid):
    """Soft-delete an IDRule (enabled=False)."""
    idrule = get_object_or_404(fprmodels.IDRule, uuid=uuid)
    if request.method == 'POST':
        if 'delete' in request.POST:
            idrule.enabled = False
            idrule.save()
            messages.info(request, 'Disabled.')
            return redirect('idrule_detail', idrule.uuid)
    return render(request, 'fpr/idrule/delete.html', locals())
############ ID COMMANDS ############
def idcommand_list(request):
    """List active IDCommands."""
    idcommands = fprmodels.IDCommand.active.all()
    return render(request, 'fpr/idcommand/list.html', locals())
def idcommand_detail(request, uuid):
    """Show one IDCommand and the active IDToolConfigs that use it."""
    idcommand = get_object_or_404(fprmodels.IDCommand, uuid=uuid)
    idtoolconfigs = fprmodels.IDToolConfig.active.filter(command=idcommand)
    return render(request, 'fpr/idcommand/detail.html', locals())
def idcommand_edit(request, uuid=None):
    """Create an IDCommand or edit one via revisioning; references to the
    replaced revision are rewritten to the new instance."""
    if uuid:
        action = "Edit"
        idcommand = get_object_or_404(fprmodels.IDCommand, uuid=uuid)
    else:
        action = "Create"
        idcommand = None
    form = fprforms.IDCommandForm(request.POST or None, instance=idcommand)
    if form.is_valid():
        new_idcommand = form.save(commit=False)
        replaces = utils.determine_what_replaces_model_instance(fprmodels.IDCommand, idcommand)
        new_idcommand.save(replacing=replaces)
        utils.update_references_to_object(fprmodels.IDCommand, 'uuid', replaces, new_idcommand)
        messages.info(request, 'Saved.')
        return redirect('idcommand_list')
    else:
        utils.warn_if_replacing_with_old_revision(request, idcommand)
    return render(request, 'fpr/idcommand/form.html', locals())
############ FP RULES ############
def fprule_list(request):
    """List enabled FPRules, excluding purpose='Characterize'."""
    fprules = fprmodels.FPRule.objects.filter(enabled=True).filter(~Q(purpose='Characterize'))
    return render(request, 'fpr/fprule/list.html', locals())
def fprule_detail(request, uuid):
    """Show one FPRule by uuid."""
    fprule = get_object_or_404(fprmodels.FPRule, uuid=uuid)
    return render(request, 'fpr/fprule/detail.html', locals())
def fprule_edit(request, uuid=None):
    """Create an FPRule or edit one via revisioning.

    A nested FPCommandForm (prefix 'fc') creates the command inline when the
    rule form's 'command' field is the literal 'new'.
    """
    if uuid:
        action = "Edit"
        fprule = get_object_or_404(fprmodels.FPRule, uuid=uuid)
        command = fprule.command
    else:
        action = "Create"
        fprule = None
        command = None
    form = fprforms.FPRuleForm(request.POST or None, instance=fprule, prefix='f')
    fprule_command_form = fprforms.FPCommandForm(request.POST or None, instance=command, prefix='fc')
    if form.is_valid():
        replaces = utils.determine_what_replaces_model_instance(fprmodels.FPRule, fprule)
        if form.cleaned_data['command'] == 'new' and fprule_command_form.is_valid():
            fprule = form.save(commit=False)
            command = fprule_command_form.save()
            fprule.command = command
            fprule.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('fprule_detail', fprule.uuid)
        elif form.cleaned_data['command'] != 'new':
            fprule = form.save(commit=False)
            command = fprmodels.FPCommand.objects.get(uuid=form.cleaned_data['command'])
            fprule.command = command
            # NOTE(review): form.save() writes the row, then save(replacing=...)
            # saves it again -- confirm the double save is intentional.
            fprule = form.save()
            fprule.save(replacing=replaces)
            messages.info(request, 'Saved.')
            return redirect('fprule_list')
    else:
        utils.warn_if_replacing_with_old_revision(request, fprule)
    return render(request, 'fpr/fprule/form.html', locals())
############ FP TOOLS ############
def fptool_list(request):
    """List enabled FPTools."""
    fptools = fprmodels.FPTool.objects.filter(enabled=True)
    return render(request, 'fpr/fptool/list.html', locals())
def fptool_detail(request, slug):
    """Show one enabled FPTool and the FPCommands that reference it."""
    fptool = get_object_or_404(fprmodels.FPTool, slug=slug, enabled=True)
    fpcommands = fprmodels.FPCommand.objects.filter(tool__uuid=fptool.uuid)
    return render(request, 'fpr/fptool/detail.html', locals())
def fptool_edit(request, slug=None):
    """Create an FPTool or edit ('Replace') the one identified by `slug`."""
    if slug:
        action = "Replace"
        fptool = get_object_or_404(fprmodels.FPTool, slug=slug, enabled=True)
    else:
        action = "Create"
        fptool = None
    form = fprforms.FPToolForm(request.POST or None, instance=fptool)
    if form.is_valid():
        fptool = form.save()
        messages.info(request, 'Saved.')
        return redirect('fptool_detail', fptool.slug)
    return render(request, 'fpr/fptool/form.html', locals())
############ FP COMMANDS ############
def fpcommand_list(request):
    """List enabled FPCommands, excluding command_usage='Characterization'."""
    fpcommands = fprmodels.FPCommand.objects.filter(enabled=True).filter(~Q(command_usage='Characterization'))
    return render(request, 'fpr/fpcommand/list.html', locals())
def fpcommand_detail(request, uuid):
    """Show one enabled FPCommand by uuid."""
    fpcommand = get_object_or_404(fprmodels.FPCommand, uuid=uuid, enabled=True)
    return render(request, 'fpr/fpcommand/detail.html', locals())
def fpcommand_edit(request, uuid=None):
    """Create an FPCommand or replace one (revisioning).

    On POST the existing FPCommandTool rows are deleted and rebuilt from the
    submitted 'tool' list; on GET a '?parent=<uuid>' query param pre-selects
    that FPTool in the form's initial data.
    """
    if uuid:
        action = "Replace"
        fpcommand = get_object_or_404(fprmodels.FPCommand, uuid=uuid)
    else:
        action = "Create"
        fpcommand = None
    if request.method == 'POST':
        form = fprforms.FPCommandForm(request.POST, instance=fpcommand)
        if form.is_valid():
            # remove existing relations
            commandtools = fprmodels.FPCommandTool.objects.filter(command=fpcommand)
            for commandtool in commandtools:
                commandtool.delete()
            # save command
            new_fpcommand = form.save(commit=False)
            replaces = utils.determine_what_replaces_model_instance(fprmodels.FPCommand, fpcommand)
            new_fpcommand.save(replacing=replaces)
            utils.update_references_to_object(fprmodels.FPCommand, 'uuid', replaces, new_fpcommand)
            # create relations to tool
            for tool_id in request.POST.getlist('tool'):
                tool = fprmodels.FPCommandTool(
                    command=new_fpcommand,
                    tool=fprmodels.FPTool.objects.get(pk=tool_id)
                )
                tool.save()
            messages.info(request, 'Saved.')
            return redirect('fpcommand_list')
    else:
        if 'parent' in request.GET:
            fptool = get_object_or_None(fprmodels.FPTool, uuid=request.GET.get('parent', ''), enabled=True)
            initial = {'tool': [fptool]}
        else:
            initial = None
        form = fprforms.FPCommandForm(instance=fpcommand, initial=initial)
    utils.warn_if_replacing_with_old_revision(request, fpcommand)
    return render(request, 'fpr/fpcommand/form.html', locals())
############ REVISIONS ############
def revision_list(request, entity_name, uuid):
    """
    Render the revision history (the revision itself, its ancestors and
    its descendants) for the FPR entity identified by entity_name/uuid.
    Raises Http404 for unknown entities or entities without revisions.
    """
    # Resolve the model class from the entity name.
    # BUG FIX: the loop previously reused 'model' as its loop variable, so
    # when no table matched, 'model' held the *last* model rather than None
    # and the Http404 below could never trigger.
    available_models = models.get_models()
    model = None
    for candidate in available_models:
        if candidate._meta.db_table == 'fpr_' + entity_name:
            model = candidate
            break
    if model is None:
        raise Http404
    # human-readable names
    revision_type = entity_name
    human_readable_names = {
        'formatversion': 'Format Version',
        'idtoolconfig': 'ID Tool Configuration',
        'idrule': 'ID Rule',
        'idcommand': 'Identification Command',
        'fpcommand': 'FP Command',
        'fprule': 'FP Rule'
    }
    if entity_name in human_readable_names:
        revision_type = human_readable_names[entity_name]
    # Restrict to models that are intended to have revisions.
    # NOTE(review): this except also swallows AttributeErrors raised anywhere
    # in the block below, not just by the getattr probe -- confirm intended.
    try:
        getattr(model, 'replaces')
        # get specific revision's data and augment with detail URL
        revision = model.objects.get(uuid=uuid)
        _augment_revisions_with_detail_url(entity_name, model, [revision])
        # get revision ancestor data and augment with detail URLs
        ancestors = utils.get_revision_ancestors(model, uuid, [])
        _augment_revisions_with_detail_url(entity_name, model, ancestors)
        # get revision descendant data and augment with detail URLs
        descendants = utils.get_revision_descendants(model, uuid, [])
        _augment_revisions_with_detail_url(entity_name, model, descendants)
        descendants.reverse()
        return render(request, 'fpr/revisions/list.html', locals())
    except AttributeError:
        raise Http404
def _augment_revisions_with_detail_url(entity_name, model, revisions):
    """
    Set a .detail_url attribute on each revision, pointing at the entity's
    edit view.  Entities nested under a parent (format versions, ID tool
    configurations) include the parent's slug in the URL; any failure to
    reverse a slug-based URL falls back to a UUID-based URL.
    """
    for revision in revisions:
        detail_view_name = entity_name + '_edit'
        try:
            parent_key_value = None
            if entity_name == 'formatversion':
                parent_key_value = revision.format.slug
            if entity_name == 'idtoolconfig':
                parent_key_value = revision.tool.slug
            if parent_key_value:
                revision.detail_url = reverse(detail_view_name, args=[parent_key_value, revision.slug])
            else:
                revision.detail_url = reverse(detail_view_name, args=[revision.slug])
        except Exception:
            # BUG FIX: narrowed from a bare 'except:' (which also caught
            # KeyboardInterrupt/SystemExit); any reversal failure still
            # falls back to the UUID-based URL.
            revision.detail_url = reverse(detail_view_name, args=[revision.uuid])
|
from __future__ import print_function
import os, sys
import json
import subprocess
import time
import tempfile
import threading
from io import BytesIO
import errno
import boto3
import botocore
import socket
debug = True  # when True, trace subprocess invocations and pipeline activity to stderr
verbose = False  # when True, report non-fatal problems (e.g. size-estimate failures) to stderr
def _find_snapshot_index(name, snapshots):
"""
Given a list of snapshots (that is, an ordered-by-creation-time
array of dictionaries), return the index. If it's not found,
raise KeyError.
"""
for indx, snapshot in enumerate(snapshots):
if snapshot["Name"] == name:
return indx
raise KeyError(name)
def _last_common_snapshot(source, target):
"""
Given a list of snapshots (which are dictionaries),
return the last common snapshot (also as a dictionary,
but a different one). The inputs are a list, sorted
by creation date.
The return value -- if any -- will include:
- Name: (str) the name of the snapshot
- CreationTime: (int) the creation time of the snapshot.
This is taken from the source.
Optional values:
- incremental: (bool) Whether or not this was an incremental
snapshot. This is always taken from target.
- parent: (str) If an incremental snapshot, then the previous
snapshot used to create it. This is always taken from target.
- ResumeToken: (str) If the snapshot in question was interrupted,
and can be resumed, this will be the value. This value must be
present and equal in both source and target, or else it will not
be in the return value.
"""
# We're going to turn the target list into a dictionary, first.
target_dict = dict((el["Name"], el) for el in target)
# Now we go through the source list, in reversed order, seeing
# if the source snapshot is in target.
for snap in reversed(source):
if snap["Name"] in target_dict:
t = target_dict[snap["Name"]]
# Great, we found it!
rv = {"Name" : snap["Name"], "CreationTime" : int(snap["CreationTime"]) }
rv["incremental"] = t.get("incremental", False)
if "parent" in t:
rv["parent"] = t["parent"]
if "ResumeToken" in snap and "ResumeToken" in t:
if t["ResumeToken"] == snap["ResumeToken"]:
rv["ResumeToken"] = snap['ResumeToken']
return rv
return None
def _merge_snapshots(list1, list2):
"""
Given a list of snapshots, return a list of
common snapshots (sorted by creation time).
The return list is simply an array of names.
N.B.: Snapshots are assumed to be the same if
they have the same name!
"""
rv = []
if list2:
dict2 = dict((el["Name"], True) for el in list2)
for snapname in [x["Name"] for x in list1]:
if snapname in dict2:
rv.append(snapname)
else:
pass;
return rv
def CHECK_OUTPUT(*args, **kwargs):
    """subprocess.check_output wrapper; traces the invocation when debug is set."""
    if debug:
        sys.stderr.write("CHECK_OUTPUT({}, {})\n".format(args, kwargs))
    return subprocess.check_output(*args, **kwargs)
def CALL(*args, **kwargs):
    """subprocess.call wrapper; traces the invocation when debug is set."""
    if debug:
        # BUG FIX: file=sys.stderr was previously passed to str.format()
        # (where it is a silently-ignored keyword) instead of print(), so
        # this one trace went to stdout unlike every other wrapper.
        print("CALL({}, {})".format(args, kwargs), file=sys.stderr)
    return subprocess.call(*args, **kwargs)
def CHECK_CALL(*args, **kwargs):
    """subprocess.check_call wrapper; traces the invocation when debug is set."""
    if debug:
        sys.stderr.write("CHECK_CALL({}, {})\n".format(args, kwargs))
    return subprocess.check_call(*args, **kwargs)
def POPEN(*args, **kwargs):
    """subprocess.Popen wrapper; traces the invocation when debug is set."""
    if debug:
        sys.stderr.write("POPEN({}, {})\n".format(args, kwargs))
    return subprocess.Popen(*args, **kwargs)
def _get_snapshot_size_estimate(ds, toname, fromname=None, recursive=False):
    """
    Return 'zfs send's size estimate (in bytes) for sending ds@toname.
    When fromname is given, the estimate is for an incremental send
    starting from that snapshot.  Returns 0 if no size line is found;
    re-raises CalledProcessError if the zfs command fails.
    """
    command = ["/sbin/zfs", "send", "-nPv"]
    if recursive:
        command.append("-R")
    if fromname:
        command.extend(["-i", "{}@{}".format(ds, fromname)])
    command.append("{}@{}".format(ds, toname))
    try:
        lines = CHECK_OUTPUT(command, stderr=subprocess.STDOUT).decode("utf-8").split("\n")
        for line in lines:
            if line.startswith("size"):
                (label, value) = line.split()
                if label == "size":
                    return int(value)
    except subprocess.CalledProcessError as e:
        if verbose:
            print("`{}` got exception {}".format(" ".join(command), str(e)), file=sys.stderr)
        raise
    return 0
def _get_snapshots(ds):
    """
    Return the snapshots of dataset *ds* as a list of dicts with
    Name and CreationTime keys (plus ResumeToken when one exists),
    oldest first.  Only works for local ZFS pools; sorting is
    delegated to /sbin/zfs rather than done here.
    """
    command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation,receive_resume_token",
               "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
               ds]
    if debug:
        print("get_snapshots: {}".format(" ".join(command)), file=sys.stderr)
    try:
        raw_lines = CHECK_OUTPUT(command).decode('utf-8').split("\n")
    except subprocess.CalledProcessError:
        # We'll assume this is because there are no snapshots
        return []
    result = []
    for raw in raw_lines:
        line = raw.rstrip()
        if not line:
            continue
        if debug:
            print("Output line: {}".format(line), file=sys.stderr)
        (full_name, ctime, resume_token) = line.split("\t")
        entry = {"Name": full_name.split('@')[1], "CreationTime": int(ctime)}
        if resume_token != "-":
            entry["ResumeToken"] = resume_token
        result.append(entry)
    return result
class ZFSBackupError(ValueError):
    """Base exception for all errors raised by the ZFS backup machinery."""
    pass
class ZFSBackupMissingFullBackupError(ZFSBackupError):
    """Raised when a restore requires a full backup but none is available."""
    def __init__(self):
        # BUG FIX: super() was previously called without the instance
        # (super(Cls).__init__(self, msg)), the unbound-super form, which
        # raised TypeError whenever this exception was constructed.
        super(ZFSBackupMissingFullBackupError, self).__init__(
            "No full backup available")
class ZFSBackupSnapshotNotFoundError(ZFSBackupError):
    """Raised when a named snapshot cannot be found; stores snapshot_name."""
    def __init__(self, snapname):
        self.snapshot_name = snapname
        # BUG FIX: super() was previously called without the instance
        # (super(Cls).__init__(self, msg)), the unbound-super form, which
        # raised TypeError whenever this exception was constructed.
        super(ZFSBackupSnapshotNotFoundError, self).__init__(
            "Specified snapshot {} does not exist".format(snapname))
class ZFSBackupFilter(object):
    """
    Base class for ZFS backup filters.

    A filter exposes a few properties plus start_backup() and
    start_restore() methods.  The start_* methods take a source, which
    should be a pipe.  In general, filters should use a subprocess or
    thread unless they are the terminus of the pipeline (doing
    otherwise risks deadlock).

    The transformative property indicates that the filter transforms
    the data as it processes it.  Some filters don't -- the counter
    filter, for example.  This matters to ZFSBackups subclasses such
    as ZFSBackupSSH, which must apply transformative filters on the
    other end as part of backup and restore.  It defaults to True;
    subclasses can change it, and the object can alter it.
    """
    def __init__(self, name="Null Filter"):
        self._name = name
        self.transformative = True

    @property
    def name(self):
        return self._name

    @property
    def transformative(self):
        return self._transformative

    @transformative.setter
    def transformative(self, b):
        self._transformative = b

    @property
    def error_output(self):
        # The base filter has no error stream.
        return None

    @error_output.setter
    def error_output(self, e):
        # Deliberately ignored in the base class.
        pass

    @property
    def backup_command(self):
        return []

    @property
    def restore_command(self):
        return []

    def start_backup(self, source):
        """
        Start the filter when doing a backup.  E.g., for a compression
        filter, this would launch the gzip subprocess.  The base class
        passes the source through untouched.
        """
        return source

    def start_restore(self, source):
        """
        Start the filter when doing a restore.  E.g., for a compression
        filter, this would launch the gzcat subprocess.  The base class
        passes the source through untouched.
        """
        return source

    def finish(self):
        """Any cleanup work required for the filter; nothing here."""
        pass
class ZFSBackupFilterThread(ZFSBackupFilter):
    """
    Base class for a thread-based filter.  Either it should be
    subclassed (see ZFSBackupFilterCounter below), or it should
    be called with a callable object as the "process=" parameter.
    The process method may need to check ZFSBackupFilterThread._mode
    to decide if it is backing up or restoring.
    Interestingly, this doesn't seem to actually work the way I'd expected:
    when writing from a thread to a popen'd pipe, the pipe will block, even
    when a thread closes the write end of the pipe.
    """
    def __init__(self, process=None, name="Thread Filter"):
        super(ZFSBackupFilterThread, self).__init__(name=name)
        # BUG FIX: the process callable was accepted but never stored, so
        # process() below raised AttributeError on self._process.
        self._process = process
        self.thread = None
        self.source = None
        self.input_pipe = None
        self.output_pipe = None
        self.transformative = False

    @property
    def backup_command(self):
        # Thread filters run in-process; there is no external command.
        return None

    @property
    def restore_command(self):
        return None

    def process(self, buf):
        # Subclasses should do any processing here; by default,
        # delegate to the callable supplied at construction (if any).
        if self._process:
            return self._process(buf)
        else:
            return buf

    def run(self, *args, **kwargs):
        # Pump data from self.source through process() into the pipe.
        # We use a try/finally block to ensure
        # the write-side is always closed.
        try:
            while True:
                b = self.source.read(1024*1024)
                if b:
                    if debug:
                        print("In thread {}, read {} bytes".format(self.name, len(b)), file=sys.stderr)
                    temp_buf = self.process(b)
                    # BUG FIX: the raw input buffer was previously written,
                    # silently discarding whatever process() produced.
                    os.write(self.output_pipe, temp_buf)
                    if debug:
                        print("In thread {}, just wrote {} bytes".format(self.name, len(temp_buf)), file=sys.stderr)
                else:
                    if debug:
                        print("In thread {}, done reading from stream".format(self.name), file=sys.stderr)
                    break
        finally:
            try:
                os.close(self.output_pipe)
            except OSError:
                pass

    def _start(self, source):
        import fcntl
        self.source = source
        (self.input_pipe, self.output_pipe) = os.pipe()
        # We need to set F_CLOEXEC on the output_pipe, or
        # a subsequent Popen call will keep a dangling open
        # reference around.
        flags = fcntl.fcntl(self.output_pipe, fcntl.F_GETFD)
        fcntl.fcntl(self.output_pipe, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        self._py_read = os.fdopen(self.input_pipe, "rb")
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()
        if debug:
            print("In thread start_{}, returning {}".format(self._mode, self._py_read),
                  file=sys.stderr)
        return self._py_read

    def start_backup(self, source):
        if self.thread:
            self.thread = None
        self._mode = "backup"
        return self._start(source)

    def start_restore(self, source):
        if self.thread:
            self.thread = None
        self._mode = "restore"
        return self._start(source)

    def finish(self):
        # Block until the pump thread drains its input.
        if self.thread:
            self.thread.join()
        return
class ZFSBackupFilterCounter(ZFSBackupFilterThread):
    """
    A sample thread filter.  All this does is count the
    bytes that come in to be processed.  The optional handler is
    invoked with the final count when the count property is read.
    """
    def __init__(self, handler=None, name="ZFS Count Filter"):
        super(ZFSBackupFilterCounter, self).__init__(name=name)
        self._count = 0
        self.handler = handler

    def process(self, b):
        # Tally every byte that flows through; pass the data on unchanged.
        self._count += len(b)
        return b

    def start_backup(self, source):
        return super(ZFSBackupFilterCounter, self).start_backup(source)

    def start_restore(self, source):
        return super(ZFSBackupFilterCounter, self).start_restore(source)

    @property
    def handler(self):
        return self._handler

    @handler.setter
    def handler(self, h):
        self._handler = h

    @property
    def count(self):
        # This will block until the thread is done
        self.finish()
        # BUG FIX: 'iscallable' does not exist (it raised NameError when a
        # handler was set); the builtin is 'callable'.
        if self.handler and callable(self.handler):
            self.handler(self._count)
        return self._count
class ZFSBackupFilterCommand(ZFSBackupFilter):
    """
    Derived class for backup filters based on commands.
    This adds a couple of properties, and starts the appropriate commands
    in a Popen instance.  The error parameter in the constructor is
    used to indicate where stderr should go; by default, it goes to
    /dev/null.
    If restore_command is None, then backup_command will be used.
    """
    def __init__(self, backup_command=["/bin/cat"], restore_command=None,
                 name='Command-based backup filter', error=None):
        super(ZFSBackupFilterCommand, self).__init__(name=name)
        self._backup_command = backup_command
        self._restore_command = restore_command
        self.error = error
        self.proc = None
        # BUG FIX: initialize the backing field so reading error_output
        # before the setter has run does not raise AttributeError.
        self._error_output = None

    @property
    def backup_command(self):
        return self._backup_command

    @property
    def restore_command(self):
        # Fall back to the backup command when no restore command was given.
        return self._restore_command or self.backup_command

    @property
    def error_output(self):
        return self._error_output

    @error_output.setter
    def error_output(self, where):
        # NOTE(review): this closes the current error file but leaves
        # self.error unchanged (only _error_output is updated) -- confirm
        # that is the intended behavior.
        if self.error:
            self.error.close()
        self._error_output = where

    def start_restore(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source as stdin, and
        subprocess.PIPE as stdout, and return popen.stdout.
        If error is None, we open /dev/null for writing and
        use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+b")
        self.proc = POPEN(self.restore_command,
                          bufsize=1024 * 1024,
                          stdin=source,
                          stdout=subprocess.PIPE,
                          stderr=self.error)
        return self.proc.stdout

    def start_backup(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, and setting source up as stdin,
        and subprocess.PIPE as output, and return
        popen.stdout.
        If error is None, we open /dev/null for writing
        and use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+b")
        if debug:
            print("start_backup: command = {}, stdin={}, stderr={}".format(" ".join(self.backup_command),
                                                                           source,
                                                                           self.error),
                  file=sys.stderr)
        self.proc = POPEN(self.backup_command,
                          bufsize=1024 * 1024,
                          stderr=self.error,
                          stdin=source,
                          stdout=subprocess.PIPE)
        if debug:
            # BUG FIX: corrected the 'start_bauckup' typo in this trace.
            print("In start_backup for command, source = {}, proc.stdout = {}".format(source,
                                                                                      self.proc.stdout),
                  file=sys.stderr)
        return self.proc.stdout

    def finish(self):
        # Reap the subprocess and release the error stream.
        if self.proc:
            self.proc.wait()
        if self.error:
            try:
                self.error.close()
            except OSError:
                pass
            self.error = None
class ZFSBackupFilterEncrypted(ZFSBackupFilterCommand):
    """
    A filter to encrypt and decrypt a stream.
    The openssl command can do a lot more than we're asking
    of it here.
    We require a password file (for now, anyway).
    """
    def __init__(self, cipher="aes-256-cbc",
                 password_file=None):
        def ValidateCipher(cipher):
            # Ask openssl which cipher commands it supports.
            if cipher is None:
                return False
            try:
                # BUG FIX: check_output returns bytes on Python 3, so the
                # str-in-list-of-bytes membership test below was always
                # False and every cipher was rejected; decode first.
                ciphers = CHECK_OUTPUT(["/usr/bin/openssl", "list-cipher-commands"]).decode("utf-8").split()
                return cipher in ciphers
            except subprocess.CalledProcessError:
                return False
        if password_file is None:
            raise ValueError("Password file must be set for encryption filter")
        if not ValidateCipher(cipher):
            raise ValueError("Invalid cipher {}".format(cipher))
        self.cipher = cipher
        self.password_file = password_file
        backup_command = ["/usr/bin/openssl",
                          "enc", "-{}".format(cipher),
                          "-e",
                          "-salt",
                          "-pass", "file:{}".format(password_file)]
        restore_command = ["/usr/bin/openssl",
                           "enc", "-{}".format(cipher),
                           "-d",
                           "-salt",
                           "-pass", "file:{}".format(password_file)]
        super(ZFSBackupFilterEncrypted, self).__init__(backup_command=backup_command,
                                                       restore_command=restore_command,
                                                       name='{} encryption filter'.format(self.cipher))
class ZFSBackupFilterCompressed(ZFSBackupFilterCommand):
    """
    A sample command filter, for compression.  Uses gzip by default,
    or pigz when the optional 'pigz' parameter is truthy.
    """
    def __init__(self, pigz=False):
        self.pigz = bool(pigz)
        if self.pigz:
            compress_path = "/usr/local/bin/pigz"
            decompress_path = "/usr/local/bin/unpigz"
            label = 'pigz compressor filter'
        else:
            compress_path = "/usr/bin/gzip"
            decompress_path = "/usr/bin/gunzip"
            label = 'gzip compressor filter'
        super(ZFSBackupFilterCompressed, self).__init__(backup_command=[compress_path],
                                                       restore_command=[decompress_path],
                                                       name=label)

    @property
    def name(self):
        return "pigz compress filter" if self.pigz else "gzip compress filter"
class ZFSBackup(object):
"""
Base class for doing ZFS backups.
Backups are done using snapshots -- zfs send is used -- not using files.
Every backup must have a source and a target, although subclasses
can change how they are interpreted. Backups can be recursive.
One ZFSBackup object should be created for each <source, target>, but
not for each snapshot. That is, you would use
backup = ZFSBackup("/tank/Media", "/backup/tank/Media", recursive=True)
<do backup>
backup = ZFSBackup("/tank/Documents", "/backup/tank/Documents")
<do backup>
instead of creating a ZFSBackup object for each snapshot.
In general, backups and restores are simply inverses of each other.
In order to perform backups, it is necessary to get a list of snapshots
on both the source and target. An empty list on the target will mean
a full backup is being done; an empty list on the source is a failure.
Backups can have filters applied to them. This is not used in the base
class (since it only implements ZFS->ZFS), but subclasses may wish to
add filters for compression, encryption, or accounting. Some sample
filter classes are provided.
Some notes on how replication works:
* source is the full path to the dataset. *Or* it can be the entire pool.
* target is the dataset to which the replication should go.
* If source is the full pool, then the target will have all of the files
at the root of the source pool.
* If source is NOT the full pool, then the target will end up with only the
dataset(s) being replicated -- but any intervening datasets will be created.
What this means:
* tank -> backup/tank means we end up with backup/tank as a copy of tank.
* tank/usr/home -> backup/home means we end up with backup/home/usr/home.
* When getting snapshots for the destination, we need to add the path for
source, *minus* the pool name.
* UNLESS we are replicating the full pool.
What *that* means:
* tank -> backup/tank means getting snapshots from backup/tank
* tanks/usr/home -> backup/home means getting snapshots from backup/home/usr/home
"""
def __init__(self, source, target, recursive=False):
    """
    Parameters:
    source - (str) a ZFS pool or dataset to be backed up.
    target - (str) a ZFS dataset to be backed up.
    recursive - (bool) Indicate whether the backup is to be recursive or not.
    The only thing the base class does is run some validation tests
    on the source and target.
    """
    self.source = source
    self.target = target
    self.recursive = recursive
    # Snapshot lists are fetched lazily and cached.
    self._source_snapshots = None
    self._target_snapshots = None
    self._filters = []
    self.validate()
@property
def target(self):
    # Destination dataset the backup is written to.
    return self._dest
@target.setter
def target(self, t):
    self._dest = t
@property
def source(self):
    # Pool or dataset being backed up.
    return self._source
@source.setter
def source(self, s):
    self._source = s
@property
def filters(self):
    # Filters registered via AddFilter, in application order.
    return self._filters
@property
def recursive(self):
    # Whether 'zfs send -R' is used for this backup.
    return self._recursive
@recursive.setter
def recursive(self, b):
    self._recursive = b
def AddFilter(self, filter):
    """
    Register a filter for use during backup and restore.  The filter
    should be an instance of ZFSFilter -- at minimum, it must provide
    start_backup and start_restore methods.
    """
    has_backup = callable(getattr(filter, "start_backup", None))
    has_restore = callable(getattr(filter, "start_restore", None))
    if not has_backup and not has_restore:
        raise ValueError("Incorrect type passed for filter")
    self._filters.append(filter)
def _finish_filters(self):
    # Wait for every registered filter to finish, and clean up.
    for filt in self.filters:
        filt.finish()
def _filter_backup(self, source, error=sys.stderr):
    """
    Stitch the backup filters together: start each filter on the
    previous stage's output and return the final read end.
    """
    stage = source
    for f in self.filters:
        f.error_output = error
        if debug:
            print("Starting filter {} ({}), input = {}".format(f.name, f.backup_command, stage), file=sys.stderr)
        stage = f.start_backup(stage)
    return stage
def _filter_restore(self, source, error=None):
    """
    Stitch the restore filters together; note that they run in the
    reverse order of the backup filters.
    """
    input = source
    for f in reversed(self.filters):
        f.error_output = error
        if debug:
            # BUG FIX: the attribute was misspelled 'restore_ommand',
            # raising AttributeError whenever debug was enabled.
            print("Starting restore filter {} ({})".format(f.name, f.restore_command), file=sys.stderr)
        input = f.start_restore(input)
    return input
def __repr__(self):
    # Shows the concrete subclass name plus the endpoints.
    return "{}(source={}, target={})".format(type(self).__name__, self.source, self.target)
@property
def source_snapshots(self):
    """
    Return a list of snapshots on the source.  The return value is
    an array of dictionaries; the dictionaries have, at minimum, two
    elements:
    Name -- (str) Snapshot name.  The part that goes after the '@'
    CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
    Even if the recursive is true, this _only_ lists the snapshots for the
    source (recursive requires that the same snapshot exist on the descendents,
    or it doesn't get backed up).
    We cache this so we don't have to keep doing a list.
    """
    # NOTE(review): an empty snapshot list is falsy, so it is re-fetched
    # on every access rather than cached -- confirm this is intended.
    if not self._source_snapshots:
        self._source_snapshots = _get_snapshots(self.source)
    return self._source_snapshots
@property
def target_snapshots(self):
    """
    Return a list of snapshots on the target dataset.  The return
    value is an array of dictionaries with, at minimum:
    Name -- (str) snapshot name (the part after the '@')
    CreationTime -- (int) creation time in Unix epoch seconds
    Even when recursive is true, only the target dataset's own
    snapshots are listed.  The result is cached so we don't have to
    keep doing a list.
    """
    if not self._target_snapshots:
        # See the long discussion in the class docstring about how the
        # target path mirrors the source path minus the pool name.
        (src_pool, _, src_ds) = self.source.partition("/")
        suffix = src_ds if src_ds else src_pool
        self._target_snapshots = _get_snapshots("{}/{}".format(self.target, suffix))
    return self._target_snapshots
def validate(self):
    """
    Ensure the destination exists, raising ZFSBackupError otherwise.
    Derived classes will want to override this (probably).
    """
    command = ["/sbin/zfs", "list", "-H", self.target]
    with open("/dev/null", "w") as devnull:
        try:
            CHECK_CALL(command, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("Target {} does not exist".format(self.target))
    return
def restore_handler(self, stream, **kwargs):
    """
    Method called to read a snapshot from the target.  In the base class,
    this simply does a 'zfs send' (with appropriate options).
    Unlike the corresponding backup_handler, restore_handler has to handle
    any setup for incremental sends.  It can know to do an incremental
    backup by having "parent" in kwargs, which will be the name of the
    base snapshot.
    All filters are also set up here.  In the base class, that means
    no transformative filters (since there's no real point).
    """
    command = ["/sbin/zfs", "send", "-p"]
    if self.recursive:
        command.append("-R")
    if "ResumeToken" in kwargs:
        # Resume an interrupted send from the saved token.
        command.extend(["-t", kwargs["ResumeToken"]])
    if "parent" in kwargs:
        # Incremental send starting from the parent snapshot.
        command.extend(["-I", kwargs["parent"]])
    command.append("{}@{}".format(self.target, kwargs["Name"]))
    if debug:
        print(" ".join(command), file=sys.stderr)
    with tempfile.TemporaryFile() as error_output:
        # ZFS->ZFS replication doesn't use filters
        fobj = stream
        with open("/dev/null", "w+") as devnull:
            # NOTE(review): the Popen result is not waited on, so send
            # failures are not detected here -- confirm this is intended.
            POPEN(command, stdout=fobj, stderr=error_output,
                  stdin=devnull)
    return
def backup_handler(self, stream, **kwargs):
    """
    Method called to write the backup to the target.  In the base class,
    this simply creates the necessary datasets on the target, and then
    creates a Popen subprocess for 'zfs recv' with the appropriate arguments,
    and sets its stdin to stream.
    Subclasses will probably want to replace this method.
    """
    # First we create the intervening dataset paths.  That is, the
    # equivalent of 'mkdir -p ${target}/${source}'.
    # We don't care if it fails.
    full_path = self.target
    with open("/dev/null", "w+") as devnull:
        for d in self.source.split("/")[1:]:
            full_path = os.path.join(full_path, d)
            command = ["/sbin/zfs", "create", "-o", "readonly=on", full_path]
            if debug:
                print("Running command {}".format(" ".join(command)), file=sys.stderr)
            try:
                # NOTE(review): subprocess.call does not raise
                # CalledProcessError, so this except clause is effectively
                # dead; failures are ignored via the return code instead.
                CALL(command, stdout=devnull, stderr=devnull)
            except subprocess.CalledProcessError:
                pass
    # Now we just send the data to zfs recv.
    # Do we need -p too?
    command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
    with tempfile.TemporaryFile() as error_output:
        # ZFS->ZFS replication doesn't use filters.
        fobj = stream
        try:
            CHECK_CALL(command, stdin=fobj,
                       stderr=error_output)
        except subprocess.CalledProcessError:
            # Surface zfs recv's stderr as the exception message.
            error_output.seek(0)
            raise ZFSBackupError(error_output.read())
    return
def backup(self, snapname=None,
           force_full=False,
           snapshot_handler=None,
           each_snapshot=True):
    """
    Back up the source to the target.
    If snapname is given, then that will be the snapshot used for the backup,
    otherwise it will be the most recent snapshot.  If snapname is given and
    does not exist, ZFSBackupSnapshotNotFoundError is raised.
    After that, we then find the most recent common snapshot from source
    and target (unless force_full is True, in which case that is set to None).
    If force_full is False, it will then collect a list of snapshots on the
    source from the last common snapshot to the last snapshot.
    each_snapshot indicates whether or not to iterate over each snapshot
    between the first and last one selected.
    snapshot_handler, if callable, is invoked with stage="start" and
    stage="complete" around each snapshot, plus the backup dictionary.
    This is the main driver of the backup process, and subclasses should be
    okay with using it.
    """
    # First, if snapname is given, let's make sure that it exists on the source.
    if snapname:
        # If snapname has the dataset in it, let's remove it
        if '@' in snapname:
            (_, snapname) = snapname.split("@")
        snap_index = None
        for indx, d in enumerate(self.source_snapshots):
            if d["Name"] == snapname:
                snap_index = indx
                break
        if snap_index is None:
            raise ZFSBackupSnapshotNotFoundError(snapname)
        # We want to remove everything in source_snapshots after the given one.
        source_snapshots = self.source_snapshots[0:snap_index+1]
    else:
        source_snapshots = self.source_snapshots
    # This is the last snapshot we will send, and we are guaranteed
    # by this point that it exists on the source.
    last_snapshot = source_snapshots[-1]
    if debug:
        print("last_snapshot = {}".format(last_snapshot), file=sys.stderr)
    # Next step is to get the last common snapshot.
    if force_full:
        last_common_snapshot = None
    else:
        last_common_snapshot = _last_common_snapshot(source_snapshots,
                                                     self.target_snapshots)
    if debug:
        print("ZFSBackup: last_snapshot = {}, last_common_snapshot = {}".format(last_snapshot,
                                                                                last_common_snapshot),
              file=sys.stderr)
    snapshot_list = source_snapshots
    if last_common_snapshot is None:
        # If we have no snapshots in common, then we do all of the snapshots
        pass
    elif last_common_snapshot["Name"] == last_snapshot["Name"]:
        # No snapshots to do, we're all done.
        if debug:
            print("No snapshots to send", file=sys.stderr)
        return
    else:
        # We have a snapshot in common in source and target,
        # and we want to get a list of snapshots from last_common_snapshot
        # to last_snapshot from snapshot_list
        # To do this, we're going to go through snapshot_list, looking
        # for the index of both last_common_snapshot and last_snapshot.
        lcs_index = None
        last_index = None
        for indx, snap in enumerate(snapshot_list):
            if snap['Name'] == last_snapshot['Name']:
                last_index = indx
                break
            if snap['Name'] == last_common_snapshot['Name']:
                lcs_index = indx
        # Now we're going to do a bit of sanity checking:
        # BUG FIX: the None check must come first -- evaluating
        # 'last_index < None' raises TypeError on Python 3.
        if lcs_index is None or last_index < lcs_index:
            # This seems a weird case -- the snapshot we've been
            # told to do is before the last common one.
            raise ZFSBackupError("Last snapshot in source ({}) is before last common snapshot ({})".format(last_snapshot['Name'], last_common_snapshot['Name']))
        snapshot_list = snapshot_list[lcs_index:last_index+1]
        if debug:
            print("Last common snapshot = {}".format(last_common_snapshot),
                  file=sys.stderr)
            print("\tDoing snapshots {}".format(" ".join([x["Name"] for x in snapshot_list])),
                  file=sys.stderr)
    if not each_snapshot:
        if last_common_snapshot:
            snapshot_list = (snapshot_list[0], snapshot_list[-1])
        else:
            snapshot_list = [snapshot_list[-1]]
    # At this point, snapshot_list either starts with the
    # last common snapshot, or there were no common snapshots.
    for snapshot in snapshot_list:
        resume = None
        if last_common_snapshot and snapshot["Name"] == last_common_snapshot["Name"]:
            # If we're resuming a send, we want to continue
            resume = last_common_snapshot.get("ResumeToken", None)
            if not resume:
                # We want to skip the last common snapshot,
                # so we can use it as the base of an incremental send
                # in the next pass
                continue
        command = ["/sbin/zfs", "send"]
        if self.recursive:
            command.append("-R")
        backup_dict = { "Name": snapshot["Name"] }
        backup_dict["Recursive"] = self.recursive
        try:
            backup_dict["SizeEstimate"] = _get_snapshot_size_estimate(self.source,
                                                                      snapshot["Name"],
                                                                      fromname=last_common_snapshot["Name"] if last_common_snapshot else None,
                                                                      recursive=self.recursive)
        except Exception:
            # The size estimate is best-effort only.
            if verbose:
                print("Unable to get size estimate for snapshot", file=sys.stderr)
        if resume:
            # BUG FIX: 'zfs send' resumes from a token with -t (as
            # restore_handler already does); -C is not a valid flag.
            command.extend(["-t", resume])
            backup_dict["ResumeToken"] = resume
        if last_common_snapshot:
            command.extend(["-i" if each_snapshot else "-I", "{}".format(last_common_snapshot["Name"])])
            backup_dict["incremental"] = True
            backup_dict["parent"] = last_common_snapshot["Name"]
        else:
            backup_dict["incremental"] = False
        backup_dict["CreationTime"] = snapshot["CreationTime"]
        if debug:
            print("backup_dict = {}".format(backup_dict), file=sys.stderr)
        command.append("{}@{}".format(self.source, snapshot["Name"]))
        if debug:
            print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile(mode="a+") as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                # BUG FIX: a leftover debugging wrapper ran the send
                # through /usr/bin/ktrace; run the zfs command directly.
                send_proc = POPEN(command,
                                  bufsize=mByte,
                                  stdin=devnull,
                                  stderr=error_output,
                                  stdout=subprocess.PIPE)
                if debug:
                    print("backup_dict = {}".format(backup_dict), file=sys.stderr)
                    print("send_proc.stdout = {}".format(send_proc.stdout), file=sys.stderr)
                if callable(snapshot_handler):
                    snapshot_handler(stage="start", **backup_dict)
                try:
                    self.backup_handler(send_proc.stdout, **backup_dict)
                except ZFSBackupError:
                    send_proc.wait()
                    if send_proc.returncode:
                        # We'll ignore any errors generated by the filters
                        error_output.seek(0)
                        raise ZFSBackupError(error_output.read().rstrip())
                    else:
                        raise
                else:
                    send_proc.wait()
                if callable(snapshot_handler):
                    snapshot_handler(stage="complete", **backup_dict)
                self._finish_filters()
        # Set the last_common_snapshot to make the next iteration an incremental
        last_common_snapshot = snapshot
    return
    def restore(self, snapname=None,
                force_full=False,
                snapshot_handler=None,
                to=None):
        """
        Perform a restore.  This is essentially the inverse of backup --
        the target is the source of data, that are sent to 'zfs recv' (with
        appropriate flags).
        If snapname is given, then the restore will be done to that
        snapshot; if force_full is False, the restore will try to find
        the most recent snapshot in common before snapname, and
        attempt an incremental restore.  Therefore the most common case
        for a restore to be done is a full restore to an empty pool/dataset,
        which may be done at once, or by restoring a series of incrementals.
        If there is no previous snapshot in common, _or_ force_full is True,
        then it will need to find the most recent full backup.  In the case
        of the base class, every snapshot is potentially a full backup, so
        it can start with snapname.  In the case of ZFSBackupDirectory,
        however, it will need to search backwards for a full backup.  If there
        are no full backups, then it will raise an exception.
        If snapname is present in both target and source, then there will
        be no work done.  (This would be more suitable for a rollback, after
        all.)
        Any filters applied to the backup should be applied to the restore;
        subclasses that keep track of that information (ZFSBackupDirectory and
        ZFSBackupS3 at this point) will use their own knowledge of the filters
        used at backup to apply them in the correct order.  With ZFSBackup and
        ZFSBackupSSH, that's not necessary, since any data transformations are
        either ignored or undone as part of the backup process, but compression
        filters (as an example) may still be helpful to improve overall performance.

        Parameters:
            snapname -- snapshot to restore; defaults to the newest snapshot
                on the target.
            force_full -- when True, never attempt an incremental restore.
            snapshot_handler -- optional callable invoked before and after
                each snapshot is restored.
                NOTE(review): it is called here with state=..., while
                backup() calls its handler with stage=... -- confirm which
                keyword consumers actually expect.
            to -- NOTE(review): currently unused; intended semantics unclear.
        Raises:
            ZFSBackupSnapshotNotFoundError -- snapname is not on the target.
        """
        if snapname is None:
            # Get the last snapshot available on the target
            snapname = self.target_snapshots[-1]["Name"]
        try:
            snapshot_index = _find_snapshot_index(snapname, self.target_snapshots)
        except KeyError:
            raise ZFSBackupSnapshotNotFoundError(snapname)
        # If the snapshot is already in source, then there's nothing to do
        try:
            _find_snapshot_index(snapname, self.source_snapshots)
            return
        except KeyError:
            pass
        # We want to make sure we include the desired snapshot name.
        snapshot_list = self.target_snapshots[:snapshot_index+1]
        # Now let's look for the last common snapshot.
        # Because of the test above, we know that snapname is not in source.
        if force_full is False:
            last_common_snapshot = _last_common_snapshot(self.source_snapshots,
                                                         snapshot_list)
        else:
            last_common_snapshot = None
        # If last_common_snapshot is set, then we need a list of
        # snapshots on the target between last_common_snapshot and
        # snapname; if last_common_snapshot is None, then we
        # need a list of snapshots on the target starting with the
        # most recent full snapshot.  This is subclass-specific.
        if last_common_snapshot:
            start_index = _find_snapshot_index(last_common_snapshot["Name"],
                                               snapshot_list)
        else:
            start_index = self._most_recent_full_backup_index(snapshot_list)
        if debug:
            print("Last common snapshot = {}".format(last_common_snapshot), file=sys.stderr)
            print("start_index = {}, snapshot_list = {}".format(start_index, snapshot_list), file=sys.stderr)
        # This is now a list of snapshots to restore
        restore_snaps = snapshot_list[start_index:]
        if debug:
            print("Restoring snapshots {}".format(restore_snaps), file=sys.stderr)
        # Restore each snapshot in order; each successful iteration makes
        # the next one incremental relative to it (see end of loop body).
        for snap in restore_snaps:
            # Do I need any other options?  Possibliy if doing
            # an interrupted restore.
            resume = None
            if last_common_snapshot and snap["Name"] == last_common_snapshot["Name"]:
                # XXX: This isn't right, I think:  we can have a resume token
                # for a full send.
                # If we're resuming we want to be able to continue
                resume = last_common_snapshot.get("ResumeToken", None)
                if not resume:
                    # We want to skip the last common snapshot, so we can use it
                    # as the basis of an incremental send.
                    continue
            command = ["/sbin/zfs", "receive", "-d", "-F"]
            # Copy so we can add some elements to it
            restore_dict = snap.copy()
            if last_common_snapshot:
                restore_dict["parent"] = last_common_snapshot["Name"]
            if resume:
                restore_dict["ResumeToken"] = resume
                command.extend(["-t", resume])
            elif "ResumeToken" in restore_dict:
                # Not resuming: make sure a stale token doesn't leak through.
                restore_dict.pop("ResumeToken")
            command.append(self.source)
            if debug:
                print(" ".join(command), file=sys.stderr)
            with tempfile.TemporaryFile(mode="a+") as error_output:
                with open("/dev/null", "w+") as devnull:
                    mByte = 1024 * 1024
                    if callable(snapshot_handler):
                        snapshot_handler(state="start", **restore_dict)
                    # 'zfs receive' reads the stream we feed via its stdin.
                    recv_proc = POPEN(command,
                                      bufsize=mByte,
                                      stdin=subprocess.PIPE,
                                      stderr=error_output,
                                      stdout=devnull)
                    try:
                        self.restore_handler(recv_proc.stdin, **restore_dict)
                    except ZFSBackupError:
                        recv_proc.wait()
                        if recv_proc.returncode:
                            # We end up ignoring any errors generated by the filters
                            error_output.seek(0)
                            raise ZFSBackupError("Restore failed: {}".format(error_output.read().rstrip()))
                        else:
                            raise
                    # NOTE(review): on the success path recv_proc is never
                    # wait()ed for and its stdin is not explicitly closed, so
                    # the exit status of 'zfs receive' goes unchecked -- verify.
                    if callable(snapshot_handler):
                        snapshot_handler(state="complete", **restore_dict)
            self._finish_filters()
            # Next iteration (if any) is incremental relative to this snapshot.
            last_common_snapshot = snap
        return
def _most_recent_full_backup_index(self, snapshots):
"""
Given a list of snapshots, find the most recent full backup.
If no full backup is given, then it raises an exception.
"""
# For the base class, this is always simply the last snapshot
if snapshots:
return len(snapshots) - 1
else:
raise ZFSBackupMissingFullBackupError()
@property
def snapshots(self):
"""
Return an array of snapshots for the destination.
Each entry in the array is a dictonary with at least
two keys -- Name and CreationTime. CreationTime is
an integer (unix seconds). The array is sorted by
creation time (oldest first). If there are no snapshots,
an empty array is returned.
This would be better with libzfs.
"""
command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation,receive_resume_token",
"-r", "-d", "1", "-t", "snapshot", "-s", "creation",
self.target]
try:
output = subprocess.check_output(command).split("\n")
except subprocess.CalledProcessError:
# We'll assume this is because there are no snapshots
return []
snapshots = []
for snapshot in output:
if not snapshot:
continue
(name, ctime, resume_token) = snapshot.rstrip().split()
d = {"Name" : name, "CreationTime" : int(ctime) }
if resume_token != "-":
d["ResumeToken"] = resume_token
snapshots.append(d)
return snapshots
def Check(self, **kwargs):
"""
A method to do a verification that the backup is okay.
In the base class, we don't do anything.
"""
pass
class ZFSBackupDirectory(ZFSBackup):
    """
    A variant of ZFSBackup that backs up to files, rather than replication.
    The layout used is:
     target/
      prefix/
       map.json
       chunks/
        data files
    prefix will default to the hostname if none is given.
    target is the root pathname -- note that this doesn't need to be
    a ZFS filesystem.
    The map file maps from dataset to snapshots.
    Since some filesystems (I'm looking at you, msdos) have a
    limit of 4gb, we'll keep chunks limited to 2gb.
    Each dataset has a chronologically-ordered array of
    snapshots.
    A snapshot entry in the map contains the name, the
    creation time, whether it is recursive, and, if it
    is an incremental snapshot, what the previous one was.
    It also contains the names of the chunks, and any transformative
    filter commands (in order to restore it).
    """
    def __init__(self, source, target, prefix=None, recursive=False):
        # prefix defaults to the local hostname so several hosts can
        # safely share one target directory.
        self._prefix = prefix or socket.gethostname()
        # The map (index) file is loaded lazily; see the mapfile property.
        self._mapfile = None
        self._chunk_dirname = "chunks"
        super(ZFSBackupDirectory, self).__init__(source, target, recursive)
    def __repr__(self):
        return "{}({}, {}, prefix={}, recursive={})".format(self.__class__.__name__,
                                                            self.source, self.target,
                                                            self.prefix, self.recursive)
    def validate(self):
        """
        Ensure that the destination exists.  Since this is just
        using files, all we need is os.path.exists
        """
        if not os.path.exists(self.target):
            raise ZFSBackupError("Target {} does not exist".format(self.target))
        return
    @property
    def mapfile(self):
        """
        Return the mapfile.  If it isn't loaded, we load it now.
        The mapfile is a dict keyed by source dataset name; each value
        holds (at least) a "snapshots" array -- see the class docstring.
        """
        if self._mapfile is None:
            mapfile_path = os.path.join(self.target, self.prefix, "map.json")
            try:
                with open(mapfile_path, "r") as mapfile:
                    self._mapfile = json.load(mapfile)
            except:
                # I know, blanket catch, shouldn't do that
                # NOTE(review): this treats *any* failure (missing file,
                # corrupt JSON, permissions) as "no map yet" -- consider
                # narrowing to (IOError, OSError, ValueError).
                self._mapfile = {}
        return self._mapfile
    @mapfile.setter
    def mapfile(self, d):
        if debug:
            print("Setting mapfile to {}".format(d), file=sys.stderr)
        # Only persist when the contents actually changed.
        if not self._mapfile or self._mapfile != d:
            self._mapfile = d
            self._save_mapfile()
    def _save_mapfile(self):
        """
        Save the map file.
        """
        if self._mapfile:
            mapfile_path = os.path.join(self.target, self.prefix, "map.json")
            if debug:
                print("Saving map file to {}".format(mapfile_path), file=sys.stderr)
            with open(mapfile_path, "w") as mapfile:
                json.dump(self._mapfile, mapfile,
                          sort_keys=True,
                          indent=4, separators=(',', ': '))
    @property
    def target_snapshots(self):
        """
        The snapshots are in the mapfile.
        First key we care about is the source dataset.
        """
        m = self.mapfile
        if debug:
            print("mapfile = {}".format(m), file=sys.stderr)
        if self.source in m:
            return m[self.source]["snapshots"]
        else:
            return []
    def _write_chunks(self, stream):
        """
        Drain *stream* into 2-gByte chunk files under
        target/prefix/chunks, creating the directories if needed.
        Returns the list of chunk pathnames relative to target.
        """
        chunks = []
        mByte = 1024 * 1024
        gByte = 1024 * mByte
        done = False
        base_path = os.path.join(self.target, self.prefix)
        chunk_dir = os.path.join(base_path, self._chunk_dirname)
        for d in (base_path, chunk_dir):
            try:
                os.makedirs(d)
            except OSError as e:
                # Already existing is fine; anything else is fatal.
                if e.errno != errno.EEXIST:
                    raise
        while not done:
            with tempfile.NamedTemporaryFile(dir=chunk_dir, delete=False) as chunk:
                # Record the chunk name relative to target (prefix/chunks/name).
                chunks.append(os.path.join(self.prefix,
                                           self._chunk_dirname,
                                           os.path.basename(chunk.name)))
                total = 0
                # Cap each chunk at 2 gByte (see class docstring re msdosfs).
                while total < 2*gByte:
                    buf = stream.read(mByte)
                    if not buf:
                        done = True
                        break
                    chunk.write(buf)
                    total += len(buf)
                if debug:
                    print("Finished writing chunk file {}".format(chunk.name), file=sys.stderr)
        return chunks
    def backup_handler(self, stream, **kwargs):
        """
        Write one snapshot's backup stream out as chunk files and
        record it (with its chunks and restore filters) in the mapfile.
        Requires "Name" and "incremental" in kwargs; "parent" as well
        when incremental.
        """
        # Write the backup to the target.  In our case, we're
        # doing a couple of things:
        # First, we need to make sure the full target directory
        # exists -- create it if necessary.
        # Sanity check:  unlike the base class, we need to
        # know the name of the snapshot, and whether it's incremental.
        # If it is, we also need to know the previous one
        snapshot_name = kwargs.get("Name", None)
        incremental = kwargs.get("incremental", None)
        parent = kwargs.get("parent", None)
        if snapshot_name is None:
            raise ZFSBackupError("Missing name of snapshot")
        if incremental is None:
            raise ZFSBackupError("Missing incremental information about snapshot")
        # Next sanity check:  if this snapshot is already in the map, abort
        source_map = self.mapfile.get(self.source, {})
        current_snapshots = source_map.get("snapshots", [])
        for x in current_snapshots:
            if x["Name"] == snapshot_name:
                raise ZFSBackupError("Snapshot {} is already present in target".format(snapshot_name))
        # Record the restore commands of the transformative filters, in
        # the (reversed) order needed to undo them at restore time.
        filters = []
        for f in reversed(self.filters):
            if f.transformative and f.restore_command:
                filters.append(f.restore_command)
        # Now we need to start writing chunks, keeping track of their names.
        with tempfile.TemporaryFile() as error_output:
            fobj = self._filter_backup(stream, error=error_output)
            chunks = self._write_chunks(fobj)
            if not chunks:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        # Now we need to update the map to have the chunks.
        snapshot_dict = {
            "Name"    : snapshot_name,
            "CreationTime" : kwargs.get("CreationTime", int(time.time())),
            "incremental"  : incremental,
            "chunks"       : chunks
        }
        if incremental:
            snapshot_dict["parent"] = parent
        if filters:
            snapshot_dict["filters"] = filters
        # Copy through any extra metadata the caller supplied.
        for key in kwargs.keys():
            if key in ("Name", "CreationTime", "incremental", "chunks",
                       "parent", "filters"):
                continue
            snapshot_dict[key] = kwargs.get(key)
        current_snapshots.append(snapshot_dict)
        source_map["snapshots"] = current_snapshots
        # Mutates the cached map dict in place, then persists it.
        self.mapfile[self.source] = source_map
        self._save_mapfile()
    @property
    def prefix(self):
        return self._prefix
    def _get_all_chunks(self):
        """
        Returns a set of all the chunks in self.target/self.prefix/self._chunk_dirname
        (as pathnames relative to self.target).
        """
        rv = set()
        chunk_dir = os.path.join(self.prefix, self._chunk_dirname)
        for entry in os.listdir(os.path.join(self.target, chunk_dir)):
            if os.path.isdir(os.path.join(self.target, chunk_dir, entry)):
                # This shouldn't be the case
                continue
            rv.add(os.path.join(chunk_dir, entry))
        return rv
    def Check(self, **kwargs):
        """
        Method to ensure that the backup is sane.
        In this case, it means checking that every chunk
        in the directory is accounted for.  We also check
        to see if every snapshot has all of the chunks it
        lists, and ensure that every incrememental snapshot
        has its parent, all the way to a non-incremental.
        If there are any problems, we return a list of them.
        If cleanup=True in kwargs, we'll clean up the problems
        (still returning the list).  (Not yet implemented.)
        N.B.  Due to the nature of this method and class, it
        will remove *all* untracked chunks; however, it will only
        do a consistency check for the specified dataset, unless
        check_all=True in kwargs.
        """
        problems = []
        cleanup = kwargs.get("cleanup", False)
        check_all = kwargs.get("check_all", False)
        # First step is to get the backups from the mapfile.
        backups = self.mapfile.keys()
        # Next we want to get a list of all the chunks.
        # These will be relative to the target directory,
        # so we'll turn them into ${prefix}/${chunkdir}/${chunkname}
        # Since we don't care about order, but do care about lookup,
        # we'll put them into a set.
        directory_chunks = self._get_all_chunks()
        # Let's now ensure every chunk is accounted for
        # We put them all into another set
        mapfile_chunks = set()
        # NOTE: itervalues() is Python-2-only, consistent with the rest
        # of this file.
        for backup in self.mapfile.itervalues():
            for snapshot in backup['snapshots']:
                for chunk in snapshot['chunks']:
                    mapfile_chunks.add(chunk)
        # Let's see if there are any extraneous files
        extra_chunks = directory_chunks - mapfile_chunks
        # And voila, we have a list of chunks that have gone orphaned
        for chunk in extra_chunks:
            problems.append(("delete_chunk", chunk))
        # Next pass, let's ensure that the backups have all of
        # their chunks.
        # If check_all is True, we'll look at all of the backups,
        # otherwise just ours.
        if not check_all:
            backups = [self.source]
        for backup in backups:
            snapshot_names = {}
            for snapshot in self.mapfile[backup]["snapshots"]:
                # The list is supposed to be in order
                # (so a snapshot's parent has already been seen when we
                # check for it below).
                name = snapshot["Name"]
                snapshot_names[name] = True
                found_all = True
                if verbose:
                    print("Checking {}@{}".format(backup, name), file=sys.stderr)
                for chunk in snapshot["chunks"]:
                    if not chunk in directory_chunks:
                        found_all = False
                        break
                if snapshot.get("incremental", False):
                    if snapshot["parent"] not in snapshot_names:
                        problems.append(("missing_parent", backup, name, snapshot["parent"]))
                if not found_all:
                    problems.append(("corrupt_snapshot", backup, name))
        return problems
class ZFSBackupS3(ZFSBackupDirectory):
    """
    Backup to AWS.  Optionally with transitions to glacier.
    The layout used is:
     bucket/
      prefix/
       map.json
       chunks/
        data files
    The map file maps from dataset to snapshots.
    A glacier file is limited to 40tb (and S3 to 5tb),
    so we'll actually break the snapshots into 4gbyte
    chunks.
    We control a lifecycle rule for bucket, which we
    will name "${prefix} ZFS Backup Rule"; if glacier
    is enabled, we add that rule, and set glacier migration
    for "chunks/" for 0 days; if it is not
    enabled, then we set the rule to be disabled.  (But
    we always have the rule there.)
    Each dataset has a chronologically-ordered array of
    snapshots.
    A snapshot entry in the map contains the name, the
    creation time, whether it is recursive, and, if it
    is an incremental snapshot, what the previous one was.
    It also contains the names of the chunks.
    So it looks something like:
     "tank" : [
      "auto-daily-2017-01-01:00:00" : {
       "CreationTime" : 12345678,
       "Size"         : 1024000,
       "Recursive"    : True,
       "Incremental"  : null,
       "Chunks"       : [
         "chunks/${random}",
         "chunks/${random}"
        ]
       },
      "auto-daily-2017-01-02:00:00" : {
       ...
      }
     ]
    Each dataset being backed up has an entry in the map file.
    """
    def __init__(self, source,
                 bucket, s3_key, s3_secret,
                 recursive=False, server=None,
                 prefix=None, region=None, glacier=True):
        """
        Backing up to S3 requires a key, secret, and bucket.
        If prefix is none, it will use the current hostname.
        (As a result, prefix must be unique within the bucket.)
        If the bucket doesn't exist, it gets created; if
        glacier is True, then it will set up a transition rule.
        Note that bucket names need to be globally unique.
        """
        self._map = None
        self._glacier = glacier
        # Keep server/region around: __repr__ (and debugging) need them.
        self._server = server
        self._region = region
        self._s3 = boto3.client('s3', aws_access_key_id=s3_key,
                                aws_secret_access_key=s3_secret,
                                endpoint_url=server,
                                region_name=region)
        # Note that this may not exist.
        self.bucket = bucket.lower()
        self._prefix = prefix or socket.gethostname()
        # We'll over-load prefix here
        super(ZFSBackupS3, self).__init__(source, "",
                                          prefix=prefix,
                                          recursive=recursive)
        self._setup_bucket()
    def validate(self):
        """
        We don't do a lot of validation, since s3 costs per usage.
        We'll lazily check the bucket, and create it if necessary.
        NOTE: the class previously had two validate() definitions; the
        first was dead code (Python keeps only the last one) and has
        been removed.
        """
        return
    def __repr__(self):
        # Deliberately omit the key and secret from the repr.
        return "{}({}, {}, <ID>, <SECRET>, recursive={}, server={}, prefix={}, region={}, glacier={})".format(
            self.__class__.__name__, self.source, self.bucket, self.recursive, self.server,
            self.prefix, self.region, self.glacier)
    def _setup_bucket(self):
        """
        Create a bucket, if necessary.  Also, set up the lifecycle rule
        depending on whether or not we're using glacier.
        """
        if debug:
            print("Trying to setup bucket {}".format(self.bucket), file=sys.stderr)
        try:
            self.s3.head_bucket(Bucket=self.bucket)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]['Code'] == '404':
                # Need to create the bucket
                if debug:
                    print("Creating bucket {}".format(self.bucket))
                result = self.s3.create_bucket(Bucket=self.bucket)
                if debug:
                    print("When creating bucket {}, response is {}".format(self.bucket, result),
                          file=sys.stderr)
            else:
                raise
        # Great, now we have a bucket for sure, or have exceptioned out.
        # Now we want to get the lifecycle rules.
        try:
            lifecycle = self.s3.get_bucket_lifecycle_configuration(Bucket=self.bucket)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]['Code'] == 'NoSuchLifecycleConfiguration':
                lifecycle = {}
            elif e.response['Error']['Code'] == "NotImplemented":
                # Some S3-compatible servers don't do lifecycle management.
                lifecycle = None
            else:
                raise
        if lifecycle is not None:
            rules = lifecycle.get("Rules", [])
            rule_id = "{} ZFS Backup Glacier Transition Rule".format(self.prefix)
            rule_indx = None
            changed = False
            if debug:
                print("Trying to add/set lifecycle rule", file=sys.stderr)
            # Bug fix: the search and add/update logic used to be guarded
            # by 'if rules:', so the rule was never created when the bucket
            # had no lifecycle rules at all -- contradicting the class
            # docstring ("we always have the rule there").
            for indx, rule in enumerate(rules):
                if rule["ID"] == rule_id:
                    rule_indx = indx
                    break
            if debug:
                print("rule_indx = {}, appropriate rule = {}".format(rule_indx,
                      rules[rule_indx] if rule_indx is not None else "<no rules>"), file=sys.stderr)
            if rule_indx is None:
                # We need to add it.  The rule is always present, but is
                # only enabled when glacier migration was requested.
                new_rule = {
                    "ID" : rule_id,
                    "Prefix" : "{}/".format(self._chunk_dirname),
                    "Status" : "Enabled" if self.glacier else "Disabled",
                    "Transitions" : [
                        {
                            "Days" : 0,
                            "StorageClass" : "GLACIER"
                        },
                    ],
                    # Does this prevent transitions from working?
                    # 'AbortIncompleteMultipartUpload' : {
                    #    'DaysAfterInitiation' : 7,
                    # },
                }
                rule_indx = len(rules)
                rules.append(new_rule)
                changed = True
            else:
                # Bug fix: 'changed' was never set to True here, so a
                # status change was computed but never written back.
                desired_status = "Enabled" if self.glacier else "Disabled"
                if rules[rule_indx]["Status"] != desired_status:
                    rules[rule_indx]["Status"] = desired_status
                    changed = True
            if changed:
                if debug:
                    print("rules = {}".format(rules), file=sys.stderr)
                self.s3.put_bucket_lifecycle_configuration(Bucket=self.bucket,
                                                           LifecycleConfiguration={ 'Rules' : rules }
                )
        return
    @property
    def glacier(self):
        return self._glacier
    @property
    def prefix(self):
        return self._prefix
    @property
    def server(self):
        return self._server
    @property
    def region(self):
        return self._region
    @property
    def s3(self):
        return self._s3
    @property
    def bucket(self):
        return self._bucket
    @bucket.setter
    def bucket(self, b):
        self._bucket = b
    def _key_exists(self, keyname):
        """
        Return True if the key exists in our bucket, else False.
        """
        try:
            self.s3.head_object(Bucket=self.bucket,
                                Key=keyname)
            return True
        except botocore.exceptions.ClientError:
            return False
    @property
    def mapfile(self):
        """
        Load the map file from the bucket.  We cache it so we
        don't keep reloading it.
        """
        if self._mapfile is None:
            # Check to see if the map file exists in the bucket
            map_key = "{}/map.json".format(self.prefix)
            if self._key_exists(map_key):
                map_file = BytesIO()
                self.s3.download_fileobj(Bucket=self.bucket,
                                         Key=map_key,
                                         Fileobj=map_file)
                map_file.seek(0)
                self._mapfile = json.loads(map_file.getvalue().decode('utf-8'))
            else:
                if debug:
                    print("mapfile {} does not exist in bucket".format(map_key), file=sys.stderr)
                self._mapfile = {}
        return self._mapfile
    @mapfile.setter
    def mapfile(self, mf):
        self._mapfile = mf
    def _save_mapfile(self):
        """
        Upload the cached map file (JSON) to the bucket.
        """
        if self._mapfile:
            map_key = "{}/map.json".format(self.prefix)
            buffer = json.dumps(self._mapfile).encode('utf-8')
            map_file = BytesIO(buffer)
            map_file.seek(0)
            self.s3.upload_fileobj(Bucket=self.bucket,
                                   Key=map_key,
                                   Fileobj=map_file)
    def _write_chunks(self, stream):
        """
        Drain *stream* into randomly-named 4-gByte objects under
        ${chunk_dirname}/${prefix}/, using multipart uploads.
        Returns the list of chunk keys written.
        """
        import binascii
        chunks = []
        mByte = 1024 * 1024
        gByte = 1024 * mByte
        done = False
        chunk_dir = os.path.join(self._chunk_dirname, self.prefix)
        while not done:
            # Pick a random key that isn't already in use.
            while True:
                chunk_key = binascii.b2a_hex(os.urandom(32)).decode('utf-8')
                chunk_key = os.path.join(chunk_dir, chunk_key)
                if not self._key_exists(chunk_key):
                    break
            total = 0
            uploader = self.s3.create_multipart_upload(Bucket=self.bucket,
                                                       ACL='private',
                                                       Key=chunk_key)
            upload_id = uploader['UploadId']
            parts = []
            try:
                while total < 4*gByte:
                    part_num = len(parts) + 1
                    buf = stream.read(10*mByte)
                    if not buf:
                        if debug:
                            print("Breaking out of loop after {} bytes".format(total), file=sys.stderr)
                        done = True
                        break
                    # We need to upload this 10Mbyte part somehow
                    response = self.s3.upload_part(Bucket=self.bucket,
                                                   Key=chunk_key,
                                                   Body=buf,
                                                   PartNumber=part_num,
                                                   UploadId=upload_id)
                    if debug:
                        print("response = {}".format(response), file=sys.stderr)
                    parts.append({ "ETag" : response["ETag"], "PartNumber" : part_num })
                    total += len(buf)
                if parts:
                    if debug:
                        print("After {} parts, completing upload".format(len(parts)), file=sys.stderr)
                    self.s3.complete_multipart_upload(Bucket=self.bucket,
                                                      Key=chunk_key,
                                                      UploadId=upload_id,
                                                      MultipartUpload={ "Parts" : parts })
                else:
                    # Bug fix: nothing was read for this chunk (empty
                    # stream, or stream size an exact multiple of 4 gByte).
                    # Abort instead of leaving a dangling multipart upload,
                    # and don't record a key that holds no object.
                    self.s3.abort_multipart_upload(Bucket=self.bucket,
                                                   Key=chunk_key,
                                                   UploadId=upload_id)
                    continue
            except:
                # This blanket exception catch is intentional
                if verbose:
                    print("Aborting multipart upload after {} parts".format(len(parts)), file=sys.stderr)
                self.s3.abort_multipart_upload(Bucket=self.bucket,
                                               Key=chunk_key,
                                               UploadId=upload_id)
                raise
            chunks.append(chunk_key)
            if debug:
                print("Wrote {} bytes to chunk {}".format(total, chunk_key), file=sys.stderr)
            total = 0
        if debug:
            print("Wrote out {} chunks".format(len(chunks)), file=sys.stderr)
        return chunks
    @staticmethod
    def AvailableRegions():
        """
        List the available regions.
        (Bug fix: previously missing @staticmethod, so calling it on the
        class or an instance raised a TypeError.)
        """
        return boto3.session.Session().get_available_regions('s3')
    def _get_all_chunks(self):
        """
        Returns a set of all the chunks -- keys, in AWS parlance --
        that begin with self.bucket/self._chunk_dir/self.prefix/
        """
        rv = set()
        last_string = ''
        while True:
            response = self.s3.list_objects_v2(Bucket=self.bucket,
                                               Prefix=os.path.join(self._chunk_dirname, self.prefix),
                                               StartAfter=last_string)
            # Bug fix: "Contents" is absent when there are no matching
            # keys; the old code then crashed iterating over None.
            for key in [x.get("Key") for x in response.get("Contents") or []]:
                last_string = key
                rv.add(key)
            # Bug fix: treat a missing "IsTruncated" the same as False,
            # so an unusual response can't loop forever.
            if not response.get("IsTruncated"):
                break
        return rv
    def Check(self, **kwargs):
        """
        Check an S3 backup destination.
        This uses the base class, and then checks for multipart uploads.
        Returns the list of problems found (possibly empty).
        """
        from datetime import datetime
        problems = super(ZFSBackupS3, self).Check(**kwargs)
        # Now we check for multipart uploads in our bucket
        try:
            uploads = self.s3.list_multipart_uploads(Bucket=self.bucket)
        except botocore.exceptions.ClientError:
            return problems
        for upload in uploads.get("Uploads", []):
            upload_id = upload["UploadId"]
            upload_key = upload["Key"]
            # Is this correct?
            initiated = upload["Initiated"]
            # Use the same tzinfo as the Initiated timestamp so aware and
            # naive datetimes are never mixed (boto3 returns tz-aware ones).
            now = datetime.now(initiated.tzinfo)
            # Bug fix: this previously read 'intitiated' (a NameError).
            delta = now - initiated
            if delta.days > 2:
                # NOTE(review): the tag spelling ("stale_multpart_upload")
                # is kept as-is in case consumers match on it.
                problems.append(("stale_multpart_upload", self.bucket, upload_key, upload_id))
        # Bug fix: the method previously fell off the end here,
        # returning None instead of the problem list.
        return problems
class ZFSBackupSSH(ZFSBackup):
    """
    Replicate to a remote host using ssh.
    This runs all of the commands the base class does, but via ssh
    to another host.
    When running a command on a remote host, we have the following
    options:
    1)  We don't care about input or output, only the return value.
    2)  We stream to it, or from it.
    (1) is mostly for validation -- ensure the target exists, and
    we can connect to it.
    For (2), we stream to it (writing to stdin), and don't care about
    the output until after, for backup.
    For (2), we stream _from_ it (reading from its stdout) when getting
    a list of snapshots, and when doing a restore.
    """
    def __init__(self, source, target, remote_host,
                 remote_user=None,
                 ssh_opts=None,
                 recursive=False):
        """
        Parameters:
            source -- local dataset to back up.
            target -- dataset on the remote host to replicate into.
            remote_host -- host to ssh to.
            remote_user -- optional user for the ssh connection.
            ssh_opts -- optional list of extra ssh options (copied).
            recursive -- replicate recursively.
        """
        self._user = remote_user
        self._host = remote_host
        # Copy the options so a caller-supplied list can't be mutated
        # behind our back (also avoids a mutable default argument).
        self._ssh_opts = list(ssh_opts) if ssh_opts else []
        super(ZFSBackupSSH, self).__init__(source, target, recursive)
    @property
    def user(self):
        return self._user
    @property
    def host(self):
        return self._host
    @property
    def ssh_options(self):
        return self._ssh_opts
    def _build_command(self, cmd, *args):
        """
        Build the full argv to run *cmd* (with *args*) on the remote
        host via ssh.  Arguments are quoted for the remote shell.
        """
        # First set up ssh.
        command = ["/usr/bin/ssh"]
        if self.ssh_options:
            command.extend(self.ssh_options)
        if self.user:
            command.append("{}@{}".format(self.user, self.host))
        else:
            command.append(self.host)
        # Then goes the rest of the command
        command.append(cmd)
        for arg in args:
            command.append('"{}"'.format(arg))
        return command
    def _run_cmd(self, cmd, *args, **kwargs):
        """
        This implements running a command and not caring about
        the output.  If stdout or stderr are given, those will
        be file-like objects that the output and error are written
        to.  If the command exits with a non-0 value, we raise an
        exception.
        """
        command = self._build_command(cmd, *args)
        try:
            CHECK_CALL(command, **kwargs)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("`{}` failed".format(command))
    def _remote_stream(self, cmd, *args, **kwargs):
        """
        Run a command on the remote host, but we want to write to or read
        from it.  We return a subprocess.Popen object, so the caller
        needs to specify stdin=subprocess.PIPE, or stdout.  Both can't be pipes.
        This should only be called by _remote_write or _remote_read.
        """
        command = self._build_command(cmd, *args)
        # Bug fix: Popen takes the argument vector as a single list; the
        # old code spread command[1:] into Popen's *positional* parameters
        # (bufsize, executable, stdin, ...).
        return POPEN(command, **kwargs)
    def _remote_write(self, cmd, *args, **kwargs):
        """
        Run a command on the remote host, writing to it via stdin.
        """
        # Force a pipe for stdin, overriding any caller-supplied value.
        kwargs["stdin"] = subprocess.PIPE
        return self._remote_stream(cmd, *args, **kwargs)
    def _remote_read(self, cmd, *args, **kwargs):
        """
        Run a command on the remote host, reading its stdout.
        """
        # Force a pipe for stdout, overriding any caller-supplied value.
        kwargs["stdout"] = subprocess.PIPE
        return self._remote_stream(cmd, *args, **kwargs)
    def validate(self):
        """
        Do a couple of validations: that we can ssh to the remote host,
        and that the target dataset exists there.
        """
        # See if we can connect to the remote host
        with tempfile.TemporaryFile() as error_output:
            try:
                self._run_cmd("/usr/bin/true", stderr=error_output)
            except ZFSBackupError:
                error_output.seek(0)
                raise ZFSBackupError("Unable to connect to remote host: {}".format(error_output.read()))
        # See if the target exists
        with open("/dev/null", "w+") as devnull:
            try:
                self._run_cmd("/sbin/zfs", "list", "-H", self.target,
                              stdout=devnull, stderr=devnull, stdin=devnull)
            except ZFSBackupError:
                raise ZFSBackupError("Target {} does not exist on remote host".format(self.target))
        return
    def restore_handler(self, stream, **kwargs):
        """
        Restore from a remote ZFS dataset (via ssh): run 'zfs send' on
        the remote side and feed its output through the local restore
        filters into *stream*.
        """
        command = ["/sbin/zfs", "send", "-p"]
        if self.recursive:
            command.append("-R")
        if "ResumeToken" in kwargs:
            command.extend(["-t", kwargs["ResumeToken"]])
        # Bug fix: this previously read 'if parent in kwargs:', which
        # raised NameError ('parent' was an undefined name, not a string).
        if "parent" in kwargs:
            command.extend(["-I", kwargs["parent"]])
        command.append("{}@{}".format(self.target, kwargs["Name"]))
        # If we have any transformative filters, we need to create them in order.
        # Note that, as counterintuitive as it may seem, we use the backup_command for
        # each filter on the remote side.
        for filt in self.filters:
            if filt.transformative and filt.backup_command:
                # Bug fix: the result used to be assigned to a misspelled
                # variable ('commannd') and silently thrown away, so the
                # filters were never actually applied.
                command = command + ["|"] + filt.backup_command
        command = self._build_command(*command)
        if debug:
            print("Remote restore command: " + " ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            fobj = self._filter_restore(stream, error=error_output)
            try:
                CHECK_CALL(command, stdout=fobj, stderr=error_output)
            except subprocess.CalledProcessError:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read().rstrip())
        return
    def backup_handler(self, stream, **kwargs):
        """
        Implement the replication: pipe *stream* through the filters
        into 'zfs receive' running on the remote host.
        """
        # First, we create the intervening dataset paths.  See the base class' method.
        full_path = self.target
        with open("/dev/null", "w+") as devnull:
            for d in self.source.split("/")[1:]:
                full_path = os.path.join(full_path, d)
                command = self._build_command("/sbin/zfs", "create", "-o", "readonly=on", full_path)
                try:
                    # Best-effort: the dataset may already exist.
                    CALL(command, stdout=devnull, stderr=devnull, stdin=devnull)
                except subprocess.CalledProcessError:
                    pass
        # If we have any transformative filters, we need to create them in reverse order.
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        for filt in reversed(self.filters):
            if filt.transformative and filt.restore_command:
                command = filt.restore_command + ["|"] + command
        command = self._build_command(*command)
        if debug:
            print("backup command = {}".format(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            try:
                fobj = self._filter_backup(stream, error=error_output)
                command_proc = POPEN(command, stdin=fobj, stderr=error_output)
                command_proc.wait()
                if command_proc.returncode:
                    # Surface the remote receive failure (handled below).
                    raise ZFSBackupError("Remote replication failed")
            except (subprocess.CalledProcessError, ZFSBackupError):
                # Bug fix: the original used Python 2 'except A, B:' syntax,
                # which bound CalledProcessError to the name ZFSBackupError
                # instead of catching both exception types.
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        return
    @property
    def target_snapshots(self):
        """
        Snapshots of the replicated dataset on the remote host.
        The listing is cached after it has first been computed.
        """
        if not self._target_snapshots:
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            command = self._build_command("/sbin/zfs", "list", "-H", "-p",
                                          "-o", "name,creation", "-r",
                                          "-d", "1", "-t", "snapshot", "-s",
                                          "creation", target_path)
            snapshots = []
            try:
                output = CHECK_OUTPUT(command).split("\n")
                for snapshot in output:
                    if not snapshot:
                        continue
                    (name, ctime) = snapshot.rstrip().split()
                    name = name.split('@')[1]
                    snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
            except subprocess.CalledProcessError:
                # We'll assume this is because there are no snapshots
                pass
            # Bug fix: the computed list was previously returned only from
            # inside the 'if', was never cached, and the property yielded
            # None whenever the cache already held a value.
            self._target_snapshots = snapshots
        return self._target_snapshots
class ZFSBackupCount(ZFSBackup):
    """
    A do-nothing backup target that simply counts the bytes that
    would have been written.  Useful for testing and for sizing
    a backup without storing anything.
    """
    def __init__(self, source, recursive=False):
        super(ZFSBackupCount, self).__init__(source, "", recursive)
        self._count = 0

    def __repr__(self):
        return "{}(source={}, recursive={})".format(self.__class__.__name__,
                                                    self.source,
                                                    self.recursive)

    def validate(self):
        # Nothing to validate: there is no real destination.
        return

    def backup_handler(self, stream, **kwargs):
        # Drain the (filtered) stream, tallying how many bytes go by.
        fobj = self._filter_backup(stream)
        mByte = 1024 * 1024
        while True:
            chunk = fobj.read(mByte)
            if not chunk:
                break
            self._count += len(chunk)

    @property
    def target_snapshots(self):
        # A byte counter has no snapshots of its own.
        return []

    @property
    def count(self):
        # Total number of bytes seen by backup_handler so far.
        return self._count
def parse_operation(args):
    """
    Determine which operation, and what options for it.
    Default is to just parse ["backup"]
    """
    import argparse

    def to_bool(s):
        # Accept the usual truthy spellings; anything else is False.
        return s.lower() in ("yes", "1", "true", "t", "y")

    parser = argparse.ArgumentParser(description="Operation and options")
    parser.register('type', 'bool', to_bool)
    args = args or ["backup"]
    ops = parser.add_subparsers(help='sub-operation help', dest='command')
    # The current valid operations are backup, restore, list, verify, and delete
    # Although only backup and restore are currently implemented
    ops.add_parser("backup", help="Backup command")
    ops.add_parser("restore", help='Restore command')
    verify_operation = ops.add_parser("verify", help='Verify command')
    verify_operation.add_argument("--all", action='store_true', dest='check_all',
                                  help='Check every backup for consistency',
                                  default=False)
    ops.add_parser('delete', help='Delete command')
    ops.add_parser("list", help='List command')
    return parser.parse_args(args)
def parse_arguments(args=None):
    """
    Parse the top-level command line.

    :param args: argument list to parse (defaults to sys.argv[1:]).
    :return: the parsed argparse.Namespace.  Prints help and exits with
        status 1 if no replication subcommand was given.
    """
    global debug, verbose
    import argparse

    def to_bool(s):
        if s.lower() in ("yes", "1", "true", "t", "y"):
            return True
        return False

    # Bug fix: corrected "replictor" typo in the user-facing description.
    parser = argparse.ArgumentParser(description='ZFS snapshot replicator')
    parser.register('type', 'bool', to_bool)
    parser.add_argument("--debug", dest='debug',
                        action='store_true', default=False,
                        help='Turn on debugging')
    parser.add_argument("--verbose", dest='verbose', action='store_true',
                        default=False, help='Be verbose')
    parser.add_argument('--recursive', '-R', dest='recursive',
                        action='store_true',
                        default=False,
                        help='Recursively replicate')
    parser.add_argument("--snapshot", "-S", "--dataset", "--pool",
                        dest='snapshot_name',
                        default=None,
                        help='Dataset/pool/snapshot to back up')
    parser.add_argument("--encrypted", "-E", dest='encrypted',
                        action='store_true', default=False,
                        help='Encrypt snapshots')
    parser.add_argument("--cipher", dest='cipher',
                        default='aes-256-cbc',
                        help='Encryption cipher to use')
    parser.add_argument('--password-file', dest='password_file',
                        default=None,
                        help='Password file for encryption')
    parser.add_argument("--compressed", "-C", dest='compressed',
                        action='store_true', default=False,
                        help='Compress snapshots')
    parser.add_argument('--pigz', action='store_true',
                        dest='use_pigz', default=False,
                        help='Use pigz to compress')
    incrementals = parser.add_mutually_exclusive_group()
    incrementals.add_argument("--iterate-incrementals", dest="iterate",
                              action='store_true', default=True)
    incrementals.add_argument("--no-iterate-incrementals", dest="iterate",
                              action='store_false')
    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
    # We have a sub parser for each type of replication target.
    zfs_parser = subparsers.add_parser('zfs',
                                       help='Replicate to local ZFS dataset')
    zfs_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    zfs_parser.add_argument("rest", nargs=argparse.REMAINDER)
    counter_parser = subparsers.add_parser('counter',
                                           help='Count replication bytes')
    counter_parser.add_argument("rest", nargs=argparse.REMAINDER)
    # ssh parser has a lot more options
    ssh_parser = subparsers.add_parser("ssh",
                                       help="Replicate to a remote ZFS server")
    ssh_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    ssh_parser.add_argument('--host', '-H', dest='remote_host',
                            required=True,
                            help='Remote hostname')
    ssh_parser.add_argument("--user", '-U', dest='remote_user',
                            help='Remote user (defaults to current user)')
    ssh_parser.add_argument("rest", nargs=argparse.REMAINDER)
    # Directory parser has only two options
    directory_parser = subparsers.add_parser("directory",
                                             help='Save snapshots to a directory')
    directory_parser.add_argument("--dest", "-D", dest='destination', required=True,
                                  help='Path to store snapshots')
    directory_parser.add_argument("--prefix", "-P", dest='prefix', default=None,
                                  help='Prefix to use when saving snapshots (defaults to hostname)')
    directory_parser.add_argument("rest", nargs=argparse.REMAINDER)
    # S3 parser has many options
    s3_parser = subparsers.add_parser("s3", help="Save snapshots to an S3 server")
    s3_parser.add_argument("--bucket", dest='bucket_name', required=True,
                           help='Name of bucket in which to save data')
    s3_parser.add_argument("--prefix", dest='prefix', default=None,
                           help='Prefix (inside of bucket); defaults to host name)')
    s3_parser.add_argument("--key", "--s3-id", dest='s3_key', required=True,
                           help='S3 Access ID')
    s3_parser.add_argument("--secret", dest='s3_secret', required=True,
                           help='S3 Secret Key')
    s3_parser.add_argument('--server', dest="s3_server", default=None,
                           help='S3-compatible server')
    glacier = s3_parser.add_mutually_exclusive_group()
    glacier.add_argument("--glacier", dest='glacier', action='store_true', default=True)
    glacier.add_argument("--no-glacier", dest='glacier', action='store_false')
    # Bug fix: removed the misspelled duplicate "--glacer" option.  It
    # shadowed the --glacier/--no-glacier pair above, and its type=bool
    # treated any non-empty value (including "False") as true.
    s3_parser.add_argument('--region', dest='region', default=None,
                           help='S3 Region to use')
    s3_parser.add_argument("rest", nargs=argparse.REMAINDER)
    rv = parser.parse_args(args)
    if rv.subcommand is None:
        parser.print_help()
        sys.exit(1)
    return rv
def main():
    """
    Entry point: parse arguments, build the appropriate ZFSBackup
    subclass, attach filters, and run the requested operation
    (backup/restore/verify/list).
    """
    global debug, verbose
    args = parse_arguments()
    operation = parse_operation(args.rest)
    # Start doing some sanity checks
    # Due to the complexity of encryption, we need to handle
    # some cases that (as far as I can tell) argparse doesn't.
    if args.encrypted:
        if args.password_file is None:
            print("Password file is required when encrypting backups", file=sys.stderr)
            sys.exit(1)
        if args.subcommand == "ssh":
            print("Encrypting while using ssh replication is not possible", file=sys.stderr)
            sys.exit(1)
    verbose = args.verbose
    debug = args.debug
    if debug:
        verbose = True
    if debug:
        print("args = {}".format(args), file=sys.stderr)
    # Split "dataset@snapshot" into its parts; a bare dataset name
    # (no '@') means "use the most recent snapshot".
    # NOTE(review): assumes --snapshot was supplied; None would raise
    # AttributeError here, not ValueError -- confirm intended.
    try:
        (dataset, snapname) = args.snapshot_name.split('@')
    except ValueError:
        dataset = args.snapshot_name
        snapname = None
    # Select the replication backend.
    if args.subcommand is None:
        print("No replication type method. Valid types are zfs, counter", file=sys.stderr)
        sys.exit(1)
    elif args.subcommand == 'counter':
        backup = ZFSBackupCount(dataset, recursive=args.recursive)
    elif args.subcommand == 'zfs':
        backup = ZFSBackup(dataset, args.destination, recursive=args.recursive)
    elif args.subcommand == 'ssh':
        backup = ZFSBackupSSH(dataset, args.destination, args.remote_host,
                              remote_user=args.remote_user,
                              recursive=args.recursive)
    elif args.subcommand == 'directory':
        backup = ZFSBackupDirectory(dataset, args.destination, recursive=args.recursive,
                                    prefix=args.prefix)
    elif args.subcommand == 's3':
        backup = ZFSBackupS3(dataset, args.bucket_name, args.s3_key, args.s3_secret,
                             recursive=args.recursive, server=args.s3_server,
                             prefix=args.prefix, region=args.region, glacier=args.glacier)
    else:
        print("Unknown replicator {}".format(args.subcommand), file=sys.stderr)
        sys.exit(1)
    # Optional byte counters bracket the compression filter so the
    # compression ratio can be reported afterwards.
    before_count = None; after_count = None
    if args.compressed:
        if verbose:
            before_count = ZFSBackupFilterCounter(name="before")
            backup.AddFilter(before_count)
        backup.AddFilter(ZFSBackupFilterCompressed(pigz=args.use_pigz))
        if verbose:
            after_count = ZFSBackupFilterCounter(name="after")
            backup.AddFilter(after_count)
    if args.encrypted:
        encrypted_filter = ZFSBackupFilterEncrypted(cipher=args.cipher,
                                                    password_file=args.password_file)
        backup.AddFilter(encrypted_filter)
    if operation.command == "backup":
        def handler(**kwargs):
            # Per-snapshot progress reporting for verbose mode.
            stage = kwargs.get("stage", "")
            if stage == "start":
                print("Starting backup of snapshot {}@{}".format(dataset, kwargs.get("Name")))
            elif stage == "complete":
                print("Completed backup of snapshot {}@{}".format(dataset, kwargs.get("Name")))
        if verbose:
            print("Starting backup of {}".format(dataset))
        try:
            backup.backup(snapname=snapname,
                          snapshot_handler=handler if verbose else None,
                          each_snapshot=args.iterate)
            if args.verbose:
                print("Done with backup")
        except ZFSBackupError as e:
            # Bug fix: Python 3 exceptions have no .message attribute;
            # str(e) works on both Python 2 and 3.
            print("Backup failed: {}".format(str(e)), file=sys.stderr)
    elif operation.command == "restore":
        def handler(**kwargs):
            # Per-snapshot progress reporting for verbose mode.
            stage = kwargs.get("stage", "")
            if stage == "start":
                print("Starting restore of snapshot {}@{}".format(dataset, kwargs.get("Name")))
            elif stage == "complete":
                print("Completed restore of snapshot {}@{}".format(dataset, kwargs.get("Name")))
        if verbose:
            print("Starting restore of {}".format(dataset))
        try:
            backup.restore(snapname=snapname,
                           snapshot_handler=handler if verbose else None)
            if verbose:
                print("Done with restore")
        except ZFSBackupError as e:
            # Bug fix: see above -- str(e) instead of e.message.
            print("Restore failed: {}".format(str(e)), file=sys.stderr)
    elif operation.command == 'verify':
        problems = backup.Check(check_all=operation.check_all)
        if problems:
            print(problems)
        elif verbose:
            print("No problems")
    elif operation.command == "list":
        # List snapshots
        if debug:
            print("Listing snapshots", file=sys.stderr)
        for snapshot in backup.target_snapshots:
            output = "Snapshot {}@{}".format(dataset, snapshot["Name"])
            if verbose:
                ctime = time.localtime(snapshot.get("CreationTime", 0))
                output += "\n\tCreated {}".format(time.strftime("%a, %d %b %Y %H:%M:%S %z", ctime))
                if snapshot.get("incremental", False):
                    output += "\n\tincremental parent={}".format(snapshot.get("parent", "<unknown>"))
                filters = snapshot.get("filters", [])
                for filter in filters:
                    output += "\n\tFilter: {}".format(" ".join(filter))
                if "chunks" in snapshot:
                    output += "\n\tChunks:\n"
                    for chunk in snapshot["chunks"]:
                        output += "\t\t{}".format(chunk)
                # Dump any remaining metadata keys generically.
                for key in snapshot.keys():
                    if key in ("Name", "CreationTime", "incremental",
                               "parent", "chunks", "filters"):
                        continue
                    output += "\n\t{} = {}".format(key, snapshot[key])
            print(output)
    # Post-run reporting: byte totals and compression ratio.
    if operation.command in ("backup", "restore"):
        if isinstance(backup, ZFSBackupCount):
            output = "{} bytes".format(backup.count)
            print(output)
        if before_count and before_count.count and after_count:
            pct = (after_count.count * 100.0) / before_count.count
            output = "Compressed {} to {} bytes ({:.2f}%)".format(before_count.count,
                                                                  after_count.count,
                                                                  pct)
            print(output)
# Script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
A restore now works, even over ssh.
The implementation is still very fragile, so I need to find
more robust ways to do it. Factoring out the shared logic of
ZFSBackup and ZFSBackupSSH would help a lot, since they are
essentially the same thing with different transport
protocols.
from __future__ import print_function
import os, sys
import json
import subprocess
import time
import tempfile
import threading
from io import BytesIO
import errno
import boto3
import botocore
import socket
# Module-wide tracing flags; reassigned from the parsed command-line
# options in main().
debug = True  # NOTE(review): ships with debug tracing enabled -- confirm this default is intended
verbose = False
def _find_snapshot_index(name, snapshots):
"""
Given a list of snapshots (that is, an ordered-by-creation-time
array of dictionaries), return the index. If it's not found,
raise KeyError.
"""
for indx, snapshot in enumerate(snapshots):
if snapshot["Name"] == name:
return indx
raise KeyError(name)
def _last_common_snapshot(source, target):
"""
Given a list of snapshots (which are dictionaries),
return the last common snapshot (also as a dictionary,
but a different one). The inputs are a list, sorted
by creation date.
The return value -- if any -- will include:
- Name: (str) the name of the snapshot
- CreationTime: (int) the creation time of the snapshot.
This is taken from the source.
Optional values:
- incremental: (bool) Whether or not this was an incremental
snapshot. This is always taken from target.
- parent: (str) If an incremental snapshot, then the previous
snapshot used to create it. This is always taken from target.
- ResumeToken: (str) If the snapshot in question was interrupted,
and can be resumed, this will be the value. This value must be
present and equal in both source and target, or else it will not
be in the return value.
"""
# We're going to turn the target list into a dictionary, first.
target_dict = dict((el["Name"], el) for el in target)
# Now we go through the source list, in reversed order, seeing
# if the source snapshot is in target.
for snap in reversed(source):
if snap["Name"] in target_dict:
t = target_dict[snap["Name"]]
# Great, we found it!
rv = {"Name" : snap["Name"], "CreationTime" : int(snap["CreationTime"]) }
rv["incremental"] = t.get("incremental", False)
if "parent" in t:
rv["parent"] = t["parent"]
if "ResumeToken" in snap and "ResumeToken" in t:
if t["ResumeToken"] == snap["ResumeToken"]:
rv["ResumeToken"] = snap['ResumeToken']
return rv
return None
def _merge_snapshots(list1, list2):
"""
Given a list of snapshots, return a list of
common snapshots (sorted by creation time).
The return list is simply an array of names.
N.B.: Snapshots are assumed to be the same if
they have the same name!
"""
rv = []
if list2:
dict2 = dict((el["Name"], True) for el in list2)
for snapname in [x["Name"] for x in list1]:
if snapname in dict2:
rv.append(snapname)
else:
pass;
return rv
def CHECK_OUTPUT(*args, **kwargs):
    """Trace (when debug is set) and delegate to subprocess.check_output."""
    if debug:
        sys.stderr.write("CHECK_OUTPUT({}, {})\n".format(args, kwargs))
    return subprocess.check_output(*args, **kwargs)
def CALL(*args, **kwargs):
    """Trace (when debug is set) and delegate to subprocess.call."""
    if debug:
        # Bug fix: file=sys.stderr was previously passed to str.format
        # (where it was silently ignored), so the trace went to stdout.
        print("CALL({}, {})".format(args, kwargs), file=sys.stderr)
    return subprocess.call(*args, **kwargs)
def CHECK_CALL(*args, **kwargs):
    """Trace (when debug is set) and delegate to subprocess.check_call."""
    if debug:
        sys.stderr.write("CHECK_CALL({}, {})\n".format(args, kwargs))
    return subprocess.check_call(*args, **kwargs)
def POPEN(*args, **kwargs):
    """Trace (when debug is set) and delegate to subprocess.Popen."""
    if debug:
        sys.stderr.write("POPEN({}, {})\n".format(args, kwargs))
    return subprocess.Popen(*args, **kwargs)
def _get_snapshot_size_estimate(ds, toname, fromname=None, recursive=False):
    """
    Ask `zfs send -nPv` for an estimated size of the snapshot
    ds@toname.  When fromname is given, estimate the incremental send
    starting from that snapshot instead.  Returns 0 if no size line is
    found; re-raises on a failed zfs invocation.
    """
    command = ["/sbin/zfs", "send", "-nPv"]
    if recursive:
        command.append("-R")
    if fromname:
        command.extend(["-i", "{}@{}".format(ds, fromname)])
    command.append("{}@{}".format(ds, toname))
    try:
        for line in CHECK_OUTPUT(command, stderr=subprocess.STDOUT).decode("utf-8").split("\n"):
            if not line.startswith("size"):
                continue
            (key, value) = line.split()
            if key == "size":
                return int(value)
    except subprocess.CalledProcessError as e:
        if verbose:
            print("`{}` got exception {}".format(" ".join(command), str(e)), file=sys.stderr)
        raise
    return 0
def _get_snapshots(ds):
    """
    Return a list of snapshots for the given dataset.
    This only works for local ZFS pools, obviously.
    It relies on /sbin/zfs sorting by creation time, rather than
    sorting itself.  Each entry is a dict with Name and CreationTime,
    plus ResumeToken when the snapshot has one.
    """
    command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation,receive_resume_token",
               "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
               ds]
    if debug:
        print("get_snapshots: {}".format(" ".join(command)), file=sys.stderr)
    try:
        lines = CHECK_OUTPUT(command).decode('utf-8').split("\n")
    except subprocess.CalledProcessError:
        # Assume a failure simply means the dataset has no snapshots.
        return []
    snapshots = []
    for line in lines:
        line = line.rstrip()
        if not line:
            continue
        if debug:
            print("Output line: {}".format(line), file=sys.stderr)
        (name, ctime, resume_token) = line.split("\t")
        entry = {"Name": name.split('@')[1], "CreationTime": int(ctime)}
        # zfs prints "-" when there is no receive_resume_token.
        if resume_token != "-":
            entry["ResumeToken"] = resume_token
        snapshots.append(entry)
    return snapshots
class ZFSBackupError(ValueError):
    """Base exception for all ZFS backup/restore errors."""
class ZFSBackupMissingFullBackupError(ZFSBackupError):
    """Raised when a restore is requested but no full backup exists."""
    def __init__(self):
        # Bug fix: the old call used the unbound form
        # super(Cls).__init__(self, msg), which never reached the
        # exception initializer, so the message was never set.
        super(ZFSBackupMissingFullBackupError, self).__init__(
            "No full backup available")
class ZFSBackupSnapshotNotFoundError(ZFSBackupError):
    """Raised when a named snapshot cannot be found."""
    def __init__(self, snapname):
        # Keep the missing snapshot's name available to callers.
        self.snapshot_name = snapname
        # Bug fix: the old call used the unbound form
        # super(Cls).__init__(self, msg), which never reached the
        # exception initializer, so the message was never set.
        super(ZFSBackupSnapshotNotFoundError, self).__init__(
            "Specified snapshot {} does not exist".format(snapname))
class ZFSBackupFilter(object):
    """
    Base class for ZFS backup filters.

    A filter exposes start_backup() and start_restore(), each taking a
    source (generally a pipe) and returning the stream the next stage
    should read from.  Concrete filters should hand the work to a
    subprocess or thread unless they terminate the pipeline (doing
    otherwise risks deadlock).

    The transformative property says whether the filter changes the
    bytes flowing through it.  Some filters (e.g. a byte counter) do
    not.  Subclasses such as ZFSBackupSSH care, because transformative
    filters must also be applied on the remote end during backup and
    restore.  It defaults to True and may be reassigned.

    The base class itself is an identity filter: both start methods
    return their input unchanged.
    """
    def __init__(self, name="Null Filter"):
        self.transformative = True
        self._name = name

    @property
    def name(self):
        # Human-readable filter name, for diagnostics.
        return self._name

    @property
    def transformative(self):
        return self._transformative

    @transformative.setter
    def transformative(self, b):
        self._transformative = b

    @property
    def error_output(self):
        # The base filter has no error stream.
        return None

    @error_output.setter
    def error_output(self, e):
        # Ignored: the base filter produces no error output.
        return

    @property
    def backup_command(self):
        # No external command is involved in the identity filter.
        return []

    @property
    def restore_command(self):
        return []

    def start_backup(self, source):
        """Identity behavior: pass the backup stream through untouched."""
        return source

    def start_restore(self, source):
        """Identity behavior: pass the restore stream through untouched."""
        return source

    def finish(self):
        """No cleanup is required for the base filter."""
        pass
class ZFSBackupFilterThread(ZFSBackupFilter):
    """
    Base class for a thread-based filter.  Either it should be
    subclassed (see ZFSBackupFilterCounter below), or it should
    be called with a callable object as the "process=" parameter.
    The process method may need to check ZFSBackupFilterThread.mode
    to decide if it is backing up or restoring.
    Interestingly, this doesn't seem to actually work the way I'd expected:
    when writing from a thread to a popen'd pipe, the pipe will block, even
    when a thread closes the write end of the pipe.
    """
    def __init__(self, process=None, name="Thread Filter"):
        """
        process - optional callable applied to each buffer; whatever it
            returns is what gets written downstream.
        name - human-readable filter name.
        """
        super(ZFSBackupFilterThread, self).__init__(name=name)
        self.thread = None
        self.source = None
        self.input_pipe = None
        self.output_pipe = None
        # Thread filters default to non-transformative (e.g. counters).
        self.transformative = False
        self._mode = None
        # Bug fix: the process callable was accepted but never stored,
        # so process() raised AttributeError whenever it was supplied.
        self._process = process

    @property
    def mode(self):
        # "backup" or "restore"; None before the filter is started.
        return self._mode

    @property
    def backup_command(self):
        # Thread filters run no external command.
        return None

    @property
    def restore_command(self):
        return None

    def process(self, buf):
        # Subclasses should do any processing here; the default applies
        # the constructor's callable, or passes the buffer through.
        if self._process:
            return self._process(buf)
        else:
            return buf

    def run(self, *args, **kwargs):
        """Worker loop: read, process, and forward until EOF."""
        # We use a try/finally block to ensure
        # the write-side is always closed.
        try:
            while True:
                if self.mode == "backup":
                    b = self.stream.read(1024*1024)
                elif self.mode == "restore":
                    b = os.read(self.input_pipe, 1024*1024)
                if b:
                    if debug:
                        print("In thread {}, read {} bytes".format(self.name, len(b)), file=sys.stderr)
                    temp_buf = self.process(b)
                    # Bug fix: write the processed buffer, not the raw
                    # input, so transformative thread filters actually
                    # transform the stream.
                    if self.mode == "backup":
                        os.write(self.output_pipe, temp_buf)
                    else:
                        self.stream.write(temp_buf)
                    if debug:
                        print("In thread {}, just wrote {} bytes".format(self.name, len(temp_buf)), file=sys.stderr)
                else:
                    if debug:
                        print("In thread {}, done reading from stream".format(self.name), file=sys.stderr)
                    break
        finally:
            try:
                if self.mode == "backup":
                    os.close(self.output_pipe)
                elif self.mode == "restore":
                    self.stream.close()
            except OSError:
                pass

    def _start(self, stream):
        # Create the pipe pair and launch the worker thread; returns the
        # end of the pipe the caller should use for this mode.
        import fcntl
        self.stream = stream
        (self.input_pipe, self.output_pipe) = os.pipe()
        # We need to set FD_CLOEXEC on the pipes, lest a
        # subsequent Popen call keep a dangling open
        # reference around.
        # Bug fix: fetch each descriptor's own flags instead of applying
        # the write end's flags to the read end.
        out_flags = fcntl.fcntl(self.output_pipe, fcntl.F_GETFD)
        fcntl.fcntl(self.output_pipe, fcntl.F_SETFD, out_flags | fcntl.FD_CLOEXEC)
        in_flags = fcntl.fcntl(self.input_pipe, fcntl.F_GETFD)
        fcntl.fcntl(self.input_pipe, fcntl.F_SETFD, in_flags | fcntl.FD_CLOEXEC)
        self._py_read = os.fdopen(self.input_pipe, "rb")
        self._py_write = os.fdopen(self.output_pipe, "wb")
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()
        if self.mode == "backup":
            rv = self._py_read
        elif self.mode == "restore":
            rv = self._py_write
        if debug:
            print("In thread start_{}, returning {}".format(self._mode, rv),
                  file=sys.stderr)
        return rv

    def start_backup(self, stream):
        if self.thread:
            self.thread = None
        self._mode = "backup"
        return self._start(stream)

    def start_restore(self, stream):
        if self.thread:
            self.thread = None
        self._mode = "restore"
        return self._start(stream)

    def finish(self):
        # Wait for the worker thread to drain before returning.
        if self.thread:
            self.thread.join()
        return
class ZFSBackupFilterCounter(ZFSBackupFilterThread):
    """
    A sample thread filter.  All this does is count the
    bytes that come in to be processed, optionally reporting the total
    to a handler callable when the count is read.
    """
    def __init__(self, handler=None, name="ZFS Count Filter"):
        super(ZFSBackupFilterCounter, self).__init__(name=name)
        self._count = 0
        self.handler = handler

    def process(self, b):
        # Tally the buffer and pass it through unchanged.
        self._count += len(b)
        return b

    def start_backup(self, source):
        return super(ZFSBackupFilterCounter, self).start_backup(source)

    def start_restore(self, source):
        return super(ZFSBackupFilterCounter, self).start_restore(source)

    @property
    def handler(self):
        # Optional callable invoked with the final count.
        return self._handler

    @handler.setter
    def handler(self, h):
        self._handler = h

    @property
    def count(self):
        # This will block until the thread is done
        self.finish()
        # Bug fix: `iscallable` does not exist; the builtin is `callable`.
        # The old code raised NameError whenever a handler was set.
        if self.handler and callable(self.handler):
            self.handler(self._count)
        return self._count
class ZFSBackupFilterCommand(ZFSBackupFilter):
    """
    Derived class for backup filters based on commands.
    This adds a couple of properties, and starts the appropriate commands
    in a Popen instance.  The error parameter in the constructor is
    used to indicate where stderr should go; by default, it goes to
    /dev/null.
    If restore_command is None, then backup_command will be used.
    """
    def __init__(self, backup_command=["/bin/cat"], restore_command=None,
                 name='Command-based backup filter', error=None):
        # NOTE(review): the mutable default for backup_command is kept
        # for interface compatibility; it is never mutated here.
        super(ZFSBackupFilterCommand, self).__init__(name=name)
        self._backup_command = backup_command
        self._restore_command = restore_command
        self.error = error
        self.proc = None
        # Bug fix: _error_output was read by the error_output property
        # before ever being assigned, raising AttributeError; give it a
        # defined initial value.
        self._error_output = None

    @property
    def backup_command(self):
        return self._backup_command

    @property
    def restore_command(self):
        # Fall back to the backup command when no restore command is set.
        return self._restore_command or self.backup_command

    @property
    def error_output(self):
        return self._error_output

    @error_output.setter
    def error_output(self, where):
        # Close any previously-opened error stream before replacing it.
        # NOTE(review): start_backup/start_restore use self.error, not
        # this value -- confirm whether they were meant to honor it.
        if self.error:
            self.error.close()
        self._error_output = where

    def start_restore(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source as stdin, and
        subprocess.PIPE as stdout, and return popen.stdout.
        If error is None, we open /dev/null for writing and
        use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+b")
        self.proc = POPEN(self.restore_command,
                          bufsize=1024 * 1024,
                          stdin=source,
                          stdout=subprocess.PIPE,
                          stderr=self.error)
        return self.proc.stdout

    def start_backup(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source up as stdin,
        and subprocess.PIPE as output, and return
        popen.stdout.
        If error is None, we open /dev/null for writing
        and use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+b")
        if debug:
            print("start_backup: command = {}, stdin={}, stderr={}".format(" ".join(self.backup_command),
                                                                           source,
                                                                           self.error),
                  file=sys.stderr)
        self.proc = POPEN(self.backup_command,
                          bufsize=1024 * 1024,
                          stderr=self.error,
                          stdin=source,
                          stdout=subprocess.PIPE)
        if debug:
            # Bug fix: corrected "start_bauckup" typo in the trace message.
            print("In start_backup for command, source = {}, proc.stdout = {}".format(source,
                                                                                      self.proc.stdout),
                  file=sys.stderr)
        return self.proc.stdout

    def finish(self):
        # Reap the subprocess and release the error stream.
        if self.proc:
            self.proc.wait()
        if self.error:
            try:
                self.error.close()
            except OSError:
                pass
            self.error = None
class ZFSBackupFilterEncrypted(ZFSBackupFilterCommand):
    """
    A filter to encrypt and decrypt a stream.
    The openssl command can do a lot more than we're asking
    of it here.
    We require a password file (for now, anyway).

    Raises ValueError when the password file is missing or the cipher
    is not supported by the local openssl.
    """
    def __init__(self, cipher="aes-256-cbc",
                 password_file=None):
        def ValidateCipher(cipher):
            # Check the requested cipher against openssl's own list.
            if cipher is None:
                return False
            try:
                # Bug fix: check_output returns bytes on Python 3; the
                # output must be decoded before splitting, otherwise the
                # membership test (str in list-of-bytes) always fails
                # and every cipher is rejected.
                ciphers = CHECK_OUTPUT(["/usr/bin/openssl",
                                        "list-cipher-commands"]).decode("utf-8").split()
                return cipher in ciphers
            except subprocess.CalledProcessError:
                return False
        if password_file is None:
            raise ValueError("Password file must be set for encryption filter")
        if not ValidateCipher(cipher):
            raise ValueError("Invalid cipher {}".format(cipher))
        self.cipher = cipher
        self.password_file = password_file
        backup_command = ["/usr/bin/openssl",
                          "enc", "-{}".format(cipher),
                          "-e",
                          "-salt",
                          "-pass", "file:{}".format(password_file)]
        restore_command = ["/usr/bin/openssl",
                           "enc", "-{}".format(cipher),
                           "-d",
                           "-salt",
                           "-pass", "file:{}".format(password_file)]
        super(ZFSBackupFilterEncrypted, self).__init__(backup_command=backup_command,
                                                       restore_command=restore_command,
                                                       name='{} encryption filter'.format(self.cipher))
class ZFSBackupFilterCompressed(ZFSBackupFilterCommand):
    """
    A sample command filter, for compressing.
    One optional parameter: pigz (use the parallel pigz/unpigz pair
    instead of gzip/gunzip).
    """
    def __init__(self, pigz=False):
        self.pigz = bool(pigz)
        if self.pigz:
            compress, decompress = "/usr/local/bin/pigz", "/usr/local/bin/unpigz"
            label = 'pigz compressor filter'
        else:
            compress, decompress = "/usr/bin/gzip", "/usr/bin/gunzip"
            label = 'gzip compressor filter'
        super(ZFSBackupFilterCompressed, self).__init__(backup_command=[compress],
                                                        restore_command=[decompress],
                                                        name=label)

    @property
    def name(self):
        # Report which compressor variant this instance uses.
        return "pigz compress filter" if self.pigz else "gzip compress filter"
class ZFSBackup(object):
    """
    Base class for doing ZFS backups.
    Backups are done using snapshots -- zfs send is used -- not using files.
    Every backup must have a source and a target, although subclasses
    can change how they are interpreted.  Backups can be recursive.
    One ZFSBackup object should be created for each <source, target>, but
    not for each snapshot.  That is, you would use
        backup = ZFSBackup("/tank/Media", "/backup/tank/Media", recursive=True)
        <do backup>
        backup = ZFSBackup("/tank/Documents", "/backup/tank/Documents")
        <do backup>
    instead of creating a ZFSBackup object for each snapshot.
    In general, backups and restores are simply inverses of each other.
    In order to perform backups, it is necessary to get a list of snapshots
    on both the source and target.  An empty list on the target will mean
    a full backup is being done; an empty list on the source is a failure.
    Backups can have filters applied to them.  This is not used in the base
    class (since it only implements ZFS->ZFS), but subclasses may wish to
    add filters for compression, encryption, or accounting.  Some sample
    filter classes are provided.
    Some notes on how replication works:
    * source is the full path to the dataset. *Or* it can be the entire pool.
    * target is the dataset to which the replication should go.
    * If source is the full pool, then the target will have all of the files
    at the root of the source pool.
    * If source is NOT the full pool, then the target will end up with only the
    dataset(s) being replicated -- but any intervening datasets will be created.
    What this means:
    * tank -> backup/tank means we end up with backup/tank as a copy of tank.
    * tank/usr/home -> backup/home means we end up with backup/home/usr/home.
    * When getting snapshots for the destination, we need to add the path for
    source, *minus* the pool name.
    * UNLESS we are replicating the full pool.
    What *that* means:
    * tank -> backup/tank means getting snapshots from backup/tank
    * tank/usr/home -> backup/home means getting snapshots from backup/home/usr/home
    """
    def __init__(self, source, target, recursive=False):
        """
        Parameters:
        source - (str) a ZFS pool or dataset to be backed up.
        target - (str) a ZFS dataset to receive the backup.
        recursive - (bool) Indicate whether the backup is to be recursive or not.
        The only thing the base class does is run some validation tests
        on the source and target.
        """
        self.target = target
        self.source = source
        self.recursive = recursive
        # Snapshot lists are fetched lazily and cached by the
        # source_snapshots/target_snapshots properties.
        self._source_snapshots = None
        self._target_snapshots = None
        self._filters = []
        # May raise ZFSBackupError if the target does not exist.
        self.validate()
    # source/target/recursive are stored behind trivial properties so
    # subclasses can override the accessors without changing callers.
    @property
    def target(self):
        return self._dest
    @target.setter
    def target(self, t):
        self._dest = t
    @property
    def source(self):
        return self._source
    @source.setter
    def source(self, s):
        self._source = s
    @property
    def filters(self):
        # Ordered list of filters applied during backup/restore.
        return self._filters
    @property
    def recursive(self):
        return self._recursive
    @recursive.setter
    def recursive(self, b):
        self._recursive = b
def AddFilter(self, filter):
"""
Add a filter. The filter is set up during the backup and
restore methods. The filter needs to be an instance of
ZFSFilter -- at least, it needs to have the start_backup and
start_restore methods.
"""
if not callable(getattr(filter, "start_backup", None)) and \
not callable(getattr(filter, "start_restore", None)):
raise ValueError("Incorrect type passed for filter")
self._filters.append(filter)
def _finish_filters(self):
# Common method to wait for all filters to finish and clean up
for f in self.filters:
f.finish()
def _filter_backup(self, source, error=sys.stderr):
# Private method, to stitch the backup filters together.
input = source
for f in self.filters:
f.error_output = error
if debug:
print("Starting filter {} ({}), input = {}".format(f.name, f.backup_command, input), file=sys.stderr)
input = f.start_backup(input)
return input
def _filter_restore(self, source, error=None):
# Private method, to stitch the restore filters together.
# Note that they are in reverse order.
output = source
for f in reversed(self.filters):
f.error_output = error
if debug:
print("Starting restore filter {} ({})".format(f.name, f.restore_command), file=sys.stderr)
output = f.start_restore(output)
if debug:
print("\tFilter output = {}".format(output), file=sys.stderr)
return output
def __repr__(self):
return "{}(source={}, target={})".format(self.__class__.__name__, self.source, self.target)
    @property
    def source_snapshots(self):
        """
        Return a list of snapshots on the source.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if recursive is true, this _only_ lists the snapshots for the
        source (recursive requires that the same snapshot exist on the descendents,
        or it doesn't get backed up).
        We cache this so we don't have to keep doing a list.
        """
        # Lazily populated; an empty result is re-queried on the next
        # access because [] is falsy.
        if not self._source_snapshots:
            self._source_snapshots = _get_snapshots(self.source)
        return self._source_snapshots
    @property
    def target_snapshots(self):
        """
        Return a list of snapshots on the target.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if recursive is true, this _only_ lists the snapshots for the
        target dataset.
        We cache this so we don't have to keep doing a list.
        """
        if not self._target_snapshots:
            # See the long discussion in the class docstring about how
            # the target path is derived: the source's pool name is
            # dropped unless the whole pool is being replicated.
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            self._target_snapshots = _get_snapshots(target_path)
        return self._target_snapshots
def validate(self):
"""
Ensure the destination exists. Derived classes will want
to override this (probably).
"""
command = ["/sbin/zfs", "list", "-H", self.target]
try:
with open("/dev/null", "w") as devnull:
CHECK_CALL(command, stdout=devnull, stderr=devnull)
except subprocess.CalledProcessError:
raise ZFSBackupError("Target {} does not exist".format(self.target))
return
    def restore_handler(self, stream, **kwargs):
        """
        Method called to read a snapshot from the target.  In the base class,
        this simply does a 'zfs send' (with appropriate options).
        Unlike the corresponding backup_handler, restore_handler has to handle
        any setup for incremental sends.  It can know to do an incremental
        backup by having "parent" in kwargs, which will be the name of the
        base snapshot.
        All filters are also set up here.  In the base class, that means
        no transformative filters (since there's no real point).

        kwargs used here: Name (required), and optionally ResumeToken
        and parent.
        """
        command = ["/sbin/zfs", "send", "-p"]
        if self.recursive:
            command.append("-R")
        # A resume token lets zfs continue an interrupted send.
        if "ResumeToken" in kwargs:
            command.extend(["-t", kwargs["ResumeToken"]])
        # "parent" marks an incremental send from that base snapshot.
        if "parent" in kwargs:
            command.extend(["-I", kwargs["parent"]])
        # Mirror the path mapping used for target_snapshots: drop the
        # source's pool component unless the whole pool was replicated.
        if "/" in self.source:
            remote_ds = os.path.join(self.target, self.source.partition("/")[2])
        else:
            remote_ds = os.path.join(self.target, self.source)
        command.append("{}@{}".format(remote_ds, kwargs["Name"]))
        if debug:
            print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters
            fobj = stream
            with open("/dev/null", "w+") as devnull:
                # NOTE(review): the Popen handle is discarded without a
                # wait(), and error_output/devnull are closed on exit
                # from these with-blocks while the child may still be
                # running -- confirm this is intended.
                POPEN(command, stdout=fobj, stderr=error_output,
                      stdin=devnull)
        return
def backup_handler(self, stream, **kwargs):
    """
    Write the backup to the target. In the base class this first
    creates the intervening datasets on the target (the equivalent of
    'mkdir -p ${target}/${source}'), then feeds the stream to a
    'zfs recv' subprocess.

    Subclasses will probably want to replace this method.

    Raises:
        ZFSBackupError: with zfs's stderr text, if the receive fails.
    """
    # Create each intervening dataset; failures (e.g. "already exists")
    # are deliberately ignored.
    path_so_far = self.target
    with open("/dev/null", "w+") as devnull:
        for component in self.source.split("/")[1:]:
            path_so_far = os.path.join(path_so_far, component)
            create_cmd = ["/sbin/zfs", "create", "-o", "readonly=on", path_so_far]
            if debug:
                print("Running command {}".format(" ".join(create_cmd)), file=sys.stderr)
            try:
                CALL(create_cmd, stdout=devnull, stderr=devnull)
            except subprocess.CalledProcessError:
                pass
    # Now we just send the data to zfs recv.
    # Do we need -p too?
    recv_cmd = ["/sbin/zfs", "receive", "-d", "-F", self.target]
    with tempfile.TemporaryFile() as errors:
        # ZFS->ZFS replication doesn't use filters.
        try:
            CHECK_CALL(recv_cmd, stdin=stream,
                       stderr=errors)
        except subprocess.CalledProcessError:
            errors.seek(0)
            raise ZFSBackupError(errors.read())
    return
def backup(self, snapname=None,
           force_full=False,
           snapshot_handler=None,
           each_snapshot=True):
    """
    Back up the source to the target.

    If snapname is given, then that will be the snapshot used for the backup,
    otherwise it will be the most recent snapshot. If snapname is given and
    does not exist, an exception (ZFSBackupSnapshotNotFoundError) is raised.

    After that, we then find the most recent common snapshot from source
    and target (unless force_full is True, in which case that is set to None).
    If force_full is False, it will then collect a list of snapshots on the
    source from the last common snapshot to the last snapshot.

    each_snapshot indicates whether or not to iterate over each snapshot
    between the first and last one selected (one send per snapshot), or to
    do a single spanning incremental send.

    snapshot_handler, if callable, is invoked with stage="start" and
    stage="complete" around each snapshot sent.

    This is the main driver of the backup process, and subclasses should be okay
    with using it.
    """
    # First, if snapname is given, let's make sure that it exists on the source.
    if snapname:
        # If snapname has the dataset in it, let's remove it
        if '@' in snapname:
            (_, snapname) = snapname.split("@")
        snap_index = None
        for indx, d in enumerate(self.source_snapshots):
            if d["Name"] == snapname:
                snap_index = indx
                break
        if snap_index is None:
            raise ZFSBackupSnapshotNotFoundError(snapname)
        # We want to remove everything in source_snapshots after the given one.
        source_snapshots = self.source_snapshots[0:snap_index+1]
    else:
        source_snapshots = self.source_snapshots
    # This is the last snapshot we will send, and we are guaranteed
    # by this point that it exists on the source.
    last_snapshot = source_snapshots[-1]
    if debug:
        print("last_snapshot = {}".format(last_snapshot), file=sys.stderr)
    # Next step is to get the last common snapshot.
    if force_full:
        last_common_snapshot = None
    else:
        last_common_snapshot = _last_common_snapshot(source_snapshots,
                                                     self.target_snapshots)
    if debug:
        print("ZFSBackup: last_snapshot = {}, last_common_snapshot = {}".format(last_snapshot,
                                                                                last_common_snapshot),
              file=sys.stderr)
    snapshot_list = source_snapshots
    if last_common_snapshot is None:
        # If we have no snapshots in common, then we do all of the snapshots
        pass
    elif last_common_snapshot["Name"] == last_snapshot["Name"]:
        # No snapshots to do, we're all done.
        if debug:
            print("No snapshots to send", file=sys.stderr)
        return
    else:
        # We have a snapshot in common in source and target,
        # and we want to get a list of snapshots from last_common_snapshot
        # to last_snapshot from snapshot_list
        # To do this, we're going to go through snapshot_list, looking
        # for the index of both last_common_snapshot and last_snapshot.
        lcs_index = None
        last_index = None
        for indx, snap in enumerate(snapshot_list):
            if snap['Name'] == last_snapshot['Name']:
                last_index = indx
                break
            if snap['Name'] == last_common_snapshot['Name']:
                lcs_index = indx
        # Now we're going to do a bit of sanity checking:
        # NOTE(review): the `is None` test should come first — when
        # lcs_index is None, `last_index < lcs_index` relies on Python 2's
        # arbitrary int/None ordering and would raise TypeError on
        # Python 3.  TODO confirm intended Python version.
        if last_index < lcs_index or lcs_index is None:
            # This seems a weird case -- the snapshot we've been
            # told to do is before the last common one.
            raise ZFSBackupError("Last snapshot in source ({}) is before last common snapshot ({})".format(last_snapshot['Name'], last_common_snapshot['Name']))
        snapshot_list = snapshot_list[lcs_index:last_index+1]
        if debug:
            print("Last common snapshot = {}".format(last_common_snapshot),
                  file=sys.stderr)
            print("\tDoing snapshots {}".format(" ".join([x["Name"] for x in snapshot_list])),
                  file=sys.stderr)
    if not each_snapshot:
        # Collapse the list so we do a single (spanning) send.
        if last_common_snapshot:
            snapshot_list = (snapshot_list[0], snapshot_list[-1])
        else:
            snapshot_list = [snapshot_list[-1]]
    # At this point, snapshot_list either starts with the
    # last common snapshot, or there were no common snapshots.
    for snapshot in snapshot_list:
        resume = None
        if last_common_snapshot and snapshot["Name"] == last_common_snapshot["Name"]:
            # If we're resuming a send, we want to continue
            resume = last_common_snapshot.get("ResumeToken", None)
            if not resume:
                # We want to skip the last common snapshot,
                # so we can use it as the base of an incremental send
                # in the next pass
                continue
        command = ["/sbin/zfs", "send"]
        if self.recursive:
            command.append("-R")
        # backup_dict describes this snapshot and is handed to both
        # snapshot_handler and backup_handler as **kwargs.
        backup_dict = { "Name": snapshot["Name"] }
        backup_dict["Recursive"] = self.recursive
        try:
            backup_dict["SizeEstimate"] = _get_snapshot_size_estimate(self.source,
                                                                      snapshot["Name"],
                                                                      fromname=last_common_snapshot["Name"] if last_common_snapshot else None,
                                                                      recursive=self.recursive)
        except:
            # Size estimate is advisory only; carry on without it.
            if verbose:
                print("Unable to get size estimate for snapshot", file=sys.stderr)
        if resume:
            # NOTE(review): 'zfs send' resume normally takes -t <token>;
            # confirm -C is supported by the zfs on this platform.
            command.extend(["-C", resume])
            backup_dict["ResumeToken"] = resume
        if last_common_snapshot:
            # -i for a single-step incremental, -I for a spanning one.
            command.extend(["-i" if each_snapshot else "-I", "{}".format(last_common_snapshot["Name"])])
            backup_dict["incremental"] = True
            backup_dict["parent"] = last_common_snapshot["Name"]
        else:
            backup_dict["incremental"] = False
        backup_dict["CreationTime"] = snapshot["CreationTime"]
        if debug:
            print("backup_dict = {}".format(backup_dict), file=sys.stderr)
        command.append("{}@{}".format(self.source, snapshot["Name"]))
        if debug:
            print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile(mode="a+") as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                send_proc = POPEN(command,
                                  bufsize=mByte,
                                  stdin=devnull,
                                  stderr=error_output,
                                  stdout=subprocess.PIPE)
                if debug:
                    print("backup_dict = {}".format(backup_dict), file=sys.stderr)
                    print("send_proc.stdout = {}".format(send_proc.stdout), file=sys.stderr)
                if callable(snapshot_handler):
                    snapshot_handler(stage="start", **backup_dict)
                try:
                    # backup_handler consumes the send's stdout.
                    self.backup_handler(send_proc.stdout, **backup_dict)
                except ZFSBackupError:
                    send_proc.wait()
                    if send_proc.returncode:
                        # We'll ignore any errors generated by the filters
                        error_output.seek(0)
                        raise ZFSBackupError(error_output.read().rstrip())
                    else:
                        raise
                else:
                    send_proc.wait()
                if callable(snapshot_handler):
                    snapshot_handler(stage="complete", **backup_dict)
        self._finish_filters()
        # Set the last_common_snapshot to make the next iteration an incremental
        last_common_snapshot = snapshot
    return
def restore(self, snapname=None,
            force_full=False,
            snapshot_handler=None,
            to=None):
    """
    Perform a restore. This is essentially the inverse of backup --
    the target is the source of data, that are sent to 'zfs recv' (with
    appropriate flags).

    If snapname is given, then the restore will be done to that
    snapshot; if force_full is False, the restore will try to find
    the most recent snapshot in common before snapname, and
    attempt an incremental restore. Therefore the most common case
    for a restore to be done is a full restore to an empty pool/dataset,
    which may be done at once, or by restoring a series of incrementals.

    If there is no previous snapshot in common, _or_ force_full is True,
    then it will need to find the most recent full backup. In the case
    of the base class, every snapshot is potentially a full backup, so
    it can start with snapname. In the case of ZFSBackupDirectory,
    however, it will need to search backwards for a full backup. If there
    are no full backups, then it will raise an exception.

    If snapname is present in both target and source, then there will
    be no work done. (This would be more suitable for a rollback, after
    all.)

    Any filters applied to the backup should be applied to the restore;
    subclasses that keep track of that information (ZFSBackupDirectory and
    ZFSBackupS3 at this point) will use their own knowledge of the filters
    used at backup to apply them in the correct order.

    snapshot_handler, if callable, is invoked with state="start" and
    state="complete" around each snapshot restored.  (NOTE(review): the
    keyword here is "state", while backup() uses "stage" — confirm
    whether that asymmetry is intentional.)
    """
    if snapname is None:
        # Get the last snapshot available on the target
        snapname = self.target_snapshots[-1]["Name"]
    try:
        snapshot_index = _find_snapshot_index(snapname, self.target_snapshots)
    except KeyError:
        raise ZFSBackupSnapshotNotFoundError(snapname)
    # If the snapshot is already in source, then there's nothing to do
    try:
        _find_snapshot_index(snapname, self.source_snapshots)
        return
    except KeyError:
        pass
    # We want to make sure we include the desired snapshot name.
    snapshot_list = self.target_snapshots[:snapshot_index+1]
    # Now let's look for the last common snapshot.
    # Because of the test above, we know that snapname is not in source.
    if force_full is False:
        last_common_snapshot = _last_common_snapshot(self.source_snapshots,
                                                     snapshot_list)
    else:
        last_common_snapshot = None
    # If last_common_snapshot is set, then we need a list of
    # snapshots on the target between last_common_snapshot and
    # snapname; if last_common_snapshot is None, then we
    # need a list of snapshots on the target starting with the
    # most recent full snapshot. This is subclass-specific.
    if last_common_snapshot:
        start_index = _find_snapshot_index(last_common_snapshot["Name"],
                                           snapshot_list)
    else:
        start_index = self._most_recent_full_backup_index(snapshot_list)
    if debug:
        print("Last common snapshot = {}".format(last_common_snapshot), file=sys.stderr)
        print("start_index = {}, snapshot_list = {}".format(start_index, snapshot_list), file=sys.stderr)
    # This is now a list of snapshots to restore
    restore_snaps = snapshot_list[start_index:]
    if debug:
        print("Restoring snapshots {}".format(restore_snaps), file=sys.stderr)
    for snap in restore_snaps:
        # Do I need any other options? Possibliy if doing
        # an interrupted restore.
        if debug:
            print("Loop restore {}".format(snap), file=sys.stderr)
        resume = None
        if last_common_snapshot and snap["Name"] == last_common_snapshot["Name"]:
            # XXX: This isn't right, I think: we can have a resume token
            # for a full send.
            # If we're resuming we want to be able to continue
            resume = last_common_snapshot.get("ResumeToken", None)
            if not resume:
                # We want to skip the last common snapshot, so we can use it
                # as the basis of an incremental send.
                continue
        command = ["/sbin/zfs", "receive", "-e", "-F", "-v"]
        # Copy so we can add some elements to it
        restore_dict = snap.copy()
        if last_common_snapshot:
            restore_dict["parent"] = last_common_snapshot["Name"]
        if resume:
            restore_dict["ResumeToken"] = resume
            command.extend(["-t", resume])
        elif "ResumeToken" in restore_dict:
            restore_dict.pop("ResumeToken")
        # With -e, recv wants the parent dataset of the destination.
        if "/" in self.source:
            command.append(os.path.dirname(self.source))
        else:
            command.append(self.source)
        if debug:
            print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile(mode="a+") as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                if callable(snapshot_handler):
                    snapshot_handler(state="start", **restore_dict)
                recv_proc = POPEN(command,
                                  bufsize=mByte,
                                  stdin=subprocess.PIPE,
                                  stderr=error_output,
                                  stdout=devnull)
                try:
                    # restore_handler feeds data into the recv's stdin.
                    self.restore_handler(recv_proc.stdin, **restore_dict)
                except ZFSBackupError:
                    recv_proc.wait()
                    if recv_proc.returncode:
                        # We end up ignoring any errors generated by the filters
                        error_output.seek(0)
                        raise ZFSBackupError("Restore failed: {}".format(error_output.read().rstrip()))
                    else:
                        raise
                # NOTE(review): unlike backup(), the success path never
                # wait()s on recv_proc — possible zombie process / race
                # with _finish_filters; confirm whether a wait() belongs
                # here.
                if callable(snapshot_handler):
                    snapshot_handler(state="complete", **restore_dict)
        self._finish_filters()
        last_common_snapshot = snap
    return
def _most_recent_full_backup_index(self, snapshots):
"""
Given a list of snapshots, find the most recent full backup.
If no full backup is given, then it raises an exception.
"""
# For the base class, this is always simply the last snapshot
if snapshots:
return len(snapshots) - 1
else:
raise ZFSBackupMissingFullBackupError()
@property
def snapshots(self):
    """
    Return an array of snapshots for the destination.
    Each entry in the array is a dictonary with at least
    two keys -- Name and CreationTime. CreationTime is
    an integer (unix seconds). The array is sorted by
    creation time (oldest first). If there are no snapshots,
    an empty array is returned.
    This would be better with libzfs.
    """
    list_cmd = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation,receive_resume_token",
                "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
                self.target]
    try:
        lines = subprocess.check_output(list_cmd).split("\n")
    except subprocess.CalledProcessError:
        # We'll assume this is because there are no snapshots
        return []
    result = []
    for line in lines:
        if not line:
            continue
        (name, ctime, resume_token) = line.rstrip().split()
        entry = {"Name" : name, "CreationTime" : int(ctime) }
        # "-" means no interrupted receive to resume.
        if resume_token != "-":
            entry["ResumeToken"] = resume_token
        result.append(entry)
    return result
def Check(self, **kwargs):
    """
    Verify that the backup is okay. The base class has nothing
    to check, so this is a no-op that subclasses override.
    """
    return
class ZFSBackupDirectory(ZFSBackup):
    """
    A variant of ZFSBackup that backs up to files, rather than replication.
    The layout used is:
        target/
            prefix/
                map.json
                chunks/
                    data files

    prefix will default to the hostname if none is given.
    target is the root pathname -- note that this doesn't need to be
    a ZFS filesystem.

    The map file maps from dataset to snapshots.
    Since some filesystems (I'm looking at you, msdos) have a
    limit of 4gb, we'll keep chunks limited to 2gb.

    Each dataset has a chronologically-ordered array of
    snapshots.

    A snapshot entry in the map contains the name, the
    creation time, whether it is recursive, and, if it
    is an incremental snapshot, what the previous one was.
    It also contains the names of the chunks, and any transformative
    filter commands (in order to restore it).
    """
    def __init__(self, source, target, prefix=None, recursive=False):
        # prefix defaults to this host's name so several hosts can share
        # one target directory without colliding.
        self._prefix = prefix or socket.gethostname()
        self._mapfile = None  # lazily-loaded contents of map.json
        self._chunk_dirname = "chunks"
        super(ZFSBackupDirectory, self).__init__(source, target, recursive)

    def __repr__(self):
        return "{}({}, {}, prefix={}, recursive={})".format(self.__class__.__name__,
                                                            self.source, self.target,
                                                            self.prefix, self.recursive)

    def validate(self):
        """
        Ensure that the destination exists. Since this is just
        using files, all we need is os.path.exists.

        Raises ZFSBackupError if the target directory is missing.
        """
        if not os.path.exists(self.target):
            raise ZFSBackupError("Target {} does not exist".format(self.target))
        return

    @property
    def mapfile(self):
        """
        Return the mapfile (dataset -> snapshot records).
        If it isn't loaded, we load it now.
        """
        if self._mapfile is None:
            mapfile_path = os.path.join(self.target, self.prefix, "map.json")
            try:
                with open(mapfile_path, "r") as mapfile:
                    self._mapfile = json.load(mapfile)
            except:
                # I know, blanket catch, shouldn't do that.
                # A missing or unparseable map file is treated as empty.
                self._mapfile = {}
        return self._mapfile

    @mapfile.setter
    def mapfile(self, d):
        # Persist only when the contents actually change.
        if debug:
            print("Setting mapfile to {}".format(d), file=sys.stderr)
        if not self._mapfile or self._mapfile != d:
            self._mapfile = d
            self._save_mapfile()

    def _save_mapfile(self):
        """
        Save the map file to <target>/<prefix>/map.json.
        """
        if self._mapfile:
            mapfile_path = os.path.join(self.target, self.prefix, "map.json")
            if debug:
                print("Saving map file to {}".format(mapfile_path), file=sys.stderr)
            with open(mapfile_path, "w") as mapfile:
                json.dump(self._mapfile, mapfile,
                          sort_keys=True,
                          indent=4, separators=(',', ': '))

    @property
    def target_snapshots(self):
        """
        The snapshots are in the mapfile.
        First key we care about is the source dataset.
        """
        m = self.mapfile
        if debug:
            print("mapfile = {}".format(m), file=sys.stderr)
        if self.source in m:
            return m[self.source]["snapshots"]
        else:
            return []

    def _write_chunks(self, stream):
        # Split the incoming stream into <= 2GB chunk files under
        # <target>/<prefix>/chunks, returning the map-relative paths.
        chunks = []
        mByte = 1024 * 1024
        gByte = 1024 * mByte
        done = False
        base_path = os.path.join(self.target, self.prefix)
        chunk_dir = os.path.join(base_path, self._chunk_dirname)
        for d in (base_path, chunk_dir):
            try:
                os.makedirs(d)
            except OSError as e:
                # Already existing directories are fine.
                if e.errno != errno.EEXIST:
                    raise
        while not done:
            with tempfile.NamedTemporaryFile(dir=chunk_dir, delete=False) as chunk:
                # Record the chunk path relative to the target root.
                chunks.append(os.path.join(self.prefix,
                                           self._chunk_dirname,
                                           os.path.basename(chunk.name)))
                total = 0
                while total < 2*gByte:
                    buf = stream.read(mByte)
                    if not buf:
                        done = True
                        break
                    chunk.write(buf)
                    total += len(buf)
                if debug:
                    print("Finished writing chunk file {}".format(chunk.name), file=sys.stderr)
        return chunks

    def backup_handler(self, stream, **kwargs):
        # Write the backup to the target. In our case, we're
        # doing a couple of things:
        # First, we need to make sure the full target directory
        # exists -- create it if necessary.
        # Sanity check: unlike the base class, we need to
        # know the name of the snapshot, and whether it's incremental.
        # If it is, we also need to know the previous one
        snapshot_name = kwargs.get("Name", None)
        incremental = kwargs.get("incremental", None)
        parent = kwargs.get("parent", None)
        if snapshot_name is None:
            raise ZFSBackupError("Missing name of snapshot")
        if incremental is None:
            raise ZFSBackupError("Missing incremental information about snapshot")
        # Next sanity check: if this snapshot is already in the map, abort
        source_map = self.mapfile.get(self.source, {})
        current_snapshots = source_map.get("snapshots", [])
        for x in current_snapshots:
            if x["Name"] == snapshot_name:
                raise ZFSBackupError("Snapshot {} is already present in target".format(snapshot_name))
        # Record the restore commands for the transformative filters, in
        # reverse order, so a restore can undo them.
        filters = []
        for f in reversed(self.filters):
            if f.transformative and f.restore_command:
                filters.append(f.restore_command)
        # Now we need to start writing chunks, keeping track of their names.
        with tempfile.TemporaryFile() as error_output:
            fobj = self._filter_backup(stream, error=error_output)
            chunks = self._write_chunks(fobj)
            if not chunks:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        # Now we need to update the map to have the chunks.
        snapshot_dict = {
            "Name" : snapshot_name,
            "CreationTime" : kwargs.get("CreationTime", int(time.time())),
            "incremental": incremental,
            "chunks" : chunks
        }
        if incremental:
            snapshot_dict["parent"] = parent
        if filters:
            snapshot_dict["filters"] = filters
        # Copy through any other metadata the caller supplied.
        for key in kwargs.keys():
            if key in ("Name", "CreationTime", "incremental", "chunks",
                       "parent", "filters"):
                continue
            snapshot_dict[key] = kwargs.get(key)
        current_snapshots.append(snapshot_dict)
        source_map["snapshots"] = current_snapshots
        self.mapfile[self.source] = source_map
        self._save_mapfile()

    @property
    def prefix(self):
        # Per-host namespace inside the target directory.
        return self._prefix

    def _get_all_chunks(self):
        """
        Returns a set of all the chunks in self.target/self.prefix/self._chunk_dirname.
        The members are paths relative to self.target.
        """
        rv = set()
        chunk_dir = os.path.join(self.prefix, self._chunk_dirname)
        for entry in os.listdir(os.path.join(self.target, chunk_dir)):
            if os.path.isdir(os.path.join(self.target, chunk_dir, entry)):
                # This shouldn't be the case
                continue
            rv.add(os.path.join(chunk_dir, entry))
        return rv

    def Check(self, **kwargs):
        """
        Method to ensure that the backup is sane.
        In this case, it means checking that every chunk
        in the directory is accounted for. We also check
        to see if every snapshot has all of the chunks it
        lists, and ensure that every incrememental snapshot
        has its parent, all the way to a non-incremental.
        If there are any problems, we return a list of them.
        If cleanup=True in kwargs, we'll clean up the problems
        (still returning the list). (Not yet implemented.)
        N.B. Due to the nature of this method and class, it
        will remove *all* untracked chunks; however, it will only
        do a consistency check for the specified dataset, unless
        check_all=True in kwargs.
        """
        problems = []
        cleanup = kwargs.get("cleanup", False)
        check_all = kwargs.get("check_all", False)
        # First step is to get the backups from the mapfile.
        backups = self.mapfile.keys()
        # Next we want to get a list of all the chunks.
        # These will be relative to the target directory,
        # so we'll turn them into ${prefix}/${chunkdir}/${chunkname}
        # Since we don't care about order, but do care about lookup,
        # we'll put them into a set.
        directory_chunks = self._get_all_chunks()
        # Let's now ensure every chunk is accounted for
        # We put them all into another set
        mapfile_chunks = set()
        for backup in self.mapfile.itervalues():
            for snapshot in backup['snapshots']:
                for chunk in snapshot['chunks']:
                    mapfile_chunks.add(chunk)
        # Let's see if there are any extraneous files
        extra_chunks = directory_chunks - mapfile_chunks
        # And voila, we have a list of chunks that have gone orphaned
        for chunk in extra_chunks:
            problems.append(("delete_chunk", chunk))
        # Next pass, let's ensure that the backups have all of
        # their chunks.
        # If check_all is True, we'll look at all of the backups,
        # otherwise just ours.
        if not check_all:
            backups = [self.source]
        for backup in backups:
            snapshot_names = {}
            # NOTE(review): the per-snapshot loop placement below was
            # reconstructed from context; confirm the chunk/parent checks
            # are intended to run once per snapshot.
            for snapshot in self.mapfile[backup]["snapshots"]:
                # The list is supposed to be in order
                name = snapshot["Name"]
                snapshot_names[name] = True
                found_all = True
                if verbose:
                    print("Checking {}@{}".format(backup, name), file=sys.stderr)
                for chunk in snapshot["chunks"]:
                    if not chunk in directory_chunks:
                        found_all = False
                        break
                if snapshot.get("incremental", False):
                    # An incremental's parent must precede it in the list.
                    if snapshot["parent"] not in snapshot_names:
                        problems.append(("missing_parent", backup, name, snapshot["parent"]))
                if not found_all:
                    problems.append(("corrupt_snapshot", backup, name))
        return problems
class ZFSBackupS3(ZFSBackupDirectory):
    """
    Backup to AWS. Optionally with transitions to glacier.
    The layout used is:
        bucket/
            prefix/
                map.json
                chunks/
                    data files

    The map file maps from dataset to snapshots.
    A glacier file is limited to 40tb (and S3 to 5tb),
    so we'll actually break the snapshots into 4gbyte
    chunks.

    We control a lifecycle rule for bucket, which we
    will name "${prefix} ZFS Backup Rule"; if glacier
    is enabled, we add that rule, and set glacier migration
    for "chunks/" for 0 days; if it is not
    enabled, then we set the rule to be disabled. (But
    we always have the rule there.)

    Each dataset has a chronologically-ordered array of
    snapshots.

    A snapshot entry in the map contains the name, the
    creation time, whether it is recursive, and, if it
    is an incremental snapshot, what the previous one was.
    It also contains the names of the chunks.

    So it looks something like:
        "tank" : [
            "auto-daily-2017-01-01:00:00" : {
                "CreationTime" : 12345678,
                "Size" : 1024000,
                "Recursive" : True,
                "Incremental" : null,
                "Chunks" : [
                    "chunks/${random}",
                    "chunks/${random}"
                ]
            },
            "auto-daily-2017-01-02:00:00" : {
                ...
            }
        ]
    Each dataset being backed up has an entry in the map file.
    """
    def __init__(self, source,
                 bucket, s3_key, s3_secret,
                 recursive=False, server=None,
                 prefix=None, region=None, glacier=True):
        """
        Backing up to S3 requires a key, secret, and bucket.
        If prefix is none, it will use the current hostname.
        (As a result, prefix must be unique within the bucket.)
        If the bucket doesn't exist, it gets created; if
        glacier is True, then it will set up a transition rule.
        Note that bucket names need to be globally unique.
        """
        self._map = None
        self._glacier = glacier
        # Bug fix: server and region were previously discarded, which
        # made __repr__ raise AttributeError.
        self._server = server
        self._region = region
        self._s3 = boto3.client('s3', aws_access_key_id=s3_key,
                                aws_secret_access_key=s3_secret,
                                endpoint_url=server,
                                region_name=region)
        # Note that this may not exist yet.
        self.bucket = bucket.lower()
        self._prefix = prefix or socket.gethostname()
        # We'll over-load prefix here; target is unused for S3.
        super(ZFSBackupS3, self).__init__(source, "",
                                          prefix=prefix,
                                          recursive=recursive)
        self._setup_bucket()

    def __repr__(self):
        # Never include the key or secret in the repr.
        return "{}({}, {}, <ID>, <SECRET>, recursive={}, server={}, prefix={}, region={}, glacier={})".format(
            self.__class__.__name__, self.source, self.bucket, self.recursive, self.server,
            self.prefix, self.region, self.glacier)

    def validate(self):
        """
        We don't do a lot of validation, since s3 costs per usage.
        The bucket is checked (and created if necessary) lazily by
        _setup_bucket() at the end of __init__.
        (This class previously defined validate() twice; only the
        later no-op definition ever took effect, so they are merged.)
        """
        return

    def _setup_bucket(self):
        """
        Create a bucket, if necessary. Also, set up the lifecycle rule
        depending on whether or not we're using glacier.
        """
        if debug:
            print("Trying to setup bucket {}".format(self.bucket), file=sys.stderr)
        try:
            self.s3.head_bucket(Bucket=self.bucket)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]['Code'] == '404':
                # Need to create the bucket
                if debug:
                    print("Creating bucket {}".format(self.bucket))
                result = self.s3.create_bucket(Bucket=self.bucket)
                if debug:
                    print("When creating bucket {}, response is {}".format(self.bucket, result),
                          file=sys.stderr)
            else:
                raise
        # Great, now we have a bucket for sure, or have exceptioned out.
        # Now we want to get the lifecycle rules.
        try:
            lifecycle = self.s3.get_bucket_lifecycle_configuration(Bucket=self.bucket)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]['Code'] == 'NoSuchLifecycleConfiguration':
                lifecycle = {}
            elif e.response['Error']['Code'] == "NotImplemented":
                # Some S3-compatible servers don't do lifecycle at all.
                lifecycle = None
            else:
                raise
        if lifecycle is None:
            return
        rules = lifecycle.get("Rules", [])
        rule_id = "{} ZFS Backup Glacier Transition Rule".format(self.prefix)
        rule_indx = None
        changed = False
        # Look for our rule among any existing ones.
        for indx, rule in enumerate(rules):
            if rule["ID"] == rule_id:
                rule_indx = indx
                break
        if debug:
            print("rule_indx = {}, appropriate rule = {}".format(
                rule_indx,
                rules[rule_indx] if rule_indx is not None else "<no rules>"), file=sys.stderr)
        if rule_indx is None:
            # We need to add it
            new_rule = {
                "ID" : rule_id,
                "Prefix" : "{}/".format(self._chunk_dirname),
                "Status" : "Enabled",
                "Transitions" : [
                    {
                        "Days" : 0,
                        "StorageClass" : "GLACIER"
                    },
                ],
                # Does this prevent transitions from working?
                # 'AbortIncompleteMultipartUpload' : {
                #     'DaysAfterInitiation' : 7,
                # },
            }
            rule_indx = len(rules)
            rules.append(new_rule)
            changed = True
        else:
            # Bug fix: `changed` was never set to True in this branch, so
            # toggling glacier on an existing rule was never saved.
            desired_status = "Enabled" if self.glacier else "Disabled"
            if rules[rule_indx]["Status"] != desired_status:
                rules[rule_indx]["Status"] = desired_status
                changed = True
        if changed:
            if debug:
                print("rules = {}".format(rules), file=sys.stderr)
            self.s3.put_bucket_lifecycle_configuration(Bucket=self.bucket,
                                                       LifecycleConfiguration={ 'Rules' : rules }
                                                       )
        return

    @property
    def glacier(self):
        # Whether chunk keys should transition to Glacier storage.
        return self._glacier

    @property
    def server(self):
        # Alternate S3 endpoint URL; None means AWS proper.
        return self._server

    @property
    def region(self):
        # Region name given at construction (may be None).
        return self._region

    @property
    def prefix(self):
        return self._prefix

    @property
    def s3(self):
        # The boto3 S3 client.
        return self._s3

    @property
    def bucket(self):
        return self._bucket

    @bucket.setter
    def bucket(self, b):
        self._bucket = b

    def _key_exists(self, keyname):
        """Return True if keyname exists in the bucket (via HEAD)."""
        try:
            self.s3.head_object(Bucket=self.bucket,
                                Key=keyname)
            return True
        except botocore.exceptions.ClientError:
            return False

    @property
    def mapfile(self):
        """
        Load the map file from the bucket. We cache it so we
        don't keep reloading it.
        """
        if self._mapfile is None:
            # Check to see if the map file exists in the bucket
            map_key = "{}/map.json".format(self.prefix)
            if self._key_exists(map_key):
                map_file = BytesIO()
                self.s3.download_fileobj(Bucket=self.bucket,
                                         Key=map_key,
                                         Fileobj=map_file)
                map_file.seek(0)
                self._mapfile = json.loads(map_file.getvalue().decode('utf-8'))
            else:
                if debug:
                    print("mapfile {} does not exist in bucket".format(map_key), file=sys.stderr)
                self._mapfile = {}
        return self._mapfile

    @mapfile.setter
    def mapfile(self, mf):
        self._mapfile = mf

    def _save_mapfile(self):
        """Upload the cached map file to <prefix>/map.json in the bucket."""
        if self._mapfile:
            map_key = "{}/map.json".format(self.prefix)
            buffer = json.dumps(self._mapfile).encode('utf-8')
            map_file = BytesIO(buffer)
            map_file.seek(0)
            self.s3.upload_fileobj(Bucket=self.bucket,
                                   Key=map_key,
                                   Fileobj=map_file)

    def _write_chunks(self, stream):
        """
        Upload the stream to the bucket as a series of <= 4GB chunk keys
        (multipart uploads of 10MB parts), returning the key names.
        """
        import binascii
        chunks = []
        mByte = 1024 * 1024
        gByte = 1024 * mByte
        done = False
        chunk_dir = os.path.join(self._chunk_dirname, self.prefix)
        while not done:
            # Pick a random key name that isn't already in use.
            while True:
                chunk_key = binascii.b2a_hex(os.urandom(32)).decode('utf-8')
                chunk_key = os.path.join(chunk_dir, chunk_key)
                if not self._key_exists(chunk_key):
                    break
            total = 0
            uploader = self.s3.create_multipart_upload(Bucket=self.bucket,
                                                       ACL='private',
                                                       Key=chunk_key)
            upload_id = uploader['UploadId']
            parts = []
            try:
                while total < 4*gByte:
                    part_num = len(parts) + 1
                    buf = stream.read(10*mByte)
                    if not buf:
                        if debug:
                            print("Breaking out of loop after {} bytes".format(total), file=sys.stderr)
                        done = True
                        break
                    # We need to upload this 10Mbyte part somehow
                    response = self.s3.upload_part(Bucket=self.bucket,
                                                   Key=chunk_key,
                                                   Body=buf,
                                                   PartNumber=part_num,
                                                   UploadId=upload_id)
                    if debug:
                        print("response = {}".format(response), file=sys.stderr)
                    parts.append({ "ETag" : response["ETag"], "PartNumber" : part_num })
                    total += len(buf)
                if parts:
                    if debug:
                        print("After {} parts, completing upload".format(len(parts)), file=sys.stderr)
                    self.s3.complete_multipart_upload(Bucket=self.bucket,
                                                      Key=chunk_key,
                                                      UploadId=upload_id,
                                                      MultipartUpload={ "Parts" : parts })
            except:
                # This blanket exception catch is intentional: abort the
                # multipart upload so we don't leave billable parts behind.
                if verbose:
                    print("Aborting multipart upload after {} parts".format(len(parts)), file=sys.stderr)
                self.s3.abort_multipart_upload(Bucket=self.bucket,
                                               Key=chunk_key,
                                               UploadId=upload_id)
                raise
            chunks.append(chunk_key)
            if debug:
                print("Wrote {} bytes to chunk {}".format(total, chunk_key), file=sys.stderr)
            total = 0
        if debug:
            print("Wrote out {} chunks".format(len(chunks)), file=sys.stderr)
        return chunks

    @staticmethod
    def AvailableRegions():
        """
        List the available regions.
        (Now a staticmethod: previously it had no `self` parameter, so
        it could not be called on an instance.)
        """
        return boto3.session.Session().get_available_regions('s3')

    def _get_all_chunks(self):
        """
        Returns a set of all the chunks -- keys, in AWS parlance --
        that begin with self._chunk_dirname/self.prefix/.
        """
        rv = set()
        last_string = ''
        while True:
            response = self.s3.list_objects_v2(Bucket=self.bucket,
                                               Prefix=os.path.join(self._chunk_dirname, self.prefix),
                                               StartAfter=last_string)
            # Bug fix: "Contents" is absent when no keys match, which
            # previously raised TypeError iterating None.
            for key in [x.get("Key") for x in response.get("Contents", [])]:
                last_string = key
                rv.add(key)
            # Bug fix: a missing "IsTruncated" previously looped forever
            # (None == False is False); treat anything falsy as done.
            if not response.get("IsTruncated"):
                break
        return rv

    def Check(self, **kwargs):
        """
        Check an S3 backup destination.
        This uses the base class, and then checks for stale multipart
        uploads (started more than two days ago).
        Returns the accumulated list of problems.
        """
        from datetime import datetime
        problems = super(ZFSBackupS3, self).Check(**kwargs)
        # Now we check for multipart uploads in our bucket
        try:
            uploads = self.s3.list_multipart_uploads(Bucket=self.bucket)
        except botocore.exceptions.ClientError:
            return problems
        for upload in uploads.get("Uploads", []):
            upload_id = upload["UploadId"]
            upload_key = upload["Key"]
            # Is this correct?
            initiated = upload["Initiated"]
            # Bug fix: the variable was misspelled ("intitiated"), raising
            # NameError. Also compare in the upload's timezone, since boto3
            # returns an aware datetime and naive-vs-aware subtraction raises.
            now = datetime.now(initiated.tzinfo)
            delta = now - initiated
            if delta.days > 2:
                # Keep the historical (misspelled) tag so existing
                # consumers of the problem list still match it.
                problems.append(("stale_multpart_upload", self.bucket, upload_key, upload_id))
        # Bug fix: previously fell off the end returning None.
        return problems
class ZFSBackupSSH(ZFSBackup):
    """
    Replicate to a remote host using ssh.
    This runs all of the commands the base class does, but via ssh
    to another host.

    When running a command on a remote host, we have the following
    options:
    1) We don't care about input or output, only the return value.
    2) We stream to it, or from it.

    (1) is mostly for validation -- ensure the target exists, and
    we can connect to it.
    For (2), we stream to it (writing to stdin), and don't care about
    the output until after, for backup.
    For (2), we stream _from_ it (reading from its stdout) when getting
    a list of snapshots, and when doing a restore.
    """
def __init__(self, source, target, remote_host,
             remote_user=None,
             ssh_opts=None,
             recursive=False):
    """
    Set up replication over ssh.

    source/target/recursive are passed through to ZFSBackup;
    remote_host is the ssh destination, remote_user (optional) the
    account to log in as, and ssh_opts a list of extra arguments for
    the ssh command line.
    """
    self._user = remote_user
    self._host = remote_host
    # Idiom fix: the default was a mutable list literal; use None and
    # copy so callers' lists are never shared or mutated.
    self._ssh_opts = list(ssh_opts) if ssh_opts else []
    super(ZFSBackupSSH, self).__init__(source, target, recursive)
@property
def user(self):
    # Remote username; None means "use ssh's default".
    return self._user
@property
def host(self):
    # Remote hostname (or address) to ssh to.
    return self._host
@property
def ssh_options(self):
    # Extra command-line options passed to ssh.
    return self._ssh_opts
def _build_command(self, cmd, *args):
# First set up ssh.
command = ["/usr/bin/ssh"]
if self.ssh_options:
command.extend(self.ssh_options)
if self.user:
command.append("{}@{}".format(self.user, self.host))
else:
command.append(self.host)
# Then goes the rest of the command
command.append(cmd)
for arg in args:
if ' ' in args or '\t' in args:
# Really need proper quoting
command.append('"{}"'.format(arg))
else:
command.append(arg)
return command
def _run_cmd(self, cmd, *args, **kwargs):
"""
This implements running a command and not caring about
the output. If stdout or stderr are given, those will
be file-like objects that the output and error are written
to. If the command exists with a non-0 value, we raise an
exception.
"""
command = self._build_command(cmd, *args)
try:
CHECK_CALL(command, **kwargs)
except subprocess.CalledProcessError:
raise ZFSBackupError("`{}` failed".format(command))
def _remote_stream(self, cmd, *args, **kwargs):
"""
Run a command on the remote host, but we want to write to or read
from it. We return a subprocess.Popen object, so the caller
needs to specify stdin=subprocess.PIPE, or stdout. Both can't be pipes.
This should only be called by _remote_write or remote_stream
"""
command = self._build_command(cmd, *args)
return POPEN(command[0], *command[1:], **kwargs)
def _remote_write(self, cmd, *args, **kwargs):
"""
Run a command on the remote host, writing to it via stdin.
"""
# First remove stdin=, if it's there.
kwargs["stdin"] = subprocess.PIPE
return self._remote_stream(cmd, *args, **kwargs)
def _remote_read(self, cmd, *args, **kwargs):
"""
Run a command on the remote host, reading its stdout.
"""
# First remove stdout=, if it's there.
kwargs["stdout"] = subprocess.PIPE
return self._remote_stream(cmd, *args, **kwargs)
def validate(self):
"""
Do a couple of validations.
"""
# See if we can connect to the remote host
with tempfile.TemporaryFile() as error_output:
try:
self._run_cmd("/usr/bin/true", stderr=error_output)
except ZFSBackupError:
error_output.seek(0)
raise ZFSBackupError("Unable to connect to remote host: {}".format(error_output.read()))
# See if the target exists
with open("/dev/null", "w+") as devnull:
try:
self._run_cmd("/sbin/zfs", "list", "-H", self.target,
stdout=devnull, stderr=devnull, stdin=devnull)
except ZFSBackupError:
raise ZFSBackupError("Target {} does not exist on remote host".format(self.target))
return
def restore_handler(self, stream, **kwargs):
"""
Restore from a remote ZFS dataset (via ssh).
"""
if debug:
print("ssh restore_handler({}, {})".format(stream, kwargs), file=sys.stderr)
command = ["/sbin/zfs", "send", "-p"]
if self.recursive:
command.append("-R")
if "ResumeToken" in kwargs:
command.extend(["-t", kwargs["ResumeToken"]])
if "parent" in kwargs:
command.extend(["-I", kwargs["parent"]])
if "/" in self.source:
remote_ds = os.path.join(self.target, self.source.partition("/")[2])
else:
remote_ds = os.path.join(self.target, self.source)
command.append("{}@{}".format(remote_ds, kwargs["Name"]))
# If we have any transformative filters, we need to create them in order.
# Note that, as counterintuitive as it may seem, we use the backup_command for
# each filter on the remote side.
for filter in self.filters:
if filter.transformative and filter.backup_command:
commannd = command + ["|"] + filter.backup_command
command = self._build_command(*command)
if debug:
print("Remote restore command: " + " ".join(command), file=sys.stderr)
with tempfile.TemporaryFile() as error_output:
if debug:
print("In ssh restore_handler, stream = {}".format(stream), file=sys.stderr)
fobj = self._filter_restore(stream, error=error_output)
try:
CHECK_CALL(command, stdout=fobj, stderr=error_output)
except subprocess.CalledProcessError:
error_output.seek(0)
raise ZFSBackupError(error_output.read().rstrip())
return
def backup_handler(self, stream, **kwargs):
"""
Implement the replication.
"""
# First, we create the intervening dataset paths. See the base class' method.
full_path = self.target
with open("/dev/null", "w+") as devnull:
for d in self.source.split("/")[1:]:
full_path = os.path.join(full_path, d)
command = self._build_command("/sbin/zfs", "create", "-o", "readonly=on", full_path)
try:
CALL(command, stdout=devnull, stderr=devnull, stdin=devnull)
except subprocess.CalledProcessError:
pass
# If we have any transformative filters, we need to create them in reverse order.
command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
for filter in reversed(self.filters):
if filter.transformative and filter.restore_command:
command = filter.restore_command + ["|"] + command
command = self._build_command(*command)
if debug:
print("backup command = {}".format(command), file=sys.stderr)
with tempfile.TemporaryFile() as error_output:
try:
fobj = self._filter_backup(stream, error=error_output)
command_proc = POPEN(command, stdin=fobj, stderr=error_output)
command_proc.wait()
except subprocess.CalledProcessError, ZFSBackupError:
error_output.seek(0)
raise ZFSBackupError(error_output.read())
return
@property
def target_snapshots(self):
if not self._target_snapshots:
(src_pool, _, src_ds) = self.source.partition("/")
if src_ds:
target_path = "{}/{}".format(self.target, src_ds)
else:
target_path = "{}/{}".format(self.target, src_pool)
command = self._build_command("/sbin/zfs", "list", "-H", "-p",
"-o", "name,creation", "-r",
"-d", "1", "-t", "snapshot", "-s",
"creation", target_path)
snapshots = []
try:
output = CHECK_OUTPUT(command).split("\n")
for snapshot in output:
if not snapshot:
continue
(name, ctime) = snapshot.rstrip().split()
name = name.split('@')[1]
snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
except subprocess.CalledProcessError:
# We'll assume this is because there are no snapshots
pass
return snapshots
class ZFSBackupCount(ZFSBackup):
    """
    A backup "target" that simply counts the bytes it is handed.
    Useful for measuring how large a backup stream would be.
    """
    def __init__(self, source, recursive=False):
        super(ZFSBackupCount, self).__init__(source, "", recursive)
        self._count = 0

    def __repr__(self):
        return "{}(source={}, recursive={})".format(self.__class__.__name__,
                                                    self.source,
                                                    self.recursive)

    def validate(self):
        # Nothing to validate: there is no real destination.
        return

    def backup_handler(self, stream, **kwargs):
        """Consume the (filtered) stream, tallying its total size."""
        fobj = self._filter_backup(stream)
        chunk_size = 1024 * 1024
        chunk = fobj.read(chunk_size)
        while chunk:
            self._count += len(chunk)
            chunk = fobj.read(chunk_size)

    @property
    def target_snapshots(self):
        # A counter holds no snapshots.
        return []

    @property
    def count(self):
        """Total number of bytes seen so far."""
        return self._count
def parse_operation(args):
    """
    Determine which operation, and what options for it.
    Default is to just parse ["backup"].
    """
    import argparse

    def to_bool(s):
        return s.lower() in ("yes", "1", "true", "t", "y")

    parser = argparse.ArgumentParser(description="Operation and options")
    parser.register('type', 'bool', to_bool)

    ops = parser.add_subparsers(help='sub-operation help', dest='command')
    # Valid operations are backup, restore, list, verify, and delete,
    # although only backup and restore are currently implemented.
    ops.add_parser("backup", help="Backup command")
    ops.add_parser("restore", help='Restore command')
    verify_operation = ops.add_parser("verify", help='Verify command')
    verify_operation.add_argument("--all", action='store_true', dest='check_all',
                                  default=False,
                                  help='Check every backup for consistency')
    ops.add_parser('delete', help='Delete command')
    ops.add_parser("list", help='List command')

    # An empty argument list means "backup".
    return parser.parse_args(args or ["backup"])
def parse_arguments(args=None):
    """
    Parse the global command-line options plus the replication
    sub-command (zfs, counter, ssh, directory, s3).  Anything after the
    sub-command's own options is collected into `rest` for
    parse_operation().

    Exits (after printing usage) if no sub-command was given.
    """
    import argparse

    def to_bool(s):
        # Accept a handful of "truthy" spellings for 'bool'-typed options.
        return s.lower() in ("yes", "1", "true", "t", "y")

    parser = argparse.ArgumentParser(description='ZFS snapshot replicator')
    parser.register('type', 'bool', to_bool)
    parser.add_argument("--debug", dest='debug',
                        action='store_true', default=False,
                        help='Turn on debugging')
    parser.add_argument("--verbose", dest='verbose', action='store_true',
                        default=False, help='Be verbose')
    parser.add_argument('--recursive', '-R', dest='recursive',
                        action='store_true',
                        default=False,
                        help='Recursively replicate')
    parser.add_argument("--snapshot", "-S", "--dataset", "--pool",
                        dest='snapshot_name',
                        default=None,
                        help='Dataset/pool/snapshot to back up')
    parser.add_argument("--encrypted", "-E", dest='encrypted',
                        action='store_true', default=False,
                        help='Encrypt snapshots')
    parser.add_argument("--cipher", dest='cipher',
                        default='aes-256-cbc',
                        help='Encryption cipher to use')
    parser.add_argument('--password-file', dest='password_file',
                        default=None,
                        help='Password file for encryption')
    parser.add_argument("--compressed", "-C", dest='compressed',
                        action='store_true', default=False,
                        help='Compress snapshots')
    parser.add_argument('--pigz', action='store_true',
                        dest='use_pigz', default=False,
                        help='Use pigz to compress')
    incrementals = parser.add_mutually_exclusive_group()
    incrementals.add_argument("--iterate-incrementals", dest="iterate",
                              action='store_true', default=True)
    incrementals.add_argument("--no-iterate-incrementals", dest="iterate",
                              action='store_false')

    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
    # We have a sub parser for each type of replication
    zfs_parser = subparsers.add_parser('zfs',
                                       help='Replicate to local ZFS dataset')
    zfs_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    zfs_parser.add_argument("rest", nargs=argparse.REMAINDER)

    counter_parser = subparsers.add_parser('counter',
                                           help='Count replication bytes')
    counter_parser.add_argument("rest", nargs=argparse.REMAINDER)

    # ssh parser has a lot more options
    ssh_parser = subparsers.add_parser("ssh",
                                       help="Replicate to a remote ZFS server")
    ssh_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    ssh_parser.add_argument('--host', '-H', dest='remote_host',
                            required=True,
                            help='Remote hostname')
    ssh_parser.add_argument("--user", '-U', dest='remote_user',
                            help='Remote user (defaults to current user)')
    ssh_parser.add_argument("rest", nargs=argparse.REMAINDER)

    # Directory parser has only two options
    directory_parser = subparsers.add_parser("directory",
                                             help='Save snapshots to a directory')
    directory_parser.add_argument("--dest", "-D", dest='destination', required=True,
                                  help='Path to store snapshots')
    directory_parser.add_argument("--prefix", "-P", dest='prefix', default=None,
                                  help='Prefix to use when saving snapshots (defaults to hostname)')
    directory_parser.add_argument("rest", nargs=argparse.REMAINDER)

    # S3 parser has many options
    s3_parser = subparsers.add_parser("s3", help="Save snapshots to an S3 server")
    s3_parser.add_argument("--bucket", dest='bucket_name', required=True,
                           help='Name of bucket in which to save data')
    s3_parser.add_argument("--prefix", dest='prefix', default=None,
                           help='Prefix (inside of bucket); defaults to host name)')
    s3_parser.add_argument("--key", "--s3-id", dest='s3_key', required=True,
                           help='S3 Access ID')
    s3_parser.add_argument("--secret", dest='s3_secret', required=True,
                           help='S3 Secret Key')
    s3_parser.add_argument('--server', dest="s3_server", default=None,
                           help='S3-compatible server')
    glacier = s3_parser.add_mutually_exclusive_group()
    glacier.add_argument("--glacier", dest='glacier', action='store_true', default=True)
    glacier.add_argument("--no-glacier", dest='glacier', action='store_false')
    # BUG FIX: removed a leftover, misspelled "--glacer" option that
    # duplicated the --glacier/--no-glacier group above with a broken
    # type=bool conversion (bool("False") is True).
    s3_parser.add_argument('--region', dest='region', default=None,
                           help='S3 Region to use')
    s3_parser.add_argument("rest", nargs=argparse.REMAINDER)

    rv = parser.parse_args(args)
    if rv.subcommand is None:
        parser.print_help()
        sys.exit(1)
    return rv
def main():
    """
    Entry point: parse arguments, construct the requested replicator,
    attach filters, and run the requested operation (backup, restore,
    verify, or list).
    """
    global debug, verbose
    args = parse_arguments()
    # Everything after the sub-command's options is the operation spec.
    operation = parse_operation(args.rest)

    # Start doing some sanity checks
    # Due to the complexity of encryption, we need to handle
    # some cases that (as far as I can tell) argparse doesn't.
    if args.encrypted:
        if args.password_file is None:
            print("Password file is required when encrypting backups", file=sys.stderr)
            sys.exit(1)
        if args.subcommand == "ssh":
            print("Encrypting while using ssh replication is not possible", file=sys.stderr)
            sys.exit(1)

    verbose = args.verbose
    debug = args.debug
    if debug:
        # Debug implies verbose.
        verbose = True

    if debug:
        print("args = {}".format(args), file=sys.stderr)

    # "pool/ds@snap" -> ("pool/ds", "snap"); no '@' means no snapshot name.
    try:
        (dataset, snapname) = args.snapshot_name.split('@')
    except ValueError:
        dataset = args.snapshot_name
        snapname = None

    # Pick the replicator class based on the sub-command.
    if args.subcommand is None:
        print("No replication type method. Valid types are zfs, counter", file=sys.stderr)
        sys.exit(1)
    elif args.subcommand == 'counter':
        backup = ZFSBackupCount(dataset, recursive=args.recursive)
    elif args.subcommand == 'zfs':
        backup = ZFSBackup(dataset, args.destination, recursive=args.recursive)
    elif args.subcommand == 'ssh':
        backup = ZFSBackupSSH(dataset, args.destination, args.remote_host,
                              remote_user=args.remote_user,
                              recursive=args.recursive)
    elif args.subcommand == 'directory':
        backup = ZFSBackupDirectory(dataset, args.destination, recursive=args.recursive,
                                    prefix=args.prefix)
    elif args.subcommand == 's3':
        backup = ZFSBackupS3(dataset, args.bucket_name, args.s3_key, args.s3_secret,
                             recursive=args.recursive, server=args.s3_server,
                             prefix=args.prefix, region=args.region, glacier=args.glacier)
    else:
        print("Unknown replicator {}".format(args.subcommand), file=sys.stderr)
        sys.exit(1)

    # When verbose and compressing, wrap the compressor with two byte
    # counters so we can report the compression ratio at the end.
    before_count = None; after_count = None
    if args.compressed:
        if verbose:
            before_count = ZFSBackupFilterCounter(name="before")
            backup.AddFilter(before_count)
        backup.AddFilter(ZFSBackupFilterCompressed(pigz=args.use_pigz))
        if verbose:
            after_count = ZFSBackupFilterCounter(name="after")
            backup.AddFilter(after_count)

    if args.encrypted:
        encrypted_filter = ZFSBackupFilterEncrypted(cipher=args.cipher,
                                                    password_file=args.password_file)
        backup.AddFilter(encrypted_filter)

    if operation.command == "backup":
        # Progress callback used only in verbose mode.
        def handler(**kwargs):
            stage = kwargs.get("stage", "")
            if stage == "start":
                print("Starting backup of snapshot {}@{}".format(dataset, kwargs.get("Name")))
            elif stage == "complete":
                print("Completed backup of snapshot {}@{}".format(dataset, kwargs.get("Name")))
        if verbose:
            print("Starting backup of {}".format(dataset))
        try:
            backup.backup(snapname=snapname,
                          snapshot_handler=handler if verbose else None,
                          each_snapshot=args.iterate)
            if args.verbose:
                print("Done with backup");
        except ZFSBackupError as e:
            # NOTE(review): e.message is a Python-2 idiom; on Python 3 this
            # would raise AttributeError unless ZFSBackupError sets it — confirm.
            print("Backup failed: {}".format(e.message), file=sys.stderr)
    elif operation.command == "restore":
        # Progress callback used only in verbose mode.
        def handler(**kwargs):
            stage = kwargs.get("stage", "")
            if stage == "start":
                print("Starting restore of snapshot {}@{}".format(dataset, kwargs.get("Name")))
            elif stage == "complete":
                print("Completed restore of snapshot {}@{}".format(dataset, kwargs.get("Name")))
        if verbose:
            print("Starting restore of {}".format(dataset))
        try:
            backup.restore(snapname=snapname,
                           snapshot_handler=handler if verbose else None)
            if verbose:
                print("Done with restore")
        except ZFSBackupError as e:
            # NOTE(review): see e.message note above.
            print("Restore failed: {}".format(e.message), file=sys.stderr)
    elif operation.command == 'verify':
        problems = backup.Check(check_all=operation.check_all)
        if problems:
            print(problems)
        elif verbose:
            print("No problems")
    elif operation.command == "list":
        # List snapshots
        if debug:
            print("Listing snapshots", file=sys.stderr)
        for snapshot in backup.target_snapshots:
            output = "Snapshot {}@{}".format(dataset, snapshot["Name"])
            if verbose:
                # In verbose mode append creation time, incremental parent,
                # filters, chunks, and any other recorded metadata.
                ctime = time.localtime(snapshot.get("CreationTime", 0))
                output += "\n\tCreated {}".format(time.strftime("%a, %d %b %Y %H:%M:%S %z", ctime))
                if snapshot.get("incremental", False):
                    output += "\n\tincremental parent={}".format(snapshot.get("parent", "<unknown>"))
                filters = snapshot.get("filters", [])
                for filter in filters:
                    output += "\n\tFilter: {}".format(" ".join(filter))
                if "chunks" in snapshot:
                    output += "\n\tChunks:\n"
                    for chunk in snapshot["chunks"]:
                        output += "\t\t{}".format(chunk)
                for key in snapshot.keys():
                    # Skip the keys already reported above.
                    if key in ("Name", "CreationTime", "incremental",
                               "parent", "chunks", "filters"):
                        continue
                    output += "\n\t{} = {}".format(key, snapshot[key])
            print(output)

    # Post-run reporting for backup/restore: byte count and compression ratio.
    if operation.command in ("backup", "restore"):
        if isinstance(backup, ZFSBackupCount):
            output = "{} bytes".format(backup.count)
            print(output)
        if before_count and before_count.count and after_count:
            pct = (after_count.count * 100.0) / before_count.count
            output = "Compressed {} to {} bytes ({:.2f}%)".format(before_count.count,
                                                                  after_count.count,
                                                                  pct)
            print(output)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
#
# Copyright (c) 2017, Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: ai:ts=4:sw=4
import sys
from os import listdir
import os, fnmatch
import re
import yaml
import argparse
import collections
from devicetree import parse_file
from extract.globals import *
class Loader(yaml.Loader):
    """
    yaml.Loader subclass adding '!include' / '!import' constructors.

    Included files are resolved relative to the including file, with a
    fallback search (os.walk) under the parent of the including file's
    directory for bindings stored elsewhere.
    """
    def __init__(self, stream):
        # Remember where the top-level file lives so includes can be
        # resolved relative to it.
        self._root = os.path.realpath(stream.name)
        super(Loader, self).__init__(stream)
        Loader.add_constructor('!include', Loader.include)
        Loader.add_constructor('!import', Loader.include)

    def include(self, node):
        """
        Handle an !include/!import node: a scalar includes one file, a
        sequence concatenates several, a mapping includes one per key.
        """
        if isinstance(node, yaml.ScalarNode):
            return self.extractFile(self.construct_scalar(node))
        elif isinstance(node, yaml.SequenceNode):
            result = []
            for filename in self.construct_sequence(node):
                result += self.extractFile(filename)
            return result
        elif isinstance(node, yaml.MappingNode):
            result = {}
            # BUG FIX: dict.iteritems() does not exist on Python 3
            # (this script's shebang is python3); use items().
            for k, v in self.construct_mapping(node).items():
                result[k] = self.extractFile(v)
            return result
        else:
            print("Error:: unrecognised node type in !include statement")
            raise yaml.constructor.ConstructorError

    def extractFile(self, filename):
        """Locate `filename` and load it with this same Loader class."""
        filepath = os.path.join(os.path.dirname(self._root), filename)
        if not os.path.isfile(filepath):
            # we need to look in bindings/* directories
            # take path and back up 1 directory and parse in '/bindings/*'
            filepath = os.path.dirname(os.path.dirname(self._root))
            for root, dirnames, files in os.walk(filepath):
                if fnmatch.filter(files, filename):
                    filepath = os.path.join(root, filename)
        # NOTE(review): if the walk never matches, filepath is still a
        # directory here and open() will fail — confirm intended.
        with open(filepath, 'r') as f:
            return yaml.load(f, Loader)
def find_parent_irq_node(node_address):
    """
    Return the node (from the global phandle map) acting as interrupt
    parent for node_address.

    Walks the path components from the root down, keeping the last
    'interrupt-parent' property seen, so the nearest ancestor wins.
    """
    address = ''
    for comp in node_address.split('/')[1:]:
        address += '/' + comp
        if 'interrupt-parent' in reduced[address]['props']:
            interrupt_parent = reduced[address]['props'].get(
                'interrupt-parent')
    # NOTE(review): if no ancestor defines 'interrupt-parent' this raises
    # an unbound-local error — callers presumably guarantee one exists.
    return phandles[interrupt_parent]
def extract_interrupts(node_address, yaml, prop, names, defs, def_label):
    """
    Generate #define entries for an 'interrupts' property.

    The flat cell list is consumed in groups of the interrupt parent's
    '#interrupt-cells'; each cell becomes a fully-qualified define, with
    optional name- and alias-based defines pointing at it.  Results are
    stored into `defs` via insert_defs().
    """
    node = reduced[node_address]

    # Normalize the property value to a list of cells (a single-cell
    # property is not iterable, hence the bare except).
    try:
        props = list(node['props'].get(prop))
    except:
        props = [node['props'].get(prop)]

    irq_parent = find_parent_irq_node(node_address)

    l_base = def_label.split('/')
    index = 0

    # Consume one interrupt specifier (of '#interrupt-cells' cells) per pass.
    while props:
        prop_def = {}
        prop_alias = {}
        l_idx = [str(index)]

        # Optional human-readable name for this specifier (interrupt-names).
        try:
            name = [convert_string_to_label(names.pop(0))]
        except:
            name = []

        cell_yaml = yaml[get_compat(irq_parent)]
        l_cell_prefix = [yaml[get_compat(irq_parent)].get(
            'cell_string', []).upper()]

        for i in range(reduced[irq_parent]['props']['#interrupt-cells']):
            l_cell_name = [cell_yaml['#cells'][i].upper()]
            # Avoid doubling the prefix when the cell is named like it.
            if l_cell_name == l_cell_prefix:
                l_cell_name = []

            l_fqn = '_'.join(l_base + l_cell_prefix + l_idx + l_cell_name)
            prop_def[l_fqn] = props.pop(0)
            if len(name):
                alias_list = l_base + l_cell_prefix + name + l_cell_name
                prop_alias['_'.join(alias_list)] = l_fqn

            # Also emit defines for any dts aliases of this node.
            # (Note: the loop variable `i` shadows the cell index above.)
            if node_address in aliases:
                for i in aliases[node_address]:
                    alias_label = convert_string_to_label(i)
                    alias_list = [alias_label] + l_cell_prefix + name + l_cell_name
                    prop_alias['_'.join(alias_list)] = l_fqn

        index += 1
        insert_defs(node_address, defs, prop_def, prop_alias)
def extract_reg_prop(node_address, names, defs, def_label, div, post_label):
    """
    Generate base-address and size #define entries from a 'reg' property.

    Cell counts come from the nearest ancestor's '#address-cells' /
    '#size-cells' (falling back to the root's).  `div` divides the size
    (e.g. to express it in KB); `post_label` overrides the default
    "BASE_ADDRESS" label component.
    """
    reg = reduced[node_address]['props']['reg']
    if type(reg) is not list: reg = [ reg ]
    props = list(reg)

    # Start from the root's cell counts, then let each ancestor on the
    # path override them.
    address_cells = reduced['/']['props'].get('#address-cells')
    size_cells = reduced['/']['props'].get('#size-cells')
    address = ''
    for comp in node_address.split('/')[1:-1]:
        address += '/' + comp
        address_cells = reduced[address]['props'].get(
            '#address-cells', address_cells)
        size_cells = reduced[address]['props'].get('#size-cells', size_cells)

    if post_label is None:
        post_label = "BASE_ADDRESS"

    index = 0
    l_base = def_label.split('/')
    l_addr = [convert_string_to_label(post_label)]
    l_size = ["SIZE"]

    # Consume one (address, size) pair per pass.
    while props:
        prop_def = {}
        prop_alias = {}
        addr = 0
        size = 0
        # Check if defines should be indexed (_0, _1)
        if index == 0 and len(props) < 3:
            # 1 element (len 2) or no element (len 0) in props
            l_idx = []
        else:
            l_idx = [str(index)]

        # Optional name from reg-names (bare except: names may be empty).
        try:
            name = [names.pop(0).upper()]
        except:
            name = []

        # NOTE(review): cells are combined least-significant-first here;
        # devicetree stores cells most-significant-first — confirm this is
        # intended for multi-cell addresses/sizes.
        for x in range(address_cells):
            addr += props.pop(0) << (32 * x)
        for x in range(size_cells):
            size += props.pop(0) << (32 * x)

        l_addr_fqn = '_'.join(l_base + l_addr + l_idx)
        l_size_fqn = '_'.join(l_base + l_size + l_idx)
        if address_cells:
            prop_def[l_addr_fqn] = hex(addr)
        if size_cells:
            prop_def[l_size_fqn] = int(size / div)
        if len(name):
            if address_cells:
                prop_alias['_'.join(l_base + name + l_addr)] = l_addr_fqn
            if size_cells:
                prop_alias['_'.join(l_base + name + l_size)] = l_size_fqn

        # generate defs for node aliases
        if node_address in aliases:
            for i in aliases[node_address]:
                alias_label = convert_string_to_label(i)
                alias_addr = [alias_label] + l_addr
                alias_size = [alias_label] + l_size
                prop_alias['_'.join(alias_addr)] = '_'.join(l_base + l_addr)
                prop_alias['_'.join(alias_size)] = '_'.join(l_base + l_size)

        insert_defs(node_address, defs, prop_def, prop_alias)

        # increment index for definition creation
        index += 1
def extract_controller(node_address, prop, prop_values, index, prefix, defs, def_label):
    """
    Generate "<...>_CONTROLLER"-style #define entries naming the
    controller (by its 'label' property) behind a phandle-list property
    such as clocks/gpios.  Recurses to handle each list element.
    """
    prop_def = {}
    prop_alias = {}

    # get controller node (referenced via phandle)
    cell_parent = phandles[prop_values[0]]

    # The controller's '#...-cells' property tells us how many cells
    # each element of the phandle list carries.
    for k in reduced[cell_parent]['props'].keys():
        if k[0] == '#' and '-cells' in k:
            num_cells = reduced[cell_parent]['props'].get(k)

    # get controller node (referenced via phandle)
    # (Note: duplicate of the lookup above; kept as-is.)
    cell_parent = phandles[prop_values[0]]
    try:
        l_cell = reduced[cell_parent]['props'].get('label')
    except KeyError:
        l_cell = None

    if l_cell is not None:
        l_base = def_label.split('/')
        l_base += prefix

        # Check if defines should be indexed (_0, _1)
        if index == 0 and len(prop_values) < (num_cells + 2):
            # 0 or 1 element in prop_values
            # ( ie len < num_cells + phandle + 1 )
            l_idx = []
        else:
            l_idx = [str(index)]

        # Emit one define per 'controller'-type property of the parent.
        for k in reduced[cell_parent]['props']:
            if 'controller' in k:
                l_cellname = convert_string_to_label(str(k))
                label = l_base + [l_cellname] + l_idx
                prop_def['_'.join(label)] = "\"" + l_cell + "\""

                #generate defs also if node is referenced as an alias in dts
                if node_address in aliases:
                    for i in aliases[node_address]:
                        alias_label = \
                            convert_string_to_label(i)
                        alias = [alias_label] + label[1:]
                        prop_alias['_'.join(alias)] = '_'.join(label)

                insert_defs(node_address, defs, prop_def, prop_alias)

    # prop off phandle + num_cells to get to next list item
    prop_values = prop_values[num_cells+1:]

    # recurse if we have anything left
    if len(prop_values):
        extract_controller(node_address, prop, prop_values, index +1, prefix, defs,
                           def_label)
def extract_cells(node_address, yaml, prop, prop_values, names, index, prefix, defs,
                  def_label):
    """
    Generate #define entries for the cells of one element of a
    phandle+cells property (clocks, gpios, ...), using the controller's
    yaml '#cells' names for labels.  Recurses for the remaining elements.
    """
    # The element starts with the controller phandle; pop it off.
    cell_parent = phandles[prop_values.pop(0)]

    try:
        cell_yaml = yaml[get_compat(cell_parent)]
    except:
        raise Exception(
            "Could not find yaml description for " +
            reduced[cell_parent]['name'])

    # Optional element name from the matching '-names' property.
    try:
        name = names.pop(0).upper()
    except:
        name = []

    # Get number of cells per element of current property
    for k in reduced[cell_parent]['props'].keys():
        if k[0] == '#' and '-cells' in k:
            num_cells = reduced[cell_parent]['props'].get(k)

    # Generate label for each field of the property element
    l_cell = [str(cell_yaml.get('cell_string', ''))]
    l_base = def_label.split('/')
    l_base += prefix

    # Check if #define should be indexed (_0, _1, ...)
    if index == 0 and len(prop_values) < (num_cells + 2):
        # Less than 2 elements in prop_values (ie len < num_cells + phandle + 1)
        # Indexing is not needed
        l_idx = []
    else:
        l_idx = [str(index)]

    prop_def = {}
    prop_alias = {}

    # Generate label for each field of the property element
    for i in range(num_cells):
        l_cellname = [str(cell_yaml['#cells'][i]).upper()]
        # Avoid doubling the prefix when the cell shares its name.
        if l_cell == l_cellname:
            label = l_base + l_cell + l_idx
        else:
            label = l_base + l_cell + l_cellname + l_idx
        # NOTE(review): `name` is a str when a name was popped, so
        # `l_base + name` (list + str) looks like it would raise — confirm
        # whether named elements ever reach this path.
        label_name = l_base + name + l_cellname
        prop_def['_'.join(label)] = prop_values.pop(0)
        if len(name):
            prop_alias['_'.join(label_name)] = '_'.join(label)

        # generate defs for node aliases
        # (Note: the loop variable `i` shadows the cell index above.)
        if node_address in aliases:
            for i in aliases[node_address]:
                alias_label = convert_string_to_label(i)
                alias = [alias_label] + label[1:]
                prop_alias['_'.join(alias)] = '_'.join(label)

        insert_defs(node_address, defs, prop_def, prop_alias)

    # recurse if we have anything left
    if len(prop_values):
        extract_cells(node_address, yaml, prop, prop_values, names,
                      index + 1, prefix, defs, def_label)
def extract_pinctrl(node_address, yaml, pinconf, names, index, defs,
                    def_label):
    """
    Generate pin/function #define entries for a 'pinctrl-N' property.

    For every pinmux handle in `pinconf`, each subnode found underneath
    that handle contributes one (pin, function) define pair per property,
    labelled using the controller yaml's '#cells' names.
    """
    # Normalize to a list of phandles.
    prop_list = []
    if not isinstance(pinconf, list):
        prop_list.append(pinconf)
    else:
        prop_list = list(pinconf)

    def_prefix = def_label.split('_')

    prop_def = {}
    for p in prop_list:
        pin_node_address = phandles[p]
        pin_subnode = '/'.join(pin_node_address.split('/')[-1:])
        cell_yaml = yaml[get_compat(pin_node_address)]
        cell_prefix = cell_yaml.get('cell_string', None)
        post_fix = []

        if cell_prefix is not None:
            post_fix.append(cell_prefix)

        # Scan every known node for children of the pinmux handle.
        for subnode in reduced.keys():
            if pin_subnode in subnode and pin_node_address != subnode:
                # found a subnode underneath the pinmux handle
                pin_label = def_prefix + post_fix + subnode.split('/')[-2:]

                for i, cells in enumerate(reduced[subnode]['props']):
                    # First '#cells' entry names the pin define, second
                    # names the function define.
                    key_label = list(pin_label) + \
                        [cell_yaml['#cells'][0]] + [str(i)]
                    func_label = key_label[:-2] + \
                        [cell_yaml['#cells'][1]] + [str(i)]
                    key_label = convert_string_to_label('_'.join(key_label))
                    func_label = convert_string_to_label('_'.join(func_label))

                    prop_def[key_label] = cells
                    prop_def[func_label] = \
                        reduced[subnode]['props'][cells]

    insert_defs(node_address, defs, prop_def, {})
def extract_single(node_address, yaml, prop, key, prefix, defs, def_label):
    """
    Generate a #define for a simple (non-cell) property value.

    A list value produces one indexed define per element; a scalar
    produces a single define plus alias defines for any dts aliases of
    the node.  The sentinel value 'parent-label' is replaced by the
    parent node's 'label' property.  String values are emitted quoted.
    """
    prop_def = {}
    prop_alias = {}

    if isinstance(prop, list):
        for i, p in enumerate(prop):
            k = convert_string_to_label(key)
            label = def_label + '_' + k
            if isinstance(p, str):
                p = "\"" + p + "\""
            prop_def[label + '_' + str(i)] = p
    else:
        k = convert_string_to_label(key)
        label = def_label + '_' + k
        if prop == 'parent-label':
            prop = find_parent_prop(node_address, 'label')
        if isinstance(prop, str):
            prop = "\"" + prop + "\""
        prop_def[label] = prop

        # generate defs for node aliases
        if node_address in aliases:
            for i in aliases[node_address]:
                alias_label = convert_string_to_label(i)
                alias = alias_label + '_' + k
                prop_alias[alias] = label

    insert_defs(node_address, defs, prop_def, prop_alias)
def extract_string_prop(node_address, yaml, key, label, defs):
    """
    Insert a single define into `defs` for node_address:
    label -> the quoted value of the node's `key` string property.

    Unlike the other extractors this writes into `defs` directly
    (no aliases are generated, so insert_defs() is not needed).
    """
    # Removed an unused `convert_string_to_label(key)` local and the
    # intermediate `node` variable from the original.
    prop = reduced[node_address]['props'][key]
    prop_def = {label: "\"" + prop + "\""}

    if node_address in defs:
        defs[node_address].update(prop_def)
    else:
        defs[node_address] = prop_def
def extract_property(node_compat, yaml, node_address, prop, prop_val, names,
                     prefix, defs, label_override):
    """
    Dispatch a single devicetree property to the appropriate extractor
    (reg / interrupts / pinctrl / clocks / gpios / generic single value),
    building the define label from the node's yaml description and, for
    bus children, the parent controller's label.
    """
    if 'base_label' in yaml[node_compat]:
        def_label = yaml[node_compat].get('base_label')
    else:
        def_label = get_node_label(node_compat, node_address)

    if 'parent' in yaml[node_compat]:
        if 'bus' in yaml[node_compat]['parent']:
            # get parent label
            parent_address = ''
            for comp in node_address.split('/')[1:-1]:
                parent_address += '/' + comp

            #check parent has matching child bus value
            try:
                parent_yaml = \
                    yaml[reduced[parent_address]['props']['compatible']]
                parent_bus = parent_yaml['child']['bus']
            except (KeyError, TypeError) as e:
                raise Exception(str(node_address) + " defines parent " +
                                str(parent_address) + " as bus master but " +
                                str(parent_address) + " not configured as bus master " +
                                "in yaml description")

            if parent_bus != yaml[node_compat]['parent']['bus']:
                bus_value = yaml[node_compat]['parent']['bus']
                raise Exception(str(node_address) + " defines parent " +
                                str(parent_address) + " as " + bus_value +
                                " bus master but " + str(parent_address) +
                                " configured as " + str(parent_bus) +
                                " bus master")

            # Generate alias definition if parent has any alias
            if parent_address in aliases:
                for i in aliases[parent_address]:
                    node_alias = i + '_' + def_label
                    aliases[node_address].append(node_alias)

            # Use parent label to generate label
            parent_label = get_node_label(
                find_parent_prop(node_address,'compatible') , parent_address)
            def_label = parent_label + '_' + def_label

            # Generate bus-name define
            extract_single(node_address, yaml, 'parent-label',
                           'bus-name', prefix, defs, def_label)

    if label_override is not None:
        def_label += '_' + label_override

    if prop == 'reg':
        extract_reg_prop(node_address, names, defs, def_label,
                         1, prop_val.get('label', None))
    # BUG FIX: the second comparison was misspelled 'interupts-extended'
    # and therefore never matched the real property name.
    elif prop == 'interrupts' or prop == 'interrupts-extended':
        extract_interrupts(node_address, yaml, prop, names, defs, def_label)
    elif 'pinctrl-' in prop:
        p_index = int(prop.split('-')[1])
        extract_pinctrl(node_address, yaml,
                        reduced[node_address]['props'][prop],
                        names[p_index], p_index, defs, def_label)
    elif 'clocks' in prop or 'gpios' in prop:
        # Normalize to a list (a single-cell value is not iterable).
        try:
            prop_values = list(reduced[node_address]['props'].get(prop))
        except:
            prop_values = reduced[node_address]['props'].get(prop)

        extract_controller(node_address, prop, prop_values, 0, prefix, defs,
                           def_label)
        extract_cells(node_address, yaml, prop, prop_values,
                      names, 0, prefix, defs, def_label)
    else:
        extract_single(node_address, yaml,
                       reduced[node_address]['props'][prop], prop,
                       prefix, defs, def_label)
def extract_node_include_info(reduced, root_node_address, sub_node_address,
                              yaml, defs, structs, y_sub):
    """
    Walk the yaml 'properties' description for the node's compatible and
    extract every property marked for generation, recursing into child
    nodes for nested 'properties' sections.
    """
    node = reduced[sub_node_address]
    node_compat = get_compat(root_node_address)
    label_override = None

    if node_compat not in yaml.keys():
        return {}, {}

    # Use the sub-section description when recursing, else the top one.
    if y_sub is None:
        y_node = yaml[node_compat]
    else:
        y_node = y_sub

    # Optionally fold the node's 'label' property into the define names.
    if yaml[node_compat].get('use-property-label', False):
        try:
            label = y_node['properties']['label']
            label_override = convert_string_to_label(node['props']['label'])
        except KeyError:
            pass

    # check to see if we need to process the properties
    for k, v in y_node['properties'].items():
        if 'properties' in v:
            # Nested description: recurse into every child of the root node.
            for c in reduced:
                if root_node_address + '/' in c:
                    extract_node_include_info(
                        reduced, root_node_address, c, yaml, defs, structs,
                        v)
        if 'generation' in v:

            prefix = []
            if v.get('use-name-prefix') is not None:
                prefix = [convert_string_to_label(k)]

            for c in node['props'].keys():
                # '-names' companion properties are consumed alongside the
                # property they name, not extracted on their own.
                if c.endswith("-names"):
                    pass
                # The yaml key is treated as a regex anchored at the end.
                if re.match(k + '$', c):
                    if 'pinctrl-' in c:
                        names = node['props'].get('pinctrl-names', [])
                    else:
                        # e.g. 'clocks' -> 'clock-names', else 'clocks-names'.
                        names = node['props'].get(c[:-1] + '-names', [])
                        if not names:
                            names = node['props'].get(c + '-names', [])
                    if not isinstance(names, list):
                        names = [names]
                    extract_property(
                        node_compat, yaml, sub_node_address, c, v, names,
                        prefix, defs, label_override)
def dict_merge(dct, merge_dct):
    # from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
    """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.
    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    # BUG FIX: the ABC aliases in `collections` (collections.Mapping) were
    # removed in Python 3.10; they live in collections.abc.
    from collections.abc import Mapping
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(v, Mapping)):
            dict_merge(dct[k], v)
        else:
            dct[k] = v
def yaml_traverse_inherited(node):
    """ Recursive overload procedure inside ``node``
    ``inherits`` section is searched for and used as node base when found.
    Base values are then overloaded by node values
    Additionally, 'id' key of 'inherited' dict is converted to 'node_type'
    :param node:
    :return: node
    """
    if 'inherits' in node.keys():
        # Rename the base's 'id' key to 'node_type' before merging.
        if 'id' in node['inherits'].keys():
            node['inherits']['node_type'] = node['inherits']['id']
            node['inherits'].pop('id')
        # Resolve the base's own inheritance chain first (deepest wins last).
        if 'inherits' in node['inherits'].keys():
            node['inherits'] = yaml_traverse_inherited(node['inherits'])
        # Overlay this node's values onto the (resolved) base, then adopt
        # the merged base as the node and drop the marker key.
        dict_merge(node['inherits'], node)
        node = node['inherits']
        node.pop('inherits')
    return node
def yaml_collapse(yaml_list):
    """
    Return a shallow copy of yaml_list with each node's 'inherits'
    chain folded into the node itself.
    """
    collapsed = dict(yaml_list)
    for key in collapsed:
        collapsed[key] = yaml_traverse_inherited(collapsed[key])
    return collapsed
def get_key_value(k, v, tabstop):
    """Format one "#define <k><tabs><v>" line, padding the value out to
    tab stop ``tabstop`` (8-column tab stops)."""
    label = "#define " + k
    # number of tab stops the label itself occupies
    if len(label) % 8:
        used_stops = (len(label) + 7) >> 3
    else:
        used_stops = (len(label) >> 3) + 1
    padding = '\t' * (tabstop - used_stops + 1)
    return label + padding + str(v) + '\n'
def output_keyvalue_lines(fd, defs):
    """Write *defs* to *fd* in flat key=value (config-fragment) form,
    one commented section per node."""
    node_keys = sorted(defs.keys())
    for node in node_keys:
        # section header: last path component of the node address
        fd.write('# ' + node.split('/')[-1])
        fd.write("\n")
        prop_keys = sorted(defs[node].keys())
        for prop in prop_keys:
            if prop == 'aliases':
                # aliases map alias-name -> canonical define name; emit
                # the alias with the canonical define's value
                for entry in sorted(defs[node][prop]):
                    a = defs[node][prop].get(entry)
                    fd.write("%s=%s\n" % (entry, defs[node].get(a)))
            else:
                fd.write("%s=%s\n" % (prop, defs[node].get(prop)))
        fd.write("\n")
def generate_keyvalue_file(defs, kv_file):
    """Create *kv_file* and dump *defs* into it as key=value lines."""
    with open(kv_file, "w") as out_fd:
        output_keyvalue_lines(out_fd, defs)
def output_include_lines(fd, defs, fixups):
    """Write *defs* to *fd* as a C header of #define lines, then append
    the contents of any *fixups* files verbatim.

    Reads the board's root 'compatible' string for the banner from the
    module-global ``reduced`` tree.
    """
    compatible = reduced['/']['props']['compatible'][0]
    fd.write("/**************************************************\n")
    fd.write(" * Generated include file for " + compatible)
    fd.write("\n")
    fd.write(" * DO NOT MODIFY\n")
    fd.write(" */\n")
    fd.write("\n")
    fd.write("#ifndef _DEVICE_TREE_BOARD_H" + "\n")
    fd.write("#define _DEVICE_TREE_BOARD_H" + "\n")
    fd.write("\n")
    node_keys = sorted(defs.keys())
    for node in node_keys:
        fd.write('/* ' + node.split('/')[-1] + ' */')
        fd.write("\n")
        max_dict_key = lambda d: max(len(k) for k in d.keys())
        maxlength = 0
        # column width is driven by the longest define name (aliases included)
        if defs[node].get('aliases'):
            maxlength = max_dict_key(defs[node]['aliases'])
        maxlength = max(maxlength, max_dict_key(defs[node])) + len('#define ')
        if maxlength % 8:
            maxtabstop = (maxlength + 7) >> 3
        else:
            maxtabstop = (maxlength >> 3) + 1
        # keep at least a 2-column gap between name and value
        if (maxtabstop * 8 - maxlength) <= 2:
            maxtabstop += 1
        prop_keys = sorted(defs[node].keys())
        for prop in prop_keys:
            if prop == 'aliases':
                # alias defines carry the canonical define's value
                for entry in sorted(defs[node][prop]):
                    a = defs[node][prop].get(entry)
                    fd.write(get_key_value(entry, a, maxtabstop))
            else:
                fd.write(get_key_value(prop, defs[node].get(prop), maxtabstop))
        fd.write("\n")
    if fixups:
        for fixup in fixups:
            if os.path.exists(fixup):
                fd.write("\n")
                fd.write(
                    "/* Following definitions fixup the generated include */\n")
                try:
                    with open(fixup, "r") as fixup_fd:
                        for line in fixup_fd.readlines():
                            fd.write(line)
                        fd.write("\n")
                except:
                    # NOTE(review): existence was checked just above, so this
                    # message is misleading for other I/O errors - confirm
                    raise Exception(
                        "Input file " + os.path.abspath(fixup) +
                        " does not exist.")
    fd.write("#endif\n")
def generate_include_file(defs, inc_file, fixups):
    """Create *inc_file* and emit *defs* plus fixup snippets as C #defines."""
    with open(inc_file, "w") as header_fd:
        output_include_lines(header_fd, defs, fixups)
def load_and_parse_dts(dts_file):
    """Parse the DTS file at *dts_file* and return the device-tree dict."""
    with open(dts_file, "r") as dts_fd:
        return parse_file(dts_fd)
def load_yaml_descriptions(dts, yaml_dir):
    """Scan *yaml_dir* for YAML binding files whose 'constraint' matches a
    compatible present in *dts*, load them, and collapse their 'inherits'
    chains.

    :param dts: parsed device tree (as returned by load_and_parse_dts)
    :param yaml_dir: root directory walked for '*.yaml' binding files
    :return: dict mapping compatible string -> collapsed yaml description
    :raises Exception: if no matching YAML description is found
    """
    compatibles = get_all_compatibles(dts['/'], '/', {})
    # find unique set of compatibles across all active nodes
    s = set()
    for k, v in compatibles.items():
        if isinstance(v, list):
            s.update(v)
        else:
            s.add(v)
    # scan YAML files and find the ones we are interested in
    yaml_files = []
    for root, dirnames, filenames in os.walk(yaml_dir):
        for filename in fnmatch.filter(filenames, '*.yaml'):
            yaml_files.append(os.path.join(root, filename))
    # Raw string: '\s' is an invalid escape in a plain literal. Require the
    # ':' (the original 'constraint:*' also matched a bare 'constraint',
    # which would crash the split(':')[1] below).
    constraint_re = re.compile(r'^\s+constraint:')
    yaml_list = {}
    file_load_list = set()
    for file in yaml_files:
        # open via 'with' so the scan pass does not leak file handles
        with open(file, 'r') as scan_fd:
            for line in scan_fd:
                if constraint_re.search(line):
                    c = line.split(':')[1].strip()
                    c = c.strip('"')
                    if c in s and file not in file_load_list:
                        file_load_list.add(file)
                        with open(file, 'r') as yf:
                            yaml_list[c] = yaml.load(yf, Loader)
    if yaml_list == {}:
        raise Exception("Missing YAML information. Check YAML sources")
    # collapse the yaml inherited information
    yaml_list = yaml_collapse(yaml_list)
    return yaml_list
def lookup_defs(defs, node, key):
    """Look up *key* for *node* in *defs*, resolving through the node's
    alias table when one exists.

    :return: the define's value, or None if the node or key is unknown
    """
    if node not in defs:
        return None
    # a node entry may have no 'aliases' table at all; use .get() so the
    # lookup does not raise KeyError in that case
    aliases = defs[node].get('aliases', {})
    if key in aliases:
        key = aliases[key]
    return defs[node].get(key, None)
def generate_node_definitions(yaml_list):
    """Walk the module-global ``reduced`` node tree and build the dict of
    #define entries for every node whose compatible has a yaml binding.

    :param yaml_list: dict of compatible -> collapsed yaml description
    :return: defs dict keyed by node address
    :raises Exception: if nothing at all could be extracted
    """
    defs = {}
    structs = {}
    for k, v in reduced.items():
        node_compat = get_compat(k)
        if node_compat is not None and node_compat in yaml_list:
            extract_node_include_info(
                reduced, k, k, yaml_list, defs, structs, None)
    if defs == {}:
        raise Exception("No information parsed from dts file.")
    # chosen-node register regions (size reported in KiB, hence div=1024)
    for k, v in regs_config.items():
        if k in chosen:
            extract_reg_prop(chosen[k], None, defs, v, 1024, None)
    # chosen-node label strings (e.g. console/uart selection)
    for k, v in name_config.items():
        if k in chosen:
            extract_string_prop(chosen[k], None, "label", v, defs)
    # This should go away via future DTDirective class
    if 'zephyr,flash' in chosen:
        load_defs = {}
        node_addr = chosen['zephyr,flash']
        flash_keys = ["label", "write-block-size", "erase-block-size"]
        for key in flash_keys:
            if key in reduced[node_addr]['props']:
                prop = reduced[node_addr]['props'][key]
                extract_single(node_addr, None, prop, key, None, defs, "FLASH")
        # only compute the load offset if a code partition exists and
        # it is not the same as the flash base address
        if 'zephyr,code-partition' in chosen and \
           reduced[chosen['zephyr,flash']] is not \
           reduced[chosen['zephyr,code-partition']]:
            part_defs = {}
            extract_reg_prop(chosen['zephyr,code-partition'], None,
                             part_defs, "PARTITION", 1, 'offset')
            part_base = lookup_defs(part_defs,
                                    chosen['zephyr,code-partition'],
                                    'PARTITION_OFFSET')
            load_defs['CONFIG_FLASH_LOAD_OFFSET'] = part_base
            load_defs['CONFIG_FLASH_LOAD_SIZE'] = \
                lookup_defs(part_defs,
                            chosen['zephyr,code-partition'],
                            'PARTITION_SIZE')
        else:
            load_defs['CONFIG_FLASH_LOAD_OFFSET'] = 0
            load_defs['CONFIG_FLASH_LOAD_SIZE'] = 0
    else:
        # We will add addr/size of 0 for systems with no flash controller
        # This is what they already do in the Kconfig options anyway
        defs['dummy-flash'] = {
            'CONFIG_FLASH_BASE_ADDRESS': 0,
            'CONFIG_FLASH_SIZE': 0
        }
    if 'zephyr,flash' in chosen:
        insert_defs(chosen['zephyr,flash'], defs, load_defs, {})
    return defs
def parse_arguments():
    """Parse the command line; -d/-y/-i/-k are all required, -f optional."""
    rdh = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=rdh)
    parser.add_argument("-d", "--dts", nargs=1, required=True, help="DTS file")
    parser.add_argument("-y", "--yaml", nargs=1, required=True,
                        help="YAML file")
    parser.add_argument("-f", "--fixup", nargs='+',
                        help="Fixup file(s), we allow multiple")
    parser.add_argument("-i", "--include", nargs=1, required=True,
                        help="Generate include file for the build system")
    parser.add_argument("-k", "--keyvalue", nargs=1, required=True,
                        help="Generate config file for the build system")
    return parser.parse_args()
def main():
    """Entry point: parse DTS + YAML bindings, then emit the key=value
    config file and the C include file requested on the command line."""
    args = parse_arguments()
    dts = load_and_parse_dts(args.dts[0])
    # build up useful lists (populates the extract.globals module state)
    get_reduced(dts['/'], '/')
    get_phandles(dts['/'], '/', {})
    get_aliases(dts['/'])
    get_chosen(dts['/'])
    yaml_list = load_yaml_descriptions(dts, args.yaml[0])
    defs = generate_node_definitions(yaml_list)
    # generate config and include file
    generate_keyvalue_file(defs, args.keyvalue[0])
    generate_include_file(defs, args.include[0], args.fixup)
if __name__ == '__main__':
main()
scripts: extract_dts_includes: remove prefix argument
The prefix parameter was passed as an argument to several functions but
never used, since the corresponding property is never defined.
Signed-off-by: Erwan Gouriou <de4d967452b83bb0e37525652734e7d9163b6a4c@linaro.org>
#!/usr/bin/env python3
#
# Copyright (c) 2017, Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: ai:ts=4:sw=4
import sys
from os import listdir
import os, fnmatch
import re
import yaml
import argparse
import collections
from devicetree import parse_file
from extract.globals import *
class Loader(yaml.Loader):
    """yaml.Loader subclass that resolves '!include'/'!import' tags by
    loading the referenced file(s) relative to the including document."""

    def __init__(self, stream):
        # remember where the including file lives so relative !include
        # paths can be resolved against it
        self._root = os.path.realpath(stream.name)
        super(Loader, self).__init__(stream)

    def include(self, node):
        """Tag constructor: scalar -> load one file, sequence ->
        concatenate the loaded lists, mapping -> load per value."""
        if isinstance(node, yaml.ScalarNode):
            return self.extractFile(self.construct_scalar(node))
        elif isinstance(node, yaml.SequenceNode):
            result = []
            for filename in self.construct_sequence(node):
                result += self.extractFile(filename)
            return result
        elif isinstance(node, yaml.MappingNode):
            result = {}
            # iteritems() is Python 2 only; this script runs under python3
            for k, v in self.construct_mapping(node).items():
                result[k] = self.extractFile(v)
            return result
        else:
            print("Error:: unrecognised node type in !include statement")
            raise yaml.constructor.ConstructorError

    def extractFile(self, filename):
        """Load *filename*, looking next to the including file first and
        then anywhere under the grandparent (bindings) tree."""
        filepath = os.path.join(os.path.dirname(self._root), filename)
        if not os.path.isfile(filepath):
            # we need to look in bindings/* directories
            # take path and back up 1 directory and parse in '/bindings/*'
            filepath = os.path.dirname(os.path.dirname(self._root))
            for root, dirnames, file in os.walk(filepath):
                if fnmatch.filter(file, filename):
                    filepath = os.path.join(root, filename)
        with open(filepath, 'r') as f:
            return yaml.load(f, Loader)


# The class name is only bound once the class body has finished executing,
# so the tag constructors must be registered here, at module level - the
# original registered them inside the body, raising NameError on import.
Loader.add_constructor('!include', Loader.include)
Loader.add_constructor('!import', Loader.include)
def find_parent_irq_node(node_address):
    """Walk the path components of *node_address* from the root and return
    (via the module-global ``phandles``) the interrupt controller node of
    the deepest ancestor - or the node itself - declaring
    'interrupt-parent'.

    NOTE(review): if no node on the path declares 'interrupt-parent',
    ``interrupt_parent`` is never bound and the return raises NameError.
    """
    address = ''
    for comp in node_address.split('/')[1:]:
        address += '/' + comp
        if 'interrupt-parent' in reduced[address]['props']:
            # keeps overwriting, so the deepest declaration wins
            interrupt_parent = reduced[address]['props'].get(
                'interrupt-parent')
    return phandles[interrupt_parent]
def extract_interrupts(node_address, yaml, prop, names, defs, def_label):
    """Extract interrupt defines for *node_address* into *defs*.

    Each interrupt specifier cell becomes a
    '<label>_<cellprefix>_<idx>_<cell>' define; entries of *names* (from
    'interrupt-names') additionally produce alias defines.
    """
    node = reduced[node_address]
    try:
        props = list(node['props'].get(prop))
    except:
        # scalar property: wrap it so the cell loop below still works
        props = [node['props'].get(prop)]
    irq_parent = find_parent_irq_node(node_address)
    l_base = def_label.split('/')
    index = 0
    while props:
        prop_def = {}
        prop_alias = {}
        l_idx = [str(index)]
        try:
            name = [convert_string_to_label(names.pop(0))]
        except:
            name = []
        cell_yaml = yaml[get_compat(irq_parent)]
        l_cell_prefix = [yaml[get_compat(irq_parent)].get(
            'cell_string', []).upper()]
        for i in range(reduced[irq_parent]['props']['#interrupt-cells']):
            l_cell_name = [cell_yaml['#cells'][i].upper()]
            # drop the cell name when it merely repeats the prefix
            if l_cell_name == l_cell_prefix:
                l_cell_name = []
            l_fqn = '_'.join(l_base + l_cell_prefix + l_idx + l_cell_name)
            prop_def[l_fqn] = props.pop(0)
            if len(name):
                alias_list = l_base + l_cell_prefix + name + l_cell_name
                prop_alias['_'.join(alias_list)] = l_fqn
            # NOTE(review): the inner loop reuses 'i', shadowing the cell
            # index - harmless for the for-iterator but confusing
            if node_address in aliases:
                for i in aliases[node_address]:
                    alias_label = convert_string_to_label(i)
                    alias_list = [alias_label] + l_cell_prefix + name + l_cell_name
                    prop_alias['_'.join(alias_list)] = l_fqn
        index += 1
        insert_defs(node_address, defs, prop_def, prop_alias)
def extract_reg_prop(node_address, names, defs, def_label, div, post_label):
    """Extract address/size defines from the 'reg' property of
    *node_address* into *defs*.

    :param names: optional list of region names used for alias defines
    :param div: divisor applied to the size value (e.g. 1024 for KiB)
    :param post_label: label suffix for the address define
                       (defaults to "BASE_ADDRESS")
    """
    reg = reduced[node_address]['props']['reg']
    if type(reg) is not list: reg = [ reg ]
    props = list(reg)
    # walk down to the node, tracking the innermost #address-cells /
    # #size-cells values that apply to its 'reg'
    address_cells = reduced['/']['props'].get('#address-cells')
    size_cells = reduced['/']['props'].get('#size-cells')
    address = ''
    for comp in node_address.split('/')[1:-1]:
        address += '/' + comp
        address_cells = reduced[address]['props'].get(
            '#address-cells', address_cells)
        size_cells = reduced[address]['props'].get('#size-cells', size_cells)
    if post_label is None:
        post_label = "BASE_ADDRESS"
    index = 0
    l_base = def_label.split('/')
    l_addr = [convert_string_to_label(post_label)]
    l_size = ["SIZE"]
    while props:
        prop_def = {}
        prop_alias = {}
        addr = 0
        size = 0
        # Check is defined should be indexed (_0, _1)
        if index == 0 and len(props) < 3:
            # 1 element (len 2) or no element (len 0) in props
            l_idx = []
        else:
            l_idx = [str(index)]
        try:
            name = [names.pop(0).upper()]
        except:
            name = []
        # assemble the (possibly multi-cell) 32-bit address/size values;
        # NOTE(review): shifts later cells left, i.e. assumes the low word
        # comes first - confirm against the devicetree cell order used here
        for x in range(address_cells):
            addr += props.pop(0) << (32 * x)
        for x in range(size_cells):
            size += props.pop(0) << (32 * x)
        l_addr_fqn = '_'.join(l_base + l_addr + l_idx)
        l_size_fqn = '_'.join(l_base + l_size + l_idx)
        if address_cells:
            prop_def[l_addr_fqn] = hex(addr)
        if size_cells:
            prop_def[l_size_fqn] = int(size / div)
        if len(name):
            if address_cells:
                prop_alias['_'.join(l_base + name + l_addr)] = l_addr_fqn
            if size_cells:
                prop_alias['_'.join(l_base + name + l_size)] = l_size_fqn
        # generate defs for node aliases
        if node_address in aliases:
            for i in aliases[node_address]:
                alias_label = convert_string_to_label(i)
                alias_addr = [alias_label] + l_addr
                alias_size = [alias_label] + l_size
                prop_alias['_'.join(alias_addr)] = '_'.join(l_base + l_addr)
                prop_alias['_'.join(alias_size)] = '_'.join(l_base + l_size)
        insert_defs(node_address, defs, prop_def, prop_alias)
        # increment index for definition creation
        index += 1
def extract_controller(node_address, prop, prop_values, index, defs, def_label):
    """Generate '<label>_..._CONTROLLER' defines naming (via its 'label'
    property) the controller referenced by each phandle in *prop_values*.

    Consumes one phandle plus its specifier cells per call and recurses
    over the remainder of the list.
    """
    prop_def = {}
    prop_alias = {}
    # get controller node (referenced via phandle)
    cell_parent = phandles[prop_values[0]]
    for k in reduced[cell_parent]['props'].keys():
        if k[0] == '#' and '-cells' in k:
            num_cells = reduced[cell_parent]['props'].get(k)
    try:
        l_cell = reduced[cell_parent]['props'].get('label')
    except KeyError:
        l_cell = None
    if l_cell is not None:
        l_base = def_label.split('/')
        # Check is defined should be indexed (_0, _1)
        if index == 0 and len(prop_values) < (num_cells + 2):
            # 0 or 1 element in prop_values
            # ( ie len < num_cells + phandle + 1 )
            l_idx = []
        else:
            l_idx = [str(index)]
        for k in reduced[cell_parent]['props']:
            if 'controller' in k:
                l_cellname = convert_string_to_label(str(k))
                label = l_base + [l_cellname] + l_idx
                prop_def['_'.join(label)] = "\"" + l_cell + "\""
                # generate defs also if node is referenced as an alias in dts
                if node_address in aliases:
                    for i in aliases[node_address]:
                        alias_label = \
                            convert_string_to_label(i)
                        alias = [alias_label] + label[1:]
                        prop_alias['_'.join(alias)] = '_'.join(label)
                insert_defs(node_address, defs, prop_def, prop_alias)
    # chop off phandle + num_cells to get to next list item
    prop_values = prop_values[num_cells+1:]
    # recurse if we have anything left
    if len(prop_values):
        # Bug fix: the recursive call used to pass the removed 'prefix'
        # argument (an undefined name), so any property with more than one
        # phandle entry raised NameError at runtime.
        extract_controller(node_address, prop, prop_values, index + 1, defs,
                           def_label)
def extract_cells(node_address, yaml, prop, prop_values, names, index, defs,
                  def_label):
    """Extract defines for the specifier cells (the values following each
    phandle) of *prop* into *defs*, one define per cell, recursing once
    per list entry.

    :raises Exception: if the controller's compatible has no yaml binding
    """
    cell_parent = phandles[prop_values.pop(0)]
    try:
        cell_yaml = yaml[get_compat(cell_parent)]
    except:
        raise Exception(
            "Could not find yaml description for " +
            reduced[cell_parent]['name'])
    try:
        # NOTE(review): success path binds a bare str while the fallback is
        # a list; 'l_base + name' below would raise TypeError for a str -
        # looks like this should be [names.pop(0).upper()]. TODO confirm.
        name = names.pop(0).upper()
    except:
        name = []
    # Get number of cells per element of current property
    for k in reduced[cell_parent]['props'].keys():
        if k[0] == '#' and '-cells' in k:
            num_cells = reduced[cell_parent]['props'].get(k)
    # Generate label for each field of the property element
    l_cell = [str(cell_yaml.get('cell_string', ''))]
    l_base = def_label.split('/')
    # Check if #define should be indexed (_0, _1, ...)
    if index == 0 and len(prop_values) < (num_cells + 2):
        # Less than 2 elements in prop_values (ie len < num_cells + phandle + 1)
        # Indexing is not needed
        l_idx = []
    else:
        l_idx = [str(index)]
    prop_def = {}
    prop_alias = {}
    # Generate label for each field of the property element
    for i in range(num_cells):
        l_cellname = [str(cell_yaml['#cells'][i]).upper()]
        # avoid doubling the prefix when the cell repeats the cell_string
        if l_cell == l_cellname:
            label = l_base + l_cell + l_idx
        else:
            label = l_base + l_cell + l_cellname + l_idx
        label_name = l_base + name + l_cellname
        prop_def['_'.join(label)] = prop_values.pop(0)
        if len(name):
            prop_alias['_'.join(label_name)] = '_'.join(label)
    # generate defs for node aliases
    if node_address in aliases:
        for i in aliases[node_address]:
            alias_label = convert_string_to_label(i)
            alias = [alias_label] + label[1:]
            prop_alias['_'.join(alias)] = '_'.join(label)
    insert_defs(node_address, defs, prop_def, prop_alias)
    # recurse if we have anything left
    if len(prop_values):
        extract_cells(node_address, yaml, prop, prop_values, names,
                      index + 1, defs, def_label)
def extract_pinctrl(node_address, yaml, pinconf, names, index, defs,
                    def_label):
    """Extract pin-configuration defines for the 'pinctrl-N' phandle(s) in
    *pinconf*, emitting one pin/function define pair per entry of every
    subnode found under each referenced pinmux handle."""
    prop_list = []
    if not isinstance(pinconf, list):
        prop_list.append(pinconf)
    else:
        prop_list = list(pinconf)
    def_prefix = def_label.split('_')
    prop_def = {}
    for p in prop_list:
        pin_node_address = phandles[p]
        pin_subnode = '/'.join(pin_node_address.split('/')[-1:])
        cell_yaml = yaml[get_compat(pin_node_address)]
        cell_prefix = cell_yaml.get('cell_string', None)
        post_fix = []
        if cell_prefix is not None:
            post_fix.append(cell_prefix)
        for subnode in reduced.keys():
            if pin_subnode in subnode and pin_node_address != subnode:
                # found a subnode underneath the pinmux handle
                pin_label = def_prefix + post_fix + subnode.split('/')[-2:]
                for i, cells in enumerate(reduced[subnode]['props']):
                    # first yaml cell names the pin define, second the
                    # function define; both indexed by position
                    key_label = list(pin_label) + \
                        [cell_yaml['#cells'][0]] + [str(i)]
                    func_label = key_label[:-2] + \
                        [cell_yaml['#cells'][1]] + [str(i)]
                    key_label = convert_string_to_label('_'.join(key_label))
                    func_label = convert_string_to_label('_'.join(func_label))
                    prop_def[key_label] = cells
                    prop_def[func_label] = \
                        reduced[subnode]['props'][cells]
    insert_defs(node_address, defs, prop_def, {})
def extract_single(node_address, yaml, prop, key, defs, def_label):
    """Extract a single property value *prop* named *key* into *defs*
    under '<def_label>_<KEY>' (suffixed '_<i>' when *prop* is a list).

    String values are emitted as quoted C string literals.
    """
    prop_def = {}
    prop_alias = {}
    # compute the define label up front so it is always bound; the original
    # bound k/label inside the branches, so an empty-list prop left them
    # undefined and the alias block below raised NameError
    k = convert_string_to_label(key)
    label = def_label + '_' + k
    if isinstance(prop, list):
        for i, p in enumerate(prop):
            if isinstance(p, str):
                p = "\"" + p + "\""
            prop_def[label + '_' + str(i)] = p
    else:
        # 'parent-label' is a pseudo-value: fetch the parent node's label
        if prop == 'parent-label':
            prop = find_parent_prop(node_address, 'label')
        if isinstance(prop, str):
            prop = "\"" + prop + "\""
        prop_def[label] = prop
    # generate defs for node aliases
    if node_address in aliases:
        for i in aliases[node_address]:
            alias_label = convert_string_to_label(i)
            alias = alias_label + '_' + k
            prop_alias[alias] = label
    insert_defs(node_address, defs, prop_def, prop_alias)
def extract_string_prop(node_address, yaml, key, label, defs):
    """Copy string property *key* of *node_address* into *defs* as a
    quoted C string define named *label*."""
    source_node = reduced[node_address]
    value = source_node['props'][key]
    k = convert_string_to_label(key)
    entry = {label: '"' + value + '"'}
    # merge into an existing node entry, or start a fresh one
    defs.setdefault(node_address, {}).update(entry)
def extract_property(node_compat, yaml, node_address, prop, prop_val, names,
                     defs, label_override):
    """Dispatch extraction of *prop* on *node_address* to the matching
    extract_* helper, building the define label from the yaml binding
    (prefixed with the parent's label when the binding declares a bus
    parent).

    :raises Exception: when the yaml bus-master relationship between the
        node and its parent is missing or inconsistent
    """
    if 'base_label' in yaml[node_compat]:
        def_label = yaml[node_compat].get('base_label')
    else:
        def_label = get_node_label(node_compat, node_address)
    if 'parent' in yaml[node_compat]:
        if 'bus' in yaml[node_compat]['parent']:
            # get parent label
            parent_address = ''
            for comp in node_address.split('/')[1:-1]:
                parent_address += '/' + comp
            # check parent has matching child bus value
            try:
                parent_yaml = \
                    yaml[reduced[parent_address]['props']['compatible']]
                parent_bus = parent_yaml['child']['bus']
            except (KeyError, TypeError) as e:
                raise Exception(str(node_address) + " defines parent " +
                                str(parent_address) + " as bus master but " +
                                str(parent_address) + " not configured as bus master " +
                                "in yaml description")
            if parent_bus != yaml[node_compat]['parent']['bus']:
                bus_value = yaml[node_compat]['parent']['bus']
                raise Exception(str(node_address) + " defines parent " +
                                str(parent_address) + " as " + bus_value +
                                " bus master but " + str(parent_address) +
                                " configured as " + str(parent_bus) +
                                " bus master")
            # Generate alias definition if parent has any alias
            if parent_address in aliases:
                for i in aliases[parent_address]:
                    node_alias = i + '_' + def_label
                    aliases[node_address].append(node_alias)
            # Use parent label to generate label
            parent_label = get_node_label(
                find_parent_prop(node_address,'compatible') , parent_address)
            def_label = parent_label + '_' + def_label
            # Generate bus-name define
            extract_single(node_address, yaml, 'parent-label',
                           'bus-name', defs, def_label)
    if label_override is not None:
        def_label += '_' + label_override
    if prop == 'reg':
        extract_reg_prop(node_address, names, defs, def_label,
                         1, prop_val.get('label', None))
    # NOTE(review): 'interupts-extended' is misspelled (should be
    # 'interrupts-extended'), so extended interrupts never match - confirm
    elif prop == 'interrupts' or prop == 'interupts-extended':
        extract_interrupts(node_address, yaml, prop, names, defs, def_label)
    elif 'pinctrl-' in prop:
        p_index = int(prop.split('-')[1])
        extract_pinctrl(node_address, yaml,
                        reduced[node_address]['props'][prop],
                        names[p_index], p_index, defs, def_label)
    elif 'clocks' in prop or 'gpios' in prop:
        try:
            prop_values = list(reduced[node_address]['props'].get(prop))
        except:
            prop_values = reduced[node_address]['props'].get(prop)
        # controller define first, then the per-cell defines
        # (extract_cells consumes prop_values, so order matters)
        extract_controller(node_address, prop, prop_values, 0, defs,
                           def_label)
        extract_cells(node_address, yaml, prop, prop_values,
                      names, 0, defs, def_label)
    else:
        extract_single(node_address, yaml,
                       reduced[node_address]['props'][prop], prop,
                       defs, def_label)
def extract_node_include_info(reduced, root_node_address, sub_node_address,
                              yaml, defs, structs, y_sub):
    """Walk the yaml binding of *root_node_address* and extract every
    property carrying a 'generation' directive on *sub_node_address* into
    *defs*, recursing into DTS sub-nodes for bindings that declare nested
    'properties'."""
    node = reduced[sub_node_address]
    node_compat = get_compat(root_node_address)
    label_override = None
    if node_compat not in yaml.keys():
        return {}, {}
    if y_sub is None:
        y_node = yaml[node_compat]
    else:
        y_node = y_sub
    if yaml[node_compat].get('use-property-label', False):
        # prefer the DTS 'label' property for define naming when requested
        try:
            label = y_node['properties']['label']
            label_override = convert_string_to_label(node['props']['label'])
        except KeyError:
            pass
    # check to see if we need to process the properties
    for k, v in y_node['properties'].items():
        if 'properties' in v:
            # nested binding: recurse into every DTS node under the root
            for c in reduced:
                if root_node_address + '/' in c:
                    extract_node_include_info(
                        reduced, root_node_address, c, yaml, defs, structs,
                        v)
        if 'generation' in v:
            for c in node['props'].keys():
                # NOTE(review): this 'pass' does nothing - '-names' props
                # look like they were meant to be skipped ('continue')
                if c.endswith("-names"):
                    pass
                if re.match(k + '$', c):
                    # the matching '-names' property supplies alias names
                    if 'pinctrl-' in c:
                        names = node['props'].get('pinctrl-names', [])
                    else:
                        names = node['props'].get(c[:-1] + '-names', [])
                    if not names:
                        names = node['props'].get(c + '-names', [])
                    if not isinstance(names, list):
                        names = [names]
                    extract_property(
                        node_compat, yaml, sub_node_address, c, v, names,
                        defs, label_override)
def dict_merge(dct, merge_dct):
    # from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
    """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.
    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    # collections.Mapping was deprecated in Python 3.3 and removed in 3.10;
    # the ABC lives in collections.abc.
    from collections.abc import Mapping

    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(merge_dct[k], Mapping)):
            # both sides are mappings: merge recursively instead of clobbering
            dict_merge(dct[k], merge_dct[k])
        else:
            dct[k] = merge_dct[k]
def yaml_traverse_inherited(node):
    """ Recursive overload procedure inside ``node``
    ``inherits`` section is searched for and used as node base when found.
    Base values are then overloaded by node values
    Additionally, 'id' key of 'inherited' dict is converted to 'node_type'
    :param node:
    :return: node
    """
    if 'inherits' in node.keys():
        if 'id' in node['inherits'].keys():
            # rename the legacy 'id' key to 'node_type' before merging
            node['inherits']['node_type'] = node['inherits']['id']
            node['inherits'].pop('id')
        if 'inherits' in node['inherits'].keys():
            # the base itself inherits from something: resolve bottom-up first
            node['inherits'] = yaml_traverse_inherited(node['inherits'])
        # overlay this node's own values onto the inherited base, then make
        # the merged base the node and drop the marker key
        dict_merge(node['inherits'], node)
        node = node['inherits']
        node.pop('inherits')
    return node
def yaml_collapse(yaml_list):
    """Return a copy of *yaml_list* with each node's 'inherits' chain
    resolved into the node itself."""
    resolved = dict(yaml_list)
    for compat, description in resolved.items():
        resolved[compat] = yaml_traverse_inherited(description)
    return resolved
def get_key_value(k, v, tabstop):
    """Build one "#define <k><tabs><v>\n" line, tab-padding the value out
    to the 8-column tab stop ``tabstop``."""
    label = "#define " + k
    # how many tab stops the label already covers
    occupied = (len(label) + 7) >> 3 if len(label) % 8 else (len(label) >> 3) + 1
    return label + '\t' * (tabstop - occupied + 1) + str(v) + '\n'
def output_keyvalue_lines(fd, defs):
    """Write *defs* to *fd* in flat key=value (config-fragment) form,
    one commented section per node."""
    node_keys = sorted(defs.keys())
    for node in node_keys:
        # section header: last path component of the node address
        fd.write('# ' + node.split('/')[-1])
        fd.write("\n")
        prop_keys = sorted(defs[node].keys())
        for prop in prop_keys:
            if prop == 'aliases':
                # aliases map alias-name -> canonical define name; emit
                # the alias with the canonical define's value
                for entry in sorted(defs[node][prop]):
                    a = defs[node][prop].get(entry)
                    fd.write("%s=%s\n" % (entry, defs[node].get(a)))
            else:
                fd.write("%s=%s\n" % (prop, defs[node].get(prop)))
        fd.write("\n")
def generate_keyvalue_file(defs, kv_file):
    """Write *defs* to a fresh key=value config file at *kv_file*."""
    with open(kv_file, "w") as config_fd:
        output_keyvalue_lines(config_fd, defs)
def output_include_lines(fd, defs, fixups):
    """Write *defs* to *fd* as a C header of #define lines, then append
    the contents of any *fixups* files verbatim.

    Reads the board's root 'compatible' string for the banner from the
    module-global ``reduced`` tree.
    """
    compatible = reduced['/']['props']['compatible'][0]
    fd.write("/**************************************************\n")
    fd.write(" * Generated include file for " + compatible)
    fd.write("\n")
    fd.write(" * DO NOT MODIFY\n")
    fd.write(" */\n")
    fd.write("\n")
    fd.write("#ifndef _DEVICE_TREE_BOARD_H" + "\n")
    fd.write("#define _DEVICE_TREE_BOARD_H" + "\n")
    fd.write("\n")
    node_keys = sorted(defs.keys())
    for node in node_keys:
        fd.write('/* ' + node.split('/')[-1] + ' */')
        fd.write("\n")
        max_dict_key = lambda d: max(len(k) for k in d.keys())
        maxlength = 0
        # column width driven by the longest define name (aliases included)
        if defs[node].get('aliases'):
            maxlength = max_dict_key(defs[node]['aliases'])
        maxlength = max(maxlength, max_dict_key(defs[node])) + len('#define ')
        if maxlength % 8:
            maxtabstop = (maxlength + 7) >> 3
        else:
            maxtabstop = (maxlength >> 3) + 1
        # keep at least a 2-column gap between name and value
        if (maxtabstop * 8 - maxlength) <= 2:
            maxtabstop += 1
        prop_keys = sorted(defs[node].keys())
        for prop in prop_keys:
            if prop == 'aliases':
                # alias defines carry the canonical define's value
                for entry in sorted(defs[node][prop]):
                    a = defs[node][prop].get(entry)
                    fd.write(get_key_value(entry, a, maxtabstop))
            else:
                fd.write(get_key_value(prop, defs[node].get(prop), maxtabstop))
        fd.write("\n")
    if fixups:
        for fixup in fixups:
            if os.path.exists(fixup):
                fd.write("\n")
                fd.write(
                    "/* Following definitions fixup the generated include */\n")
                try:
                    with open(fixup, "r") as fixup_fd:
                        for line in fixup_fd.readlines():
                            fd.write(line)
                        fd.write("\n")
                except:
                    # NOTE(review): existence was checked just above, so this
                    # message is misleading for other I/O errors - confirm
                    raise Exception(
                        "Input file " + os.path.abspath(fixup) +
                        " does not exist.")
    fd.write("#endif\n")
def generate_include_file(defs, inc_file, fixups):
    """Write *defs* (plus fixup snippets) to a fresh C header *inc_file*."""
    with open(inc_file, "w") as header_fd:
        output_include_lines(header_fd, defs, fixups)
def load_and_parse_dts(dts_file):
    """Open and parse *dts_file*, returning the device-tree dict."""
    with open(dts_file, "r") as source_fd:
        tree = parse_file(source_fd)
    return tree
def load_yaml_descriptions(dts, yaml_dir):
    """Scan *yaml_dir* for YAML binding files whose 'constraint' matches a
    compatible present in *dts*, load them, and collapse their 'inherits'
    chains.

    :param dts: parsed device tree (as returned by load_and_parse_dts)
    :param yaml_dir: root directory walked for '*.yaml' binding files
    :return: dict mapping compatible string -> collapsed yaml description
    :raises Exception: if no matching YAML description is found
    """
    compatibles = get_all_compatibles(dts['/'], '/', {})
    # find unique set of compatibles across all active nodes
    s = set()
    for k, v in compatibles.items():
        if isinstance(v, list):
            s.update(v)
        else:
            s.add(v)
    # scan YAML files and find the ones we are interested in
    yaml_files = []
    for root, dirnames, filenames in os.walk(yaml_dir):
        for filename in fnmatch.filter(filenames, '*.yaml'):
            yaml_files.append(os.path.join(root, filename))
    # Raw string: '\s' is an invalid escape in a plain literal. Require the
    # ':' (the original 'constraint:*' also matched a bare 'constraint',
    # which would crash the split(':')[1] below).
    constraint_re = re.compile(r'^\s+constraint:')
    yaml_list = {}
    file_load_list = set()
    for file in yaml_files:
        # open via 'with' so the scan pass does not leak file handles
        with open(file, 'r') as scan_fd:
            for line in scan_fd:
                if constraint_re.search(line):
                    c = line.split(':')[1].strip()
                    c = c.strip('"')
                    if c in s and file not in file_load_list:
                        file_load_list.add(file)
                        with open(file, 'r') as yf:
                            yaml_list[c] = yaml.load(yf, Loader)
    if yaml_list == {}:
        raise Exception("Missing YAML information. Check YAML sources")
    # collapse the yaml inherited information
    yaml_list = yaml_collapse(yaml_list)
    return yaml_list
def lookup_defs(defs, node, key):
    """Look up *key* for *node* in *defs*, resolving through the node's
    alias table when one exists.

    :return: the define's value, or None if the node or key is unknown
    """
    if node not in defs:
        return None
    # a node entry may have no 'aliases' table at all; use .get() so the
    # lookup does not raise KeyError in that case
    aliases = defs[node].get('aliases', {})
    if key in aliases:
        key = aliases[key]
    return defs[node].get(key, None)
def generate_node_definitions(yaml_list):
    """Walk the module-global ``reduced`` node tree and build the dict of
    #define entries for every node whose compatible has a yaml binding.

    :param yaml_list: dict of compatible -> collapsed yaml description
    :return: defs dict keyed by node address
    :raises Exception: if nothing at all could be extracted
    """
    defs = {}
    structs = {}
    for k, v in reduced.items():
        node_compat = get_compat(k)
        if node_compat is not None and node_compat in yaml_list:
            extract_node_include_info(
                reduced, k, k, yaml_list, defs, structs, None)
    if defs == {}:
        raise Exception("No information parsed from dts file.")
    # chosen-node register regions (size reported in KiB, hence div=1024)
    for k, v in regs_config.items():
        if k in chosen:
            extract_reg_prop(chosen[k], None, defs, v, 1024, None)
    # chosen-node label strings (e.g. console/uart selection)
    for k, v in name_config.items():
        if k in chosen:
            extract_string_prop(chosen[k], None, "label", v, defs)
    # This should go away via future DTDirective class
    if 'zephyr,flash' in chosen:
        load_defs = {}
        node_addr = chosen['zephyr,flash']
        flash_keys = ["label", "write-block-size", "erase-block-size"]
        for key in flash_keys:
            if key in reduced[node_addr]['props']:
                prop = reduced[node_addr]['props'][key]
                extract_single(node_addr, None, prop, key, defs, "FLASH")
        # only compute the load offset if a code partition exists and
        # it is not the same as the flash base address
        if 'zephyr,code-partition' in chosen and \
           reduced[chosen['zephyr,flash']] is not \
           reduced[chosen['zephyr,code-partition']]:
            part_defs = {}
            extract_reg_prop(chosen['zephyr,code-partition'], None,
                             part_defs, "PARTITION", 1, 'offset')
            part_base = lookup_defs(part_defs,
                                    chosen['zephyr,code-partition'],
                                    'PARTITION_OFFSET')
            load_defs['CONFIG_FLASH_LOAD_OFFSET'] = part_base
            load_defs['CONFIG_FLASH_LOAD_SIZE'] = \
                lookup_defs(part_defs,
                            chosen['zephyr,code-partition'],
                            'PARTITION_SIZE')
        else:
            load_defs['CONFIG_FLASH_LOAD_OFFSET'] = 0
            load_defs['CONFIG_FLASH_LOAD_SIZE'] = 0
    else:
        # We will add addr/size of 0 for systems with no flash controller
        # This is what they already do in the Kconfig options anyway
        defs['dummy-flash'] = {
            'CONFIG_FLASH_BASE_ADDRESS': 0,
            'CONFIG_FLASH_SIZE': 0
        }
    if 'zephyr,flash' in chosen:
        insert_defs(chosen['zephyr,flash'], defs, load_defs, {})
    return defs
def parse_arguments():
    """Parse the command line; -d/-y/-i/-k are all required, -f optional."""
    rdh = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=rdh)
    parser.add_argument("-d", "--dts", nargs=1, required=True, help="DTS file")
    parser.add_argument("-y", "--yaml", nargs=1, required=True,
                        help="YAML file")
    parser.add_argument("-f", "--fixup", nargs='+',
                        help="Fixup file(s), we allow multiple")
    parser.add_argument("-i", "--include", nargs=1, required=True,
                        help="Generate include file for the build system")
    parser.add_argument("-k", "--keyvalue", nargs=1, required=True,
                        help="Generate config file for the build system")
    return parser.parse_args()
def main():
    """Entry point: parse DTS + YAML bindings, then emit the key=value
    config file and the C include file requested on the command line."""
    args = parse_arguments()
    dts = load_and_parse_dts(args.dts[0])
    # build up useful lists (populates the extract.globals module state)
    get_reduced(dts['/'], '/')
    get_phandles(dts['/'], '/', {})
    get_aliases(dts['/'])
    get_chosen(dts['/'])
    yaml_list = load_yaml_descriptions(dts, args.yaml[0])
    defs = generate_node_definitions(yaml_list)
    # generate config and include file
    generate_keyvalue_file(defs, args.keyvalue[0])
    generate_include_file(defs, args.include[0], args.fixup)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Processing for baseline segmenter output
"""
import PIL
import logging
import warnings
import numpy as np
import shapely.geometry as geom
from PIL import Image, ImageDraw
from numpy.polynomial import Polynomial
from scipy.ndimage import black_tophat
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.morphology import grey_dilation
from scipy.spatial import distance_matrix, Delaunay
from scipy.spatial.distance import cdist, pdist, squareform
from shapely.ops import nearest_points, unary_union
from skimage import draw
from skimage.filters import apply_hysteresis_threshold
from skimage.measure import approximate_polygon, find_contours
from skimage.morphology import skeletonize, watershed
from skimage.transform import PiecewiseAffineTransform, warp
from typing import List, Tuple, Union, Dict, Any, Sequence
logger = logging.getLogger('kraken')
def reading_order(lines: Sequence, text_direction: str = 'lr') -> List:
    """Given the list of lines (a list of 2D slices), computes
    the partial reading order. The output is a binary 2D array
    such that order[i,j] is true if line i comes before line j
    in reading order."""
    logger.info('Compute reading order on {} lines in {} direction'.format(len(lines), text_direction))
    order = np.zeros((len(lines), len(lines)), 'B')

    def _x_overlaps(u, v):
        # the horizontal (axis-1) extents of u and v intersect
        return u[1].start < v[1].stop and u[1].stop > v[1].start

    def _above(u, v):
        # u starts higher on the page (axis 0) than v
        return u[0].start < v[0].start

    def _left_of(u, v):
        # u ends horizontally before v begins
        return u[1].stop < v[1].start

    def _separates(w, u, v):
        # w lies vertically between u and v and horizontally spans the
        # gap between them, acting as a column separator
        if w[0].stop < min(u[0].start, v[0].start):
            return 0
        if w[0].start > max(u[0].stop, v[0].stop):
            return 0
        if w[1].start < u[1].stop and w[1].stop > v[1].start:
            return 1
        return 0

    # right-to-left scripts invert the horizontal precedence test
    if text_direction == 'rl':
        def horizontal_order(u, v):
            return not _left_of(u, v)
    else:
        horizontal_order = _left_of

    for i, u in enumerate(lines):
        for j, v in enumerate(lines):
            if _x_overlaps(u, v):
                if _above(u, v):
                    order[i, j] = 1
            else:
                # no horizontal overlap: use horizontal order unless some
                # third line separates the two (different columns)
                if [w for w in lines if _separates(w, u, v)] == []:
                    if horizontal_order(u, v):
                        order[i, j] = 1
    return order
def topsort(order: np.array) -> np.array:
    """Given a binary array defining a partial order (o[i,j]==True means i<j),
    compute a topological sort. This is a quick and dirty implementation
    that works for up to a few thousand elements."""
    logger.info('Perform topological sort on partially ordered lines')
    count = len(order)
    seen = np.zeros(count)
    result = []

    def _dfs(node):
        # depth-first: emit every predecessor of `node` before `node`
        if seen[node]:
            return
        seen[node] = 1
        predecessors, = np.nonzero(np.ravel(order[:, node]))
        for pred in predecessors:
            _dfs(pred)
        result.append(node)

    for node in range(count):
        _dfs(node)
    return result
def denoising_hysteresis_thresh(im, low, high, sigma):
    """Hysteresis-threshold `im` between `low` and `high` after Gaussian
    smoothing with standard deviation `sigma`."""
    smoothed = gaussian_filter(im, sigma)
    return apply_hysteresis_threshold(smoothed, low, high)
def _find_superpixels(skeleton, heatmap, min_sp_dist):
logger.debug('Finding superpixels')
conf_map = heatmap * skeleton
sp_idx = np.unravel_index(np.argsort(1.-conf_map, axis=None), conf_map.shape)
if not sp_idx[0].any():
logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
return np.empty(0)
zeroes_idx = conf_map[sp_idx].argmin()
if not zeroes_idx:
logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
return np.empty(0)
sp_idx = sp_idx[0][:zeroes_idx], sp_idx[1][:zeroes_idx]
sp_can = [(sp_idx[0][0], sp_idx[1][0])]
for x in range(len(sp_idx[0])):
loc = np.array([[sp_idx[0][x], sp_idx[1][x]]])
if min(cdist(sp_can, loc)) > min_sp_dist:
sp_can.extend(loc.tolist())
return np.array(sp_can)
def _compute_sp_states(sp_can, bl_map, sep_map):
    """
    Estimates the superpixel state information.

    Builds a Delaunay triangulation over the superpixel candidates and, for
    each triangulation edge, samples intensity statistics from the baseline
    and separator probability maps along the straight line connecting its
    end points. Edges with weak or noisy baseline support, or with strong
    separator support, are discarded.

    Args:
        sp_can: array of (row, col) superpixel coordinates.
        bl_map: baseline probability map.
        sep_map: separator probability map.

    Returns:
        Dict mapping each surviving edge (a sorted pair of point tuples) to
        (baseline mean, baseline variance, separator mean, separator max).
    """
    logger.debug('Triangulating superpixels')
    tri = Delaunay(sp_can, qhull_options="QJ Pp")
    indices, indptr = tri.vertex_neighbor_vertices
    # dict mapping each edge to its intensity. Needed for subsequent clustering step.
    intensities = {}
    logger.debug('Computing superpixel state information')
    for vertex in range(len(sp_can)):
        # look up neighboring indices
        neighbors = tri.points[indptr[indices[vertex]:indices[vertex+1]]]
        # calculate intensity of line segments to neighbors in both bl map and separator map
        # (the per-vertex `intensity` list accumulated here previously was
        # dead code and has been removed)
        for nb in neighbors.astype('int'):
            # canonicalize the undirected edge so each pair is stored once
            key = [tuple(sp_can[vertex]), tuple(nb)]
            key.sort()
            key = tuple(key)
            line_locs = draw.line(*(key[0] + key[1]))
            intensities[key] = (bl_map[line_locs].mean(), bl_map[line_locs].var(), sep_map[line_locs].mean(), sep_map[line_locs].max())
    logger.debug('Filtering triangulation')
    # filter edges in triangulation
    for k, v in list(intensities.items()):
        # weak baseline response
        if v[0] < 0.4:
            del intensities[k]
            continue
        # noisy baseline support
        if v[1] > 5e-02:
            del intensities[k]
            continue
        # filter edges with high separator affinity
        if v[2] > 0.125 or v[3] > 0.25 or v[0] < 0.5:
            del intensities[k]
            continue
    return intensities
def _cluster_lines(intensities):
    """
    Clusters lines according to their intensities.

    Greedy agglomerative clustering of triangulation edges: edges sharing an
    endpoint are merged into the same cluster. clusters[0] holds the
    (shrinking) list of still-unassigned edges; real clusters start at
    index 1.
    """
    edge_list = list(intensities.keys())
    def _point_in_cluster(p):
        # index (>= 1) of the cluster containing point p, 0 if unassigned
        for idx, cluster in enumerate(clusters[1:]):
            if p in [point for edge in cluster for point in edge]:
                return idx+1
        return 0
    # cluster
    logger.debug('Computing clusters')
    n = 0
    clusters = [edge_list]
    # NOTE: edge_list is mutated while being iterated, so some edges are
    # skipped within a pass; the outer while-loop reruns until the list
    # length stabilizes, which picks those edges up on a later pass.
    while len(edge_list) != n:
        n = len(edge_list)
        for edge in edge_list:
            cl_p0 = _point_in_cluster(edge[0])
            cl_p1 = _point_in_cluster(edge[1])
            # new cluster case
            if not cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters.append([edge])
            # extend case
            elif cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters[cl_p0].append(edge)
            elif cl_p1 and not cl_p0:
                edge_list.remove(edge)
                clusters[cl_p1].append(edge)
            # merge case: edge connects two existing clusters
            elif cl_p0 != cl_p1 and cl_p0 and cl_p1:
                edge_list.remove(edge)
                clusters[min(cl_p0, cl_p1)].extend(clusters.pop(max(cl_p0, cl_p1)))
                clusters[min(cl_p0, cl_p1)].append(edge)
    return clusters
def _interpolate_lines(clusters):
"""
Interpolates the baseline clusters and adds polygonal information.
"""
logger.debug('Reticulating splines')
lines = []
for cluster in clusters[1:]:
points = sorted(set(point for edge in cluster for point in edge), key=lambda x: x[1])
x = [x[1] for x in points]
y = [x[0] for x in points]
# very short lines might not have enough superpixels to ensure a well-conditioned regression
deg = min(len(x)-1, 3)
poly = Polynomial.fit(x, y, deg=deg)
xp, yp = poly.linspace(max(np.diff(poly.domain)//deg, 2))
xp = xp.astype('int')
yp = yp.astype('int')
lines.append(list(zip(xp, yp)))
return lines
def vectorize_lines(im: np.ndarray, threshold: float = 0.2, min_sp_dist: int = 10):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (3, H, W) with the first dimension
                         being a probability distribution over (background,
                         baseline, separators).
        threshold (float): binarization threshold applied to the probability
                           maps.
        min_sp_dist (int): minimum distance between sampled superpixels.

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the points of all baseline polylines.
    """
    # split into baseline and separator map
    bl_map = im[1]
    sep_map = im[2]
    # binarize (renamed from `bin`, which shadowed the builtin)
    bin_im = im > threshold
    skel = skeletonize(bin_im[1])
    sp_can = _find_superpixels(skel, heatmap=bl_map, min_sp_dist=min_sp_dist)
    if not sp_can.size:
        logger.warning('No superpixel candidates found in network output. Likely empty page.')
        return []
    intensities = _compute_sp_states(sp_can, bl_map, sep_map)
    clusters = _cluster_lines(intensities)
    lines = _interpolate_lines(clusters)
    return lines
def calculate_polygonal_environment(im: PIL.Image.Image, baselines: Sequence[Tuple[int, int]]):
    """
    Given a list of baselines and an input image, calculates a polygonal
    environment around each baseline.

    Args:
        im (PIL.Image): Input image
        baselines (sequence): List of lists containing a single baseline per
                              entry.

    Returns:
        List of polygonizations, one per input baseline, each a list of
        coordinates.
    """
    bounds = np.array(im.size, dtype=np.float)
    im = np.array(im)
    # compute tophat features of input image
    im_feats = black_tophat(im, 3)

    def _ray_intersect_boundaries(ray, direction, aabb):
        """
        Simplified version of [0] for 2d and AABB anchored at (0,0).

        [0] http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
        """
        dir_fraction = np.empty(2, dtype=ray.dtype)
        dir_fraction[direction == 0.0] = np.inf
        dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0])
        t1 = (-ray[0]) * dir_fraction[0]
        t2 = (aabb[0] - ray[0]) * dir_fraction[0]
        t3 = (-ray[1]) * dir_fraction[1]
        t4 = (aabb[1] - ray[1]) * dir_fraction[1]
        tmin = max(min(t1, t2), min(t3, t4))
        tmax = min(max(t1, t2), max(t3, t4))
        t = min(x for x in [tmin, tmax] if x >= 0)
        return ray + (direction * t)

    def _extract_patch(env_up, env_bottom, baseline):
        """
        Calculate a line image patch from a ROI and the original baseline.
        """
        # marker image: 2 on the baseline, 1 on the polygon envelope
        markers = np.zeros(bounds.astype('int')[::-1], dtype=np.int)
        for l in zip(baseline[:-1], baseline[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 2
        for l in zip(env_up[:-1], env_up[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        for l in zip(env_bottom[:-1], env_bottom[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        markers = grey_dilation(markers, size=3)
        full_polygon = np.concatenate((env_up, env_bottom[::-1]))
        r, c = draw.polygon(full_polygon[:,0], full_polygon[:,1])
        mask = np.zeros(bounds.astype('int')[::-1], dtype=np.bool)
        mask[c, r] = True
        patch = im_feats.copy()
        patch[mask != True] = 0
        # crop everything to the polygon's bounding box before the watershed
        coords = np.argwhere(mask)
        r_min, c_min = coords.min(axis=0)
        r_max, c_max = coords.max(axis=0)
        patch = patch[r_min:r_max+1, c_min:c_max+1]
        markers = markers[r_min:r_max+1, c_min:c_max+1]
        mask = mask[r_min:r_max+1, c_min:c_max+1]
        # run watershed
        ws = watershed(patch, markers, 8, mask=mask)
        ws = grey_dilation(ws, size=3)
        # pad output to ensure contour is closed
        ws = np.pad(ws, 1)
        # find contour of central basin
        contours = find_contours(ws, 1.5, fully_connected='high')
        contour = np.array(unary_union([geom.Polygon(contour.tolist()) for contour in contours]).boundary, dtype='uint')
        ## approximate + remove offsets + transpose
        contour = np.transpose((approximate_polygon(contour, 5)-1+(r_min, c_min)), (0, 1)).astype('uint')
        return contour.tolist()

    polygons = []
    for idx, line in enumerate(baselines):
        # find intercepts with image bounds on each side of baseline
        lr = np.array(line[:2], dtype=np.float)
        lr_dir = lr[1] - lr[0]
        lr_dir = (lr_dir.T / np.sqrt(np.sum(lr_dir**2,axis=-1)))
        lr_up_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(-1,1))[::-1], bounds-1).astype('int')
        lr_bottom_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(1,-1))[::-1], bounds-1).astype('int')
        rr = np.array(line[-2:], dtype=np.float)
        rr_dir = rr[1] - rr[0]
        rr_dir = (rr_dir.T / np.sqrt(np.sum(rr_dir**2,axis=-1)))
        # BUG FIX: the right-hand boundary rays must originate from the
        # baseline's end point rr[1], not the penultimate point rr[0]
        # (previously the wrong index was selected here).
        rr_up_intersect = _ray_intersect_boundaries(rr[1], (rr_dir*(-1,1))[::-1], bounds-1).astype('int')
        rr_bottom_intersect = _ray_intersect_boundaries(rr[1], (rr_dir*(1,-1))[::-1], bounds-1).astype('int')
        # build polygon between baseline and bbox intersects
        upper_polygon = geom.Polygon([lr_up_intersect.tolist()] + line + [rr_up_intersect.tolist()])
        bottom_polygon = geom.Polygon([lr_bottom_intersect.tolist()] + line + [rr_bottom_intersect.tolist()])
        # select baselines at least partially in each polygon
        side_a = [geom.LineString([lr_up_intersect.tolist(), rr_up_intersect.tolist()])]
        side_b = [geom.LineString([lr_bottom_intersect.tolist(), rr_bottom_intersect.tolist()])]
        for adj_line in baselines[:idx] + baselines[idx+1:]:
            adj_line = geom.LineString(adj_line)
            if upper_polygon.intersects(adj_line):
                side_a.append(adj_line)
            elif bottom_polygon.intersects(adj_line):
                side_b.append(adj_line)
        side_a = unary_union(side_a)
        side_b = unary_union(side_b)
        env_up = []
        env_bottom = []
        # find nearest points from baseline to previously selected baselines
        for point in line:
            _, upper_limit = nearest_points(geom.Point(point), side_a)
            _, bottom_limit = nearest_points(geom.Point(point), side_b)
            env_up.extend(list(upper_limit.coords))
            env_bottom.extend(list(bottom_limit.coords))
        env_up = np.array(env_up, dtype='uint')
        env_bottom = np.array(env_bottom, dtype='uint')
        polygons.append(_extract_patch(env_up, env_bottom, line))
    return polygons
def polygonal_reading_order(lines: Sequence[Tuple[List, List]], text_direction: str = 'lr') -> Sequence[Tuple[List, List]]:
    """
    Given a list of baselines, calculates the correct reading order and applies
    it to the input.

    Args:
        lines (Sequence): List of tuples containing the baseline and it's
                          polygonization.
        text_direction (str): Set principal text direction for column ordering.
                              Can be 'lr' or 'rl'

    Returns:
        A reordered input.
    """
    # NOTE(review): LineString.bounds yields (minx, miny, maxx, maxy); confirm
    # the slice pairs built from it match the (y-slice, x-slice) layout that
    # reading_order() expects.
    extents = [geom.LineString(entry[0]).bounds for entry in lines]
    slices = [(slice(b[0], b[1]), slice(b[2], b[3])) for b in extents]
    return [lines[i] for i in topsort(reading_order(slices, text_direction))]
def scale_polygonal_lines(lines: Sequence[Tuple[List, List]], scale: Union[float, Tuple[float, float]]) -> Sequence[Tuple[List, List]]:
    """
    Scales baselines/polygon coordinates by a certain factor.

    Args:
        lines (Sequence): List of tuples containing the baseline and it's
                          polygonization.
        scale (float or tuple of floats): Scaling factor
    """
    factor = (scale, scale) if isinstance(scale, float) else scale
    return [((np.array(baseline) * factor).astype('int').tolist(),
             (np.array(polygon) * factor).astype('int').tolist())
            for baseline, polygon in lines]
def _test_intersect(bp, uv, bs):
"""
Returns the intersection points of a ray with direction `uv` from
`bp` with a polygon `bs`.
"""
u = bp - np.roll(bs, 2)
v = bs - np.roll(bs, 2)
points = []
for dir in ((1,-1), (-1,1)):
w = (uv * dir * (1,-1))[::-1]
z = np.dot(v, w)
t1 = np.cross(v, u) / z
t2 = np.dot(u, w) / z
t1 = t1[np.logical_and(t2 >= 0.0, t2 <= 1.0)]
points.extend(bp + (t1[np.where(t1 >= 0)[0].min()] * (uv * dir)))
return np.array(points)
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (dict): Either a baseline record ({'type': 'baselines',
                       'lines': [{'baseline': ..., 'boundary': ...}, ...]})
                       or a legacy box record ({'boxes': [(x1, y1, x2, y2),
                       ...], 'text_direction': ...}).

    Yields:
        (PIL.Image) the extracted subimage
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        # the intersection code below divides by zero for axis-parallel rays
        old_settings = np.seterr(all='ignore')
        siz = im.size
        white = Image.new(im.mode, siz)
        for line in bounds['lines']:
            # blank out everything outside the line's bounding polygon
            mask = Image.new('1', siz, 0)
            draw = ImageDraw.Draw(mask)
            draw.polygon([tuple(x) for x in line['boundary']], outline=1, fill=1)
            masked_line = Image.composite(im, white, mask)
            bl = np.array(line['baseline'])
            # consecutive baseline segments and their midpoints
            ls = np.dstack((bl[:-1:], bl[1::]))
            bisect_points = np.mean(ls, 2)
            # per-segment normal directions (swapped x/y of the segment vector)
            norm_vec = (ls[...,1] - ls[...,0])[:,::-1]
            norm_vec_len = np.sqrt(np.sum(norm_vec**2, axis=1))
            unit_vec = norm_vec / np.tile(norm_vec_len, (2, 1)).T # without
                                                             # multiplication
                                                             # with (1,-1)-upper/
                                                             # (-1, 1)-lower
            # NOTE(review): this rebinds the `bounds` parameter; safe only
            # because the loop iterator over bounds['lines'] already exists.
            bounds = np.array(line['boundary'])
            # boundary intersections of the normals through each midpoint
            src_points = np.stack([_test_intersect(bp, uv, bounds) for bp, uv in zip(bisect_points, unit_vec)])
            # vertical distances from midpoints to the upper/lower boundary hits
            upper_dist = np.diag(distance_matrix(src_points[:,:2], bisect_points))
            upper_dist = np.dstack((np.zeros_like(upper_dist), upper_dist)).squeeze(0)
            lower_dist = np.diag(distance_matrix(src_points[:,2:], bisect_points))
            lower_dist = np.dstack((np.zeros_like(lower_dist), lower_dist)).squeeze(0)
            # map baseline points to straight baseline
            bl_dists = np.cumsum(np.diag(np.roll(squareform(pdist(bl)), 1)))
            bl_dst_pts = bl[0] + np.dstack((bl_dists, np.zeros_like(bl_dists))).squeeze(0)
            rect_bisect_pts = np.mean(np.dstack((bl_dst_pts[:-1:], bl_dst_pts[1::])), 2)
            upper_dst_pts = rect_bisect_pts - upper_dist
            lower_dst_pts = rect_bisect_pts + lower_dist
            # correspondences for the dewarping transform
            src_points = np.concatenate((bl, src_points[:,:2], src_points[:,2:]))
            dst_points = np.concatenate((bl_dst_pts, upper_dst_pts, lower_dst_pts))
            tform = PiecewiseAffineTransform()
            tform.estimate(src_points, dst_points)
            # warp returns floats in [0, 1]; rescale to 8-bit
            i = Image.fromarray((warp(masked_line, tform) * 255).astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            # reject boxes partially or fully outside the image
            if (box < [0, 0, 0, 0] or box[::2] > [im.size[0], im.size[0]] or
                    box[1::2] > [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(box, im.size))
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
Fix: selected wrong indices in polygonization — the right-hand boundary rays must originate from the baseline's end point (rr[1]), not the segment's first point (rr[0]).
# -*- coding: utf-8 -*-
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Processing for baseline segmenter output
"""
import PIL
import logging
import warnings
import numpy as np
import shapely.geometry as geom
from PIL import Image, ImageDraw
from numpy.polynomial import Polynomial
from scipy.ndimage import black_tophat
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.morphology import grey_dilation
from scipy.spatial import distance_matrix, Delaunay
from scipy.spatial.distance import cdist, pdist, squareform
from shapely.ops import nearest_points, unary_union
from skimage import draw
from skimage.filters import apply_hysteresis_threshold
from skimage.measure import approximate_polygon, find_contours
from skimage.morphology import skeletonize, watershed
from skimage.transform import PiecewiseAffineTransform, warp
from typing import List, Tuple, Union, Dict, Any, Sequence
logger = logging.getLogger('kraken')
def reading_order(lines: Sequence, text_direction: str = 'lr') -> List:
    """Computes the partial reading order of a list of lines.

    Args:
        lines (Sequence): list of (y-slice, x-slice) pairs delimiting each
                          line's extent.
        text_direction (str): principal text direction, 'lr' or 'rl'.

    Returns:
        A binary 2D array such that order[i, j] is 1 if line i comes before
        line j in reading order.
    """
    logger.info('Compute reading order on {} lines in {} direction'.format(len(lines), text_direction))
    order = np.zeros((len(lines), len(lines)), 'B')

    def _x_overlaps(u, v):
        # horizontal extents intersect
        return u[1].start < v[1].stop and u[1].stop > v[1].start

    def _above(u, v):
        return u[0].start < v[0].start

    def _left_of(u, v):
        return u[1].stop < v[1].start

    def _separates(w, u, v):
        # w lies vertically between u and v and horizontally spans the gap
        if w[0].stop < min(u[0].start, v[0].start):
            return 0
        if w[0].start > max(u[0].stop, v[0].stop):
            return 0
        if w[1].start < u[1].stop and w[1].stop > v[1].start:
            return 1
        return 0

    if text_direction == 'rl':
        def horizontal_order(u, v):
            return not _left_of(u, v)
    else:
        horizontal_order = _left_of

    for i, u in enumerate(lines):
        for j, v in enumerate(lines):
            if _x_overlaps(u, v):
                if _above(u, v):
                    order[i, j] = 1
            else:
                # impose horizontal order only if no third line separates u and v
                if not any(_separates(w, u, v) for w in lines):
                    if horizontal_order(u, v):
                        order[i, j] = 1
    return order
def topsort(order: np.array) -> np.array:
    """Compute a topological sort of the partial order given as a binary
    matrix (order[i, j] truthy means i precedes j). Quick-and-dirty DFS;
    adequate for up to a few thousand elements."""
    logger.info('Perform topological sort on partially ordered lines')
    num_nodes = len(order)
    seen = np.zeros(num_nodes)
    result = []

    def _dfs(node):
        # emit all predecessors of `node` before `node` itself
        if seen[node]:
            return
        seen[node] = 1
        predecessors, = np.nonzero(np.ravel(order[:, node]))
        for pred in predecessors:
            _dfs(pred)
        result.append(node)

    for node in range(num_nodes):
        _dfs(node)
    return result
def denoising_hysteresis_thresh(im, low, high, sigma):
    """Hysteresis-threshold `im` between `low` and `high` after Gaussian
    smoothing with standard deviation `sigma`."""
    smoothed = gaussian_filter(im, sigma)
    return apply_hysteresis_threshold(smoothed, low, high)
def _find_superpixels(skeleton, heatmap, min_sp_dist):
logger.debug('Finding superpixels')
conf_map = heatmap * skeleton
sp_idx = np.unravel_index(np.argsort(1.-conf_map, axis=None), conf_map.shape)
if not sp_idx[0].any():
logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
return np.empty(0)
zeroes_idx = conf_map[sp_idx].argmin()
if not zeroes_idx:
logger.info('No superpixel candidates found for line vectorizer. Likely empty page.')
return np.empty(0)
sp_idx = sp_idx[0][:zeroes_idx], sp_idx[1][:zeroes_idx]
sp_can = [(sp_idx[0][0], sp_idx[1][0])]
for x in range(len(sp_idx[0])):
loc = np.array([[sp_idx[0][x], sp_idx[1][x]]])
if min(cdist(sp_can, loc)) > min_sp_dist:
sp_can.extend(loc.tolist())
return np.array(sp_can)
def _compute_sp_states(sp_can, bl_map, sep_map):
    """
    Estimates the superpixel state information.

    Builds a Delaunay triangulation over the superpixel candidates and, for
    each triangulation edge, samples intensity statistics from the baseline
    and separator probability maps along the straight line connecting its
    end points. Edges with weak or noisy baseline support, or with strong
    separator support, are discarded.

    Args:
        sp_can: array of (row, col) superpixel coordinates.
        bl_map: baseline probability map.
        sep_map: separator probability map.

    Returns:
        Dict mapping each surviving edge (a sorted pair of point tuples) to
        (baseline mean, baseline variance, separator mean, separator max).
    """
    logger.debug('Triangulating superpixels')
    tri = Delaunay(sp_can, qhull_options="QJ Pp")
    indices, indptr = tri.vertex_neighbor_vertices
    # dict mapping each edge to its intensity. Needed for subsequent clustering step.
    intensities = {}
    logger.debug('Computing superpixel state information')
    for vertex in range(len(sp_can)):
        # look up neighboring indices
        neighbors = tri.points[indptr[indices[vertex]:indices[vertex+1]]]
        # calculate intensity of line segments to neighbors in both bl map and separator map
        # (the per-vertex `intensity` list accumulated here previously was
        # dead code and has been removed)
        for nb in neighbors.astype('int'):
            # canonicalize the undirected edge so each pair is stored once
            key = [tuple(sp_can[vertex]), tuple(nb)]
            key.sort()
            key = tuple(key)
            line_locs = draw.line(*(key[0] + key[1]))
            intensities[key] = (bl_map[line_locs].mean(), bl_map[line_locs].var(), sep_map[line_locs].mean(), sep_map[line_locs].max())
    logger.debug('Filtering triangulation')
    # filter edges in triangulation
    for k, v in list(intensities.items()):
        # weak baseline response
        if v[0] < 0.4:
            del intensities[k]
            continue
        # noisy baseline support
        if v[1] > 5e-02:
            del intensities[k]
            continue
        # filter edges with high separator affinity
        if v[2] > 0.125 or v[3] > 0.25 or v[0] < 0.5:
            del intensities[k]
            continue
    return intensities
def _cluster_lines(intensities):
    """
    Clusters lines according to their intensities.

    Greedy agglomerative clustering of triangulation edges: edges sharing an
    endpoint are merged into the same cluster. clusters[0] holds the
    (shrinking) list of still-unassigned edges; real clusters start at
    index 1.
    """
    edge_list = list(intensities.keys())
    def _point_in_cluster(p):
        # index (>= 1) of the cluster containing point p, 0 if unassigned
        for idx, cluster in enumerate(clusters[1:]):
            if p in [point for edge in cluster for point in edge]:
                return idx+1
        return 0
    # cluster
    logger.debug('Computing clusters')
    n = 0
    clusters = [edge_list]
    # NOTE: edge_list is mutated while being iterated, so some edges are
    # skipped within a pass; the outer while-loop reruns until the list
    # length stabilizes, which picks those edges up on a later pass.
    while len(edge_list) != n:
        n = len(edge_list)
        for edge in edge_list:
            cl_p0 = _point_in_cluster(edge[0])
            cl_p1 = _point_in_cluster(edge[1])
            # new cluster case
            if not cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters.append([edge])
            # extend case
            elif cl_p0 and not cl_p1:
                edge_list.remove(edge)
                clusters[cl_p0].append(edge)
            elif cl_p1 and not cl_p0:
                edge_list.remove(edge)
                clusters[cl_p1].append(edge)
            # merge case: edge connects two existing clusters
            elif cl_p0 != cl_p1 and cl_p0 and cl_p1:
                edge_list.remove(edge)
                clusters[min(cl_p0, cl_p1)].extend(clusters.pop(max(cl_p0, cl_p1)))
                clusters[min(cl_p0, cl_p1)].append(edge)
    return clusters
def _interpolate_lines(clusters):
"""
Interpolates the baseline clusters and adds polygonal information.
"""
logger.debug('Reticulating splines')
lines = []
for cluster in clusters[1:]:
points = sorted(set(point for edge in cluster for point in edge), key=lambda x: x[1])
x = [x[1] for x in points]
y = [x[0] for x in points]
# very short lines might not have enough superpixels to ensure a well-conditioned regression
deg = min(len(x)-1, 3)
poly = Polynomial.fit(x, y, deg=deg)
xp, yp = poly.linspace(max(np.diff(poly.domain)//deg, 2))
xp = xp.astype('int')
yp = yp.astype('int')
lines.append(list(zip(xp, yp)))
return lines
def vectorize_lines(im: np.ndarray, threshold: float = 0.2, min_sp_dist: int = 10):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (3, H, W) with the first dimension
                         being a probability distribution over (background,
                         baseline, separators).
        threshold (float): binarization threshold applied to the probability
                           maps.
        min_sp_dist (int): minimum distance between sampled superpixels.

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the points of all baseline polylines.
    """
    # split into baseline and separator map
    bl_map = im[1]
    sep_map = im[2]
    # binarize (renamed from `bin`, which shadowed the builtin)
    bin_im = im > threshold
    skel = skeletonize(bin_im[1])
    sp_can = _find_superpixels(skel, heatmap=bl_map, min_sp_dist=min_sp_dist)
    if not sp_can.size:
        logger.warning('No superpixel candidates found in network output. Likely empty page.')
        return []
    intensities = _compute_sp_states(sp_can, bl_map, sep_map)
    clusters = _cluster_lines(intensities)
    lines = _interpolate_lines(clusters)
    return lines
def calculate_polygonal_environment(im: PIL.Image.Image, baselines: Sequence[Tuple[int, int]]):
    """
    Given a list of baselines and an input image, calculates a polygonal
    environment around each baseline.

    Args:
        im (PIL.Image): grayscale input image (mode 'L')
        baselines (sequence): List of lists containing a single baseline per
                              entry.

    Returns:
        List of polygonizations, one per input baseline, each a list of
        coordinates.
    """
    bounds = np.array(im.size, dtype=np.float)
    im = np.array(im)
    # compute tophat features of input image
    im_feats = black_tophat(im, 3)
    def _ray_intersect_boundaries(ray, direction, aabb):
        """
        Simplified version of [0] for 2d and AABB anchored at (0,0).
        [0] http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
        """
        dir_fraction = np.empty(2, dtype=ray.dtype)
        # inf for axis-parallel components avoids a divide-by-zero below
        dir_fraction[direction == 0.0] = np.inf
        dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0])
        t1 = (-ray[0]) * dir_fraction[0]
        t2 = (aabb[0] - ray[0]) * dir_fraction[0]
        t3 = (-ray[1]) * dir_fraction[1]
        t4 = (aabb[1] - ray[1]) * dir_fraction[1]
        tmin = max(min(t1, t2), min(t3, t4))
        tmax = min(max(t1, t2), max(t3, t4))
        # nearest non-negative parameter along the ray
        t = min(x for x in [tmin, tmax] if x >= 0)
        return ray + (direction * t)
    def _extract_patch(env_up, env_bottom, baseline):
        """
        Calculate a line image patch from a ROI and the original baseline.
        """
        # marker image: 2 on the baseline, 1 on the polygon envelope
        markers = np.zeros(bounds.astype('int')[::-1], dtype=np.int)
        for l in zip(baseline[:-1], baseline[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 2
        for l in zip(env_up[:-1], env_up[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        for l in zip(env_bottom[:-1], env_bottom[1:]):
            line_locs = draw.line(l[0][1], l[0][0], l[1][1], l[1][0])
            markers[line_locs] = 1
        markers = grey_dilation(markers, size=3)
        full_polygon = np.concatenate((env_up, env_bottom[::-1]))
        r, c = draw.polygon(full_polygon[:,0], full_polygon[:,1])
        mask = np.zeros(bounds.astype('int')[::-1], dtype=np.bool)
        mask[c, r] = True
        patch = im_feats.copy()
        patch[mask != True] = 0
        # crop everything to the polygon's bounding box before the watershed
        coords = np.argwhere(mask)
        r_min, c_min = coords.min(axis=0)
        r_max, c_max = coords.max(axis=0)
        patch = patch[r_min:r_max+1, c_min:c_max+1]
        markers = markers[r_min:r_max+1, c_min:c_max+1]
        mask = mask[r_min:r_max+1, c_min:c_max+1]
        # run watershed
        ws = watershed(patch, markers, 8, mask=mask)
        ws = grey_dilation(ws, size=3)
        # pad output to ensure contour is closed
        ws = np.pad(ws, 1)
        # find contour of central basin
        contours = find_contours(ws, 1.5, fully_connected='high')
        contour = np.array(unary_union([geom.Polygon(contour.tolist()) for contour in contours]).boundary, dtype='uint')
        ## approximate + remove offsets + transpose
        contour = np.transpose((approximate_polygon(contour, 5)-1+(r_min, c_min)), (0, 1)).astype('uint')
        return contour.tolist()
    polygons = []
    for idx, line in enumerate(baselines):
        # find intercepts with image bounds on each side of baseline
        lr = np.array(line[:2], dtype=np.float)
        lr_dir = lr[1] - lr[0]
        lr_dir = (lr_dir.T / np.sqrt(np.sum(lr_dir**2,axis=-1)))
        lr_up_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(-1,1))[::-1], bounds-1).astype('int')
        lr_bottom_intersect = _ray_intersect_boundaries(lr[0], (lr_dir*(1,-1))[::-1], bounds-1).astype('int')
        rr = np.array(line[-2:], dtype=np.float)
        rr_dir = rr[1] - rr[0]
        rr_dir = (rr_dir.T / np.sqrt(np.sum(rr_dir**2,axis=-1)))
        # right-hand rays originate from the baseline's end point rr[1]
        rr_up_intersect = _ray_intersect_boundaries(rr[1], (rr_dir*(-1,1))[::-1], bounds-1).astype('int')
        rr_bottom_intersect = _ray_intersect_boundaries(rr[1], (rr_dir*(1,-1))[::-1], bounds-1).astype('int')
        # build polygon between baseline and bbox intersects
        upper_polygon = geom.Polygon([lr_up_intersect.tolist()] + line + [rr_up_intersect.tolist()])
        bottom_polygon = geom.Polygon([lr_bottom_intersect.tolist()] + line + [rr_bottom_intersect.tolist()])
        # select baselines at least partially in each polygon
        side_a = [geom.LineString([lr_up_intersect.tolist(), rr_up_intersect.tolist()])]
        side_b = [geom.LineString([lr_bottom_intersect.tolist(), rr_bottom_intersect.tolist()])]
        for adj_line in baselines[:idx] + baselines[idx+1:]:
            adj_line = geom.LineString(adj_line)
            if upper_polygon.intersects(adj_line):
                side_a.append(adj_line)
            elif bottom_polygon.intersects(adj_line):
                side_b.append(adj_line)
        side_a = unary_union(side_a)
        side_b = unary_union(side_b)
        env_up = []
        env_bottom = []
        # find nearest points from baseline to previously selected baselines
        for point in line:
            _, upper_limit = nearest_points(geom.Point(point), side_a)
            _, bottom_limit = nearest_points(geom.Point(point), side_b)
            env_up.extend(list(upper_limit.coords))
            env_bottom.extend(list(bottom_limit.coords))
        env_up = np.array(env_up, dtype='uint')
        env_bottom = np.array(env_bottom, dtype='uint')
        polygons.append(_extract_patch(env_up, env_bottom, line))
    return polygons
def polygonal_reading_order(lines: Sequence[Tuple[List, List]], text_direction: str = 'lr') -> Sequence[Tuple[List, List]]:
    """
    Given a list of baselines, calculates the correct reading order and applies
    it to the input.

    Args:
        lines (Sequence): List of tuples containing the baseline and it's
                          polygonization.
        text_direction (str): Set principal text direction for column ordering.
                              Can be 'lr' or 'rl'

    Returns:
        A reordered input.
    """
    # NOTE(review): LineString.bounds yields (minx, miny, maxx, maxy); confirm
    # the slice pairs built from it match the (y-slice, x-slice) layout that
    # reading_order() expects.
    extents = [geom.LineString(entry[0]).bounds for entry in lines]
    slices = [(slice(b[0], b[1]), slice(b[2], b[3])) for b in extents]
    return [lines[i] for i in topsort(reading_order(slices, text_direction))]
def scale_polygonal_lines(lines: Sequence[Tuple[List, List]], scale: Union[float, Tuple[float, float]]) -> Sequence[Tuple[List, List]]:
    """
    Scales baselines/polygon coordinates by a certain factor.

    Args:
        lines (Sequence): List of tuples containing the baseline and it's
                          polygonization.
        scale (float or tuple of floats): Scaling factor
    """
    factor = (scale, scale) if isinstance(scale, float) else scale
    return [((np.array(baseline) * factor).astype('int').tolist(),
             (np.array(polygon) * factor).astype('int').tolist())
            for baseline, polygon in lines]
def _test_intersect(bp, uv, bs):
"""
Returns the intersection points of a ray with direction `uv` from
`bp` with a polygon `bs`.
"""
u = bp - np.roll(bs, 2)
v = bs - np.roll(bs, 2)
points = []
for dir in ((1,-1), (-1,1)):
w = (uv * dir * (1,-1))[::-1]
z = np.dot(v, w)
t1 = np.cross(v, u) / z
t2 = np.dot(u, w) / z
t1 = t1[np.logical_and(t2 >= 0.0, t2 <= 1.0)]
points.extend(bp + (t1[np.where(t1 >= 0)[0].min()] * (uv * dir)))
return np.array(points)
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (dict): Either a baseline record ({'type': 'baselines',
                       'lines': [{'baseline': ..., 'boundary': ...}, ...]})
                       or a legacy box record ({'boxes': [(x1, y1, x2, y2),
                       ...], 'text_direction': ...}).

    Yields:
        (PIL.Image) the extracted subimage
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        # the intersection code below divides by zero for axis-parallel rays
        old_settings = np.seterr(all='ignore')
        siz = im.size
        white = Image.new(im.mode, siz)
        for line in bounds['lines']:
            # blank out everything outside the line's bounding polygon
            mask = Image.new('1', siz, 0)
            draw = ImageDraw.Draw(mask)
            draw.polygon([tuple(x) for x in line['boundary']], outline=1, fill=1)
            masked_line = Image.composite(im, white, mask)
            bl = np.array(line['baseline'])
            # consecutive baseline segments and their midpoints
            ls = np.dstack((bl[:-1:], bl[1::]))
            bisect_points = np.mean(ls, 2)
            # per-segment normal directions (swapped x/y of the segment vector)
            norm_vec = (ls[...,1] - ls[...,0])[:,::-1]
            norm_vec_len = np.sqrt(np.sum(norm_vec**2, axis=1))
            unit_vec = norm_vec / np.tile(norm_vec_len, (2, 1)).T # without
                                                             # multiplication
                                                             # with (1,-1)-upper/
                                                             # (-1, 1)-lower
            # NOTE(review): this rebinds the `bounds` parameter; safe only
            # because the loop iterator over bounds['lines'] already exists.
            bounds = np.array(line['boundary'])
            # boundary intersections of the normals through each midpoint
            src_points = np.stack([_test_intersect(bp, uv, bounds) for bp, uv in zip(bisect_points, unit_vec)])
            # vertical distances from midpoints to the upper/lower boundary hits
            upper_dist = np.diag(distance_matrix(src_points[:,:2], bisect_points))
            upper_dist = np.dstack((np.zeros_like(upper_dist), upper_dist)).squeeze(0)
            lower_dist = np.diag(distance_matrix(src_points[:,2:], bisect_points))
            lower_dist = np.dstack((np.zeros_like(lower_dist), lower_dist)).squeeze(0)
            # map baseline points to straight baseline
            bl_dists = np.cumsum(np.diag(np.roll(squareform(pdist(bl)), 1)))
            bl_dst_pts = bl[0] + np.dstack((bl_dists, np.zeros_like(bl_dists))).squeeze(0)
            rect_bisect_pts = np.mean(np.dstack((bl_dst_pts[:-1:], bl_dst_pts[1::])), 2)
            upper_dst_pts = rect_bisect_pts - upper_dist
            lower_dst_pts = rect_bisect_pts + lower_dist
            # correspondences for the dewarping transform
            src_points = np.concatenate((bl, src_points[:,:2], src_points[:,2:]))
            dst_points = np.concatenate((bl_dst_pts, upper_dst_pts, lower_dst_pts))
            tform = PiecewiseAffineTransform()
            tform.estimate(src_points, dst_points)
            # warp returns floats in [0, 1]; rescale to 8-bit
            i = Image.fromarray((warp(masked_line, tform) * 255).astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            # reject boxes partially or fully outside the image
            if (box < [0, 0, 0, 0] or box[::2] > [im.size[0], im.size[0]] or
                box[1::2] > [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(box, im.size))
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
|
#!usr/bin/env python
"""
Custom library for interfacing with MediaWiki through API
Released under the MIT License
(C) Legoktm 2008-2009
See COPYING for full License
"""
import urllib2, urllib, re, time, getpass, cookielib
from datetime import datetime
import config
import simplejson, sys, os
class NotLoggedIn(Exception):
    """Raised when an action requires an authenticated user but none is logged in."""
class UserBlocked(Exception):
    """Raised when the acting user account is blocked on the wiki."""
class NoPage(Exception):
    """Raised when the requested page does not exist."""
class IsRedirectPage(Exception):
    """Raised when the page is a redirect to another target page."""
class APIError(Exception):
    """Raised for a general error reported by the MediaWiki API."""
class NotCategory(Exception):
    """Raised when a page expected to be a category is not one."""
class API:
    """Low-level MediaWiki API client (Python 2).

    Handles per-user cookie persistence, request encoding/decoding and
    automatic continuation of long queries ("query-continue").
    """
    def __init__(self, wiki = config.wiki, login=False, debug=False, qcontinue = True):
        #set up the cookies
        # one cookie file per configured username; spaces replaced for safe paths
        self.COOKIEFILE = os.environ['PWD'] + '/cookies/'+ config.username +'.data'
        self.COOKIEFILE = self.COOKIEFILE.replace(' ','_')
        self.cj = cookielib.LWPCookieJar()
        if os.path.isfile(self.COOKIEFILE):
            self.cj.load(self.COOKIEFILE)
        elif not login:
            # no saved session and we are not in the middle of logging in
            raise NotLoggedIn('Please login by first running wiki.py')
        if wiki == 'commons':
            # convenience alias: 'commons' -> commons.wikimedia host
            self.wiki = 'commons.wikimedia'
        else:
            self.wiki = wiki
        self.debug = debug
        self.qcontinue = qcontinue
    def query(self, params, after = None, write = False):
        """POST params (plus format=json) to the API; return the decoded dict.

        after: raw string appended to the urlencoded body (e.g. '&movetalk').
        write: accepted for caller symmetry; not inspected here.
        Raises APIError when the response carries an 'error' member or
        decodes to something other than a dict.
        """
        # reload cookies in case another process refreshed the session
        if os.path.isfile(self.COOKIEFILE):
            self.cj.load(self.COOKIEFILE)
        self.params = params
        self.params['format'] = 'json'
        self.encodeparams = urllib.urlencode(self.params)
        if after:
            self.encodeparams += after
        if self.debug:
            print self.encodeparams
        self.headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": config.username,
            "Content-length": len(self.encodeparams),
        }
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
        self.request = urllib2.Request(config.apipath %(self.wiki), self.encodeparams, self.headers)
#        print 'Querying API'
        self.response = urllib2.urlopen(self.request)
        self.cj.save(self.COOKIEFILE)
        text = self.response.read()
        newtext = simplejson.loads(text)
        #errors should be handled now
        try:
            if newtext.has_key('error'):
                raise APIError(newtext['error'])
        except AttributeError:
            # response decoded to a non-dict (e.g. a bare list): still an error
            raise APIError(newtext)
        #finish query-continues
        if ('query-continue' in newtext) and self.qcontinue:
            newtext = self.__longQuery(newtext)
        return newtext
    def __longQuery(self, firstres):
        """Keep re-querying while the API returns 'query-continue' data,
        merging every page of results into a single combined dict."""
        total = res = firstres
        params = self.params
        numkeys = len(res['query-continue'].keys())
        while numkeys > 0:
            keylist = res['query-continue'].keys()
            keylist.reverse()
            key1 = keylist[0]
            key2 = res['query-continue'][key1].keys()[0]
            # continuation value may be numeric or a unicode string
            if isinstance(res['query-continue'][key1][key2], int):
                cont = res['query-continue'][key1][key2]
            else:
                cont = res['query-continue'][key1][key2].encode('utf-8')
            params[key2] = cont
            # qcontinue=False so the nested query does not recurse in turn.
            # NOTE(review): this nested API() uses the default wiki, not
            # self.wiki -- verify when querying a non-default wiki.
            res = API(qcontinue=False).query(params)
            for type in keylist:
                total = self.__resultCombine(type, total, res)
            if 'query-continue' in res:
                numkeys = len(res['query-continue'].keys())
            else:
                numkeys = 0
        return total
    def __resultCombine(self, type, old, new):
        """
        Experimental-ish result-combiner thing
        If the result isn't something from action=query,
        this will just explode, but that shouldn't happen hopefully?
        (taken from python-wikitools)
        """
        ret = old
        if type in new['query']: # Basic list, easy
            ret['query'][type].extend(new['query'][type])
        else: # Else its some sort of prop=thing and/or a generator query
            for key in new['query']['pages'].keys(): # Go through each page
                if not key in old['query']['pages']: # if it only exists in the new one
                    ret['query']['pages'][key] = new['query']['pages'][key] # add it to the list
                else:
                    for item in new['query']['pages'][key][type]:
                        if item not in ret['query']['pages'][key][type]: # prevent duplicates
                            ret['query']['pages'][key][type].append(item) # else update the existing one
        return ret
class Page:
def __init__(self, page):
self.API = API()
self.page = page
self.__basicinfo = self.__basicinfo()
if self.__basicinfo.has_key('redirect'):
self.redirect = True
else:
self.redirect = False
self.ns = self.__basicinfo['ns']
self.Site = Site()
def __basicinfo(self):
params = {
'action':'query',
'prop':'info',
'titles':self.page,
}
res = self.API.query(params)
id = res['query']['pages'].keys()[0]
dict = res['query']['pages'][id]
return dict
def title(self):
return self.page
def get(self, force = False):
if self.redirect and (not force):
raise IsRedirectPage(self.API.query({'action':'query','titles':self.page,'redirects':''})['query']['redirects'][0]['to'])
params = {
'action':'query',
'prop':'revisions',
'titles':self.page,
'rvprop':'content',
}
res = self.API.query(params)['query']['pages']
if res.keys()[0] == '-1':
raise NoPage(self.page)
content = res[res.keys()[0]]['revisions'][0]['*']
return content.encode('utf-8')
def put(self, newtext, summary, watch = False, newsection = False):
#get the token
tokenparams = {
'action':'query',
'prop':'info',
'intoken':'edit',
'titles':self.page
}
token = self.API.query(tokenparams)['query']['pages']
token = token[token.keys()[0]]['edittoken']
# print token
#do the edit
params = {
'action':'edit',
'title':self.page,
'text':newtext,
'summary':summary,
'token':token,
}
print 'Going to change [[%s]]' %(self.page)
if watch:
params['watch'] = ''
if newsection:
params['section'] = 'new'
#check if we have waited 10 seconds since the last edit
FILE = os.environ['PWD'] + '/cookies/lastedit.data'
try:
text = open(FILE, 'r').read()
split = text.split('|')
date = datetime(int(split[0]), int(split[1]), int(split[2]), int(split[3]), int(split[4]), int(split[5]))
except IOError:
date = datetime.now()
delta = datetime.now() - date
if delta.seconds < 10:
print 'Sleeping %s seconds' %(10-delta.seconds)
time.sleep(10-delta.seconds)
else:
print 'Last editted %s seconds ago.' %delta.seconds
print 'Sleeping for 2 seconds'
time.sleep(2)
#update the file
d = datetime.now()
newtext = str(d.year) +'|'+ str(d.month) +'|'+ str(d.day) +'|'+ str(d.hour) +'|'+ str(d.minute) +'|'+ str(d.second)
write = open(FILE, 'w')
write.write(newtext)
write.close()
#the actual write query
res=self.API.query(params, write = True)
if res.has_key('error'):
raise APIError(res['error'])
if res['edit']['result'] == 'Success':
print 'Changing [[%s]] was successful.' %self.page
else:
print 'Changing [[%s]] failed.' %self.page
raise APIError(res)
def titlewonamespace(self, ns=False):
if not ns:
ns = Page(self.page).namespace()
else:
ns = int(ns)
if ns == 0:
return self.page
else:
return self.page.split(':')[1]
def namespace(self):
return self.ns
def lastedit(self, prnt = False):
params = {
'action':'query',
'prop':'revisions',
'titles':self.page,
'rvprop':'user|comment',
}
res = self.API.query(params)['query']['pages']
ret = res[res.keys()[0]]['revisions'][0]
print ret
if prnt:
print 'The last edit on %s was made by: %s with the comment of: %s.' %(page, ret['user'], ret['comment'])
return ret
def istalk(self):
if self.ns != -1 or self.ns != -2:
if self.ns%2 == 0:
return False
elif self.ns%2 == 1:
return True
else:
sys.exit("Error: Python Division error")
else:
return False
def toggletalk(self):
try:
nstext = self.page.split(':')[0]
except:
nstext = ''
nsnum = self.Site.namespacelist()[1][nstext]
if nsnum == -1 or nsnum == -2:
print 'Cannot toggle the talk of a Special or Media page.'
return self.page
istalk = self.istalk()
if istalk:
nsnewtext = self.Site.namespacelist()[0][nsnum-1]
else:
nsnewtext = self.Site.namespacelist()[0][nsnum+1]
tt = nsnewtext + ':' + self.page.split(':')[1]
return tt
def isCategory(self):
return self.namespace() == 14
def isImage(self):
return self.namespace() == 6
def patrol(self, rcid):
params = {
'action':'patrol',
'rcid':rcid,
'token':self.edittoken
}
self.API.query(params)
def exists(self):
if self.__basicinfo.has_key('missing'):
return False
else:
return True
def move(self, newtitle, summary, movetalk = True):
tokenparams = {
'action':'query',
'prop':'info',
'intoken':'move',
'titles':self.page
}
token = self.API.query(tokenparams)['query']['pages']
token = token[token.keys()[0]]['movetoken']
params = {
'action':'move',
'from':self.page,
'to':newtitle,
'reason':summary,
'token':token
}
if movetalk:
res = self.API.query(params,'&movetalk')
else:
res = self.API.query(params)
if res.has_key('error'):
raise APIError(res['error'])
if res.has_key('move'):
print 'Page move of %s to %s succeeded' (self.page, newtitle)
return res
"""
Class that is mainly internal working, but contains information relevant
to the wiki site.
"""
class Site:
    """Site-level information (namespace lists etc.) for the configured wiki."""
    def __init__(self, wiki = config.wiki):
        # the original named this method __iter__, so the constructor never
        # ran and self.wiki / self.API were never set; __init__ was intended
        self.wiki = wiki
        self.API = API()
    def namespacelist(self):
        """Return a (number -> name, name -> number) namespace mapping pair."""
        params = {
            'action':'query',
            'meta':'siteinfo',
            'siprop':'namespaces',
        }
        res = self.API.query(params)
        resd = res['query']['namespaces']
        nstotext = {}
        texttons = {}
        # keys are namespace numbers as strings; '*' holds the local name
        for ns in resd.keys():
            nstotext[int(ns)] = resd[ns]['*']
            texttons[resd[ns]['*']] = int(ns)
        self.nslist = (nstotext, texttons)
        return self.nslist
"""
Other functions
"""
def checklogin():
    """Return the logged-in username, or False for an anonymous session.

    Also warns about new talk-page messages and, when config.quitonmess
    is set, exits the process on new messages.
    """
    paramscheck = {
        'action':'query',
        'meta':'userinfo',
        'uiprop':'hasmsg',
    }
    querycheck = API().query(paramscheck)
    name = querycheck['query']['userinfo']['name']
    print name
    # 'messages' key is present when the user has unread talk messages
    if querycheck['query']['userinfo'].has_key('messages'):
        print 'You have new messages on %s.' %(config.wiki)
        if config.quitonmess:
            sys.exit()
    # 'anon' key is present when the session is not logged in
    if querycheck['query']['userinfo'].has_key('anon'):
        return False
    return name
def login(username = False):
if not username:
username = config.username
try:
password = config.password
except:
password = getpass.getpass('API Login Password for %s: ' %username)
params = {
'action' : 'login',
'lgname' : username,
'lgpassword' : password,
}
query = API(login=True).query(params)
result = query['login']['result'].lower()
if result == 'success':
print 'Successfully logged in on %s.' %(config.wiki)
else:
print 'Failed to login on %s.' %(config.wiki)
raise APIError(query)
if __name__ == "__main__":
login()
add more login failed messages
git-svn-id: 2e40ff80485b5e2f2cc1551d2bae08335b3d761f@102 03ba9718-dac9-11dd-86d7-074df07e0730
#!usr/bin/env python
"""
Custom library for interfacing with MediaWiki through API
Released under the MIT License
(C) Legoktm 2008-2009
See COPYING for full License
"""
import urllib2, urllib, re, time, getpass, cookielib
from datetime import datetime
import config
import simplejson, sys, os
# Exception types raised by the API / Page helpers below.
class NotLoggedIn(Exception):
    """User is not logged in"""
class UserBlocked(Exception):
    """User is blocked"""
class NoPage(Exception):
    """Page does not exist"""
class IsRedirectPage(Exception):
    """Page is a redirect to target (the target title is the exception argument)"""
class APIError(Exception):
    """General API error (carries the decoded API response)"""
class NotCategory(Exception):
    """When expected page should be category, but is not"""
# Login failures form their own small hierarchy under LoginError so
# callers can catch all of them at once.
class LoginError(Exception):
    """General login error"""
class WrongPass(LoginError):
    """Wrong password entered"""
class LoginThrottled(LoginError):
    """Login throttled by MediaWiki"""
class API:
    """Low-level MediaWiki API client (Python 2).

    Handles per-user cookie persistence, request encoding/decoding and
    automatic continuation of long queries ("query-continue").
    """
    def __init__(self, wiki = config.wiki, login=False, debug=False, qcontinue = True):
        #set up the cookies
        # one cookie file per configured username; spaces replaced for safe paths
        self.COOKIEFILE = os.environ['PWD'] + '/cookies/'+ config.username +'.data'
        self.COOKIEFILE = self.COOKIEFILE.replace(' ','_')
        self.cj = cookielib.LWPCookieJar()
        if os.path.isfile(self.COOKIEFILE):
            self.cj.load(self.COOKIEFILE)
        elif not login:
            # no saved session and we are not in the middle of logging in
            raise NotLoggedIn('Please login by first running wiki.py')
        if wiki == 'commons':
            # convenience alias: 'commons' -> commons.wikimedia host
            self.wiki = 'commons.wikimedia'
        else:
            self.wiki = wiki
        self.debug = debug
        self.qcontinue = qcontinue
    def query(self, params, after = None, write = False):
        """POST params (plus format=json) to the API; return the decoded dict.

        after: raw string appended to the urlencoded body (e.g. '&movetalk').
        write: accepted for caller symmetry; not inspected here.
        Raises APIError when the response carries an 'error' member or
        decodes to something other than a dict.
        """
        # reload cookies in case another process refreshed the session
        if os.path.isfile(self.COOKIEFILE):
            self.cj.load(self.COOKIEFILE)
        self.params = params
        self.params['format'] = 'json'
        self.encodeparams = urllib.urlencode(self.params)
        if after:
            self.encodeparams += after
        if self.debug:
            print self.encodeparams
        self.headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": config.username,
            "Content-length": len(self.encodeparams),
        }
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
        self.request = urllib2.Request(config.apipath %(self.wiki), self.encodeparams, self.headers)
#        print 'Querying API'
        self.response = urllib2.urlopen(self.request)
        self.cj.save(self.COOKIEFILE)
        text = self.response.read()
        newtext = simplejson.loads(text)
        #errors should be handled now
        try:
            if newtext.has_key('error'):
                raise APIError(newtext['error'])
        except AttributeError:
            # response decoded to a non-dict (e.g. a bare list): still an error
            raise APIError(newtext)
        #finish query-continues
        if ('query-continue' in newtext) and self.qcontinue:
            newtext = self.__longQuery(newtext)
        return newtext
    def __longQuery(self, firstres):
        """Keep re-querying while the API returns 'query-continue' data,
        merging every page of results into a single combined dict."""
        total = res = firstres
        params = self.params
        numkeys = len(res['query-continue'].keys())
        while numkeys > 0:
            keylist = res['query-continue'].keys()
            keylist.reverse()
            key1 = keylist[0]
            key2 = res['query-continue'][key1].keys()[0]
            # continuation value may be numeric or a unicode string
            if isinstance(res['query-continue'][key1][key2], int):
                cont = res['query-continue'][key1][key2]
            else:
                cont = res['query-continue'][key1][key2].encode('utf-8')
            params[key2] = cont
            # qcontinue=False so the nested query does not recurse in turn.
            # NOTE(review): this nested API() uses the default wiki, not
            # self.wiki -- verify when querying a non-default wiki.
            res = API(qcontinue=False).query(params)
            for type in keylist:
                total = self.__resultCombine(type, total, res)
            if 'query-continue' in res:
                numkeys = len(res['query-continue'].keys())
            else:
                numkeys = 0
        return total
    def __resultCombine(self, type, old, new):
        """
        Experimental-ish result-combiner thing
        If the result isn't something from action=query,
        this will just explode, but that shouldn't happen hopefully?
        (taken from python-wikitools)
        """
        ret = old
        if type in new['query']: # Basic list, easy
            ret['query'][type].extend(new['query'][type])
        else: # Else its some sort of prop=thing and/or a generator query
            for key in new['query']['pages'].keys(): # Go through each page
                if not key in old['query']['pages']: # if it only exists in the new one
                    ret['query']['pages'][key] = new['query']['pages'][key] # add it to the list
                else:
                    for item in new['query']['pages'][key][type]:
                        if item not in ret['query']['pages'][key][type]: # prevent duplicates
                            ret['query']['pages'][key][type].append(item) # else update the existing one
        return ret
class Page:
def __init__(self, page):
self.API = API()
self.page = page
self.__basicinfo = self.__basicinfo()
if self.__basicinfo.has_key('redirect'):
self.redirect = True
else:
self.redirect = False
self.ns = self.__basicinfo['ns']
self.Site = Site()
def __basicinfo(self):
params = {
'action':'query',
'prop':'info',
'titles':self.page,
}
res = self.API.query(params)
id = res['query']['pages'].keys()[0]
dict = res['query']['pages'][id]
return dict
def title(self):
return self.page
def get(self, force = False):
if self.redirect and (not force):
raise IsRedirectPage(self.API.query({'action':'query','titles':self.page,'redirects':''})['query']['redirects'][0]['to'])
params = {
'action':'query',
'prop':'revisions',
'titles':self.page,
'rvprop':'content',
}
res = self.API.query(params)['query']['pages']
if res.keys()[0] == '-1':
raise NoPage(self.page)
content = res[res.keys()[0]]['revisions'][0]['*']
return content.encode('utf-8')
def put(self, newtext, summary, watch = False, newsection = False):
#get the token
tokenparams = {
'action':'query',
'prop':'info',
'intoken':'edit',
'titles':self.page
}
token = self.API.query(tokenparams)['query']['pages']
token = token[token.keys()[0]]['edittoken']
# print token
#do the edit
params = {
'action':'edit',
'title':self.page,
'text':newtext,
'summary':summary,
'token':token,
}
print 'Going to change [[%s]]' %(self.page)
if watch:
params['watch'] = ''
if newsection:
params['section'] = 'new'
#check if we have waited 10 seconds since the last edit
FILE = os.environ['PWD'] + '/cookies/lastedit.data'
try:
text = open(FILE, 'r').read()
split = text.split('|')
date = datetime(int(split[0]), int(split[1]), int(split[2]), int(split[3]), int(split[4]), int(split[5]))
except IOError:
date = datetime.now()
delta = datetime.now() - date
if delta.seconds < 10:
print 'Sleeping %s seconds' %(10-delta.seconds)
time.sleep(10-delta.seconds)
else:
print 'Last editted %s seconds ago.' %delta.seconds
print 'Sleeping for 2 seconds'
time.sleep(2)
#update the file
d = datetime.now()
newtext = str(d.year) +'|'+ str(d.month) +'|'+ str(d.day) +'|'+ str(d.hour) +'|'+ str(d.minute) +'|'+ str(d.second)
write = open(FILE, 'w')
write.write(newtext)
write.close()
#the actual write query
res=self.API.query(params, write = True)
if res.has_key('error'):
raise APIError(res['error'])
if res['edit']['result'] == 'Success':
print 'Changing [[%s]] was successful.' %self.page
else:
print 'Changing [[%s]] failed.' %self.page
raise APIError(res)
def titlewonamespace(self, ns=False):
if not ns:
ns = Page(self.page).namespace()
else:
ns = int(ns)
if ns == 0:
return self.page
else:
return self.page.split(':')[1]
def namespace(self):
return self.ns
def lastedit(self, prnt = False):
params = {
'action':'query',
'prop':'revisions',
'titles':self.page,
'rvprop':'user|comment',
}
res = self.API.query(params)['query']['pages']
ret = res[res.keys()[0]]['revisions'][0]
print ret
if prnt:
print 'The last edit on %s was made by: %s with the comment of: %s.' %(page, ret['user'], ret['comment'])
return ret
def istalk(self):
if self.ns != -1 or self.ns != -2:
if self.ns%2 == 0:
return False
elif self.ns%2 == 1:
return True
else:
sys.exit("Error: Python Division error")
else:
return False
def toggletalk(self):
try:
nstext = self.page.split(':')[0]
except:
nstext = ''
nsnum = self.Site.namespacelist()[1][nstext]
if nsnum == -1 or nsnum == -2:
print 'Cannot toggle the talk of a Special or Media page.'
return self.page
istalk = self.istalk()
if istalk:
nsnewtext = self.Site.namespacelist()[0][nsnum-1]
else:
nsnewtext = self.Site.namespacelist()[0][nsnum+1]
tt = nsnewtext + ':' + self.page.split(':')[1]
return tt
def isCategory(self):
return self.namespace() == 14
def isImage(self):
return self.namespace() == 6
def patrol(self, rcid):
params = {
'action':'patrol',
'rcid':rcid,
'token':self.edittoken
}
self.API.query(params)
def exists(self):
if self.__basicinfo.has_key('missing'):
return False
else:
return True
def move(self, newtitle, summary, movetalk = True):
tokenparams = {
'action':'query',
'prop':'info',
'intoken':'move',
'titles':self.page
}
token = self.API.query(tokenparams)['query']['pages']
token = token[token.keys()[0]]['movetoken']
params = {
'action':'move',
'from':self.page,
'to':newtitle,
'reason':summary,
'token':token
}
if movetalk:
res = self.API.query(params,'&movetalk')
else:
res = self.API.query(params)
if res.has_key('error'):
raise APIError(res['error'])
if res.has_key('move'):
print 'Page move of %s to %s succeeded' (self.page, newtitle)
return res
"""
Class that is mainly internal working, but contains information relevant
to the wiki site.
"""
class Site:
    """Site-level information (namespace lists etc.) for the configured wiki."""
    def __init__(self, wiki = config.wiki):
        # the original named this method __iter__, so the constructor never
        # ran and self.wiki / self.API were never set; __init__ was intended
        self.wiki = wiki
        self.API = API()
    def namespacelist(self):
        """Return a (number -> name, name -> number) namespace mapping pair."""
        params = {
            'action':'query',
            'meta':'siteinfo',
            'siprop':'namespaces',
        }
        res = self.API.query(params)
        resd = res['query']['namespaces']
        nstotext = {}
        texttons = {}
        # keys are namespace numbers as strings; '*' holds the local name
        for ns in resd.keys():
            nstotext[int(ns)] = resd[ns]['*']
            texttons[resd[ns]['*']] = int(ns)
        self.nslist = (nstotext, texttons)
        return self.nslist
"""
Other functions
"""
def checklogin():
    """Return the logged-in username, or False for an anonymous session.

    Also warns about new talk-page messages and, when config.quitonmess
    is set, exits the process on new messages.
    """
    paramscheck = {
        'action':'query',
        'meta':'userinfo',
        'uiprop':'hasmsg',
    }
    querycheck = API().query(paramscheck)
    name = querycheck['query']['userinfo']['name']
    print name
    # 'messages' key is present when the user has unread talk messages
    if querycheck['query']['userinfo'].has_key('messages'):
        print 'You have new messages on %s.' %(config.wiki)
        if config.quitonmess:
            sys.exit()
    # 'anon' key is present when the session is not logged in
    if querycheck['query']['userinfo'].has_key('anon'):
        return False
    return name
def login(username = False):
if not username:
username = config.username
try:
password = config.password
except:
password = getpass.getpass('API Login Password for %s: ' %username)
params = {
'action' : 'login',
'lgname' : username,
'lgpassword' : password,
}
query = API(login=True).query(params)
result = query['login']['result'].lower()
if result == 'success':
print 'Successfully logged in on %s.' %(config.wiki)
elif result == 'wrongpass':
raise WrongPass
elif result == 'throttled':
raise LoginThrottled('Wait %s seconds before trying again.' %(query['login']['wait'])
else:
print 'Failed to login on %s.' %(config.wiki)
raise APIError(query)
if __name__ == "__main__":
login() |
#! /usr/bin/env python
""" NIPAP shell command
A shell command to interact with NIPAP.
"""
import ConfigParser
import csv
import os
import pipes
import re
import shlex
import string
import subprocess
import sys
import pynipap
from pynipap import Pool, Prefix, Tag, VRF, NipapError
from command import Command
# definitions
# ISO 3166-1 alpha-2 codes accepted for the prefix 'country' attribute
valid_countries = [
    'AT', 'DE', 'DK', 'EE', 'FI', 'FR',
    'GB', 'HR', 'LT', 'LV', 'KZ', 'NL',
    'RU', 'SE', 'US' ] # test test, fill up! :)
valid_prefix_types = [ 'host', 'reservation', 'assignment' ]
valid_prefix_status = [ 'assigned', 'reserved', 'quarantine' ]
valid_families = [ 'ipv4', 'ipv6', 'dual-stack' ]
valid_bools = [ 'true', 'false' ]
valid_priorities = [ 'warning', 'low', 'medium', 'high', 'critical' ]
# evil global vars
# vrf / pool are populated lazily by get_vrf() / get_pool();
# cfg is the parsed .nipaprc, set by the CLI entry point before use
vrf = None
cfg = None
pool = None
def setup_connection():
    """ Set up the global pynipap connection object

        Reads connection parameters from the 'global' section of the
        parsed .nipaprc (module-global 'cfg') and exits with an error
        message when any of them is missing.
    """
    # build XML-RPC URI
    try:
        pynipap.xmlrpc_uri = "http://%(username)s:%(password)s@%(hostname)s:%(port)s" % {
                'username': cfg.get('global', 'username'),
                'password': cfg.get('global', 'password'),
                'hostname': cfg.get('global', 'hostname'),
                'port'    : cfg.get('global', 'port')
            }
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        print >> sys.stderr, "Please define the username, password, hostname and port in your .nipaprc under the section 'global'"
        sys.exit(1)
    # NOTE(review): 'ao' is unused locally; presumably constructing
    # AuthOptions registers the options globally in pynipap -- confirm
    # against the pynipap documentation before removing it.
    ao = pynipap.AuthOptions({
        'authoritative_source': 'nipap',
        'username': os.getenv('NIPAP_IMPERSONATE_USERNAME') or cfg.get('global', 'username'),
        'full_name': os.getenv('NIPAP_IMPERSONATE_FULL_NAME'),
        })
def vrf_format(vrf):
    """ Return a one-line human readable description of a VRF

        Falls back to '-' when the VRF has no RT (the default VRF).
    """
    rt_display = vrf.rt or '-'
    return "VRF '%s' [RT: %s]" % (vrf.name, rt_display)
def get_pool(arg = None, opts = None, abort = False):
    """ Returns pool to work with

        Returns a pynipap.Pool object representing the pool we are working
        with.  Looks the pool up by name; with abort=True a missing pool
        terminates the process, otherwise None is cached and returned.
        'opts' is accepted for CLI-callback symmetry but unused here.
    """
    # yep, global variables are evil
    global pool
    try:
        pool = Pool.list({ 'name': arg })[0]
    except IndexError:
        # Pool.list returned no match for this name
        if abort:
            print >> sys.stderr, "Pool '%s' not found." % str(arg)
            sys.exit(1)
        else:
            pool = None
    return pool
def get_vrf(arg = None, default_var = 'default_vrf_rt', abort = False):
    """ Returns VRF to work in

        Returns a pynipap.VRF object representing the VRF we are working
        in. If there is a VRF set globally, return this. If not, fetch the
        VRF named 'arg'. If 'arg' is None, fetch the default_vrf
        attribute from the config file and return this VRF.
    """
    # yep, global variables are evil
    global vrf
    # if there is a VRF set, return it
    if vrf is not None:
        return vrf
    if arg is None:
        # fetch default vrf
        try:
            vrf_rt = cfg.get('global', default_var)
        except ConfigParser.NoOptionError:
            # default to all VRFs
            vrf_rt = 'all'
    else:
        vrf_rt = arg
    if vrf_rt.lower() == 'all':
        # pseudo-VRF matching every VRF
        vrf = VRF()
        vrf.rt = 'all'
    else:
        # '-' or 'none' mean the VRF without an RT (the default VRF)
        if vrf_rt.lower() in ('-', 'none'):
            vrf_rt = None
        try:
            vrf = VRF.search({ 'val1': 'rt',
                'operator': 'equals',
                'val2': vrf_rt
                })['result'][0]
        except (KeyError, IndexError):
            if abort:
                print >> sys.stderr, "VRF with [RT: %s] not found." % str(vrf_rt)
                sys.exit(1)
            else:
                # NOTE(review): stores False (not None) on a failed lookup,
                # so a later call sees "vrf is not None" and returns False
                # as a cached value -- verify callers expect this
                vrf = False
    return vrf
def _str_to_bool(arg):
""" Return True or False depending on input string
Parses the string 'arg' and returns True if it has the value "true",
False if it has the value "false" and throws an exception otherwise.
"""
if arg is None:
return False
if arg == 'true':
return True
elif arg == 'false':
return False
else:
raise ValueError('Only values true and false permitted')
"""
LIST FUNCTIONS
"""
def _expand_list_query(opts):
""" Parse a dict and return a valid query dict
Parses a dict containing object attributes and values and return a
valid NIPAP query dict which regex matches the values and AND:s
together all individual queries. The regex match is anchored in the
beginning of the string.
Example:
{
'name': 'cust',
'vrf': '123:2'
}
will be expanded to the query dict
{
'operator': 'and',
'val1': {
'operator': 'regex_match',
'val1': 'name',
'val2': '^cust'
},
'val2': {
'operator': 'regex_match',
'val1': 'rt',
'val2': '^123:2'
}
}
"""
# create list of query parts
query_parts = []
for key, val in opts.items():
# standard case
operator = 'regex_match'
val1 = key
val2 = "%s" % val
query_parts.append({
'operator': operator,
'val1': val1,
'val2': val2
})
# Sum all query parts to one query
query = {}
if len(query_parts) > 0:
query = query_parts[0]
if len(query_parts) > 1:
for query_part in query_parts[1:]:
query = {
'operator': 'and',
'val1': query_part,
'val2': query
}
return query
def list_pool(arg, opts, shell_opts):
    """ List pools matching a search criteria

        Pages through Pool.smart_search results 100 at a time and prints
        one formatted line per pool.
    """
    search_string = ''
    if type(arg) == list or type(arg) == tuple:
        search_string = ' '.join(arg)
    v = get_vrf(opts.get('vrf_rt'), default_var='default_list_vrf_rt', abort=True)
    # restrict to one VRF unless 'all' was requested
    if v.rt == 'all':
        vrf_q = None
    else:
        vrf_q = {
            'operator': 'equals',
            'val1': 'vrf_rt',
            'val2': v.rt
        }
    offset = 0
    limit = 100
    while True:
        res = Pool.smart_search(search_string, { 'offset': offset, 'max_result': limit }, vrf_q)
        if offset == 0: # first time in loop?
            if len(res['result']) == 0:
                print "No matching pools found"
                return
            # column headers, printed once
            print "%-19s %-2s %-39s %-13s %-8s %s" % (
                "Name", "#", "Description", "Default type", "4 / 6", "Implied VRF"
            )
            print "------------------------------------------------------------------------------------------------"
        for p in res['result']:
            # truncate long descriptions to keep columns aligned
            if len(str(p.description)) > 38:
                desc = p.description[0:34] + "..."
            else:
                desc = p.description
            vrf_rt = '-'
            vrf_name = '-'
            if p.vrf is not None:
                vrf_rt = p.vrf.rt or '-'
                vrf_name = p.vrf.name
            tags = '-'
            if len(p.tags) > 0:
                tags = "#%d" % (len(p.tags))
            print "%-19s %-2s %-39s %-13s %-2s / %-3s [RT: %s] %s" % (
                p.name, tags, desc, p.default_type,
                str(p.ipv4_default_prefix_length or '-'),
                str(p.ipv6_default_prefix_length or '-'),
                vrf_rt, vrf_name
            )
        # fewer results than the limit means we reached the last page
        if len(res['result']) < limit:
            break
        offset += limit
def list_vrf(arg, opts, shell_opts):
    """ List VRFs matching a search criteria

        Pages through VRF.search results 100 at a time and prints one
        formatted line per VRF.
    """
    # rt is a regexp match on the VRF RT but as most people don't expect to see
    # 123:123 in the result when searching for '123:1', we anchor it per default
    if 'rt' in opts:
        opts['rt'] = '^' + opts['rt'] + '$'
    query = _expand_list_query(opts)
    offset = 0
    limit = 100
    while True:
        res = VRF.search(query, { 'offset': offset, 'max_result': limit })
        if offset == 0:
            if len(res['result']) == 0:
                print "No matching VRFs found."
                return
            # column headers, printed once
            print "%-16s %-22s %-2s %-40s" % ("VRF RT", "Name", "#", "Description")
            print "--------------------------------------------------------------------------------"
        for v in res['result']:
            tags = '-'
            if len(v.tags) > 0:
                tags = '#%d' % len(v.tags)
            # truncate long descriptions to keep columns aligned
            if len(unicode(v.description)) > 100:
                desc = v.description[0:97] + "..."
            else:
                desc = v.description
            print "%-16s %-22s %-2s %-40s" % (v.rt or '-', v.name, tags, desc)
        # fewer results than the limit means we reached the last page
        if len(res['result']) < limit:
            break
        offset += limit
def list_prefix(arg, opts, shell_opts):
""" List prefixes matching 'arg'
"""
search_string = ''
if type(arg) == list or type(arg) == tuple:
search_string = ' '.join(arg)
v = get_vrf(opts.get('vrf_rt'), default_var='default_list_vrf_rt', abort=True)
if v.rt == 'all':
vrf_text = 'any VRF'
vrf_q = None
else:
vrf_text = vrf_format(v)
vrf_q = {
'operator': 'equals',
'val1': 'vrf_rt',
'val2': v.rt
}
print "Searching for prefixes in %s..." % vrf_text
offset = 0
# small initial limit for "instant" result
limit = 50
min_indent = 0
while True:
res = Prefix.smart_search(search_string, { 'parents_depth': -1,
'include_neighbors': True, 'offset': offset, 'max_result': limit },
vrf_q)
if offset == 0: # first time in loop?
if len(res['result']) == 0:
print "No addresses matching '%s' found." % search_string
return
if shell_opts.show_interpretation:
print "Query interpretation:"
for interp in res['interpretation']:
text = interp['string']
if interp['interpretation'] == 'unclosed quote':
text = "%s: %s, please close quote!" % (interp['string'], interp['interpretation'])
text2 = "This is not a proper search term as it contains en uneven amount of quotes."
elif interp['attribute'] == 'tag' and interp['operator'] == 'equals_any':
text = "%s: %s must contain %s" % (interp['string'], interp['interpretation'], interp['string'])
text2 = "The tag(s) or inherited tag(s) must contain %s" % interp['string']
elif interp['attribute'] == 'prefix' and interp['operator'] == 'contained_within_equals':
if 'strict_prefix' in interp and 'expanded' in interp:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['strict_prefix'])
text2 = "Prefix must be contained within %s, which is the base prefix of %s (automatically expanded from %s)." % (interp['strict_prefix'], interp['expanded'], interp['string'])
elif 'strict_prefix' in interp:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['strict_prefix'])
text2 = "Prefix must be contained within %s, which is the base prefix of %s." % (interp['strict_prefix'], interp['string'])
elif 'expanded' in interp:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['expanded'])
text2 = "Prefix must be contained within %s (automatically expanded from %s)." % (interp['expanded'], interp['string'])
else:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['string'])
text2 = "Prefix must be contained within %s." % (interp['string'])
elif interp['attribute'] == 'prefix' and interp['operator'] == 'contains_equals':
text = "%s: Prefix that contains %s" % (interp['string'],
interp['string'])
elif interp['attribute'] == 'prefix' and interp['operator'] == 'contains_equals':
text = "%s: %s equal to %s" % (interp['string'],
interp['interpretation'], interp['string'])
else:
text = "%s: %s matching %s" % (interp['string'], interp['interpretation'], interp['string'])
print " -", text
print " ", text2
# Guess the width of the prefix column by looking at the initial
# result set.
for p in res['result']:
indent = p.indent * 2 + len(p.prefix)
if indent > min_indent:
min_indent = indent
min_indent += 15
# print column headers
prefix_str = "%%-14s %%-%ds %%-1s %%-2s %%-19s %%-14s %%-14s %%-s" % min_indent
column_header = prefix_str % ('VRF', 'Prefix', '', '#', 'Node',
'Order', 'Customer', 'Description')
print column_header
print "".join("=" for i in xrange(len(column_header)))
for p in res['result']:
if p.display == False:
continue
try:
tags = '-'
if len(p.tags) > 0:
tags = '#%d' % len(p.tags)
print prefix_str % (p.vrf.rt or '-',
"".join(" " for i in xrange(p.indent)) + p.display_prefix,
p.type[0].upper(), tags, p.node, p.order_id,
p.customer_id, p.description
)
except UnicodeEncodeError, e:
print >> sys.stderr, "\nCrazy encoding for prefix %s\n" % p.prefix
if len(res['result']) < limit:
break
offset += limit
# let consecutive limit be higher to tax the XML-RPC backend less
limit = 200
"""
ADD FUNCTIONS
"""
def _prefix_from_opts(opts):
    """ Return a prefix based on options passed from command line

        Used by add_prefix() and add_prefix_from_pool() to avoid duplicate
        parsing.
    """
    p = Prefix()
    # plain pass-through attributes, copied verbatim from the options
    for attr in ('prefix', 'type', 'description', 'node', 'country',
            'order_id', 'customer_id', 'alarm_priority', 'comment',
            'vlan', 'expires'):
        setattr(p, attr, opts.get(attr))
    p.monitor = _str_to_bool(opts.get('monitor'))
    p.status = opts.get('status') or 'assigned' # default to assigned
    # tags arrive as one CSV string; backslash escapes commas
    p.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
    return p
def add_prefix(arg, opts, shell_opts):
    """ Add prefix to NIPAP

        Three mutually exclusive assignment modes are supported via opts:
        manual ('prefix'), from a pool ('from-pool', delegated to
        add_prefix_from_pool()) and from a covering prefix ('from-prefix').
        'arg' is unused here; 'shell_opts' is part of the common command
        handler signature. Exits the process on error.
    """
    # sanity checks
    if 'from-pool' not in opts and 'from-prefix' not in opts and 'prefix' not in opts:
        print >> sys.stderr, "ERROR: 'prefix', 'from-pool' or 'from-prefix' must be specified."
        sys.exit(1)
    if len([opt for opt in opts if opt in ['from-pool', 'from-prefix', 'prefix']]) > 1:
        print >> sys.stderr, "ERROR: Use either assignment 'from-pool', 'from-prefix' or manual mode (using 'prefix')"
        sys.exit(1)
    # pool assignment has its own code path
    if 'from-pool' in opts:
        return add_prefix_from_pool(arg, opts)
    args = {}
    p = _prefix_from_opts(opts)
    p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
    # extra attributes (AVPs) are given as 'key=value' strings
    for avp in opts.get('extra-attribute', []):
        try:
            key, value = avp.split('=', 1)
        except ValueError:
            print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
            return
        p.avps[key] = value
    if 'from-prefix' in opts:
        args['from-prefix'] = [ opts['from-prefix'], ]
    if 'prefix_length' in opts:
        args['prefix_length'] = int(opts['prefix_length'])
    if 'family' in opts:
        if opts['family'] == 'ipv4':
            family = 4
        elif opts['family'] == 'ipv6':
            family = 6
        elif opts['family'] == 'dual-stack':
            # dual-stack means two allocations, which only the pool code
            # path implements
            print >> sys.stderr, "ERROR: dual-stack mode only valid for from-pool assignments"
            sys.exit(1)
        # NOTE(review): a 'family' value other than the three handled above
        # leaves the local 'family' unbound and this line raises NameError
        # -- presumably option validation prevents that; confirm.
        args['family'] = family
    # try to automatically figure out type for new prefix when not
    # allocating from a pool
    # get a list of prefixes that contain this prefix
    vrf_id = 0
    if p.vrf:
        vrf_id = p.vrf.id
    if 'from-prefix' in args:
        parent_prefix = args['from-prefix'][0]
        parent_op = 'equals'
    else:
        parent_prefix = opts.get('prefix').split('/')[0]
        parent_op = 'contains'
    # prefix must be a CIDR network, ie no bits set in host part, so we
    # remove the prefix length part of the prefix as then the backend will
    # assume all bits being set
    auto_type_query = {
        'val1': {
            'val1' : 'prefix',
            'operator' : parent_op,
            'val2' : parent_prefix
        },
        'operator': 'and',
        'val2': {
            'val1' : 'vrf_id',
            'operator' : 'equals',
            'val2' : vrf_id
        }
    }
    res = Prefix.search(auto_type_query, { })
    # no results, ie the requested prefix is a top level prefix
    if len(res['result']) == 0:
        if p.type is None:
            print >> sys.stderr, "ERROR: Type of prefix must be specified ('assignment' or 'reservation')."
            sys.exit(1)
    else:
        # last prefix in list will be the parent of the new prefix
        parent = res['result'][-1]
        # if the parent is an assignment, we can assume the new prefix to be
        # a host and act accordingly
        if parent.type == 'assignment':
            # automatically set type
            if p.type is None:
                print >> sys.stderr, "WARNING: Parent prefix is of type 'assignment'. Automatically setting type 'host' for new prefix."
            elif p.type == 'host':
                pass
            else:
                print >> sys.stderr, "WARNING: Parent prefix is of type 'assignment'. Automatically overriding specified type '%s' with type 'host' for new prefix." % p.type
            p.type = 'host'
            # if it's a manually specified prefix
            if 'prefix' in opts:
                # fiddle prefix length to all bits set
                if parent.family == 4:
                    p.prefix = p.prefix.split('/')[0] + '/32'
                else:
                    p.prefix = p.prefix.split('/')[0] + '/128'
            # for from-prefix, we set prefix_length to host length
            elif 'from-prefix' in opts:
                if parent.family == 4:
                    args['prefix_length'] = 32
                else:
                    args['prefix_length'] = 128
    try:
        p.save(args)
    except NipapError as exc:
        print >> sys.stderr, "Could not add prefix to NIPAP: %s" % str(exc)
        sys.exit(1)
    # report what was created; hosts and networks get different wording
    if p.type == 'host':
        print "Host %s added to %s: %s" % (p.display_prefix,
            vrf_format(p.vrf), p.node or p.description)
    else:
        print "Network %s added to %s: %s" % (p.display_prefix,
            vrf_format(p.vrf), p.description)
    # optionally create host prefixes inside the new assignment
    if opts.get('add-hosts') is not None:
        if p.type != 'assignment':
            print >> sys.stderr, "ERROR: Not possible to add hosts to non-assignment"
            sys.exit(1)
        for host in opts.get('add-hosts').split(','):
            h_opts = {
                'from-prefix': p.prefix,
                'vrf_rt': p.vrf.rt,
                'type': 'host',
                'node': host
            }
            add_prefix({}, h_opts, {})
def add_prefix_from_pool(arg, opts):
""" Add prefix using from-pool to NIPAP
"""
args = {}
# sanity checking
if 'from-pool' in opts:
res = Pool.list({ 'name': opts['from-pool'] })
if len(res) == 0:
print >> sys.stderr, "No pool named '%s' found." % opts['from-pool']
sys.exit(1)
args['from-pool'] = res[0]
if 'family' not in opts:
print >> sys.stderr, "ERROR: You have to specify the address family."
sys.exit(1)
if opts['family'] == 'ipv4':
afis = [4]
elif opts['family'] == 'ipv6':
afis = [6]
elif opts['family'] == 'dual-stack':
afis = [4, 6]
if 'prefix_length' in opts:
print >> sys.stderr, "ERROR: 'prefix_length' can not be specified for 'dual-stack' assignment"
sys.exit(1)
else:
print >> sys.stderr, "ERROR: 'family' must be one of: %s" % " ".join(valid_families)
sys.exit(1)
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
for afi in afis:
p = _prefix_from_opts(opts)
if opts.get('vrf_rt') is None:
# if no VRF is specified use the pools implied VRF
p.vrf = args['from-pool'].vrf
else:
# use the specified VRF
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
# set type to default type of pool unless already set
if p.type is None:
if args['from-pool'].default_type is None:
print >> sys.stderr, "ERROR: Type not specified and no default-type specified for pool: %s" % opts['from-pool']
p.type = args['from-pool'].default_type
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
args['family'] = afi
try:
p.save(args)
except NipapError as exc:
print >> sys.stderr, "Could not add prefix to NIPAP: %s" % str(exc)
sys.exit(1)
if p.type == 'host':
print "Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description)
else:
print "Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description)
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print >> sys.stderr, "ERROR: Not possible to add hosts to non-assignment"
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {})
def add_vrf(arg, opts, shell_opts):
""" Add VRF to NIPAP
"""
v = VRF()
v.rt = opts.get('rt')
v.name = opts.get('name')
v.description = opts.get('description')
v.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
v.avps[key] = value
try:
v.save()
except pynipap.NipapError as exc:
print >> sys.stderr, "Could not add VRF to NIPAP: %s" % str(exc)
sys.exit(1)
print "Added %s" % (vrf_format(v))
def add_pool(arg, opts, shell_opts):
""" Add a pool.
"""
p = Pool()
p.name = opts.get('name')
p.description = opts.get('description')
p.default_type = opts.get('default-type')
p.ipv4_default_prefix_length = opts.get('ipv4_default_prefix_length')
p.ipv6_default_prefix_length = opts.get('ipv6_default_prefix_length')
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
try:
p.save()
except pynipap.NipapError as exc:
print >> sys.stderr, "Could not add pool to NIPAP: %s" % str(exc)
sys.exit(1)
print "Pool '%s' created." % (p.name)
"""
VIEW FUNCTIONS
"""
def view_vrf(arg, opts, shell_opts):
    """ View a single VRF

        'arg' is the RT of the VRF to show; '-' or 'none' selects the
        default VRF (RT None). Exits the process when not found.
    """
    if arg is None:
        print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
        sys.exit(1)
    # interpret as default VRF (ie, RT = None)
    if arg.lower() in ('-', 'none'):
        arg = None
    try:
        v = VRF.search({
            'val1': 'rt',
            'operator': 'equals',
            'val2': arg }
            )['result'][0]
    except (KeyError, IndexError):
        print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
        sys.exit(1)
    print "-- VRF"
    print " %-26s : %d" % ("ID", v.id)
    print " %-26s : %s" % ("RT", v.rt)
    print " %-26s : %s" % ("Name", v.name)
    print " %-26s : %s" % ("Description", v.description)
    print "-- Extra Attributes"
    if v.avps is not None:
        # case-insensitive sort for stable, human-friendly output
        for key in sorted(v.avps, key=lambda s: s.lower()):
            print " %-26s : %s" % (key, v.avps[key])
    print "-- Tags"
    for tag_name in sorted(v.tags, key=lambda s: s.lower()):
        print " %s" % tag_name
    # statistics
    # guard against division by zero for VRFs without any addresses
    if v.total_addresses_v4 == 0:
        used_percent_v4 = 0
    else:
        used_percent_v4 = (float(v.used_addresses_v4)/v.total_addresses_v4)*100
    if v.total_addresses_v6 == 0:
        used_percent_v6 = 0
    else:
        used_percent_v6 = (float(v.used_addresses_v6)/v.total_addresses_v6)*100
    print "-- Statistics"
    print " %-26s : %s" % ("IPv4 prefixes", v.num_prefixes_v4)
    print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free",
        v.used_addresses_v4, v.free_addresses_v4, used_percent_v4,
        v.total_addresses_v4)
    print " %-26s : %s" % ("IPv6 prefixes", v.num_prefixes_v6)
    # IPv6 counts can exceed float precision; scientific notation keeps
    # the columns readable
    print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free",
        v.used_addresses_v6, v.free_addresses_v6, used_percent_v6,
        v.total_addresses_v6)
def view_pool(arg, opts, shell_opts):
    """ View a single pool

        'arg' is the pool name. Prints pool attributes, statistics and
        the list of member prefixes.
    """
    res = Pool.list({ 'name': arg })
    if len(res) == 0:
        print "No pool with name '%s' found." % arg
        return
    p = res[0]
    # implied VRF is only set once the pool has a member prefix
    vrf_rt = None
    vrf_name = None
    if p.vrf:
        vrf_rt = p.vrf.rt
        vrf_name = p.vrf.name
    print "-- Pool "
    print " %-26s : %d" % ("ID", p.id)
    print " %-26s : %s" % ("Name", p.name)
    print " %-26s : %s" % ("Description", p.description)
    print " %-26s : %s" % ("Default type", p.default_type)
    print " %-26s : %s / %s" % ("Implied VRF RT / name", vrf_rt, vrf_name)
    print " %-26s : %s / %s" % ("Preflen (v4/v6)", str(p.ipv4_default_prefix_length), str(p.ipv6_default_prefix_length))
    print "-- Extra Attributes"
    if p.avps is not None:
        # case-insensitive sort for stable, human-friendly output
        for key in sorted(p.avps, key=lambda s: s.lower()):
            print " %-26s : %s" % (key, p.avps[key])
    print "-- Tags"
    for tag_name in sorted(p.tags, key=lambda s: s.lower()):
        print " %s" % tag_name
    # statistics
    print "-- Statistics"
    # total / used / free prefixes
    # guard against division by zero for empty pools
    if p.total_prefixes_v4 == 0:
        used_percent_v4 = 0
    else:
        used_percent_v4 = (float(p.used_prefixes_v4)/p.total_prefixes_v4)*100
    if p.total_prefixes_v6 == 0:
        used_percent_v6 = 0
    else:
        used_percent_v6 = (float(p.used_prefixes_v6)/p.total_prefixes_v6)*100
    print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 prefixes Used / Free",
        p.used_prefixes_v4, p.free_prefixes_v4, used_percent_v4,
        p.total_prefixes_v4)
    print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 prefixes Used / Free",
        p.used_prefixes_v6, p.free_prefixes_v6, used_percent_v6,
        p.total_prefixes_v6)
    # total / used / free addresses
    if p.total_addresses_v4 == 0:
        used_percent_v4 = 0
    else:
        used_percent_v4 = (float(p.used_addresses_v4)/p.total_addresses_v4)*100
    if p.total_addresses_v6 == 0:
        used_percent_v6 = 0
    else:
        used_percent_v6 = (float(p.used_addresses_v6)/p.total_addresses_v6)*100
    print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free",
        p.used_addresses_v4, p.free_addresses_v4, used_percent_v4,
        p.total_addresses_v4)
    print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free",
        p.used_addresses_v6, p.free_addresses_v6, used_percent_v6,
        p.total_addresses_v6)
    print "\n-- Prefixes in pool - v4: %d v6: %d" % (p.member_prefixes_v4,
        p.member_prefixes_v6)
    # list each member prefix
    res = Prefix.list({ 'pool_id': p.id})
    for pref in res:
        print " %s" % pref.display_prefix
def view_prefix(arg, opts, shell_opts):
""" View a single prefix.
"""
q = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
if v.rt != 'all':
q['vrf_rt'] = v.rt
res = Prefix.list(q)
if len(res) == 0:
vrf_text = 'any VRF'
if v.rt != 'all':
vrf_text = vrf_format(v)
print >> sys.stderr, "Address %s not found in %s." % (arg, vrf_text)
sys.exit(1)
p = res[0]
vrf = p.vrf.rt
print "-- Address "
print " %-26s : %s" % ("Prefix", p.prefix)
print " %-26s : %s" % ("Display prefix", p.display_prefix)
print " %-26s : %s" % ("Type", p.type)
print " %-26s : %s" % ("Status", p.status)
print " %-26s : IPv%s" % ("Family", p.family)
print " %-26s : %s" % ("VRF", vrf)
print " %-26s : %s" % ("Description", p.description)
print " %-26s : %s" % ("Node", p.node)
print " %-26s : %s" % ("Country", p.country)
print " %-26s : %s" % ("Order", p.order_id)
print " %-26s : %s" % ("Customer", p.customer_id)
print " %-26s : %s" % ("VLAN", p.vlan)
print " %-26s : %s" % ("Alarm priority", p.alarm_priority)
print " %-26s : %s" % ("Monitor", p.monitor)
print " %-26s : %s" % ("Added", p.added)
print " %-26s : %s" % ("Last modified", p.last_modified)
print " %-26s : %s" % ("Expires", p.expires or '-')
if p.family == 4:
print " %-26s : %s / %s (%.2f%% of %s)" % ("Addresses Used / Free", p.used_addresses,
p.free_addresses, (float(p.used_addresses)/p.total_addresses)*100,
p.total_addresses)
else:
print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("Addresses Used / Free", p.used_addresses,
p.free_addresses, (float(p.used_addresses)/p.total_addresses)*100,
p.total_addresses)
print "-- Extra Attributes"
if p.avps is not None:
for key in sorted(p.avps, key=lambda s: s.lower()):
print " %-26s : %s" % (key, p.avps[key])
print "-- Tags"
for tag_name in sorted(p.tags, key=lambda s: s.lower()):
print " %s" % tag_name
print "-- Inherited Tags"
for tag_name in sorted(p.inherited_tags, key=lambda s: s.lower()):
print " %s" % tag_name
print "-- Comment"
print p.comment or ''
"""
REMOVE FUNCTIONS
"""
def remove_vrf(arg, opts, shell_opts):
""" Remove VRF
"""
remove_confirmed = shell_opts.force
res = VRF.list({ 'rt': arg })
if len(res) < 1:
print >> sys.stderr, "VRF with [RT: %s] not found." % arg
sys.exit(1)
v = res[0]
if not remove_confirmed:
print "RT: %s\nName: %s\nDescription: %s" % (v.rt, v.name, v.description)
print "\nWARNING: THIS WILL REMOVE THE VRF INCLUDING ALL ITS ADDRESSES"
res = raw_input("Do you really want to remove %s? [y/N]: " % vrf_format(v))
if res == 'y':
remove_confirmed = True
else:
print "Operation canceled."
if remove_confirmed:
v.remove()
print "%s removed." % vrf_format(v)
def remove_pool(arg, opts, shell_opts):
""" Remove pool
"""
remove_confirmed = shell_opts.force
res = Pool.list({ 'name': arg })
if len(res) < 1:
print >> sys.stderr, "No pool with name '%s' found." % arg
sys.exit(1)
p = res[0]
if not remove_confirmed:
res = raw_input("Do you really want to remove the pool '%s'? [y/N]: " % p.name)
if res == 'y':
remove_confirmed = True
else:
print "Operation canceled."
if remove_confirmed:
p.remove()
print "Pool '%s' removed." % p.name
def remove_prefix(arg, opts, shell_opts):
""" Remove prefix
"""
# set up some basic variables
remove_confirmed = shell_opts.force
auth_src = set()
recursive = False
if opts.get('recursive') is True:
recursive = True
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
if v.rt != 'all':
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) < 1:
vrf_text = 'any VRF'
if v.rt != 'all':
vrf_text = vrf_format(v)
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_text)
sys.exit(1)
p = res[0]
if p.authoritative_source != 'nipap':
auth_src.add(p.authoritative_source)
if not remove_confirmed:
if recursive is True or p.type == 'assignment':
# recursive delete
# get affected prefixes
query = {
'val1': 'prefix',
'operator': 'contained_within_equals',
'val2': p.prefix
}
# add VRF to query if we have one
if 'vrf_rt' in spec:
vrf_q = {
'val1': 'vrf_rt',
'operator': 'equals',
'val2': spec['vrf_rt']
}
query = {
'val1': query,
'operator': 'and',
'val2': vrf_q
}
pres = Prefix.search(query, { 'parents_depth': 0, 'max_result': 1200 })
# if recursive is False, this delete will fail, ask user to do recursive
# delete instead
if recursive is False:
if len(pres['result']) > 1:
print "WARNING: %s in %s contains %s hosts." % (p.prefix, vrf_format(p.vrf), len(pres['result']))
res = raw_input("Would you like to recursively delete %s and all hosts? [y/N]: " % (p.prefix))
if res.lower() in [ 'y', 'yes' ]:
recursive = True
else:
print >> sys.stderr, "ERROR: Removal of assignment containing hosts is prohibited. Aborting removal of %s in %s." % (p.prefix, vrf_format(p.vrf))
sys.exit(1)
if recursive is True:
if len(pres['result']) <= 1:
res = raw_input("Do you really want to remove the prefix %s in %s? [y/N]: " % (p.prefix, vrf_format(p.vrf)))
if res.lower() in [ 'y', 'yes' ]:
remove_confirmed = True
else:
print "Recursively deleting %s in %s will delete the following prefixes:" % (p.prefix, vrf_format(p.vrf))
# Iterate prefixes to print a few of them and check the prefixes'
# authoritative source
i = 0
for rp in pres['result']:
if i <= 10:
print "%-29s %-2s %-19s %-14s %-14s %-40s" % ("".join(" " for i in
range(rp.indent)) + rp.display_prefix,
rp.type[0].upper(), rp.node, rp.order_id,
rp.customer_id, rp.description)
if i == 10:
print ".. and %s other prefixes" % (len(pres['result']) - 10)
if rp.authoritative_source != 'nipap':
auth_src.add(rp.authoritative_source)
i += 1
if len(auth_src) == 0:
# Simple case; all prefixes were added from NIPAP
res = raw_input("Do you really want to recursively remove %s prefixes in %s? [y/N]: " % (len(pres['result']),
vrf_format(vrf)))
if res.lower() in [ 'y', 'yes' ]:
remove_confirmed = True
else:
# we have prefixes with authoritative source != nipap
auth_src = list(auth_src)
plural = ""
# format prompt depending on how many different sources we have
if len(auth_src) == 1:
systems = "'%s'" % auth_src[0]
prompt = "Enter the name of the managing system to continue or anything else to abort: "
else:
systems = ", ".join("'%s'" % x for x in auth_src[1:]) + " and '%s'" % auth_src[0]
plural = "s"
prompt = "Enter the name of the last managing system to continue or anything else to abort: "
print ("Prefix %s in %s contains prefixes managed by the system%s %s. " +
"Are you sure you want to remove them? ") % (p.prefix,
vrf_format(p.vrf), plural, systems)
res = raw_input(prompt)
# Did the user provide the correct answer?
if res.lower() == auth_src[0].lower():
remove_confirmed = True
else:
print >> sys.stderr, "System names did not match."
sys.exit(1)
else:
# non recursive delete
if len(auth_src) > 0:
auth_src = list(auth_src)
print ("Prefix %s in %s is managed by the system '%s'. " +
"Are you sure you want to remove it? ") % (p.prefix,
vrf_format(p.vrf), auth_src[0])
res = raw_input("Enter the name of the managing system to continue or anything else to abort: ")
if res.lower() == auth_src[0].lower():
remove_confirmed = True
else:
print >> sys.stderr, "System names did not match."
sys.exit(1)
else:
res = raw_input("Do you really want to remove the prefix %s in %s? [y/N]: " % (p.prefix, vrf_format(p.vrf)))
if res.lower() in [ 'y', 'yes' ]:
remove_confirmed = True
if remove_confirmed is True:
p.remove(recursive = recursive)
if recursive is True:
print "Prefix %s and %s other prefixes in %s removed." % (p.prefix,
(len(pres['result']) - 1), vrf_format(p.vrf))
else:
print "Prefix %s in %s removed." % (p.prefix, vrf_format(p.vrf))
else:
print "Operation canceled."
"""
MODIFY FUNCTIONS
"""
def modify_vrf(arg, opts, shell_opts):
""" Modify a VRF with the options set in opts
"""
res = VRF.list({ 'rt': arg })
if len(res) < 1:
print >> sys.stderr, "VRF with [RT: %s] not found." % arg
sys.exit(1)
v = res[0]
if 'rt' in opts:
v.rt = opts['rt']
if 'name' in opts:
v.name = opts['name']
if 'description' in opts:
v.description = opts['description']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
v.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
v.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
v.avps[key] = value
v.save()
print "%s saved." % vrf_format(v)
def modify_pool(arg, opts, shell_opts):
""" Modify a pool with the options set in opts
"""
res = Pool.list({ 'name': arg })
if len(res) < 1:
print >> sys.stderr, "No pool with name '%s' found." % arg
sys.exit(1)
p = res[0]
if 'name' in opts:
p.name = opts['name']
if 'description' in opts:
p.description = opts['description']
if 'default-type' in opts:
p.default_type = opts['default-type']
if 'ipv4_default_prefix_length' in opts:
p.ipv4_default_prefix_length = opts['ipv4_default_prefix_length']
if 'ipv6_default_prefix_length' in opts:
p.ipv6_default_prefix_length = opts['ipv6_default_prefix_length']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
p.save()
print "Pool '%s' saved." % p.name
def grow_pool(arg, opts, shell_opts):
    """ Expand a pool with the ranges set in opts

        Adds the existing prefix opts['add'] as a member of the pool.
        NOTE(review): relies on the module-global 'pool' having been set
        by the command dispatch before this runs -- 'arg' is only used in
        the error message; confirm against the command tree.
    """
    if not pool:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)
    if not 'add' in opts:
        print >> sys.stderr, "Please supply a prefix to add to pool '%s'" % pool.name
        sys.exit(1)
    # Figure out VRF.
    # If pool already has a member prefix, implied_vrf will be set. Look for new
    # prefix to add in the same vrf as implied_vrf.
    # If pool has no members, then use get_vrf() to get vrf to search in for
    # prefix to add.
    if pool.vrf is not None:
        v = pool.vrf
    else:
        v = get_vrf(opts.get('vrf_rt'), abort=True)
    q = { 'prefix': opts['add'] }
    if v.rt != 'all':
        q['vrf_rt'] = v.rt
    res = Prefix.list(q)
    if len(res) == 0:
        print >> sys.stderr, "No prefix found matching %s in %s." % (opts['add'], vrf_format(v))
        sys.exit(1)
    elif res[0].pool:
        # prefix is already a member of a pool -- this one or another
        if res[0].pool == pool:
            print >> sys.stderr, "Prefix %s in %s is already assigned to that pool." % (opts['add'], vrf_format(v))
        else:
            print >> sys.stderr, "Prefix %s in %s is already assigned to a different pool ('%s')." % (opts['add'], vrf_format(v), res[0].pool.name)
        sys.exit(1)
    # attach the prefix to the pool and persist
    res[0].pool = pool
    res[0].save()
    print "Prefix %s in %s added to pool '%s'." % (res[0].prefix, vrf_format(v), pool.name)
def shrink_pool(arg, opts, shell_opts):
    """ Shrink a pool by removing the ranges in opts from it

        Detaches prefix opts['remove'] from the pool; without 'remove' it
        lists the current members instead. NOTE(review): like grow_pool(),
        this relies on the module-global 'pool' being set by the command
        dispatch before the call; confirm against the command tree.
    """
    if not pool:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)
    if 'remove' in opts:
        res = Prefix.list({'prefix': opts['remove'], 'pool_id': pool.id})
        if len(res) == 0:
            print >> sys.stderr, "Pool '%s' does not contain %s." % (pool.name,
                opts['remove'])
            sys.exit(1)
        # detach the prefix from the pool and persist
        res[0].pool = None
        res[0].save()
        print "Prefix %s removed from pool '%s'." % (res[0].prefix, pool.name)
    else:
        # no prefix given -- show the current members to help the user
        print >> sys.stderr, "Please supply a prefix to add or remove to '%s':" % (
            pool.name)
        for pref in Prefix.list({'pool_id': pool.id}):
            print " %s" % pref.prefix
def modify_prefix(arg, opts, shell_opts):
""" Modify the prefix 'arg' with the options 'opts'
"""
modify_confirmed = shell_opts.force
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
return
p = res[0]
if 'prefix' in opts:
p.prefix = opts['prefix']
if 'description' in opts:
p.description = opts['description']
if 'comment' in opts:
p.comment = opts['comment']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
if 'node' in opts:
p.node = opts['node']
if 'type' in opts:
p.type = opts['type']
if 'status' in opts:
p.status = opts['status']
if 'country' in opts:
p.country = opts['country']
if 'order_id' in opts:
p.order_id = opts['order_id']
if 'customer_id' in opts:
p.customer_id = opts['customer_id']
if 'vlan' in opts:
p.vlan = opts['vlan']
if 'alarm_priority' in opts:
p.alarm_priority = opts['alarm_priority']
if 'monitor' in opts:
p.monitor = _str_to_bool(opts['monitor'])
if 'expires' in opts:
p.expires = opts['expires']
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
# Promt user if prefix has authoritative source != nipap
if not modify_confirmed and p.authoritative_source.lower() != 'nipap':
res = raw_input("Prefix %s in %s is managed by system '%s'. Are you sure you want to modify it? [y/n]: " %
(p.prefix, vrf_format(p.vrf), p.authoritative_source))
# If the user declines, short-circuit...
if res.lower() not in [ 'y', 'yes' ]:
print "Operation aborted."
return
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
sys.exit(1)
print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))
def prefix_attr_add(arg, opts, shell_opts):
""" Add attributes to a prefix
"""
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
return
p = res[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in p.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
p.avps[key] = value
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
sys.exit(1)
print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))
def prefix_attr_remove(arg, opts, shell_opts):
""" Remove attributes from a prefix
"""
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
return
p = res[0]
for key in opts.get('extra-attribute', []):
if key not in p.avps:
print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
sys.exit(1)
del p.avps[key]
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
sys.exit(1)
print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))
def vrf_attr_add(arg, opts, shell_opts):
""" Add attributes to a VRF
"""
if arg is None:
print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
sys.exit(1)
# interpret as default VRF (ie, RT = None)
if arg.lower() in ('-', 'none'):
arg = None
try:
v = VRF.search({
'val1': 'rt',
'operator': 'equals',
'val2': arg }
)['result'][0]
except (KeyError, IndexError):
print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
sys.exit(1)
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in v.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
v.avps[key] = value
try:
v.save()
except NipapError as exc:
print >> sys.stderr, "Could not save VRF changes: %s" % str(exc)
sys.exit(1)
print "%s saved." % vrf_format(v)
def vrf_attr_remove(arg, opts, shell_opts):
""" Remove attributes from a prefix
"""
if arg is None:
print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
sys.exit(1)
# interpret as default VRF (ie, RT = None)
if arg.lower() in ('-', 'none'):
arg = None
try:
v = VRF.search({
'val1': 'rt',
'operator': 'equals',
'val2': arg }
)['result'][0]
except (KeyError, IndexError):
print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
sys.exit(1)
for key in opts.get('extra-attribute', []):
if key not in v.avps:
print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
sys.exit(1)
del v.avps[key]
try:
v.save()
except NipapError as exc:
print >> sys.stderr, "Could not save VRF changes: %s" % str(exc)
sys.exit(1)
print "%s saved." % vrf_format(v)
def pool_attr_add(arg, opts, shell_opts):
""" Add attributes to a pool
"""
res = Pool.list({ 'name': arg })
if len(res) < 1:
print >> sys.stderr, "No pool with name '%s' found." % arg
sys.exit(1)
p = res[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in p.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
p.avps[key] = value
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save pool changes: %s" % str(exc)
sys.exit(1)
print "Pool '%s' saved." % p.name
def pool_attr_remove(arg, opts, shell_opts):
    """ Remove extra-attributes (AVPs) from a pool

        'arg' is the pool name; each entry in opts['extra-attribute'] is
        the key of an attribute to delete. (Docstring previously said
        "prefix" -- this operates on pools.)
    """
    res = Pool.list({ 'name': arg })
    if len(res) < 1:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)
    p = res[0]
    # only attributes that actually exist may be removed
    for key in opts.get('extra-attribute', []):
        if key not in p.avps:
            print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
            sys.exit(1)
        del p.avps[key]
    try:
        p.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save pool changes: %s" % str(exc)
        sys.exit(1)
    print "Pool '%s' saved." % p.name
"""
COMPLETION FUNCTIONS
"""
def _complete_string(key, haystack):
""" Returns valid string completions
Takes the string 'key' and compares it to each of the strings in
'haystack'. The ones which beginns with 'key' are returned as result.
"""
if len(key) == 0:
return haystack
match = []
for straw in haystack:
if string.find(straw, key) == 0:
match.append(straw)
return match
def complete_bool(arg):
    """ Complete strings "true" and "false"

        Returns the entries of valid_bools that start with 'arg'.
    """
    return _complete_string(arg, valid_bools)
def complete_country(arg):
    """ Complete country codes ("SE", "DE", ...)

        Returns the entries of valid_countries that start with 'arg'.
    """
    return _complete_string(arg, valid_countries)
def complete_family(arg):
    """ Complete inet family ("ipv4", "ipv6")

        Returns the entries of valid_families that start with 'arg'.
    """
    return _complete_string(arg, valid_families)
def complete_tags(arg):
    """ Return names of tags matching the partial string 'arg'.
    """
    # anchor the regex at the start so only prefix matches are returned
    search_string = '^'
    if arg is not None:
        search_string += arg
    res = Tag.search({
        'operator': 'regex_match',
        'val1': 'name',
        'val2': search_string
        })
    return [t.name for t in res['result']]
def complete_pool_members(arg):
    """ Complete member prefixes of pool

        Returns the member prefixes of the current pool that start with
        'arg'.
    """
    # pool should already be globally set
    res = []
    for member in Prefix.list({ 'pool_id': pool.id }):
        res.append(member.prefix)
    return _complete_string(arg, res)
def complete_prefix_type(arg):
    """ Complete NIPAP prefix type

        Returns the entries of valid_prefix_types that start with 'arg'.
    """
    return _complete_string(arg, valid_prefix_types)
def complete_prefix_status(arg):
    """ Complete NIPAP prefix status

        Returns the entries of valid_prefix_status that start with 'arg'.
    """
    return _complete_string(arg, valid_prefix_status)
def complete_priority(arg):
    """ Complete NIPAP alarm priority

        Returns the entries of valid_priorities that start with 'arg'.
    """
    return _complete_string(arg, valid_priorities)
def complete_node(arg):
    """ Complete node hostname

        Unlike the other completers this one shells out: the config
        attribute "complete_node_cmd" names an external command, in which
        the string "%search_string%" is replaced by the current (shell-
        quoted) search string. The command's stdout, one hostname per
        line, becomes the completion list.
    """
    # get complete command from config; no command means no completions
    try:
        command = cfg.get('global', 'complete_node_cmd')
    except ConfigParser.NoOptionError:
        return [ '', ]
    command = re.sub('%search_string%', pipes.quote(arg), command)
    proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    output, _ = proc.communicate()
    return output.split('\n')
def complete_pool_name(arg):
    """ Return names of pools matching the partial string 'arg'.
    """
    # anchor the regex at the start so only prefix matches are returned
    search_string = '^'
    if arg is not None:
        search_string += arg
    res = Pool.search({
        'operator': 'regex_match',
        'val1': 'name',
        'val2': search_string
        })
    return [p.name for p in res['result']]
def complete_vrf(arg):
    """ Return RTs of VRFs matching 'arg'.
    """
    pattern = ''
    if arg is not None:
        pattern = '^%s' % arg
    res = VRF.search({
        'operator': 'regex_match',
        'val1': 'rt',
        'val2': pattern
        }, { 'max_result': 100000 } )
    matches = [v.rt for v in res['result']]
    # 'none' is a valid pseudo-RT referring to the default VRF
    if re.match(pattern, 'none'):
        matches.append('none')
    return matches
def complete_vrf_virtual(arg):
    """ Return VRF RT completions, including the virtual VRF 'all'.

        'all' is only meaningful in search operations.
    """
    matches = complete_vrf(arg)
    pattern = '' if arg is None else '^%s' % arg
    if re.match(pattern, 'all'):
        matches.append('all')
    return matches
""" The NIPAP command tree
"""
cmds = {
'type': 'command',
'children': {
'address': {
'type': 'command',
'children': {
# add
'add': {
'type': 'command',
'exec': add_prefix,
'children': {
'add-hosts': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'comment': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'country': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_country,
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'family': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_family,
}
},
'status': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix status: %s' % ' | '.join(valid_prefix_status),
'content_type': unicode,
'complete': complete_prefix_status,
}
},
'type': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix type: reservation | assignment | host',
'content_type': unicode,
'complete': complete_prefix_type,
}
},
'from-pool': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_pool_name,
}
},
'from-prefix': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'node': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_node,
}
},
'order_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'customer_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
'prefix': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int
}
},
'monitor': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_bool,
}
},
'vlan': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int
}
},
'alarm_priority': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_priority,
}
},
'expires': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
}
},
},
# list
'list': {
'type': 'command',
'exec': list_prefix,
'rest_argument': {
'type': 'value',
'content_type': unicode,
'description': 'Prefix',
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf_virtual,
},
}
}
},
# modify
'modify': {
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Prefix to edit',
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
},
'exec_immediately': get_vrf
},
'add': {
'type': 'command',
'exec': prefix_attr_add,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'remove': {
'type': 'command',
'exec': prefix_attr_remove,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'set': {
'type': 'command',
'exec': modify_prefix,
'children': {
'comment': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'country': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_country,
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'family': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_family,
}
},
'status': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix status: %s' % ' | '.join(valid_prefix_status),
'content_type': unicode,
'complete': complete_prefix_status,
}
},
'type': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix type: reservation | assignment | host',
'content_type': unicode,
'complete': complete_prefix_type,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
'node': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_node,
}
},
'order_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'customer_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'prefix': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
'monitor': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_bool,
}
},
'vlan': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int
}
},
'alarm_priority': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_priority,
}
},
'expires': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
}
}
}
}
},
# remove
'remove': {
'type': 'command',
'exec': remove_prefix,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Remove address'
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
'recursive': {
'type': 'bool'
}
}
},
# view
'view': {
'type': 'command',
'exec': view_prefix,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Address to view'
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
}
}
}
},
# VRF commands
'vrf': {
'type': 'command',
'children': {
# add
'add': {
'type': 'command',
'exec': add_vrf,
'children': {
'rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF RT'
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF name',
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Description of the VRF'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
}
}
},
# list
'list': {
'type': 'command',
'exec': list_vrf,
'children': {
'rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF RT',
'complete': complete_vrf,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF name',
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Description of the VRF'
}
}
}
},
# view
'view': {
'exec': view_vrf,
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
}
},
# remove
'remove': {
'exec': remove_vrf,
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
}
},
# modify
'modify': {
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
},
'children': {
'add': {
'type': 'command',
'exec': vrf_attr_add,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'remove': {
'type': 'command',
'exec': vrf_attr_remove,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'set': {
'type': 'command',
'exec': modify_vrf,
'children': {
'rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF RT'
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF name',
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Description of the VRF'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
}
}
}
}
},
# pool commands
'pool': {
'type': 'command',
'children': {
# add
'add': {
'type': 'command',
'exec': add_pool,
'children': {
'default-type': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Default prefix type: reservation | assignment | host',
'complete': complete_prefix_type,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Name of the pool'
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'A short description of the pool'
}
},
'ipv4_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv4 prefix length'
}
},
'ipv6_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv6 prefix length'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
# list
'list': {
'type': 'command',
'exec': list_pool,
'rest_argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool',
},
'children': {
'default-type': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Default prefix type: reservation | assignment | host',
'complete': complete_prefix_type,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Name of the pool'
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'A short description of the pool'
}
},
'ipv4_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv4 prefix length'
}
},
'ipv6_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv6 prefix length'
}
},
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'The implied VRF of the pool',
'complete': complete_vrf_virtual
}
}
}
},
# remove
'remove': {
'type': 'command',
'exec': remove_pool,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
}
},
# resize
'resize': {
'type': 'command',
'exec_immediately': get_pool,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
},
'children': {
'add': {
'type': 'option',
'exec': grow_pool,
'argument': {
'type': 'value',
'content_type': unicode,
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
}
},
'remove': {
'type': 'option',
'exec': shrink_pool,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_pool_members,
}
}
}
},
# modify
'modify': {
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
},
'children': {
'add': {
'type': 'command',
'exec': pool_attr_add,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'remove': {
'type': 'command',
'exec': pool_attr_remove,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'set': {
'type': 'command',
'exec': modify_pool,
'children': {
'default-type': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Default prefix type: reservation | assignment | host',
'complete': complete_prefix_type,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Name of the pool'
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'A short description of the pool'
}
},
'ipv4_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv4 prefix length'
}
},
'ipv6_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv6 prefix length'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
}
}
},
# view
'view': {
'exec': view_pool,
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
}
}
}
}
}
}
if __name__ == '__main__':
    # parse the command line against the command tree; a ValueError means
    # the input could not be matched to any command/option
    try:
        cmd = Command(cmds, sys.argv[1::])
    except ValueError as exc:
        print >> sys.stderr, "Error: %s" % str(exc)
        sys.exit(1)

    # execute command
    if cmd.exe is None:
        print "Incomplete command specified"
        print "valid completions: %s" % " ".join(cmd.next_values())
        sys.exit(1)

    try:
        cmd.exe(cmd.arg, cmd.exe_options)
    except NipapError as exc:
        print >> sys.stderr, "Command failed:\n  %s" % str(exc)
        sys.exit(1)
cli: fix viewing of pool statistics
To calculate correct statistics there needs to be at least one member
prefix, and the default prefix length needs to be set for the address
family in question. When this is not the case, the CLI would throw a
traceback, but by adding some checks (similar to the ones in the web UI)
we avoid division by None and instead display a message stating that
statistics aren't available and why.
I've added another issue (#693) to track the issue that the backend
calculates statistics differently for prefixes and addresses.
Fixes #672.
#! /usr/bin/env python
""" NIPAP shell command
A shell command to interact with NIPAP.
"""
import ConfigParser
import csv
import os
import pipes
import re
import shlex
import string
import subprocess
import sys
import pynipap
from pynipap import Pool, Prefix, Tag, VRF, NipapError
from command import Command
# definitions

# ISO 3166-1 alpha-2 country codes accepted for the 'country' attribute
valid_countries = [
    'AT', 'DE', 'DK', 'EE', 'FI', 'FR',
    'GB', 'HR', 'LT', 'LV', 'KZ', 'NL',
    'RU', 'SE', 'US' ] # test test, fill up! :)
valid_prefix_types = [ 'host', 'reservation', 'assignment' ]
valid_prefix_status = [ 'assigned', 'reserved', 'quarantine' ]
valid_families = [ 'ipv4', 'ipv6', 'dual-stack' ]
valid_bools = [ 'true', 'false' ]
valid_priorities = [ 'warning', 'low', 'medium', 'high', 'critical' ]

# evil global vars
vrf = None      # VRF currently being worked in, set by get_vrf()
cfg = None      # parsed .nipaprc configuration
pool = None     # pool currently being worked on, set by get_pool()
def setup_connection():
    """ Set up the global pynipap connection object

        Reads username, password, hostname and port from the 'global'
        section of the configuration (module global 'cfg') and sets the
        module-wide XML-RPC URI. Exits the process with an error message
        if any mandatory option is missing.
    """
    # build XML-RPC URI
    try:
        pynipap.xmlrpc_uri = "http://%(username)s:%(password)s@%(hostname)s:%(port)s" % {
            'username': cfg.get('global', 'username'),
            'password': cfg.get('global', 'password'),
            'hostname': cfg.get('global', 'hostname'),
            'port'    : cfg.get('global', 'port')
        }
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        print >> sys.stderr, "Please define the username, password, hostname and port in your .nipaprc under the section 'global'"
        sys.exit(1)

    # NOTE(review): 'ao' is never used locally - presumably AuthOptions
    # registers itself module-wide inside pynipap so that later API calls
    # pick up these options; confirm against the pynipap documentation
    ao = pynipap.AuthOptions({
        'authoritative_source': 'nipap',
        'username': os.getenv('NIPAP_IMPERSONATE_USERNAME') or cfg.get('global', 'username'),
        'full_name': os.getenv('NIPAP_IMPERSONATE_FULL_NAME'),
    })
def vrf_format(vrf):
    """ Return a human-readable one-line representation of a VRF.

        A VRF without an RT (the default VRF) is shown with '-' as RT.
    """
    rt = vrf.rt or '-'
    return "VRF '%s' [RT: %s]" % (vrf.name, rt)
def get_pool(arg = None, opts = None, abort = False):
    """ Returns pool to work with

        Returns a pynipap.Pool object representing the pool we are working with.

        'arg' is the pool name to look up. If no pool by that name exists,
        the process exits when 'abort' is True, otherwise None is returned
        (and stored in the global). 'opts' is accepted for interface
        compatibility with the command tree but is unused here.
    """
    # yep, global variables are evil
    global pool

    try:
        pool = Pool.list({ 'name': arg })[0]
    except IndexError:
        if abort:
            print >> sys.stderr, "Pool '%s' not found." % str(arg)
            sys.exit(1)
        else:
            pool = None

    return pool
def get_vrf(arg = None, default_var = 'default_vrf_rt', abort = False):
    """ Returns VRF to work in

        Returns a pynipap.VRF object representing the VRF we are working
        in. If there is a VRF set globally, return this. If not, fetch the
        VRF named 'arg'. If 'arg' is None, fetch the default_vrf
        attribute from the config file and return this VRF.

        The special RT 'all' yields a bare VRF object with rt == 'all'
        (a "virtual" VRF used in searches); '-' or 'none' mean the
        default VRF (RT None). On lookup failure the process exits when
        'abort' is True, otherwise False is stored/returned.
    """
    # yep, global variables are evil
    global vrf

    # if there is a VRF set, return it
    if vrf is not None:
        return vrf

    if arg is None:
        # fetch default vrf
        try:
            vrf_rt = cfg.get('global', default_var)
        except ConfigParser.NoOptionError:
            # default to all VRFs
            vrf_rt = 'all'
    else:
        vrf_rt = arg

    if vrf_rt.lower() == 'all':
        vrf = VRF()
        vrf.rt = 'all'
    else:
        if vrf_rt.lower() in ('-', 'none'):
            vrf_rt = None

        try:
            vrf = VRF.search({ 'val1': 'rt',
                'operator': 'equals',
                'val2': vrf_rt
            })['result'][0]
        except (KeyError, IndexError):
            if abort:
                print >> sys.stderr, "VRF with [RT: %s] not found." % str(vrf_rt)
                sys.exit(1)
            else:
                vrf = False

    return vrf
def _str_to_bool(arg):
""" Return True or False depending on input string
Parses the string 'arg' and returns True if it has the value "true",
False if it has the value "false" and throws an exception otherwise.
"""
if arg is None:
return False
if arg == 'true':
return True
elif arg == 'false':
return False
else:
raise ValueError('Only values true and false permitted')
"""
LIST FUNCTIONS
"""
def _expand_list_query(opts):
""" Parse a dict and return a valid query dict
Parses a dict containing object attributes and values and return a
valid NIPAP query dict which regex matches the values and AND:s
together all individual queries. The regex match is anchored in the
beginning of the string.
Example:
{
'name': 'cust',
'vrf': '123:2'
}
will be expanded to the query dict
{
'operator': 'and',
'val1': {
'operator': 'regex_match',
'val1': 'name',
'val2': '^cust'
},
'val2': {
'operator': 'regex_match',
'val1': 'rt',
'val2': '^123:2'
}
}
"""
# create list of query parts
query_parts = []
for key, val in opts.items():
# standard case
operator = 'regex_match'
val1 = key
val2 = "%s" % val
query_parts.append({
'operator': operator,
'val1': val1,
'val2': val2
})
# Sum all query parts to one query
query = {}
if len(query_parts) > 0:
query = query_parts[0]
if len(query_parts) > 1:
for query_part in query_parts[1:]:
query = {
'operator': 'and',
'val1': query_part,
'val2': query
}
return query
def list_pool(arg, opts, shell_opts):
    """ List pools matching a search criteria

        'arg' holds the free-form search words; the 'vrf_rt' option (or
        the configured default) limits the search to one VRF. Results are
        fetched in pages of 100 and printed as a table.
    """
    search_string = ''
    if type(arg) == list or type(arg) == tuple:
        search_string = ' '.join(arg)

    v = get_vrf(opts.get('vrf_rt'), default_var='default_list_vrf_rt', abort=True)
    if v.rt == 'all':
        # no VRF filter - search in all VRFs
        vrf_q = None
    else:
        vrf_q = {
            'operator': 'equals',
            'val1': 'vrf_rt',
            'val2': v.rt
        }
    offset = 0
    limit = 100
    while True:
        res = Pool.smart_search(search_string, { 'offset': offset, 'max_result': limit }, vrf_q)
        if offset == 0: # first time in loop?
            if len(res['result']) == 0:
                print "No matching pools found"
                return

            print "%-19s %-2s %-39s %-13s %-8s %s" % (
                "Name", "#", "Description", "Default type", "4 / 6", "Implied VRF"
            )
            print "------------------------------------------------------------------------------------------------"

        for p in res['result']:
            # truncate long descriptions to fit the column
            if len(str(p.description)) > 38:
                desc = p.description[0:34] + "..."
            else:
                desc = p.description

            vrf_rt = '-'
            vrf_name = '-'
            if p.vrf is not None:
                vrf_rt = p.vrf.rt or '-'
                vrf_name = p.vrf.name

            # '#' column shows the number of tags, or '-' when untagged
            tags = '-'
            if len(p.tags) > 0:
                tags = "#%d" % (len(p.tags))
            print "%-19s %-2s %-39s %-13s %-2s / %-3s [RT: %s] %s" % (
                p.name, tags, desc, p.default_type,
                str(p.ipv4_default_prefix_length or '-'),
                str(p.ipv6_default_prefix_length or '-'),
                vrf_rt, vrf_name
            )

        if len(res['result']) < limit:
            break
        offset += limit
def list_vrf(arg, opts, shell_opts):
    """ List VRFs matching a search criteria

        The 'rt', 'name' and 'description' options are regex-matched
        against the VRF attributes. Results are fetched in pages of 100
        and printed as a table.
    """
    # rt is a regexp match on the VRF RT but as most people don't expect to see
    # 123:123 in the result when searching for '123:1', we anchor it per default
    if 'rt' in opts:
        opts['rt'] = '^' + opts['rt'] + '$'

    query = _expand_list_query(opts)

    offset = 0
    limit = 100
    while True:
        res = VRF.search(query, { 'offset': offset, 'max_result': limit })
        if offset == 0:
            if len(res['result']) == 0:
                print "No matching VRFs found."
                return

            print "%-16s %-22s %-2s %-40s" % ("VRF RT", "Name", "#", "Description")
            print "--------------------------------------------------------------------------------"

        for v in res['result']:
            # '#' column shows the number of tags, or '-' when untagged
            tags = '-'
            if len(v.tags) > 0:
                tags = '#%d' % len(v.tags)
            # truncate very long descriptions
            if len(unicode(v.description)) > 100:
                desc = v.description[0:97] + "..."
            else:
                desc = v.description
            print "%-16s %-22s %-2s %-40s" % (v.rt or '-', v.name, tags, desc)

        if len(res['result']) < limit:
            break
        offset += limit
def list_prefix(arg, opts, shell_opts):
""" List prefixes matching 'arg'
"""
search_string = ''
if type(arg) == list or type(arg) == tuple:
search_string = ' '.join(arg)
v = get_vrf(opts.get('vrf_rt'), default_var='default_list_vrf_rt', abort=True)
if v.rt == 'all':
vrf_text = 'any VRF'
vrf_q = None
else:
vrf_text = vrf_format(v)
vrf_q = {
'operator': 'equals',
'val1': 'vrf_rt',
'val2': v.rt
}
print "Searching for prefixes in %s..." % vrf_text
offset = 0
# small initial limit for "instant" result
limit = 50
min_indent = 0
while True:
res = Prefix.smart_search(search_string, { 'parents_depth': -1,
'include_neighbors': True, 'offset': offset, 'max_result': limit },
vrf_q)
if offset == 0: # first time in loop?
if len(res['result']) == 0:
print "No addresses matching '%s' found." % search_string
return
if shell_opts.show_interpretation:
print "Query interpretation:"
for interp in res['interpretation']:
text = interp['string']
if interp['interpretation'] == 'unclosed quote':
text = "%s: %s, please close quote!" % (interp['string'], interp['interpretation'])
text2 = "This is not a proper search term as it contains en uneven amount of quotes."
elif interp['attribute'] == 'tag' and interp['operator'] == 'equals_any':
text = "%s: %s must contain %s" % (interp['string'], interp['interpretation'], interp['string'])
text2 = "The tag(s) or inherited tag(s) must contain %s" % interp['string']
elif interp['attribute'] == 'prefix' and interp['operator'] == 'contained_within_equals':
if 'strict_prefix' in interp and 'expanded' in interp:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['strict_prefix'])
text2 = "Prefix must be contained within %s, which is the base prefix of %s (automatically expanded from %s)." % (interp['strict_prefix'], interp['expanded'], interp['string'])
elif 'strict_prefix' in interp:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['strict_prefix'])
text2 = "Prefix must be contained within %s, which is the base prefix of %s." % (interp['strict_prefix'], interp['string'])
elif 'expanded' in interp:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['expanded'])
text2 = "Prefix must be contained within %s (automatically expanded from %s)." % (interp['expanded'], interp['string'])
else:
text = "%s: %s within %s" % (interp['string'],
interp['interpretation'],
interp['string'])
text2 = "Prefix must be contained within %s." % (interp['string'])
elif interp['attribute'] == 'prefix' and interp['operator'] == 'contains_equals':
text = "%s: Prefix that contains %s" % (interp['string'],
interp['string'])
elif interp['attribute'] == 'prefix' and interp['operator'] == 'contains_equals':
text = "%s: %s equal to %s" % (interp['string'],
interp['interpretation'], interp['string'])
else:
text = "%s: %s matching %s" % (interp['string'], interp['interpretation'], interp['string'])
print " -", text
print " ", text2
# Guess the width of the prefix column by looking at the initial
# result set.
for p in res['result']:
indent = p.indent * 2 + len(p.prefix)
if indent > min_indent:
min_indent = indent
min_indent += 15
# print column headers
prefix_str = "%%-14s %%-%ds %%-1s %%-2s %%-19s %%-14s %%-14s %%-s" % min_indent
column_header = prefix_str % ('VRF', 'Prefix', '', '#', 'Node',
'Order', 'Customer', 'Description')
print column_header
print "".join("=" for i in xrange(len(column_header)))
for p in res['result']:
if p.display == False:
continue
try:
tags = '-'
if len(p.tags) > 0:
tags = '#%d' % len(p.tags)
print prefix_str % (p.vrf.rt or '-',
"".join(" " for i in xrange(p.indent)) + p.display_prefix,
p.type[0].upper(), tags, p.node, p.order_id,
p.customer_id, p.description
)
except UnicodeEncodeError, e:
print >> sys.stderr, "\nCrazy encoding for prefix %s\n" % p.prefix
if len(res['result']) < limit:
break
offset += limit
# let consecutive limit be higher to tax the XML-RPC backend less
limit = 200
"""
ADD FUNCTIONS
"""
def _prefix_from_opts(opts):
    """ Build a pynipap Prefix from a command line option dict.

        Shared by add_prefix() and add_prefix_from_pool() to avoid
        duplicated option parsing.
    """
    p = Prefix()
    # attributes copied straight from the option dict (missing -> None)
    for attr in ('prefix', 'type', 'description', 'node', 'country',
            'order_id', 'customer_id', 'alarm_priority', 'comment',
            'vlan', 'expires'):
        setattr(p, attr, opts.get(attr))
    p.monitor = _str_to_bool(opts.get('monitor'))
    # default to status 'assigned' when none given
    p.status = opts.get('status') or 'assigned'
    # tags are given as a CSV string with backslash as escape character
    p.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
    return p
def add_prefix(arg, opts, shell_opts):
""" Add prefix to NIPAP
"""
# sanity checks
if 'from-pool' not in opts and 'from-prefix' not in opts and 'prefix' not in opts:
print >> sys.stderr, "ERROR: 'prefix', 'from-pool' or 'from-prefix' must be specified."
sys.exit(1)
if len([opt for opt in opts if opt in ['from-pool', 'from-prefix', 'prefix']]) > 1:
print >> sys.stderr, "ERROR: Use either assignment 'from-pool', 'from-prefix' or manual mode (using 'prefix')"
sys.exit(1)
if 'from-pool' in opts:
return add_prefix_from_pool(arg, opts)
args = {}
p = _prefix_from_opts(opts)
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
if 'from-prefix' in opts:
args['from-prefix'] = [ opts['from-prefix'], ]
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
if 'family' in opts:
if opts['family'] == 'ipv4':
family = 4
elif opts['family'] == 'ipv6':
family = 6
elif opts['family'] == 'dual-stack':
print >> sys.stderr, "ERROR: dual-stack mode only valid for from-pool assignments"
sys.exit(1)
args['family'] = family
# try to automatically figure out type for new prefix when not
# allocating from a pool
# get a list of prefixes that contain this prefix
vrf_id = 0
if p.vrf:
vrf_id = p.vrf.id
if 'from-prefix' in args:
parent_prefix = args['from-prefix'][0]
parent_op = 'equals'
else:
parent_prefix = opts.get('prefix').split('/')[0]
parent_op = 'contains'
# prefix must be a CIDR network, ie no bits set in host part, so we
# remove the prefix length part of the prefix as then the backend will
# assume all bits being set
auto_type_query = {
'val1': {
'val1' : 'prefix',
'operator' : parent_op,
'val2' : parent_prefix
},
'operator': 'and',
'val2': {
'val1' : 'vrf_id',
'operator' : 'equals',
'val2' : vrf_id
}
}
res = Prefix.search(auto_type_query, { })
# no results, ie the requested prefix is a top level prefix
if len(res['result']) == 0:
if p.type is None:
print >> sys.stderr, "ERROR: Type of prefix must be specified ('assignment' or 'reservation')."
sys.exit(1)
else:
# last prefix in list will be the parent of the new prefix
parent = res['result'][-1]
# if the parent is an assignment, we can assume the new prefix to be
# a host and act accordingly
if parent.type == 'assignment':
# automatically set type
if p.type is None:
print >> sys.stderr, "WARNING: Parent prefix is of type 'assignment'. Automatically setting type 'host' for new prefix."
elif p.type == 'host':
pass
else:
print >> sys.stderr, "WARNING: Parent prefix is of type 'assignment'. Automatically overriding specified type '%s' with type 'host' for new prefix." % p.type
p.type = 'host'
# if it's a manually specified prefix
if 'prefix' in opts:
# fiddle prefix length to all bits set
if parent.family == 4:
p.prefix = p.prefix.split('/')[0] + '/32'
else:
p.prefix = p.prefix.split('/')[0] + '/128'
# for from-prefix, we set prefix_length to host length
elif 'from-prefix' in opts:
if parent.family == 4:
args['prefix_length'] = 32
else:
args['prefix_length'] = 128
try:
p.save(args)
except NipapError as exc:
print >> sys.stderr, "Could not add prefix to NIPAP: %s" % str(exc)
sys.exit(1)
if p.type == 'host':
print "Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description)
else:
print "Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description)
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print >> sys.stderr, "ERROR: Not possible to add hosts to non-assignment"
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {})
def add_prefix_from_pool(arg, opts):
""" Add prefix using from-pool to NIPAP
"""
args = {}
# sanity checking
if 'from-pool' in opts:
res = Pool.list({ 'name': opts['from-pool'] })
if len(res) == 0:
print >> sys.stderr, "No pool named '%s' found." % opts['from-pool']
sys.exit(1)
args['from-pool'] = res[0]
if 'family' not in opts:
print >> sys.stderr, "ERROR: You have to specify the address family."
sys.exit(1)
if opts['family'] == 'ipv4':
afis = [4]
elif opts['family'] == 'ipv6':
afis = [6]
elif opts['family'] == 'dual-stack':
afis = [4, 6]
if 'prefix_length' in opts:
print >> sys.stderr, "ERROR: 'prefix_length' can not be specified for 'dual-stack' assignment"
sys.exit(1)
else:
print >> sys.stderr, "ERROR: 'family' must be one of: %s" % " ".join(valid_families)
sys.exit(1)
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
for afi in afis:
p = _prefix_from_opts(opts)
if opts.get('vrf_rt') is None:
# if no VRF is specified use the pools implied VRF
p.vrf = args['from-pool'].vrf
else:
# use the specified VRF
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
# set type to default type of pool unless already set
if p.type is None:
if args['from-pool'].default_type is None:
print >> sys.stderr, "ERROR: Type not specified and no default-type specified for pool: %s" % opts['from-pool']
p.type = args['from-pool'].default_type
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
args['family'] = afi
try:
p.save(args)
except NipapError as exc:
print >> sys.stderr, "Could not add prefix to NIPAP: %s" % str(exc)
sys.exit(1)
if p.type == 'host':
print "Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description)
else:
print "Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description)
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print >> sys.stderr, "ERROR: Not possible to add hosts to non-assignment"
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {})
def add_vrf(arg, opts, shell_opts):
""" Add VRF to NIPAP
"""
v = VRF()
v.rt = opts.get('rt')
v.name = opts.get('name')
v.description = opts.get('description')
v.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
v.avps[key] = value
try:
v.save()
except pynipap.NipapError as exc:
print >> sys.stderr, "Could not add VRF to NIPAP: %s" % str(exc)
sys.exit(1)
print "Added %s" % (vrf_format(v))
def add_pool(arg, opts, shell_opts):
""" Add a pool.
"""
p = Pool()
p.name = opts.get('name')
p.description = opts.get('description')
p.default_type = opts.get('default-type')
p.ipv4_default_prefix_length = opts.get('ipv4_default_prefix_length')
p.ipv6_default_prefix_length = opts.get('ipv6_default_prefix_length')
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
try:
p.save()
except pynipap.NipapError as exc:
print >> sys.stderr, "Could not add pool to NIPAP: %s" % str(exc)
sys.exit(1)
print "Pool '%s' created." % (p.name)
"""
VIEW FUNCTIONS
"""
def view_vrf(arg, opts, shell_opts):
    """ View a single VRF

        Prints the VRF identified by RT 'arg' together with its extra
        attributes, tags and address usage statistics. Exits with status 1
        if no RT is given or no matching VRF is found.
    """
    if arg is None:
        print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
        sys.exit(1)

    # interpret as default VRF (ie, RT = None)
    if arg.lower() in ('-', 'none'):
        arg = None

    try:
        # exact-match search on RT; IndexError if result list is empty,
        # KeyError if the response lacks a 'result' entry
        v = VRF.search({
            'val1': 'rt',
            'operator': 'equals',
            'val2': arg }
        )['result'][0]
    except (KeyError, IndexError):
        print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
        sys.exit(1)

    print "-- VRF"
    print " %-26s : %d" % ("ID", v.id)
    print " %-26s : %s" % ("RT", v.rt)
    print " %-26s : %s" % ("Name", v.name)
    print " %-26s : %s" % ("Description", v.description)

    print "-- Extra Attributes"
    if v.avps is not None:
        # case-insensitive sort for stable, human-friendly output
        for key in sorted(v.avps, key=lambda s: s.lower()):
            print " %-26s : %s" % (key, v.avps[key])

    print "-- Tags"
    for tag_name in sorted(v.tags, key=lambda s: s.lower()):
        print " %s" % tag_name

    # statistics
    # guard against division by zero when the VRF holds no addresses
    if v.total_addresses_v4 == 0:
        used_percent_v4 = 0
    else:
        used_percent_v4 = (float(v.used_addresses_v4)/v.total_addresses_v4)*100
    if v.total_addresses_v6 == 0:
        used_percent_v6 = 0
    else:
        used_percent_v6 = (float(v.used_addresses_v6)/v.total_addresses_v6)*100

    print "-- Statistics"
    print " %-26s : %s" % ("IPv4 prefixes", v.num_prefixes_v4)
    # IPv4 counts fit plain notation; IPv6 counts are printed in scientific
    # notation as they can be astronomically large
    print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free",
        v.used_addresses_v4, v.free_addresses_v4, used_percent_v4,
        v.total_addresses_v4)
    print " %-26s : %s" % ("IPv6 prefixes", v.num_prefixes_v6)
    print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free",
        v.used_addresses_v6, v.free_addresses_v6, used_percent_v6,
        v.total_addresses_v6)
def view_pool(arg, opts, shell_opts):
    """ View a single pool

        Prints the pool named 'arg' with its attributes, tags, prefix and
        address usage statistics and member prefixes. Returns (rather than
        exits) if no pool matches.
    """
    res = Pool.list({ 'name': arg })

    if len(res) == 0:
        print "No pool with name '%s' found." % arg
        return

    p = res[0]

    # the implied VRF is only set once the pool has at least one member
    vrf_rt = None
    vrf_name = None
    if p.vrf:
        vrf_rt = p.vrf.rt
        vrf_name = p.vrf.name

    print "-- Pool "
    print " %-26s : %d" % ("ID", p.id)
    print " %-26s : %s" % ("Name", p.name)
    print " %-26s : %s" % ("Description", p.description)
    print " %-26s : %s" % ("Default type", p.default_type)
    print " %-26s : %s / %s" % ("Implied VRF RT / name", vrf_rt, vrf_name)
    print " %-26s : %s / %s" % ("Preflen (v4/v6)", str(p.ipv4_default_prefix_length), str(p.ipv6_default_prefix_length))
    print "-- Extra Attributes"
    if p.avps is not None:
        # case-insensitive sort for stable output
        for key in sorted(p.avps, key=lambda s: s.lower()):
            print " %-26s : %s" % (key, p.avps[key])
    print "-- Tags"
    for tag_name in sorted(p.tags, key=lambda s: s.lower()):
        print " %s" % tag_name

    # statistics
    # each of the four sections below prints N/A when there are no member
    # prefixes for the family or no default prefix length is configured,
    # and otherwise guards the percentage against division by zero
    print "-- Statistics"

    # IPv4 total / used / free prefixes
    if p.member_prefixes_v4 == 0:
        print " IPv4 prefixes Used / Free : N/A (No IPv4 member prefixes)"
    elif p.ipv4_default_prefix_length is None:
        print " IPv4 prefixes Used / Free : N/A (IPv4 default prefix length is not set)"
    else:
        if p.total_prefixes_v4 == 0:
            used_percent_v4 = 0
        else:
            used_percent_v4 = (float(p.used_prefixes_v4)/p.total_prefixes_v4)*100

        print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 prefixes Used / Free",
            p.used_prefixes_v4, p.free_prefixes_v4, used_percent_v4,
            p.total_prefixes_v4)

    # IPv6 total / used / free prefixes
    if p.member_prefixes_v6 == 0:
        print " IPv6 prefixes Used / Free : N/A (No IPv6 member prefixes)"
    elif p.ipv6_default_prefix_length is None:
        print " IPv6 prefixes Used / Free : N/A (IPv6 default prefix length is not set)"
    else:
        if p.total_prefixes_v6 == 0:
            used_percent_v6 = 0
        else:
            used_percent_v6 = (float(p.used_prefixes_v6)/p.total_prefixes_v6)*100

        print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 prefixes Used / Free",
            p.used_prefixes_v6, p.free_prefixes_v6, used_percent_v6,
            p.total_prefixes_v6)

    # IPv4 total / used / free addresses
    if p.member_prefixes_v4 == 0:
        print " IPv4 addresses Used / Free : N/A (No IPv4 member prefixes)"
    elif p.ipv4_default_prefix_length is None:
        print " IPv4 addresses Used / Free : N/A (IPv4 default prefix length is not set)"
    else:
        if p.total_addresses_v4 == 0:
            used_percent_v4 = 0
        else:
            used_percent_v4 = (float(p.used_addresses_v4)/p.total_addresses_v4)*100

        print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free",
            p.used_addresses_v4, p.free_addresses_v4, used_percent_v4,
            p.total_addresses_v4)

    # IPv6 total / used / free addresses
    if p.member_prefixes_v6 == 0:
        print " IPv6 addresses Used / Free : N/A (No IPv6 member prefixes)"
    elif p.ipv6_default_prefix_length is None:
        print " IPv6 addresses Used / Free : N/A (IPv6 default prefix length is not set)"
    else:
        if p.total_addresses_v6 == 0:
            used_percent_v6 = 0
        else:
            used_percent_v6 = (float(p.used_addresses_v6)/p.total_addresses_v6)*100

        print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free",
            p.used_addresses_v6, p.free_addresses_v6, used_percent_v6,
            p.total_addresses_v6)

    print "\n-- Prefixes in pool - v4: %d v6: %d" % (p.member_prefixes_v4,
        p.member_prefixes_v6)

    res = Prefix.list({ 'pool_id': p.id})
    for pref in res:
        print " %s" % pref.display_prefix
def view_prefix(arg, opts, shell_opts):
    """ View a single prefix.

        Prints the prefix 'arg' (optionally restricted to the VRF in
        opts['vrf_rt']; RT 'all' searches every VRF) with its attributes,
        usage statistics, extra attributes, tags and comment. Exits with
        status 1 if no matching prefix is found.
    """
    q = { 'prefix': arg }

    v = get_vrf(opts.get('vrf_rt'), abort=True)
    # 'all' is a virtual wildcard VRF - omit the filter in that case
    if v.rt != 'all':
        q['vrf_rt'] = v.rt

    res = Prefix.list(q)

    if len(res) == 0:
        vrf_text = 'any VRF'
        if v.rt != 'all':
            vrf_text = vrf_format(v)
        print >> sys.stderr, "Address %s not found in %s." % (arg, vrf_text)
        sys.exit(1)

    p = res[0]
    vrf = p.vrf.rt

    print "-- Address "
    print " %-26s : %s" % ("Prefix", p.prefix)
    print " %-26s : %s" % ("Display prefix", p.display_prefix)
    print " %-26s : %s" % ("Type", p.type)
    print " %-26s : %s" % ("Status", p.status)
    print " %-26s : IPv%s" % ("Family", p.family)
    print " %-26s : %s" % ("VRF", vrf)
    print " %-26s : %s" % ("Description", p.description)
    print " %-26s : %s" % ("Node", p.node)
    print " %-26s : %s" % ("Country", p.country)
    print " %-26s : %s" % ("Order", p.order_id)
    print " %-26s : %s" % ("Customer", p.customer_id)
    print " %-26s : %s" % ("VLAN", p.vlan)
    print " %-26s : %s" % ("Alarm priority", p.alarm_priority)
    print " %-26s : %s" % ("Monitor", p.monitor)
    print " %-26s : %s" % ("Added", p.added)
    print " %-26s : %s" % ("Last modified", p.last_modified)
    print " %-26s : %s" % ("Expires", p.expires or '-')
    # IPv4 counts fit plain notation; IPv6 counts use scientific notation
    if p.family == 4:
        print " %-26s : %s / %s (%.2f%% of %s)" % ("Addresses Used / Free", p.used_addresses,
            p.free_addresses, (float(p.used_addresses)/p.total_addresses)*100,
            p.total_addresses)
    else:
        print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("Addresses Used / Free", p.used_addresses,
            p.free_addresses, (float(p.used_addresses)/p.total_addresses)*100,
            p.total_addresses)
    print "-- Extra Attributes"
    if p.avps is not None:
        # case-insensitive sort for stable output
        for key in sorted(p.avps, key=lambda s: s.lower()):
            print " %-26s : %s" % (key, p.avps[key])
    print "-- Tags"
    for tag_name in sorted(p.tags, key=lambda s: s.lower()):
        print " %s" % tag_name
    print "-- Inherited Tags"
    for tag_name in sorted(p.inherited_tags, key=lambda s: s.lower()):
        print " %s" % tag_name
    print "-- Comment"
    print p.comment or ''
"""
REMOVE FUNCTIONS
"""
def remove_vrf(arg, opts, shell_opts):
""" Remove VRF
"""
remove_confirmed = shell_opts.force
res = VRF.list({ 'rt': arg })
if len(res) < 1:
print >> sys.stderr, "VRF with [RT: %s] not found." % arg
sys.exit(1)
v = res[0]
if not remove_confirmed:
print "RT: %s\nName: %s\nDescription: %s" % (v.rt, v.name, v.description)
print "\nWARNING: THIS WILL REMOVE THE VRF INCLUDING ALL ITS ADDRESSES"
res = raw_input("Do you really want to remove %s? [y/N]: " % vrf_format(v))
if res == 'y':
remove_confirmed = True
else:
print "Operation canceled."
if remove_confirmed:
v.remove()
print "%s removed." % vrf_format(v)
def remove_pool(arg, opts, shell_opts):
""" Remove pool
"""
remove_confirmed = shell_opts.force
res = Pool.list({ 'name': arg })
if len(res) < 1:
print >> sys.stderr, "No pool with name '%s' found." % arg
sys.exit(1)
p = res[0]
if not remove_confirmed:
res = raw_input("Do you really want to remove the pool '%s'? [y/N]: " % p.name)
if res == 'y':
remove_confirmed = True
else:
print "Operation canceled."
if remove_confirmed:
p.remove()
print "Pool '%s' removed." % p.name
def remove_prefix(arg, opts, shell_opts):
""" Remove prefix
"""
# set up some basic variables
remove_confirmed = shell_opts.force
auth_src = set()
recursive = False
if opts.get('recursive') is True:
recursive = True
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
if v.rt != 'all':
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) < 1:
vrf_text = 'any VRF'
if v.rt != 'all':
vrf_text = vrf_format(v)
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_text)
sys.exit(1)
p = res[0]
if p.authoritative_source != 'nipap':
auth_src.add(p.authoritative_source)
if not remove_confirmed:
if recursive is True or p.type == 'assignment':
# recursive delete
# get affected prefixes
query = {
'val1': 'prefix',
'operator': 'contained_within_equals',
'val2': p.prefix
}
# add VRF to query if we have one
if 'vrf_rt' in spec:
vrf_q = {
'val1': 'vrf_rt',
'operator': 'equals',
'val2': spec['vrf_rt']
}
query = {
'val1': query,
'operator': 'and',
'val2': vrf_q
}
pres = Prefix.search(query, { 'parents_depth': 0, 'max_result': 1200 })
# if recursive is False, this delete will fail, ask user to do recursive
# delete instead
if recursive is False:
if len(pres['result']) > 1:
print "WARNING: %s in %s contains %s hosts." % (p.prefix, vrf_format(p.vrf), len(pres['result']))
res = raw_input("Would you like to recursively delete %s and all hosts? [y/N]: " % (p.prefix))
if res.lower() in [ 'y', 'yes' ]:
recursive = True
else:
print >> sys.stderr, "ERROR: Removal of assignment containing hosts is prohibited. Aborting removal of %s in %s." % (p.prefix, vrf_format(p.vrf))
sys.exit(1)
if recursive is True:
if len(pres['result']) <= 1:
res = raw_input("Do you really want to remove the prefix %s in %s? [y/N]: " % (p.prefix, vrf_format(p.vrf)))
if res.lower() in [ 'y', 'yes' ]:
remove_confirmed = True
else:
print "Recursively deleting %s in %s will delete the following prefixes:" % (p.prefix, vrf_format(p.vrf))
# Iterate prefixes to print a few of them and check the prefixes'
# authoritative source
i = 0
for rp in pres['result']:
if i <= 10:
print "%-29s %-2s %-19s %-14s %-14s %-40s" % ("".join(" " for i in
range(rp.indent)) + rp.display_prefix,
rp.type[0].upper(), rp.node, rp.order_id,
rp.customer_id, rp.description)
if i == 10:
print ".. and %s other prefixes" % (len(pres['result']) - 10)
if rp.authoritative_source != 'nipap':
auth_src.add(rp.authoritative_source)
i += 1
if len(auth_src) == 0:
# Simple case; all prefixes were added from NIPAP
res = raw_input("Do you really want to recursively remove %s prefixes in %s? [y/N]: " % (len(pres['result']),
vrf_format(vrf)))
if res.lower() in [ 'y', 'yes' ]:
remove_confirmed = True
else:
# we have prefixes with authoritative source != nipap
auth_src = list(auth_src)
plural = ""
# format prompt depending on how many different sources we have
if len(auth_src) == 1:
systems = "'%s'" % auth_src[0]
prompt = "Enter the name of the managing system to continue or anything else to abort: "
else:
systems = ", ".join("'%s'" % x for x in auth_src[1:]) + " and '%s'" % auth_src[0]
plural = "s"
prompt = "Enter the name of the last managing system to continue or anything else to abort: "
print ("Prefix %s in %s contains prefixes managed by the system%s %s. " +
"Are you sure you want to remove them? ") % (p.prefix,
vrf_format(p.vrf), plural, systems)
res = raw_input(prompt)
# Did the user provide the correct answer?
if res.lower() == auth_src[0].lower():
remove_confirmed = True
else:
print >> sys.stderr, "System names did not match."
sys.exit(1)
else:
# non recursive delete
if len(auth_src) > 0:
auth_src = list(auth_src)
print ("Prefix %s in %s is managed by the system '%s'. " +
"Are you sure you want to remove it? ") % (p.prefix,
vrf_format(p.vrf), auth_src[0])
res = raw_input("Enter the name of the managing system to continue or anything else to abort: ")
if res.lower() == auth_src[0].lower():
remove_confirmed = True
else:
print >> sys.stderr, "System names did not match."
sys.exit(1)
else:
res = raw_input("Do you really want to remove the prefix %s in %s? [y/N]: " % (p.prefix, vrf_format(p.vrf)))
if res.lower() in [ 'y', 'yes' ]:
remove_confirmed = True
if remove_confirmed is True:
p.remove(recursive = recursive)
if recursive is True:
print "Prefix %s and %s other prefixes in %s removed." % (p.prefix,
(len(pres['result']) - 1), vrf_format(p.vrf))
else:
print "Prefix %s in %s removed." % (p.prefix, vrf_format(p.vrf))
else:
print "Operation canceled."
"""
MODIFY FUNCTIONS
"""
def modify_vrf(arg, opts, shell_opts):
""" Modify a VRF with the options set in opts
"""
res = VRF.list({ 'rt': arg })
if len(res) < 1:
print >> sys.stderr, "VRF with [RT: %s] not found." % arg
sys.exit(1)
v = res[0]
if 'rt' in opts:
v.rt = opts['rt']
if 'name' in opts:
v.name = opts['name']
if 'description' in opts:
v.description = opts['description']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
v.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
v.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
v.avps[key] = value
v.save()
print "%s saved." % vrf_format(v)
def modify_pool(arg, opts, shell_opts):
    """ Modify a pool with the options set in opts

        Looks up the pool named 'arg' and overwrites any attribute for
        which a value is present in 'opts', then saves it. Exits with
        status 1 if no such pool exists.
    """
    res = Pool.list({ 'name': arg })
    if len(res) < 1:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)

    p = res[0]
    # only attributes explicitly present in opts are touched
    if 'name' in opts:
        p.name = opts['name']
    if 'description' in opts:
        p.description = opts['description']
    if 'default-type' in opts:
        p.default_type = opts['default-type']
    if 'ipv4_default_prefix_length' in opts:
        p.ipv4_default_prefix_length = opts['ipv4_default_prefix_length']
    if 'ipv6_default_prefix_length' in opts:
        p.ipv6_default_prefix_length = opts['ipv6_default_prefix_length']

    if 'tags' in opts:
        # tags arrive as one CSV-formatted string; rebuild the dict
        tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
        p.tags = {}
        for tag_name in tags:
            tag = Tag()
            tag.name = tag_name
            p.tags[tag_name] = tag

    # extra attributes are given on the form key=value
    for avp in opts.get('extra-attribute', []):
        try:
            key, value = avp.split('=', 1)
        except ValueError:
            print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
            return
        p.avps[key] = value

    p.save()

    print "Pool '%s' saved." % p.name
def grow_pool(arg, opts, shell_opts):
    """ Expand a pool with the ranges set in opts

        Adds the prefix given in opts['add'] as a member of the pool.
    """
    # NOTE(review): 'pool' is not assigned in this function -- it appears to
    # be a module-level global resolved from 'arg' by the command dispatch
    # machinery before this runs; confirm against the command tree setup.
    if not pool:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)

    if not 'add' in opts:
        print >> sys.stderr, "Please supply a prefix to add to pool '%s'" % pool.name
        sys.exit(1)

    # Figure out VRF.
    # If pool already has a member prefix, implied_vrf will be set. Look for new
    # prefix to add in the same vrf as implied_vrf.
    # If pool has no members, then use get_vrf() to get vrf to search in for
    # prefix to add.
    if pool.vrf is not None:
        v = pool.vrf
    else:
        v = get_vrf(opts.get('vrf_rt'), abort=True)

    q = { 'prefix': opts['add'] }
    if v.rt != 'all':
        q['vrf_rt'] = v.rt
    res = Prefix.list(q)

    if len(res) == 0:
        print >> sys.stderr, "No prefix found matching %s in %s." % (opts['add'], vrf_format(v))
        sys.exit(1)
    elif res[0].pool:
        # prefix already belongs to a pool; refuse in either case
        if res[0].pool == pool:
            print >> sys.stderr, "Prefix %s in %s is already assigned to that pool." % (opts['add'], vrf_format(v))
        else:
            print >> sys.stderr, "Prefix %s in %s is already assigned to a different pool ('%s')." % (opts['add'], vrf_format(v), res[0].pool.name)
        sys.exit(1)

    res[0].pool = pool
    res[0].save()
    print "Prefix %s in %s added to pool '%s'." % (res[0].prefix, vrf_format(v), pool.name)
def shrink_pool(arg, opts, shell_opts):
    """ Shrink a pool by removing the ranges in opts from it

        Detaches the prefix in opts['remove'] from the pool (the prefix
        itself is kept); without 'remove' it lists the pool's current
        member prefixes instead.
    """
    # NOTE(review): 'pool' is a module-level global resolved from 'arg'
    # before this function runs (same pattern as grow_pool); confirm.
    if not pool:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)

    if 'remove' in opts:
        res = Prefix.list({'prefix': opts['remove'], 'pool_id': pool.id})
        if len(res) == 0:
            print >> sys.stderr, "Pool '%s' does not contain %s." % (pool.name,
                opts['remove'])
            sys.exit(1)

        # detach the prefix from the pool; the prefix itself remains
        res[0].pool = None
        res[0].save()
        print "Prefix %s removed from pool '%s'." % (res[0].prefix, pool.name)
    else:
        print >> sys.stderr, "Please supply a prefix to add or remove to '%s':" % (
            pool.name)
        for pref in Prefix.list({'pool_id': pool.id}):
            print " %s" % pref.prefix
def modify_prefix(arg, opts, shell_opts):
    """ Modify the prefix 'arg' with the options 'opts'

        Looks up the prefix in the VRF given by opts['vrf_rt'], overwrites
        every attribute present in 'opts', then saves. Prompts before
        touching prefixes managed by an external system unless --force
        was given.
    """
    modify_confirmed = shell_opts.force

    spec = { 'prefix': arg }
    v = get_vrf(opts.get('vrf_rt'), abort=True)
    spec['vrf_rt'] = v.rt

    res = Prefix.list(spec)
    if len(res) == 0:
        print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
        return

    p = res[0]

    # only attributes explicitly present in opts are touched
    if 'prefix' in opts:
        p.prefix = opts['prefix']
    if 'description' in opts:
        p.description = opts['description']
    if 'comment' in opts:
        p.comment = opts['comment']

    if 'tags' in opts:
        # tags arrive as one CSV-formatted string; rebuild the dict
        tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
        p.tags = {}
        for tag_name in tags:
            tag = Tag()
            tag.name = tag_name
            p.tags[tag_name] = tag

    if 'node' in opts:
        p.node = opts['node']
    if 'type' in opts:
        p.type = opts['type']
    if 'status' in opts:
        p.status = opts['status']
    if 'country' in opts:
        p.country = opts['country']
    if 'order_id' in opts:
        p.order_id = opts['order_id']
    if 'customer_id' in opts:
        p.customer_id = opts['customer_id']
    if 'vlan' in opts:
        p.vlan = opts['vlan']
    if 'alarm_priority' in opts:
        p.alarm_priority = opts['alarm_priority']
    if 'monitor' in opts:
        p.monitor = _str_to_bool(opts['monitor'])
    if 'expires' in opts:
        p.expires = opts['expires']

    # extra attributes are given on the form key=value
    for avp in opts.get('extra-attribute', []):
        try:
            key, value = avp.split('=', 1)
        except ValueError:
            print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
            return
        p.avps[key] = value

    # Prompt user if prefix has authoritative source != nipap
    if not modify_confirmed and p.authoritative_source.lower() != 'nipap':
        res = raw_input("Prefix %s in %s is managed by system '%s'. Are you sure you want to modify it? [y/n]: " %
            (p.prefix, vrf_format(p.vrf), p.authoritative_source))

        # If the user declines, short-circuit...
        if res.lower() not in [ 'y', 'yes' ]:
            print "Operation aborted."
            return

    try:
        p.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
        sys.exit(1)

    print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))
def prefix_attr_add(arg, opts, shell_opts):
""" Add attributes to a prefix
"""
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
return
p = res[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in p.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
p.avps[key] = value
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
sys.exit(1)
print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))
def prefix_attr_remove(arg, opts, shell_opts):
    """ Remove attributes from a prefix

        Deletes the extra-attribute keys listed in opts['extra-attribute']
        from the prefix 'arg' and saves it.
    """
    spec = { 'prefix': arg }
    v = get_vrf(opts.get('vrf_rt'), abort=True)
    spec['vrf_rt'] = v.rt

    res = Prefix.list(spec)
    if len(res) == 0:
        print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
        return

    p = res[0]
    for key in opts.get('extra-attribute', []):
        # abort the whole operation if any key is missing
        if key not in p.avps:
            print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
            sys.exit(1)
        del p.avps[key]

    try:
        p.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
        sys.exit(1)

    print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))
def vrf_attr_add(arg, opts, shell_opts):
    """ Add attributes to a VRF

        Adds the key=value pairs in opts['extra-attribute'] to the VRF
        with RT 'arg'; refuses to overwrite existing keys.
    """
    if arg is None:
        # NOTE(review): message says "to view" -- looks copy-pasted from
        # view_vrf; the operation here is adding attributes
        print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
        sys.exit(1)

    # interpret as default VRF (ie, RT = None)
    if arg.lower() in ('-', 'none'):
        arg = None

    try:
        # exact-match search on RT; IndexError if result list is empty
        v = VRF.search({
            'val1': 'rt',
            'operator': 'equals',
            'val2': arg }
        )['result'][0]
    except (KeyError, IndexError):
        print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
        sys.exit(1)

    # extra attributes are given on the form key=value
    for avp in opts.get('extra-attribute', []):
        try:
            key, value = avp.split('=', 1)
        except ValueError:
            print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
            sys.exit(1)
        # never clobber an existing attribute
        if key in v.avps:
            print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
            sys.exit(1)
        v.avps[key] = value

    try:
        v.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save VRF changes: %s" % str(exc)
        sys.exit(1)

    print "%s saved." % vrf_format(v)
def vrf_attr_remove(arg, opts, shell_opts):
    """ Remove attributes from a VRF

        (Docstring previously said "from a prefix" -- this operates on a
        VRF.) Deletes the extra-attribute keys in opts['extra-attribute']
        from the VRF with RT 'arg' and saves it.
    """
    if arg is None:
        # NOTE(review): message says "to view" -- looks copy-pasted from
        # view_vrf; the operation here is removing attributes
        print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
        sys.exit(1)

    # interpret as default VRF (ie, RT = None)
    if arg.lower() in ('-', 'none'):
        arg = None

    try:
        # exact-match search on RT; IndexError if result list is empty
        v = VRF.search({
            'val1': 'rt',
            'operator': 'equals',
            'val2': arg }
        )['result'][0]
    except (KeyError, IndexError):
        print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
        sys.exit(1)

    for key in opts.get('extra-attribute', []):
        # abort the whole operation if any key is missing
        if key not in v.avps:
            print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
            sys.exit(1)
        del v.avps[key]

    try:
        v.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save VRF changes: %s" % str(exc)
        sys.exit(1)

    print "%s saved." % vrf_format(v)
def pool_attr_add(arg, opts, shell_opts):
    """ Add attributes to a pool

        Adds the key=value pairs in opts['extra-attribute'] to the pool
        named 'arg'; refuses to overwrite existing keys.
    """
    res = Pool.list({ 'name': arg })
    if len(res) < 1:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)

    p = res[0]
    # extra attributes are given on the form key=value
    for avp in opts.get('extra-attribute', []):
        try:
            key, value = avp.split('=', 1)
        except ValueError:
            print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
            sys.exit(1)
        # never clobber an existing attribute
        if key in p.avps:
            print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
            sys.exit(1)
        p.avps[key] = value

    try:
        p.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save pool changes: %s" % str(exc)
        sys.exit(1)

    print "Pool '%s' saved." % p.name
def pool_attr_remove(arg, opts, shell_opts):
    """ Remove attributes from a pool

        (Docstring previously said "from a prefix" -- this operates on a
        pool.) Deletes the extra-attribute keys in opts['extra-attribute']
        from the pool named 'arg' and saves it.
    """
    res = Pool.list({ 'name': arg })
    if len(res) < 1:
        print >> sys.stderr, "No pool with name '%s' found." % arg
        sys.exit(1)

    p = res[0]
    for key in opts.get('extra-attribute', []):
        # abort the whole operation if any key is missing
        if key not in p.avps:
            print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
            sys.exit(1)
        del p.avps[key]

    try:
        p.save()
    except NipapError as exc:
        print >> sys.stderr, "Could not save pool changes: %s" % str(exc)
        sys.exit(1)

    print "Pool '%s' saved." % p.name
"""
COMPLETION FUNCTIONS
"""
def _complete_string(key, haystack):
    """ Returns valid string completions

        Takes the string 'key' and compares it to each of the strings in
        'haystack'. The ones which begin with 'key' are returned as result.
        An empty 'key' matches everything and returns 'haystack' unchanged.
    """
    if len(key) == 0:
        return haystack

    # str.startswith replaces the deprecated Python 2-only string.find()
    # module function; behavior is identical (prefix match at position 0)
    return [straw for straw in haystack if straw.startswith(key)]
def complete_bool(arg):
    """ Complete strings "true" and "false"

        Returns the entries of the module-level 'valid_bools' list that
        begin with 'arg'.
    """
    return _complete_string(arg, valid_bools)
def complete_country(arg):
    """ Complete country codes ("SE", "DE", ...)

        Returns the entries of the module-level 'valid_countries' list
        that begin with 'arg'.
    """
    return _complete_string(arg, valid_countries)
def complete_family(arg):
    """ Complete inet family ("ipv4", "ipv6")

        Returns the entries of the module-level 'valid_families' list that
        begin with 'arg'.
    """
    return _complete_string(arg, valid_families)
def complete_tags(arg):
    """ Complete NIPAP tag names

        (Docstring previously said "prefix type" -- this searches tags.)
        Performs an anchored regex search for tag names starting with
        'arg' and returns the matching names.
    """
    # '^' anchors the regex so only prefix matches are returned
    search_string = '^'
    if arg is not None:
        search_string += arg

    res = Tag.search({
        'operator': 'regex_match',
        'val1': 'name',
        'val2': search_string
    })

    ret = []
    for t in res['result']:
        ret.append(t.name)

    return ret
def complete_pool_members(arg):
    """ Complete member prefixes of pool

        Returns the member prefixes of the current pool that begin with
        'arg'.
    """
    # pool should already be globally set
    # NOTE(review): relies on the module-level 'pool' global being resolved
    # by the command dispatch before completion runs -- confirm
    res = []
    for member in Prefix.list({ 'pool_id': pool.id }):
        res.append(member.prefix)

    return _complete_string(arg, res)
def complete_prefix_type(arg):
    """ Complete NIPAP prefix type

        Returns the entries of the module-level 'valid_prefix_types' list
        that begin with 'arg'.
    """
    return _complete_string(arg, valid_prefix_types)
def complete_prefix_status(arg):
    """ Complete NIPAP prefix status

        Returns the entries of the module-level 'valid_prefix_status' list
        that begin with 'arg'.
    """
    return _complete_string(arg, valid_prefix_status)
def complete_priority(arg):
    """ Complete NIPAP alarm priority

        Returns the entries of the module-level 'valid_priorities' list
        that begin with 'arg'.
    """
    return _complete_string(arg, valid_priorities)
def complete_node(arg):
    """ Complete node hostname

        This function is currently a bit special as it looks in the config file
        for a command to use to complete a node hostname from an external
        system.

        It is configured by setting the config attribute "complete_node_cmd" to
        a shell command. The string "%search_string%" in the command will be
        replaced by the current search string.
    """
    # get complete command from config
    try:
        cmd = cfg.get('global', 'complete_node_cmd')
    except ConfigParser.NoOptionError:
        # no completer configured; return a single empty completion
        return [ '', ]

    # pipes.quote() shell-escapes the user-supplied search string before it
    # is substituted into the configured command line
    cmd = re.sub('%search_string%', pipes.quote(arg), cmd)

    # run without a shell; stdout is expected to be one hostname per line
    args = shlex.split(cmd)
    p = subprocess.Popen(args, stdout=subprocess.PIPE)
    res, err = p.communicate()

    nodes = res.split('\n')
    return nodes
def complete_pool_name(arg):
    """ Return the names of all pools whose name begins with 'arg'. """
    # '^' anchors the regex so only prefix matches are returned
    pattern = '^'
    if arg is not None:
        pattern = '^' + arg

    found = Pool.search({
        'operator': 'regex_match',
        'val1': 'name',
        'val2': pattern
    })

    return [entry.name for entry in found['result']]
def complete_vrf(arg):
    """ Return the RTs of all VRFs whose RT begins with 'arg'.

        The literal completion 'none' (the default VRF) is appended when
        it matches the search string.
    """
    pattern = ''
    if arg is not None:
        pattern = '^%s' % arg

    found = VRF.search({
        'operator': 'regex_match',
        'val1': 'rt',
        'val2': pattern
        }, { 'max_result': 100000 } )

    completions = [candidate.rt for candidate in found['result']]

    # 'none' is a valid way of addressing the default VRF
    if re.match(pattern, 'none'):
        completions.append('none')

    return completions
def complete_vrf_virtual(arg):
    """ Returns list of matching VRFs

        Includes "virtual" VRF 'all' which is used in search
        operations
    """
    matches = complete_vrf(arg)
    pattern = ''
    if arg is not None:
        pattern = '^%s' % arg
    # the pseudo-VRF 'all' is only meaningful for searches
    if re.match(pattern, 'all'):
        matches.append('all')
    return matches
""" The NIPAP command tree
"""
cmds = {
'type': 'command',
'children': {
'address': {
'type': 'command',
'children': {
# add
'add': {
'type': 'command',
'exec': add_prefix,
'children': {
'add-hosts': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'comment': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'country': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_country,
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'family': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_family,
}
},
'status': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix status: %s' % ' | '.join(valid_prefix_status),
'content_type': unicode,
'complete': complete_prefix_status,
}
},
'type': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix type: reservation | assignment | host',
'content_type': unicode,
'complete': complete_prefix_type,
}
},
'from-pool': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_pool_name,
}
},
'from-prefix': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'node': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_node,
}
},
'order_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'customer_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
'prefix': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int
}
},
'monitor': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_bool,
}
},
'vlan': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int
}
},
'alarm_priority': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_priority,
}
},
'expires': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
}
},
},
# list
'list': {
'type': 'command',
'exec': list_prefix,
'rest_argument': {
'type': 'value',
'content_type': unicode,
'description': 'Prefix',
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf_virtual,
},
}
}
},
# modify
'modify': {
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Prefix to edit',
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
},
'exec_immediately': get_vrf
},
'add': {
'type': 'command',
'exec': prefix_attr_add,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'remove': {
'type': 'command',
'exec': prefix_attr_remove,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'set': {
'type': 'command',
'exec': modify_prefix,
'children': {
'comment': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'country': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_country,
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'family': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_family,
}
},
'status': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix status: %s' % ' | '.join(valid_prefix_status),
'content_type': unicode,
'complete': complete_prefix_status,
}
},
'type': {
'type': 'option',
'argument': {
'type': 'value',
'description': 'Prefix type: reservation | assignment | host',
'content_type': unicode,
'complete': complete_prefix_type,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
'node': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_node,
}
},
'order_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'customer_id': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'prefix': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
'monitor': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_bool,
}
},
'vlan': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int
}
},
'alarm_priority': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_priority,
}
},
'expires': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
}
}
}
}
}
},
# remove
'remove': {
'type': 'command',
'exec': remove_prefix,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Remove address'
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
'recursive': {
'type': 'bool'
}
}
},
# view
'view': {
'type': 'command',
'exec': view_prefix,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Address to view'
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
}
}
}
},
# VRF commands
'vrf': {
'type': 'command',
'children': {
# add
'add': {
'type': 'command',
'exec': add_vrf,
'children': {
'rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF RT'
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF name',
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Description of the VRF'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
}
}
},
# list
'list': {
'type': 'command',
'exec': list_vrf,
'children': {
'rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF RT',
'complete': complete_vrf,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF name',
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Description of the VRF'
}
}
}
},
# view
'view': {
'exec': view_vrf,
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
}
},
# remove
'remove': {
'exec': remove_vrf,
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
}
},
# modify
'modify': {
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF',
'complete': complete_vrf,
},
'children': {
'add': {
'type': 'command',
'exec': vrf_attr_add,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'remove': {
'type': 'command',
'exec': vrf_attr_remove,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'set': {
'type': 'command',
'exec': modify_vrf,
'children': {
'rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF RT'
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'VRF name',
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Description of the VRF'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
}
}
}
}
},
# pool commands
'pool': {
'type': 'command',
'children': {
# add
'add': {
'type': 'command',
'exec': add_pool,
'children': {
'default-type': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Default prefix type: reservation | assignment | host',
'complete': complete_prefix_type,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Name of the pool'
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'A short description of the pool'
}
},
'ipv4_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv4 prefix length'
}
},
'ipv6_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv6 prefix length'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
# list
'list': {
'type': 'command',
'exec': list_pool,
'rest_argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool',
},
'children': {
'default-type': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Default prefix type: reservation | assignment | host',
'complete': complete_prefix_type,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Name of the pool'
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'A short description of the pool'
}
},
'ipv4_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv4 prefix length'
}
},
'ipv6_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv6 prefix length'
}
},
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'The implied VRF of the pool',
'complete': complete_vrf_virtual
}
}
}
},
# remove
'remove': {
'type': 'command',
'exec': remove_pool,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
}
},
# resize
'resize': {
'type': 'command',
'exec_immediately': get_pool,
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
},
'children': {
'add': {
'type': 'option',
'exec': grow_pool,
'argument': {
'type': 'value',
'content_type': unicode,
},
'children': {
'vrf_rt': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_vrf,
}
},
}
},
'remove': {
'type': 'option',
'exec': shrink_pool,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_pool_members,
}
}
}
},
# modify
'modify': {
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
},
'children': {
'add': {
'type': 'command',
'exec': pool_attr_add,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'remove': {
'type': 'command',
'exec': pool_attr_remove,
'children': {
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
},
'set': {
'type': 'command',
'exec': modify_pool,
'children': {
'default-type': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Default prefix type: reservation | assignment | host',
'complete': complete_prefix_type,
}
},
'name': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'Name of the pool'
}
},
'description': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': unicode,
'descripton': 'A short description of the pool'
}
},
'ipv4_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv4 prefix length'
}
},
'ipv6_default_prefix_length': {
'type': 'option',
'argument': {
'type': 'value',
'content_type': int,
'descripton': 'Default IPv6 prefix length'
}
},
'tags': {
'type': 'option',
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
'complete': complete_tags,
}
},
'extra-attribute': {
'type': 'option',
'multiple': True,
'content_type': unicode,
'argument': {
'type': 'value',
'content_type': unicode,
},
},
}
}
}
},
# view
'view': {
'exec': view_pool,
'type': 'command',
'argument': {
'type': 'value',
'content_type': unicode,
'description': 'Pool name',
'complete': complete_pool_name,
}
}
}
}
}
}
if __name__ == '__main__':
    # Parse the command line against the NIPAP command tree ('cmds') above.
    # NOTE: Python 2 print-statement syntax — this file targets Python 2.
    try:
        cmd = Command(cmds, sys.argv[1::])
    except ValueError as exc:
        print >> sys.stderr, "Error: %s" % str(exc)
        sys.exit(1)
    # execute command
    if cmd.exe is None:
        # Nothing executable was resolved — show the valid next tokens.
        print "Incomplete command specified"
        print "valid completions: %s" % " ".join(cmd.next_values())
        sys.exit(1)
    try:
        cmd.exe(cmd.arg, cmd.exe_options)
    except NipapError as exc:
        print >> sys.stderr, "Command failed:\n %s" % str(exc)
        sys.exit(1)
|
import spotify
import random
import logging
def get_login_details():
    """Read "username,password" from the .authentication file.

    Returns:
        list: [username, password] (any further comma-separated fields
        are preserved as additional list entries).
    """
    # 'with' guarantees the handle is closed even if read() raises;
    # strip() drops the trailing newline that would otherwise remain
    # attached to the last field (the password).
    with open('.authentication', 'r') as auth_file:
        return auth_file.read().strip().split(',')
# Play one random track from a fixed Spotify playlist (first version,
# busy-waits on connection state; superseded by the event-based version below).
logging.basicConfig(level=logging.DEBUG)
login = get_login_details()
# NOTE(review): this prints the username AND the password to stdout —
# remove before sharing any logs.
print(login[0])
print(login[1])
session = spotify.Session()
audio = spotify.AlsaSink(session)
loop = spotify.EventLoop(session)
loop.start()
session.login(login[0], login[1])
# Do nothing until logged in
while session.connection.state != spotify.ConnectionState.LOGGED_IN:
    session.process_events()
playlist = session.get_playlist('spotify:user:1154159617:playlist:64Dmb6PS1Rr4WT3XRF2imE')
playlist.load()
# Pick track
track_number = random.randint(0, (len(playlist.tracks)-1))
print(track_number)
track = playlist.tracks[track_number]
track.load()
print(track.name)
session.player.load(track)
session.player.play()
Added a try/except block at the end of the file which waits for playback to finish before exiting, because Spotify playback runs on a different thread.
import spotify
import random
import logging
import threading
def get_login_details():
    """Read "username,password" from the .authentication file.

    Returns:
        list: [username, password] (any further comma-separated fields
        are preserved as additional list entries).
    """
    # 'with' guarantees the handle is closed even if read() raises;
    # strip() drops the trailing newline that would otherwise remain
    # attached to the last field (the password).
    with open('.authentication', 'r') as auth_file:
        return auth_file.read().strip().split(',')
# Debug logging kept for troubleshooting; disabled for normal runs.
#logging.basicConfig(level=logging.DEBUG)
login = get_login_details()
# Events used to synchronise the main thread with libspotify's callback thread.
logged_in_event = threading.Event()
end_of_track = threading.Event()
def connection_state_listener(session):
    # Session-event callback: unblock the main thread once login completes.
    if session.connection.state is spotify.ConnectionState.LOGGED_IN:
        logged_in_event.set()
def on_end_of_track(self):
    # Session-event callback: signal the main thread that playback finished.
    end_of_track.set()
session = spotify.Session()
loop = spotify.EventLoop(session)
loop.start()
# Register callbacks before logging in so no events are missed.
session.on(spotify.SessionEvent.CONNECTION_STATE_UPDATED, connection_state_listener)
session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track)
session.login(login[0], login[1])
logged_in_event.wait()
audio = spotify.AlsaSink(session)
playlist = session.get_playlist('spotify:user:1154159617:playlist:64Dmb6PS1Rr4WT3XRF2imE')
playlist.load()
# Pick track
track_number = random.randint(0, (len(playlist.tracks)-1))
track = playlist.tracks[track_number]
track.load()
print(str(track_number) + ": " + track.name)
session.player.load(track)
session.player.play()
# Playback runs on libspotify's own thread; poll the end-of-track event so
# the script stays alive until the song finishes (Ctrl-C also exits cleanly).
try:
    while not end_of_track.wait(0.1):
        pass
except KeyboardInterrupt:
    pass
#!/usr/bin/python3 -i
#
# Copyright (c) 2017 The Khronos Group Inc.
# Copyright (c) 2017 Valve Corporation
# Copyright (c) 2017 LunarG, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mark Young <marky@lunarg.com>
import re
import sys
from automatic_source_generator import (AutomaticSourceGeneratorOptions,
AutomaticSourceOutputGenerator,
regSortFeatures, write)
# The following commands should not be generated for the layer
# (they are resolved by the loader before any layer is involved).
VALID_USAGE_DONT_GEN = [
    'xrEnumerateApiLayerProperties',
    'xrEnumerateInstanceExtensionProperties',
]

# The following commands have a manually defined component to them.
# Generation is skipped for these; hand-written implementations live in
# the non-generated layer sources.
VALID_USAGE_MANUALLY_DEFINED = [
    'xrCreateInstance',
    'xrDestroyInstance',
    'xrCreateSession',
    # We manually implement some of the XR_EXT_debug_utils entry-points so that we
    # can return validation messages to known debug utils messengers
    'xrSetDebugUtilsObjectNameEXT',
    'xrCreateDebugUtilsMessengerEXT',
    'xrDestroyDebugUtilsMessengerEXT',
    'xrSessionBeginDebugUtilsLabelRegionEXT',
    'xrSessionEndDebugUtilsLabelRegionEXT',
    'xrSessionInsertDebugUtilsLabelEXT',
]
# ValidationSourceGeneratorOptions - subclass of AutomaticSourceGeneratorOptions.
class ValidationSourceGeneratorOptions(AutomaticSourceGeneratorOptions):
    """Generator options for the core validation layer sources.

    Only the registry-selection arguments are forwarded to the base class;
    the remaining C-formatting keyword arguments (prefixText, genFuncPointers,
    protectFile, apicall, ...) are accepted for interface compatibility with
    other generator options classes but are unused here.
    """
    def __init__(self,
                 filename=None,
                 directory='.',
                 apiname=None,
                 profile=None,
                 versions='.*',
                 emitversions='.*',
                 defaultExtensions=None,
                 addExtensions=None,
                 removeExtensions=None,
                 emitExtensions=None,
                 sortProcedure=regSortFeatures,
                 prefixText="",
                 genFuncPointers=True,
                 protectFile=True,
                 protectFeature=True,
                 protectProto=None,
                 protectProtoStr=None,
                 apicall='',
                 apientry='',
                 apientryp='',
                 indentFuncProto=True,
                 indentFuncPointer=False,
                 alignFuncParam=0,
                 genEnumBeginEndRange=False):
        # Forward only the registry-selection options to the base class.
        AutomaticSourceGeneratorOptions.__init__(self, filename, directory, apiname, profile,
                                                 versions, emitversions, defaultExtensions,
                                                 addExtensions, removeExtensions,
                                                 emitExtensions, sortProcedure)
# ValidationSourceOutputGenerator - subclass of AutomaticSourceOutputGenerator.
class ValidationSourceOutputGenerator(AutomaticSourceOutputGenerator):
"""Generate core validation layer source using XML element attributes from registry"""
def __init__(self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout):
    """Forward the diagnostic output streams to the automatic-source base class."""
    super().__init__(errFile, warnFile, diagFile)
# Override the base class header warning so the comment indicates this file.
# self the ValidationSourceOutputGenerator object
def outputGeneratedHeaderWarning(self):
    """Write the "generated file - do not edit" banner naming this script."""
    banner = (
        '// *********** THIS FILE IS GENERATED - DO NOT EDIT ***********\n'
        '// See validation_layer_generator.py for modifications\n'
        '// ************************************************************\n'
    )
    write(banner, file=self.outFile)
# Call the base class to properly begin the file, and then add
# the file-specific header information.
# self the ValidationSourceOutputGenerator object
# gen_opts the ValidationSourceGeneratorOptions object
def beginFile(self, genOpts):
    """Start a generated file: run base-class setup, then emit the
    per-output-file #include preamble."""
    AutomaticSourceOutputGenerator.beginFile(self, genOpts)
    preamble = ''
    # Only two files are generated by this script; select the preamble by
    # the configured output filename.
    if self.genOpts.filename == 'xr_generated_core_validation.hpp':
        preamble += '#pragma once\n'
        preamble += '#include <vector>\n'
        preamble += '#include <string>\n'
        preamble += '#include <unordered_map>\n'
        preamble += '#include <thread>\n'
        preamble += '#include <mutex>\n\n'
        preamble += '#include "api_layer_platform_defines.h"\n'
        preamble += '#include <openxr/openxr.h>\n'
        preamble += '#include <openxr/openxr_platform.h>\n\n'
        preamble += '#include "xr_generated_dispatch_table.h"\n'
        preamble += '#include "validation_utils.h"\n'
    elif self.genOpts.filename == 'xr_generated_core_validation.cpp':
        preamble += '#include <sstream>\n'
        preamble += '#include <cstring>\n'
        preamble += '#include <algorithm>\n\n'
        preamble += '#include "xr_generated_core_validation.hpp"\n'
    write(preamble, file=self.outFile)
# Write out all the information for the appropriate file,
# and then call down to the base class to wrap everything up.
# self the ValidationSourceOutputGenerator object
def endFile(self):
    """Emit the generated body for the current target file, then let the
    base class finish and close the file."""
    if self.genOpts.filename == 'xr_generated_core_validation.hpp':
        file_data = self.outputValidationHeaderInfo()
    elif self.genOpts.filename == 'xr_generated_core_validation.cpp':
        file_data = (self.outputCommonTypesForValidation()
                     + self.outputValidationSourceFuncs())
    else:
        file_data = ''
    write(file_data, file=self.outFile)
    # Finish processing in superclass
    AutomaticSourceOutputGenerator.endFile(self)
def makeInfoName(self, handle_type=None, handle_type_name=None):
    """Return the global info-map variable name for a handle type.

    Accepts either a handle tuple (its .name is used) or an explicit
    type-name string, e.g. 'XrSession' -> 'g_session_info'.
    """
    name = handle_type_name if handle_type_name else handle_type.name
    return 'g_%s_info' % name[2:].lower()
def outputInfoMapDeclarations(self, extern):
lines = []
extern_keyword = 'extern ' if extern else ''
for handle in self.api_handles:
handle_name = handle.name
if handle.protect_value:
lines.append('#if %s' % handle.protect_string)
if handle.name == 'XrInstance':
info_type = "InstanceHandleInfo"
else:
info_type = 'HandleInfo<%s>' % handle_name
lines.append('%s%s %s;' % (extern_keyword,
info_type, self.makeInfoName(handle)))
if handle.protect_value:
lines.append('#endif // %s" % handle.protect_string')
return '\n'.join(lines)
# Write out common internal types for validation
# self the ValidationSourceOutputGenerator object
def outputCommonTypesForValidation(self):
    """Return the common internal C++ types shared by the validation source."""
    return ''.join((
        "// Structure used for indicating status of 'flags' test.\n",
        'enum ValidateXrFlagsResult {\n',
        ' VALIDATE_XR_FLAGS_ZERO,\n',
        ' VALIDATE_XR_FLAGS_INVALID,\n',
        ' VALIDATE_XR_FLAGS_SUCCESS,\n',
        '};\n\n',
    ))
# Generate C++ structures and maps used for validating the states identified
# in the specification.
# self the ValidationSourceOutputGenerator object
def outputValidationStateCheckStructs(self):
validation_state_checks = '// Structure used for state validation.\n'
active_structures = dict()
for cur_state in self.api_states:
type_name = '%s' % cur_state.type
cur_list = []
if active_structures.get(type_name) is not None:
cur_list = active_structures.get(type_name)
cur_list.append(cur_state.variable)
active_structures[type_name] = cur_list
for type_name, variable_list in active_structures.items():
validation_state_checks += 'struct %sValidationStates {\n' % type_name
for variable in variable_list:
validation_state_checks += ' bool %s;\n' % variable
validation_state_checks += '};\n'
validation_state_checks += 'std::unordered_map<%s, %sValidationStates*> g_%s_valid_states;\n' % (
type_name, type_name, type_name[2:].lower())
validation_state_checks += '\n'
return validation_state_checks
# Generate C++ structure and utility function prototypes for validating
# the 'next' chains in structures.
# self the ValidationSourceOutputGenerator object
def outputValidationSourceNextChainProtos(self):
    """Return the NextChainResult enum plus the ValidateNextChain prototype."""
    return ''.join((
        '// Result return value for next chain validation\n',
        'enum NextChainResult {\n',
        ' NEXT_CHAIN_RESULT_VALID = 0,\n',
        ' NEXT_CHAIN_RESULT_ERROR = -1,\n',
        ' NEXT_CHAIN_RESULT_DUPLICATE_STRUCT = -2,\n',
        '};\n\n',
        '// Prototype for validateNextChain command (it uses the validate structure commands so add it after\n',
        'NextChainResult ValidateNextChain(GenValidUsageXrInstanceInfo *instance_info,\n',
        ' const std::string &command_name,\n',
        ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n',
        ' const void* next,\n',
        ' std::vector<XrStructureType>& valid_ext_structs,\n',
        ' std::vector<XrStructureType>& encountered_structs,\n',
        ' std::vector<XrStructureType>& duplicate_structs);\n\n',
    ))
# Generate C++ enum and utility function prototypes for validating
# the flags in structures.
# self the ValidationSourceOutputGenerator object
def outputValidationSourceFlagBitValues(self):
    """Generate one C++ ValidateXr<Flags> function per API flags type.

    Each generated function returns VALIDATE_XR_FLAGS_ZERO for 0,
    VALIDATE_XR_FLAGS_INVALID if any undefined bit is set, and
    VALIDATE_XR_FLAGS_SUCCESS otherwise.
    """
    flag_value_validate = ''
    for flag_tuple in self.api_flags:
        if flag_tuple.protect_value:
            flag_value_validate += '#if %s\n' % flag_tuple.protect_string
        flag_value_validate += '// Function to validate %s flags\n' % flag_tuple.name
        flag_value_validate += 'ValidateXrFlagsResult ValidateXr%s(const %s value) {\n' % (
            flag_tuple.name[2:], flag_tuple.type)
        # We need to return a value indicating that the value is zero because in some
        # circumstances, 0 is ok.  However, in other cases, 0 is disallowed.  So, leave
        # it up to the calling function to decide what is correct.
        flag_value_validate += ' if (0 == value) {\n'
        flag_value_validate += ' return VALIDATE_XR_FLAGS_ZERO;\n'
        flag_value_validate += ' }\n'
        # If the flag has no values defined for this flag, then anything other than
        # zero generates an error.
        if flag_tuple.valid_flags is None:
            flag_value_validate += ' return VALIDATE_XR_FLAGS_INVALID;\n'
        else:
            # This flag has values set. So, check (and remove) each valid value. Once that's done
            # anything left over would be invalid.
            flag_value_validate += ' %s int_value = value;\n' % flag_tuple.type
            for mask_tuple in self.api_bitmasks:
                if mask_tuple.name == flag_tuple.valid_flags:
                    for cur_value in mask_tuple.values:
                        if cur_value.protect_value and flag_tuple.protect_value != cur_value.protect_value:
                            flag_value_validate += '#if %s\n' % cur_value.protect_string
                        flag_value_validate += ' if ((int_value & %s) != 0) {\n' % cur_value.name
                        flag_value_validate += ' // Clear the value %s since it is valid\n' % cur_value.name
                        flag_value_validate += ' int_value &= ~%s;\n' % cur_value.name
                        flag_value_validate += ' }\n'
                        if cur_value.protect_value and flag_tuple.protect_value != cur_value.protect_value:
                            flag_value_validate += '#endif // %s\n' % cur_value.protect_string
                    # only one bitmask can match this flags type
                    break
            flag_value_validate += ' if (int_value != 0) {\n'
            flag_value_validate += ' // Something is left, it must be invalid\n'
            flag_value_validate += ' return VALIDATE_XR_FLAGS_INVALID;\n'
            flag_value_validate += ' }\n'
            flag_value_validate += ' return VALIDATE_XR_FLAGS_SUCCESS;\n'
        flag_value_validate += '}\n\n'
        if flag_tuple.protect_value:
            flag_value_validate += '#endif // %s\n' % flag_tuple.protect_string
    return flag_value_validate
# Generate C++ functions for validating enums.
# self the ValidationSourceOutputGenerator object
def outputValidationSourceEnumValues(self):
    """Generate one C++ ValidateXrEnum overload per API enum.

    Each generated validator checks that the enum's owning extension (and,
    per value, the value's owning extension) is enabled before accepting
    the value, logging a VUID-tagged error otherwise.

    NOTE(review): indentation structure reconstructed from the += /
    writeIndent bookkeeping; 'indent' tracks the C-side nesting level.
    """
    enum_value_validate = ''
    for enum_tuple in self.api_enums:
        if enum_tuple.protect_value:
            enum_value_validate += '#if %s\n' % enum_tuple.protect_string
        enum_value_validate += '// Function to validate %s enum\n' % enum_tuple.name
        enum_value_validate += 'bool ValidateXrEnum(GenValidUsageXrInstanceInfo *instance_info,\n'
        enum_value_validate += ' const std::string &command_name,\n'
        enum_value_validate += ' const std::string &validation_name,\n'
        enum_value_validate += ' const std::string &item_name,\n'
        enum_value_validate += ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
        enum_value_validate += ' const %s value) {\n' % enum_tuple.name
        indent = 1
        checked_extension = ''
        if enum_tuple.ext_name and not self.isCoreExtensionName(enum_tuple.ext_name):
            checked_extension = enum_tuple.ext_name
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += '// Enum requires extension %s, so check that it is enabled\n' % enum_tuple.ext_name
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'if (nullptr != instance_info && !ExtensionEnabled(instance_info->enabled_extensions, "%s")) {\n' % enum_tuple.ext_name
            indent += 1
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'std::string vuid = "VUID-";\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'vuid += validation_name;\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'vuid += "-";\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'vuid += item_name;\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'vuid += "-parameter";\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'std::string error_str = "%s requires extension ";\n' % enum_tuple.name
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'error_str += " \\"%s\\" to be enabled, but it is not enabled";\n' % enum_tuple.ext_name
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'CoreValidLogMessage(instance_info, vuid,\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += ' objects_info, error_str);\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'return false;\n'
            indent -= 1
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += '}\n'
        enum_value_validate += self.writeIndent(indent)
        enum_value_validate += 'switch (value) {\n'
        indent += 1
        for cur_value in enum_tuple.values:
            if cur_value.protect_value and enum_tuple.protect_value != cur_value.protect_value:
                enum_value_validate += '#if %s\n' % cur_value.protect_string
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'case %s:\n' % cur_value.name
            if cur_value.protect_value and enum_tuple.protect_value != cur_value.protect_value:
                enum_value_validate += '#endif // %s\n' % cur_value.protect_string
            if cur_value.ext_name and cur_value.ext_name != checked_extension and not self.isCoreExtensionName(cur_value.ext_name):
                indent += 1
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += '// Enum value %s requires extension %s, so check that it is enabled\n' % (
                    cur_value.name, cur_value.ext_name)
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'if (nullptr != instance_info && !ExtensionEnabled(instance_info->enabled_extensions, "%s")) {\n' % cur_value.ext_name
                indent += 1
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'std::string vuid = "VUID-";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += validation_name;\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += "-";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += item_name;\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += "-parameter";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'std::string error_str = "%s value \\"%s\\"";\n' % (
                    enum_tuple.name, cur_value.name)
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'error_str += " being used, which requires extension ";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'error_str += " \\"%s\\" to be enabled, but it is not enabled";\n' % cur_value.ext_name
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'CoreValidLogMessage(instance_info, vuid,\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += ' objects_info, error_str);\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'return false;\n'
                indent -= 1
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += '}\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'return true;\n'
                indent -= 1
            elif cur_value.name == 'XR_TYPE_UNKNOWN':
                enum_value_validate += self.writeIndent(indent + 1)
                enum_value_validate += 'return false; // Invalid XrStructureType \n'
            else:
                enum_value_validate += self.writeIndent(indent + 1)
                enum_value_validate += 'return true;\n'
        indent -= 1
        enum_value_validate += self.writeIndent(indent)
        enum_value_validate += 'default:\n'
        enum_value_validate += self.writeIndent(indent + 1)
        enum_value_validate += 'return false;\n'
        indent -= 1
        enum_value_validate += '}\n'
        enum_value_validate += '}\n\n'
        if enum_tuple.protect_value:
            enum_value_validate += '#endif // %s\n' % enum_tuple.protect_string
    return enum_value_validate
# Generate prototypes for functions used internal to the source file so other functions can use them
# self the ValidationSourceOutputGenerator object
def outputValidationInternalProtos(self):
validation_internal_protos = ''
for handle in self.api_handles:
if handle.protect_value:
validation_internal_protos += '#if %s\n' % handle.protect_string
validation_internal_protos += 'ValidateXrHandleResult Verify%sHandle(const %s* handle_to_check);\n' % (
handle.name, handle.name)
if handle.protect_value:
validation_internal_protos += '#endif // %s\n' % handle.protect_string
validation_internal_protos += '\n// Write out prototypes for handle parent verification functions\n'
validation_internal_protos += 'bool VerifyXrParent(XrObjectType handle1_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle1,\n'
validation_internal_protos += ' XrObjectType handle2_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle2,\n'
validation_internal_protos += ' bool check_this);\n'
validation_internal_protos += '\n// Function to check if an extension has been enabled\n'
validation_internal_protos += 'bool ExtensionEnabled(std::vector<std::string> &extensions, const char* const check_extension_name);\n'
validation_internal_protos += '\n// Functions to validate structures\n'
for xr_struct in self.api_structures:
if xr_struct.protect_value:
validation_internal_protos += '#if %s\n' % xr_struct.protect_string
validation_internal_protos += 'XrResult ValidateXrStruct(GenValidUsageXrInstanceInfo *instance_info, const std::string &command_name,\n'
validation_internal_protos += ' std::vector<GenValidUsageXrObjectInfo>& objects_info, bool check_members,\n'
validation_internal_protos += ' const %s* value);\n' % xr_struct.name
if xr_struct.protect_value:
validation_internal_protos += '#endif // %s\n' % xr_struct.protect_string
return validation_internal_protos
    # Generate C++ functions for validating 'next' chains in a structure.
    #   self                the ValidationSourceOutputGenerator object
    def outputValidationSourceNextChainFunc(self):
        """Return the C++ source of the recursive ValidateNextChain() helper.

        The emitted function walks a structure's 'next' pointer chain:
        it accepts NULL, rejects chain entries whose XrStructureType is not
        in valid_ext_structs, records duplicate structure types into
        duplicate_structs, dispatches each chained struct to its generated
        ValidateXrStruct() overload, and finally recurses on the rest of
        the chain.
        """
        next_chain_info = ''
        next_chain_info += 'NextChainResult ValidateNextChain(GenValidUsageXrInstanceInfo *instance_info,\n'
        next_chain_info += '                                  const std::string &command_name,\n'
        next_chain_info += '                                  std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
        next_chain_info += '                                  const void* next,\n'
        next_chain_info += '                                  std::vector<XrStructureType>& valid_ext_structs,\n'
        next_chain_info += '                                  std::vector<XrStructureType>& encountered_structs,\n'
        next_chain_info += '                                  std::vector<XrStructureType>& duplicate_structs) {\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'NextChainResult return_result = NEXT_CHAIN_RESULT_VALID;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '// NULL is valid\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (nullptr == next) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return return_result;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '// Non-NULL is not valid if there is no valid extension structs\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (nullptr != next && 0 == valid_ext_structs.size()) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'const XrBaseInStructure* next_header = reinterpret_cast<const XrBaseInStructure*>(next);\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'auto valid_ext = std::find(valid_ext_structs.begin(), valid_ext_structs.end(), next_header->type);\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (valid_ext == valid_ext_structs.end()) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += '// Not a valid extension structure type for this next chain.\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '} else {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += '// Check to see if we\'ve already encountered this structure.\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'auto already_encountered_ext = std::find(encountered_structs.begin(), encountered_structs.end(), next_header->type);\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'if (already_encountered_ext != encountered_structs.end()) {\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += '// Make sure we only put in unique types into our duplicate list.\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'auto already_duplicate = std::find(duplicate_structs.begin(), duplicate_structs.end(), next_header->type);\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'if (already_duplicate == duplicate_structs.end()) {\n'
        next_chain_info += self.writeIndent(4)
        next_chain_info += 'duplicate_structs.push_back(next_header->type);\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'return_result = NEXT_CHAIN_RESULT_DUPLICATE_STRUCT;\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        # Validate the rest of this struct: emit one switch case per
        # XrStructureType value that maps to a known structure, dispatching to
        # that structure's generated ValidateXrStruct() overload.
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'switch (next_header->type) {\n'
        for enum_tuple in self.api_enums:
            if enum_tuple.name == 'XrStructureType':
                if enum_tuple.protect_value:
                    next_chain_info += '#if %s\n' % enum_tuple.protect_string
                for cur_value in enum_tuple.values:
                    # Empty string means the enum value has no corresponding
                    # structure, so no case is generated for it.
                    struct_define_name = self.genXrStructureName(
                        cur_value.name)
                    if len(struct_define_name) > 0:
                        struct_tuple = self.getStruct(struct_define_name)
                        if struct_tuple.protect_value:
                            next_chain_info += '#if %s\n' % struct_tuple.protect_string
                        next_chain_info += self.writeIndent(2)
                        next_chain_info += 'case %s:\n' % cur_value.name
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += 'if (XR_SUCCESS != ValidateXrStruct(instance_info, command_name, objects_info, false,\n'
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += '                                   reinterpret_cast<const %s*>(next))) {\n' % struct_define_name
                        next_chain_info += self.writeIndent(4)
                        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += '}\n'
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += 'break;\n'
                        if struct_tuple.protect_value:
                            next_chain_info += '#endif // %s\n' % struct_tuple.protect_string
                if enum_tuple.protect_value:
                    next_chain_info += '#endif //%s\n' % enum_tuple.protect_string
                # Only one XrStructureType enum exists, so stop searching.
                break
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'default:\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        # Validate any chained structs (recurse on the remainder of the chain).
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'NextChainResult next_result = ValidateNextChain(instance_info, command_name,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                objects_info, next_header->next,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                valid_ext_structs,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                encountered_structs,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                duplicate_structs);\n'
        # A duplicate recorded at this level is preserved even when the rest of
        # the chain validates cleanly.
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (NEXT_CHAIN_RESULT_VALID == next_result && NEXT_CHAIN_RESULT_VALID != return_result) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return return_result;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '} else {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return next_result;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        next_chain_info += '}\n\n'
        return next_chain_info
    # Generate C++ header information containing functionality used in both
    # the generated and manual code.
    # - Structures used to store validation information on a per-handle basis.
    # - Unordered_map and mutexes used for storing the structure information on a per handle basis.
    #   self                the ValidationSourceOutputGenerator object
    def outputValidationHeaderInfo(self):
        """Return the C++ header content shared by generated and manual code.

        Emits the session-label map extern, per-command prototypes
        (CoreValidationXr*, GenValidUsageInputsXr*, GenValidUsageNextXr*),
        the layer version define, info-map externs, and the logging helper
        declaration.
        """
        commands = []
        validation_header_info = ''
        cur_extension_name = ''
        validation_header_info += '// Unordered Map associating pointer to a vector of session label information to a session\'s handle\n'
        validation_header_info += 'extern std::unordered_map<XrSession, std::vector<GenValidUsageXrInternalSessionLabel*>*> g_xr_session_labels;\n\n'
        # Two passes: core commands first (x == 0), then extension commands.
        for x in range(0, 2):
            if x == 0:
                commands = self.core_commands
            else:
                commands = self.ext_commands
            for cur_cmd in commands:
                # Emit a section banner whenever we cross into a new
                # core-version or extension grouping.
                if cur_cmd.ext_name != cur_extension_name:
                    if 'XR_VERSION_' in cur_cmd.ext_name:
                        validation_header_info += '\n// ---- Core %s commands\n' % cur_cmd.ext_name[11:].replace(
                            "_", ".")
                    else:
                        validation_header_info += '\n// ---- %s extension commands\n' % cur_cmd.ext_name
                    cur_extension_name = cur_cmd.ext_name
                prototype = cur_cmd.cdecl.replace("API_ATTR ", "")
                prototype = prototype.replace("XRAPI_CALL ", "")
                # We need to always export xrGetInstanceProcAddr, even though we automatically generate it.
                # Also, we really only need the core function, not the others.
                if 'xrGetInstanceProcAddr' in cur_cmd.name:
                    validation_header_info += '%s\n' % prototype.replace(
                        " xr", " GenValidUsageXr")
                    continue
                # NOTE(review): this skips every command that is NOT in
                # VALID_USAGE_MANUALLY_DEFINED, so the header only declares
                # prototypes for manually implemented commands -- confirm
                # against the module-level constants (not visible here).
                elif cur_cmd.name in VALID_USAGE_DONT_GEN or not cur_cmd.name in VALID_USAGE_MANUALLY_DEFINED:
                    continue
                if cur_cmd.protect_value:
                    validation_header_info += '#if %s\n' % cur_cmd.protect_string
                # Core call, for us to make from here into the manually implemented code
                validation_header_info += '%s\n' % prototype.replace(
                    " xr", " CoreValidationXr")
                # Validate Inputs and Next calls for the validation to make
                validation_header_info += 'XrResult %s(' % cur_cmd.name.replace(
                    "xr", "GenValidUsageInputsXr")
                count = 0
                for param in cur_cmd.params:
                    if count > 0:
                        validation_header_info += ', '
                    count = count + 1
                    validation_header_info += param.cdecl.strip()
                validation_header_info += ');\n'
                validation_header_info += '%s\n' % prototype.replace(
                    " xr", " GenValidUsageNextXr")
                if cur_cmd.protect_value:
                    validation_header_info += '#endif // %s\n' % cur_cmd.protect_string
        validation_header_info += '\n// Current API version of the Core Validation API Layer\n#define XR_CORE_VALIDATION_API_VERSION '
        validation_header_info += self.api_version_define
        validation_header_info += '\n'
        validation_header_info += '\n// Externs for Core Validation\n'
        validation_header_info += self.outputInfoMapDeclarations(extern=True)
        validation_header_info += 'void GenValidUsageCleanUpMaps(GenValidUsageXrInstanceInfo *instance_info);\n\n'
        validation_header_info += '\n// Function to convert XrObjectType to string\n'
        validation_header_info += 'std::string GenValidUsageXrObjectTypeToString(const XrObjectType& type);\n\n'
        validation_header_info += '// Function to record all the core validation information\n'
        validation_header_info += 'extern void CoreValidLogMessage(GenValidUsageXrInstanceInfo *instance_info, const std::string &message_id,\n'
        validation_header_info += '                                GenValidUsageDebugSeverity message_severity, const std::string &command_name,\n'
        validation_header_info += '                                std::vector<GenValidUsageXrObjectInfo> objects_info, const std::string &message);\n'
        return validation_header_info
# Generate C++ utility functions to verify that all the required extensions have been enabled.
# self the ValidationSourceOutputGenerator object
def writeVerifyExtensions(self):
verify_extensions = 'bool ExtensionEnabled(std::vector<std::string> &extensions, const char* const check_extension_name) {\n'
verify_extensions += self.writeIndent(1)
verify_extensions += 'for (auto enabled_extension: extensions) {\n'
verify_extensions += self.writeIndent(2)
verify_extensions += 'if (enabled_extension == check_extension_name) {\n'
verify_extensions += self.writeIndent(3)
verify_extensions += 'return true;\n'
verify_extensions += self.writeIndent(2)
verify_extensions += '}\n'
verify_extensions += self.writeIndent(1)
verify_extensions += '}\n'
verify_extensions += self.writeIndent(1)
verify_extensions += 'return false;\n'
verify_extensions += '}\n\n'
number_of_instance_extensions = 0
number_of_system_extensions = 0
for extension in self.extensions:
if extension.type == 'instance':
number_of_instance_extensions += 1
elif extension.type == 'system':
number_of_system_extensions += 1
verify_extensions += 'bool ValidateInstanceExtensionDependencies(GenValidUsageXrInstanceInfo *gen_instance_info,\n'
verify_extensions += ' const std::string &command,\n'
verify_extensions += ' const std::string &struct_name,\n'
verify_extensions += ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
verify_extensions += ' std::vector<std::string> &extensions) {\n'
indent = 1
if number_of_instance_extensions > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t cur_index = 0; cur_index < extensions.size(); ++cur_index) {\n'
indent += 1
for extension in self.extensions:
number_of_required = len(extension.required_exts) - 1
if extension.type == 'instance' and number_of_required > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (extensions[cur_index] == "%s") {\n' % extension.name
current_count = 0
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t check_index = 0; check_index < extensions.size(); ++check_index) {\n'
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (cur_index == check_index) {\n'
verify_extensions += self.writeIndent(indent + 1)
verify_extensions += 'continue;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count = 0
for required_ext in extension.required_exts:
if current_count > 0:
found = False
for extension_look in self.extensions:
if extension_look.name == required_ext:
found = True
if extension_look.type != 'instance':
verify_extensions += self.printCodeGenErrorMessage('Instance extension "%s" requires non-instance extension "%s" which is not allowed' % (
self.currentExtension, required_ext))
if not found:
verify_extensions += self.printCodeGenErrorMessage('Instance extension "%s" lists extension "%s" as a requirement, but'
' it is not defined in the registry.' % (
self.currentExtension, required_ext))
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (!ExtensionEnabled(extensions, "%s")) {\n' % required_ext
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (nullptr != gen_instance_info) {\n'
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'std::string vuid = "VUID-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += command;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += struct_name;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-parameter";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'CoreValidLogMessage(gen_instance_info, vuid, VALID_USAGE_DEBUG_SEVERITY_ERROR,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' command, objects_info,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "Missing extension dependency \\"%s\\" (required by extension" \\\n' % required_ext
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "\\"%s\\") from enabled extension list");\n' % extension.name
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return false;\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count += 1
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
else:
verify_extensions += self.writeIndent(indent)
verify_extensions += '// No instance extensions to check dependencies for\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return true;\n'
verify_extensions += '}\n\n'
verify_extensions += 'bool ValidateSystemExtensionDependencies(GenValidUsageXrInstanceInfo *gen_instance_info,\n'
verify_extensions += ' const std::string &command,\n'
verify_extensions += ' const std::string &struct_name,\n'
verify_extensions += ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
verify_extensions += ' std::vector<std::string> &extensions) {\n'
indent = 1
if number_of_system_extensions > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t cur_index = 0; cur_index < extensions.size(); ++cur_index) {\n'
indent += 1
for extension in self.extensions:
number_of_required = len(self.required_exts) - 1
if extension.type == 'system' and number_of_required > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (extensions[cur_index] == "%s") {\n' % extension.name
current_count = 0
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t check_index = 0; check_index < extensions.size(); ++check_index) {\n'
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (cur_index == check_index) {\n'
verify_extensions += self.writeIndent(indent + 1)
verify_extensions += 'continue;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count = 0
for required_ext in extension.required_exts:
if current_count > 0:
found = False
is_instance = False
for extension_look in self.extensions:
if extension_look.name == required_ext:
found = True
if extension_look.type == 'instance':
is_instance = True
if not is_instance and extension_look.type != 'system':
verify_extensions += self.printCodeGenErrorMessage('System extension "%s" has an extension dependency on extension "%s" '
'which is of an invalid type.' % (
self.currentExtension, required_ext))
if not found:
verify_extensions += self.printCodeGenErrorMessage('System extension "%s" lists extension "%s" as a requirement, but'
' it is not defined in the registry.' % (
self.currentExtension, required_ext))
if is_instance:
verify_extensions += self.writeIndent(indent)
verify_extensions += '// This is an instance extension dependency, so make sure it is enabled in the instance\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (!ExtensionEnabled(gen_instance_info->enabled_extensions, "%s") {\n' % required_ext
else:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (!ExtensionEnabled(extensions, "%s")) {\n' % required_ext
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'std::string vuid = "VUID-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += command;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += struct_name;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-parameter";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'CoreValidLogMessage(gen_instance_info, vuid, VALID_USAGE_DEBUG_SEVERITY_ERROR,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' command, objects_info,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "Missing extension dependency \\"%s\\" (required by extension" \\' % required_ext
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "\\"%s\\") from enabled extension list");\n' % extension.name
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return false;\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count += 1
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
else:
verify_extensions += self.writeIndent(indent)
verify_extensions += '// No system extensions to check dependencies for\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return true;\n'
verify_extensions += '}\n\n'
return verify_extensions
# Generate C++ enum and utility functions for verify that handles are valid.
# self the ValidationSourceOutputGenerator object
def writeValidateHandleChecks(self):
verify_handle = ''
for handle in self.api_handles:
if handle.protect_value:
verify_handle += '#if %s\n' % handle.protect_string
indent = 1
lower_handle_name = handle.name[2:].lower()
verify_handle += 'ValidateXrHandleResult Verify%sHandle(const %s* handle_to_check) {\n' % (
handle.name, handle.name)
verify_handle += self.writeIndent(indent)
verify_handle += 'return %s.verifyHandle(handle_to_check);\n' % self.makeInfoName(handle)
verify_handle += '}\n\n'
if handle.protect_value:
verify_handle += '#endif // %s\n' % handle.protect_string
return verify_handle
# Generate C++ utility functions for verify that handles share a parent.
# self the ValidationSourceOutputGenerator object
def writeValidateHandleParent(self):
verify_parent = '// Implementation function to get parent handle information\n'
verify_parent += 'bool GetXrParent(const XrObjectType inhandle_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE inhandle,\n'
verify_parent += ' XrObjectType& outhandle_type, XR_VALIDATION_GENERIC_HANDLE_TYPE& outhandle) {\n'
indent = 1
for handle in self.api_handles:
if handle.name == 'XrInstance':
verify_parent += self.writeIndent(indent)
verify_parent += 'if (inhandle_type == XR_OBJECT_TYPE_INSTANCE) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
else:
handle_info = '%s.get(CONVERT_GENERIC_TO_HANDLE(%s, inhandle))' % (self.makeInfoName(handle), handle.name)
verify_parent += self.writeIndent(indent)
verify_parent += 'if (inhandle_type == %s) {\n' % self.genXrObjectType(
handle.name)
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += '// Get the object and parent of the handle\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'GenValidUsageXrHandleInfo *handle_info = %s;\n' % handle_info
verify_parent += self.writeIndent(indent)
verify_parent += 'outhandle_type = handle_info->direct_parent_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'outhandle = handle_info->direct_parent_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return true;\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += ' return false;\n'
verify_parent += '}\n\n'
verify_parent += '// Implementation of VerifyXrParent function\n'
verify_parent += 'bool VerifyXrParent(XrObjectType handle1_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle1,\n'
verify_parent += ' XrObjectType handle2_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle2,\n'
verify_parent += ' bool check_this) {\n'
indent = 1
verify_parent += self.writeIndent(indent)
verify_parent += 'if (CHECK_FOR_NULL_HANDLE(handle1) || CHECK_FOR_NULL_HANDLE(handle2)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '} else if (check_this && handle1_type == handle2_type) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return (handle1 == handle2);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (handle1_type == XR_OBJECT_TYPE_INSTANCE && handle2_type != XR_OBJECT_TYPE_INSTANCE) {\n'
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle2_type, handle2, parent_type, parent_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return VerifyXrParent(handle1_type, handle1, parent_type, parent_handle, true);\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '} else if (handle2_type == XR_OBJECT_TYPE_INSTANCE && handle1_type != XR_OBJECT_TYPE_INSTANCE) {\n'
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle1_type, handle1, parent_type, parent_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return VerifyXrParent(parent_type, parent_handle, handle2_type, handle2, true);\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '} else {\n'
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent1_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent1_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent2_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent2_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle1_type, handle1, parent1_type, parent1_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle2_type, handle2, parent2_type, parent2_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (parent1_type == handle2_type) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return (parent1_handle == handle2);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '} else if (handle1_type == parent2_type) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return (handle1 == parent2_handle);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '} else {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return VerifyXrParent(parent1_type, parent1_handle, parent2_type, parent2_handle, true);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return false;\n'
indent -= 1
verify_parent += '}\n\n'
return verify_parent
    # Generate inline C++ code to check if a 'next' chain is valid for the current structure.
    #   self                the ValidationSourceOutputGenerator object
    #   struct_type         the name of the type of structure performing the validation check
    #   struct_name         the variable name of the structure instance being checked
    #   member              the member generated in automatic_source_generator.py to validate
    #   indent              the number of "tabs" to space in for the resulting C++ code.
    def writeValidateStructNextCheck(self, struct_type, struct_name, member, indent):
        """Return inline C++ that validates one struct member's 'next' chain.

        The emitted code builds the list of extension structs valid for this
        member, calls ValidateNextChain(), and logs a VUID error for either
        an invalid chain entry or duplicate struct types, setting xr_result
        to XR_ERROR_VALIDATION_FAILURE (both names are expected to exist in
        the surrounding generated function).
        """
        validate_struct_next = self.writeIndent(indent)
        validate_struct_next += 'std::vector<XrStructureType> valid_ext_structs;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'std::vector<XrStructureType> duplicate_ext_structs;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'std::vector<XrStructureType> encountered_structs;\n'
        # Populate the valid-extension list; when empty, ValidateNextChain()
        # only accepts a NULL 'next'.
        if member.valid_extension_structs and len(member.valid_extension_structs) > 0:
            for valid_struct in member.valid_extension_structs:
                validate_struct_next += self.writeIndent(indent)
                validate_struct_next += 'valid_ext_structs.push_back(%s);\n' % self.genXrStructureType(
                    valid_struct)
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'NextChainResult next_result = ValidateNextChain(instance_info, command_name, objects_info,\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '                                                %s->%s, valid_ext_structs,\n' % (
            struct_name, member.name)
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '                                                encountered_structs,\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '                                                duplicate_ext_structs);\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '// No valid extension structs for this \'next\'.  Therefore, must be NULL\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '// or only contain a list of valid extension structures.\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'if (NEXT_CHAIN_RESULT_ERROR == next_result) {\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'CoreValidLogMessage(instance_info, "VUID-%s-%s-next",\n' % (struct_type,
                                                                                             member.name)
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    objects_info, "Invalid structure(s) in \\"next\\" chain for %s struct \\"%s\\"");\n' % (struct_type,
                                                                                                                                            member.name)
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '} else if (NEXT_CHAIN_RESULT_DUPLICATE_STRUCT == next_result) {\n'
        # Duplicate-struct case: build a readable list of the duplicated
        # structure type names via the runtime's StructureTypeToString.
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'char struct_type_buffer[XR_MAX_STRUCTURE_NAME_SIZE];\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'std::string error_message = "Multiple structures of the same type(s) in \\"next\\" chain for ";\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'error_message += "%s : ";\n' % struct_type
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'if (nullptr != instance_info) {\n'
        validate_struct_next += self.writeIndent(indent + 2)
        validate_struct_next += 'bool wrote_struct = false;\n'
        validate_struct_next += self.writeIndent(indent + 2)
        validate_struct_next += 'for (uint32_t dup = 0; dup < duplicate_ext_structs.size(); ++dup) {\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += 'if (XR_SUCCESS == instance_info->dispatch_table->StructureTypeToString(instance_info->instance,\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += '                                                                       duplicate_ext_structs[dup],\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += '                                                                       struct_type_buffer)) {\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += 'if (wrote_struct) {\n'
        validate_struct_next += self.writeIndent(indent + 5)
        validate_struct_next += 'error_message += ", ";\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += '} else {\n'
        validate_struct_next += self.writeIndent(indent + 5)
        validate_struct_next += 'wrote_struct = true;\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += 'error_message += struct_type_buffer;\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 2)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'CoreValidLogMessage(instance_info, "VUID-%s-next-unique",\n' % struct_type
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    objects_info,\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '"Multiple structures of the same type(s) in \\"next\\" chain for %s struct");\n' % struct_type
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '}\n'
        return validate_struct_next
# Generate inline C++ code to check if a pointer to a variable or array is valid.
# self the ValidationSourceOutputGenerator object
# cmd_struct_name the name of the structure or command generating this validation check.
# member_param_name the name of the member or parameter getting validated
# member_param_type the type of the member or parameter getting validated
# pointer_to_check the full name of the pointer to check (usually cmd_struct_name +
# member_param_name in some fashion)
# full_count_var the full name of the array count variable (if this is an array), or None
# short_count_var the short name of the array count variable (if this is an array), or None
# is_in_cmd Boolean indicating that this is being called directly from inside a command
# indent the number of "tabs" to space in for the resulting C+ code.
def writeValidatePointerArrayNonNull(self, cmd_struct_name, member_param_name, member_param_type,
pointer_to_check, full_count_var, short_count_var, is_in_cmd,
indent):
array_check = self.writeIndent(indent)
instance_info_string = 'instance_info'
command_string = 'command_name'
error_prefix = ''
if is_in_cmd:
if cmd_struct_name == 'xrCreateInstance':
instance_info_string = 'nullptr'
else:
instance_info_string = 'gen_instance_info'
command_string = '"%s"' % cmd_struct_name
error_prefix = 'Invalid NULL for'
else:
error_prefix = '%s contains invalid NULL for' % cmd_struct_name
if full_count_var is None or len(full_count_var) == 0:
array_check += '// Non-optional pointer/array variable that needs to not be NULL\n'
array_check += self.writeIndent(indent)
array_check += 'if (nullptr == %s) {\n' % pointer_to_check
indent = indent + 1
array_check += self.writeIndent(indent)
array_check += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
cmd_struct_name,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s, objects_info,\n' % command_string
array_check += self.writeIndent(indent)
array_check += ' "%s %s \\"%s\\" which is not "\n' % (error_prefix,
member_param_type,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' "optional and must be non-NULL");\n'
else:
array_check += '// Pointer/array variable with a length variable. Make sure that\n'
array_check += self.writeIndent(indent)
array_check += '// if length variable is non-zero that the pointer is not NULL\n'
array_check += self.writeIndent(indent)
array_check += 'if (nullptr == %s && 0 != %s) {\n' % (
pointer_to_check, full_count_var)
indent = indent + 1
array_check += self.writeIndent(indent)
array_check += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
cmd_struct_name,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s, objects_info,\n' % command_string
array_check += self.writeIndent(indent)
array_check += ' "%s %s \\"%s\\" is which not "\n' % (error_prefix,
member_param_type,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' "optional since \\"%s\\" is set and must be non-NULL");\n' % short_count_var
array_check += self.writeIndent(indent)
array_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
indent = indent - 1
array_check += self.writeIndent(indent)
array_check += '}\n'
return array_check
# Write an inline check to make sure an Enum is valid
# self the ValidationSourceOutputGenerator object
# cmd_struct_name the name of the structure or command generating this validation check.
# cmd_name_param the name of the parameter containing the command name
# param_type the type of enum to validate getting validated
# param_name the name of the parameter to validate
# full_param_name the full name of the parameter to check (usually cmd_struct_name +
# member_param_name in some fashion)
    #   param_is_pointer    Boolean indicating that the parameter is a pointer
    #   is_in_cmd           Boolean indicating that this is being called directly from inside a command
    #   indent              the number of "tabs" to space in for the resulting C++ code.
def writeValidateInlineEnum(self, cmd_struct_name, cmd_name_param, param_type, param_name, full_param_name,
param_is_pointer, is_in_cmd, indent):
int_indent = indent
inline_enum_str = self.writeIndent(int_indent)
inline_enum_str += '// Make sure the enum type %s value is valid\n' % param_type
inline_enum_str += self.writeIndent(int_indent)
pointer_string = ''
if param_is_pointer:
pointer_string = '*'
instance_info_string = 'instance_info'
error_prefix = ''
if is_in_cmd:
if cmd_struct_name == 'xrCreateInstance':
instance_info_string = 'nullptr'
else:
instance_info_string = 'gen_instance_info'
error_prefix = 'Invalid'
else:
error_prefix = '%s contains invalid' % cmd_struct_name
inline_enum_str += 'if (!ValidateXrEnum(%s, %s, "%s", "%s", objects_info, %s%s)) {\n' % (
instance_info_string, cmd_name_param, cmd_struct_name, param_name, pointer_string, full_param_name)
int_indent = int_indent + 1
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'std::ostringstream oss_enum;\n'
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'oss_enum << std::hex << static_cast<int32_t>(%s%s);\n' % (pointer_string,
full_param_name)
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'std::string error_str = "%s %s \\"%s\\" enum value 0x";\n' % (error_prefix,
param_type,
param_name)
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'error_str += oss_enum.str();\n'
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
cmd_struct_name,
param_name)
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += ' objects_info, error_str);\n'
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
int_indent = int_indent - 1
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += '}\n'
return inline_enum_str
# Write an inline check to make sure a flag is valid
# self the ValidationSourceOutputGenerator object
# cmd_struct_name the name of the structure or command generating this validation check.
# cmd_name_param the name of the parameter containing the command name
# param_type the type of flag to validate getting validated
# param_name the name of the parameter to validate
# full_param_name the full name of the parameter to check (usually cmd_struct_name +
# member_param_name in some fashion)
# param_is_pointer Boolean indicating that the parameter is a pointer
# is_optional Boolean indicating that the parameter is optional
# is_in_cmd Boolean indicating that this is being called directly from inside a command
    #   indent              the number of "tabs" to space in for the resulting C++ code.
    def writeValidateInlineFlag(self, cmd_struct_name, cmd_name_param, param_type, param_name, full_param_name,
                                param_is_pointer, is_optional, is_in_cmd, indent):
        """Generate an inline C++ check that a flag (bitmask) value is valid.

        cmd_struct_name  name of the structure or command generating this validation check
        cmd_name_param   C++ expression holding the command name for error messages
        param_type       the XrFlags type being validated
        param_name       name of the parameter/member being validated
        full_param_name  full C++ expression for the value to validate
        param_is_pointer True when the value must be dereferenced first
        is_optional      True when a zero flag value is acceptable
        is_in_cmd        True when called directly from inside a command (vs. a struct validator)
        indent           number of "tabs" to space in for the resulting C++ code
        Returns the generated C++ source as a string.
        """
        int_indent = indent
        inline_flag_str = self.writeIndent(int_indent)
        # Derive a snake_case C++ result variable name from the flag type name:
        # add underscore between lowercase then uppercase
        result_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', param_type)
        # Drop the leading "Xr_" and change to lowercase
        result_name = result_name[3:].lower()
        result_name += '_result'
        pointer_string = ''
        if param_is_pointer:
            pointer_string = '*'
        instance_info_string = 'instance_info'
        error_prefix = ''
        if is_in_cmd:
            # xrCreateInstance has no instance info yet, so pass nullptr there.
            if cmd_struct_name == 'xrCreateInstance':
                instance_info_string = 'nullptr'
            else:
                instance_info_string = 'gen_instance_info'
            error_prefix = 'Invalid'
        else:
            error_prefix = '%s invalid member' % cmd_struct_name
        inline_flag_str += 'ValidateXrFlagsResult %s = ValidateXr%s(%s%s);\n' % (result_name,
                                                                                 param_type[2:],
                                                                                 pointer_string,
                                                                                 full_param_name)
        if self.flagHasValidValues(param_type):
            if not is_optional:
                # Must be non-zero
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += '// Flags must be non-zero in this case.\n'
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'if (VALIDATE_XR_FLAGS_ZERO == %s) {\n' % result_name
                int_indent = int_indent + 1
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-requiredbitmask",\n' % (instance_info_string,
                                                                                                cmd_struct_name,
                                                                                                param_name)
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += '                    objects_info, "%s \\"%s\\" flag must be non-zero");\n' % (param_type,
                                                                                                                 param_name)
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                int_indent = int_indent - 1
                inline_flag_str += self.writeIndent(int_indent)
                # Opens the brace the shared "illegal bit" block below closes.
                inline_flag_str += '} else if (VALIDATE_XR_FLAGS_SUCCESS != %s) {\n' % result_name
            else:
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += '// Valid flags available, so it must be invalid to fail.\n'
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'if (VALIDATE_XR_FLAGS_INVALID == %s) {\n' % result_name
            # Shared by both branches above: report the illegal-bit failure.
            int_indent = int_indent + 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '// Otherwise, flags must be valid.\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'std::ostringstream oss_enum;\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'oss_enum << std::hex << static_cast<int32_t>(%s%s);\n' % (pointer_string,
                                                                                          full_param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'std::string error_str = "%s %s \\"%s\\" flag value 0x";\n' % (error_prefix,
                                                                                              param_type,
                                                                                              param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'error_str += oss_enum.str();\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'error_str += " contains illegal bit";\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
                                                                                      cmd_struct_name,
                                                                                      param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '                    objects_info, error_str);\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
            int_indent = int_indent - 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '}\n'
        else:
            # Must be zero
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '// Flags must be zero in this case.\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'if (VALIDATE_XR_FLAGS_ZERO != %s) {\n' % result_name
            int_indent = int_indent + 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-zerobitmask",\n' % (instance_info_string,
                                                                                        cmd_struct_name,
                                                                                        param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '                    objects_info, "%s \\"%s\\" flag must be zero");\n' % (param_type,
                                                                                                          param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
            int_indent = int_indent - 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '}\n'
        return inline_flag_str
# Write an inline check to make sure a handle is valid
# self the ValidationSourceOutputGenerator object
# cmd_name the name of the command generating this validation check.
# vuid_name the name of the structure or command to put in the VUID
# member_param the member or parameter generated in automatic_source_generator.py to validate
# mem_par_desc_name Descriptive name of parameter
# output_result_type Boolean indicating we need to output the handle result type (since it hasn't
# been defined in the C++ code yet).
# return_on_null Boolean indicating we need to return immediately if we encounter a NULL
# instance_info_name Name of the parameter storing the instance information
# element_in_array This is a single element in an array
    #   indent              the number of "tabs" to space in for the resulting C++ code.
    def writeValidateInlineHandleValidation(self, cmd_name, vuid_name, member_param, mem_par_desc_name,
                                            output_result_type, return_on_null, instance_info_name,
                                            element_in_array, indent):
        """Generate an inline C++ block that verifies an XR handle is valid.

        cmd_name           C++ expression holding the command name for error messages
        vuid_name          name of the structure or command to put in the VUID
        member_param       the member/parameter record to validate
        mem_par_desc_name  full C++ expression for the handle value
        output_result_type unused here; kept for interface compatibility with callers
        return_on_null     True to emit an early XR_SUCCESS return for an optional NULL handle
        instance_info_name name of the C++ variable storing the instance information
        element_in_array   True when validating a single element of an array
        indent             number of "tabs" to space in for the resulting C++ code
        Returns the generated C++ source as a string.
        """
        inline_validate_handle = ''
        adjust_to_pointer = ''
        # VerifyXr...Handle takes a pointer, so take the address unless the
        # expression is already pointer-valued for this context.
        if (not element_in_array and member_param.pointer_count == 0) or (element_in_array and member_param.pointer_count == 1):
            adjust_to_pointer = '&'
        inline_validate_handle += self.writeIndent(indent)
        inline_validate_handle += '{\n'
        indent += 1
        inline_validate_handle += self.writeIndent(indent) + "// writeValidateInlineHandleValidation\n"
        inline_validate_handle += self.writeIndent(indent)
        inline_validate_handle += 'ValidateXrHandleResult handle_result = Verify%sHandle(%s%s);\n' % (member_param.type, adjust_to_pointer,
                                                                                                      mem_par_desc_name)
        wrote_first_if = False
        if member_param.is_optional:
            # If we have to return on a Handle that has a value of XR_NULL_HANDLE, do so.
            if return_on_null:
                wrote_first_if = True
                inline_validate_handle += self.writeIndent(indent)
                inline_validate_handle += 'if (handle_result == VALIDATE_XR_HANDLE_NULL) {\n'
                inline_validate_handle += self.writeIndent(indent + 1)
                inline_validate_handle += '// Handle is optional so NULL is valid. But we can\'t do anything else, either.\n'
                inline_validate_handle += self.writeIndent(indent + 1)
                inline_validate_handle += 'return XR_SUCCESS;\n'
                inline_validate_handle += self.writeIndent(indent)
                # Deliberately no newline: the next branch may append " else ".
                inline_validate_handle += '}'
        # Otherwise, catch the non-success case. If we catch the NULL handle above, we add an "else" to
        # the if below.
        if not member_param.no_auto_validity:
            if wrote_first_if:
                inline_validate_handle += ' else '
            else:
                inline_validate_handle += self.writeIndent(indent)
            indent = indent + 1
            if member_param.is_optional:
                inline_validate_handle += 'if (handle_result == VALIDATE_XR_HANDLE_INVALID) {\n'
                inline_validate_handle += self.writeIndent(indent)
                inline_validate_handle += '// Not a valid handle\n'
            else:
                inline_validate_handle += 'if (handle_result != VALIDATE_XR_HANDLE_SUCCESS) {\n'
                inline_validate_handle += self.writeIndent(indent)
                inline_validate_handle += '// Not a valid handle or NULL (which is not valid in this case)\n'
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'std::ostringstream oss;\n'
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'oss << "Invalid %s handle \\"%s\\" 0x";\n' % (member_param.type,
                                                                                    member_param.name)
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'oss << std::hex << reinterpret_cast<const void*>(%s);\n' % mem_par_desc_name
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_name,
                                                                                             vuid_name,
                                                                                             member_param.name)
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += '                    objects_info, oss.str());\n'
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'return XR_ERROR_HANDLE_INVALID;\n'
            indent = indent - 1
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += '}\n'
        else:
            # No auto-validity: just terminate the dangling line from above.
            inline_validate_handle += '\n'
        indent -= 1
        inline_validate_handle += self.writeIndent(indent)
        inline_validate_handle += '}\n'
        return inline_validate_handle
    def outputParamMemberContents(self, is_command, struct_command_name, param_member, param_member_prefix, instance_info_variable,
                                  command_name_variable, is_first_param, primary_handle, primary_handle_desc_name, primary_handle_tuple,
                                  wrote_handle_proto, indent):
        """Generate the inline C++ validation code for one command parameter or struct member.

        is_command               True when generating for a command, False for a struct member
        struct_command_name      name of the enclosing command or structure (used in VUIDs/messages)
        param_member             the parameter/member record to validate
        param_member_prefix      C++ prefix to prepend to the member name (e.g. "value->")
        instance_info_variable   C++ variable name holding the instance info
        command_name_variable    C++ expression holding the command name for error messages
        is_first_param           True when this is the first parameter (unused directly here)
        primary_handle           the first handle seen, for parent/ancestor checks
        primary_handle_desc_name descriptive C++ name of the primary handle
        primary_handle_tuple     registry tuple for the primary handle, or None
        wrote_handle_proto       True once a handle check has already been emitted
        indent                   number of "tabs" to space in for the resulting C++ code
        Returns the generated C++ source as a string.
        """
        param_member_contents = ''
        is_loop = False
        is_pointer = False
        is_array = param_member.is_array
        check_pointer_array_null = False
        loop_string = ''
        wrote_loop = False
        prefixed_param_member_name = param_member_prefix
        prefixed_param_member_name += param_member.name
        # Keep the un-indexed name around; loop handling rewrites the prefixed name.
        pre_loop_prefixed_param_member_name = prefixed_param_member_name
        loop_param_name = 'value_'
        loop_param_name += param_member.name.lower()
        loop_param_name += '_inc'
        # Classify the member as array and/or pointer based on the registry info.
        if len(param_member.array_count_var) != 0:
            is_array = True
            if param_member.pointer_count > 0:
                is_pointer = True
        elif len(param_member.pointer_count_var) != 0:
            is_array = True
            if param_member.pointer_count > 1:
                is_pointer = True
        elif param_member.pointer_count > 0:
            is_pointer = True
        if is_array or is_pointer:
            check_pointer_array_null = not param_member.is_optional and not param_member.is_static_array
            short_count_var = None
            full_count_var = None
            if is_array:
                # Work out both the short (registry) and full (prefixed) count names.
                long_count_name = param_member_prefix
                if param_member.is_static_array:
                    short_count_var = param_member.static_array_sizes[0]
                    long_count_name = param_member.static_array_sizes[0]
                elif len(param_member.array_count_var) != 0:
                    short_count_var = param_member.array_count_var
                    if self.isAllUpperCase(param_member.array_count_var):
                        long_count_name = param_member.array_count_var
                    else:
                        long_count_name += param_member.array_count_var
                else:
                    short_count_var = param_member.pointer_count_var
                    if self.isAllUpperCase(param_member.pointer_count_var):
                        long_count_name = param_member.pointer_count_var
                    else:
                        long_count_name += param_member.pointer_count_var
                if check_pointer_array_null:
                    full_count_var = long_count_name
                    param_member_contents += self.writeValidatePointerArrayNonNull(struct_command_name,
                                                                                   param_member.name,
                                                                                   param_member.type,
                                                                                   prefixed_param_member_name,
                                                                                   full_count_var,
                                                                                   short_count_var,
                                                                                   is_command,
                                                                                   indent)
                # Per-element checks need a loop; prepare it now but emit it later.
                if (param_member.is_handle or self.isEnumType(param_member.type) or
                        (self.isStruct(param_member.type) and not self.isStructAlwaysValid(param_member.type))):
                    loop_string += self.writeIndent(indent)
                    loop_string += 'for (uint32_t %s = 0; %s < %s; ++%s) {\n' % (loop_param_name,
                                                                                 loop_param_name,
                                                                                 long_count_name,
                                                                                 loop_param_name)
                    indent = indent + 1
                    prefixed_param_member_name = '%s[%s]' % (
                        prefixed_param_member_name, loop_param_name)
                    is_loop = True
            elif check_pointer_array_null:
                param_member_contents += self.writeValidatePointerArrayNonNull(struct_command_name,
                                                                               param_member.name,
                                                                               param_member.type,
                                                                               prefixed_param_member_name,
                                                                               None,
                                                                               None,
                                                                               is_command,
                                                                               indent)
        # Emit checks tying an array pointer to the length member it sizes.
        if not param_member.is_static_array and len(param_member.array_length_for) > 0:
            if param_member.is_optional:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// Optional array must be non-NULL when %s is non-zero\n' % prefixed_param_member_name
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'if (0 != %s && nullptr == %s%s) {\n' % (
                    prefixed_param_member_name, param_member_prefix, param_member.array_length_for)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_variable,
                                                                                                struct_command_name,
                                                                                                param_member.array_length_for)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += '                    objects_info,\n'
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += '                    '
                if is_command:
                    param_member_contents += '"Command %s param %s' % (
                        struct_command_name, param_member.array_length_for)
                else:
                    param_member_contents += '"Structure %s member %s' % (
                        struct_command_name, param_member.name)
                param_member_contents += ' is NULL, but %s is greater than 0");\n' % prefixed_param_member_name
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
            else:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// Non-optional array length must be non-zero\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'if (0 >= %s && nullptr != %s%s) {\n' % (
                    prefixed_param_member_name, param_member_prefix, param_member.array_length_for)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-arraylength",\n' % (instance_info_variable,
                                                                                                  struct_command_name,
                                                                                                  param_member.name)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += '                    objects_info,\n'
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += '                    '
                if is_command:
                    param_member_contents += '"Command %s param %s' % (
                        struct_command_name, param_member.name)
                else:
                    param_member_contents += '"Structure %s member %s' % (
                        struct_command_name, param_member.name)
                param_member_contents += ' is non-optional and must be greater than 0");\n'
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
        first_time_handle_check = not wrote_handle_proto
        # Type-specific validation: handles, structs, enums, flags, then the rest.
        if param_member.is_handle:
            if param_member.pointer_count == 0:
                param_member_contents += self.writeValidateInlineHandleValidation(command_name_variable,
                                                                                  struct_command_name,
                                                                                  param_member,
                                                                                  prefixed_param_member_name,
                                                                                  first_time_handle_check,
                                                                                  is_command,
                                                                                  instance_info_variable,
                                                                                  False,
                                                                                  indent)
                # If the first item is a handle, and this is a different handle, we need to verify that
                # one is either the parent of the other, or that they share a common ancestor.
                if primary_handle_tuple is not None and not first_time_handle_check:
                    current_handle_tuple = self.getHandle(param_member.type)
                    param_member_contents += self.writeInlineParentCheckCall(instance_info_variable,
                                                                             primary_handle_tuple,
                                                                             primary_handle,
                                                                             primary_handle_desc_name,
                                                                             current_handle_tuple,
                                                                             param_member,
                                                                             prefixed_param_member_name,
                                                                             struct_command_name,
                                                                             command_name_variable,
                                                                             indent)
                elif not is_command:
                    primary_handle_tuple = self.getHandle(param_member.type)
                    primary_handle = param_member
                    primary_handle_desc_name = prefixed_param_member_name
            elif is_array:
                param_member_contents += loop_string
                wrote_loop = True
                param_member_contents += self.writeValidateInlineHandleValidation(command_name_variable,
                                                                                  struct_command_name,
                                                                                  param_member,
                                                                                  prefixed_param_member_name,
                                                                                  first_time_handle_check,
                                                                                  is_command,
                                                                                  instance_info_variable,
                                                                                  True,
                                                                                  indent)
        elif self.isStruct(param_member.type) and not self.isStructAlwaysValid(param_member.type):
            param_member_contents += loop_string
            wrote_loop = True
            is_relation_group = False
            relation_group = None
            # Check to see if this struct is the base of a relation group
            for cur_rel_group in self.struct_relation_groups:
                if cur_rel_group.generic_struct_name == param_member.type:
                    relation_group = cur_rel_group
                    is_relation_group = True
                    break
            # If this struct is the base of a relation group, check to see if this call really should go to any one of
            # it's children instead of itself.
            if is_relation_group:
                for child in relation_group.child_struct_names:
                    child_struct = self.getStruct(child)
                    if child_struct.protect_value:
                        param_member_contents += '#if %s\n' % child_struct.protect_string
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '// Validate if %s is a child structure of type %s and it is valid\n' % (
                        param_member.type, child)
                    param_member_contents += self.writeIndent(indent)
                    base_child_struct_name = child[2:].lower()
                    if is_pointer or is_array:
                        # Rewrite the member's C declaration to the child type to build the cast.
                        new_type_info = param_member.cdecl.replace(
                            param_member.type, child)
                        new_type_info = new_type_info.replace(
                            param_member.name, "")
                        new_type_info = new_type_info.strip().rstrip()
                        param_member_contents += '%s new_%s_value = reinterpret_cast<%s>(%s);\n' % (
                            new_type_info, base_child_struct_name, new_type_info, pre_loop_prefixed_param_member_name)
                        param_member_contents += self.writeIndent(indent)
                        deref_string = '->' if is_pointer else '.'
                        if is_array:
                            param_member_contents += 'if (new_%s_value[%s]%stype == %s) {\n' % (
                                base_child_struct_name, loop_param_name, deref_string, self.genXrStructureType(child))
                        else:
                            param_member_contents += 'if (new_%s_value%stype == %s) {\n' % (
                                base_child_struct_name, deref_string, self.genXrStructureType(child))
                    else:
                        param_member_contents += 'const %s* new_%s_value = reinterpret_cast<const %s*>(&%s);\n' % (
                            child, base_child_struct_name, child, pre_loop_prefixed_param_member_name)
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'if (new_%s_value->type == %s) {\n' % (
                            base_child_struct_name, self.genXrStructureType(child))
                    indent = indent + 1
                    if param_member.is_optional:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'if (nullptr != new_%s_value) {\n' % base_child_struct_name
                        indent = indent + 1
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'xr_result = ValidateXrStruct(%s, %s,\n' % (
                            instance_info_variable, command_name_variable)
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += '                             objects_info,'
                        if is_command:
                            param_member_contents += ' false,'
                        else:
                            param_member_contents += ' check_members,'
                        if is_array:
                            if is_pointer:
                                param_member_contents += ' new_%s_value[%s]);\n' % (
                                    base_child_struct_name, loop_param_name)
                            else:
                                param_member_contents += ' &new_%s_value[%s]);\n' % (
                                    base_child_struct_name, loop_param_name)
                        else:
                            param_member_contents += ' new_%s_value);\n' % base_child_struct_name
                    else:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'xr_result = ValidateXrStruct(%s, %s,\n' % (
                            instance_info_variable, command_name_variable)
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += '                             objects_info,'
                        if is_command:
                            param_member_contents += 'false,'
                        else:
                            param_member_contents += ' check_members,'
                        if is_array:
                            param_member_contents += ' new_%s_value[%s]);\n' % (
                                base_child_struct_name, loop_param_name)
                        else:
                            param_member_contents += ' new_%s_value);\n' % base_child_struct_name
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'if (XR_SUCCESS != xr_result) {\n'
                    indent = indent + 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'std::string error_message = "'
                    if is_command:
                        param_member_contents += 'Command %s param %s";\n' % (
                            struct_command_name, param_member.name)
                    else:
                        param_member_contents += 'Structure %s member %s";\n' % (
                            struct_command_name, param_member.name)
                    if is_array:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'error_message += "[";\n'
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'error_message += std::to_string(%s);\n' % loop_param_name
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'error_message += "]";\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'error_message += " is invalid";\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (
                        instance_info_variable, struct_command_name, param_member.name)
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '                    objects_info,\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '                    error_message);\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    if is_array:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'break;\n'
                    param_member_contents += self.writeIndent(indent - 1)
                    param_member_contents += '} else {\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'continue;\n'
                    if param_member.is_optional:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += '}\n'
                        indent = indent - 1
                    indent = indent - 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '}\n'
                    indent = indent - 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '}\n'
                    if child_struct.protect_value:
                        param_member_contents += '#endif // %s\n' % child_struct.protect_string
            # Fall through: validate the struct (or relation-group base) itself.
            param_member_contents += self.writeIndent(indent)
            if is_relation_group:
                param_member_contents += '// Validate that the base-structure %s is valid\n' % (
                    param_member.type)
            else:
                param_member_contents += '// Validate that the structure %s is valid\n' % (
                    param_member.type)
            param_member_contents += self.writeIndent(indent)
            if is_pointer:
                if param_member.is_optional:
                    param_member_contents += 'if (nullptr != %s) {\n' % prefixed_param_member_name
                    indent = indent + 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'xr_result = ValidateXrStruct(%s, %s,\n' % (
                        instance_info_variable, command_name_variable)
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '                             objects_info,'
                    if is_command:
                        param_member_contents += ' false,'
                    else:
                        param_member_contents += ' check_members,'
                    param_member_contents += ' %s);\n' % prefixed_param_member_name
                else:
                    param_member_contents += 'xr_result = ValidateXrStruct(%s, %s, objects_info,\n' % (
                        instance_info_variable, command_name_variable)
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '                             '
                    if is_command:
                        if param_member.is_const:
                            param_member_contents += 'true,'
                        else:
                            param_member_contents += 'false,'
                    else:
                        param_member_contents += 'check_members,'
                    param_member_contents += ' %s);\n' % prefixed_param_member_name
            else:
                param_member_contents += 'xr_result = ValidateXrStruct(%s, %s, objects_info,\n' % (
                    instance_info_variable, command_name_variable)
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '                             '
                if is_command:
                    param_member_contents += 'true,'
                else:
                    param_member_contents += 'check_members,'
                param_member_contents += ' &%s);\n' % prefixed_param_member_name
            param_member_contents += self.writeIndent(indent)
            param_member_contents += 'if (XR_SUCCESS != xr_result) {\n'
            indent = indent + 1
            param_member_contents += self.writeIndent(indent)
            param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (
                instance_info_variable, struct_command_name, param_member.name)
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '                    objects_info,\n'
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '                    '
            if is_command:
                param_member_contents += '"Command %s param %s' % (
                    struct_command_name, param_member.name)
            else:
                param_member_contents += '"Structure %s member %s' % (
                    struct_command_name, param_member.name)
            param_member_contents += ' is invalid");\n'
            param_member_contents += self.writeIndent(indent)
            param_member_contents += 'return xr_result;\n'
            indent = indent - 1
            if is_pointer and param_member.is_optional:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
                indent = indent - 1
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '}\n'
        elif self.isEnumType(param_member.type):
            if is_array:
                param_member_contents += loop_string
                wrote_loop = True
            param_member_contents += self.writeValidateInlineEnum(struct_command_name,
                                                                  command_name_variable,
                                                                  param_member.type,
                                                                  param_member.name,
                                                                  prefixed_param_member_name,
                                                                  is_pointer,
                                                                  is_command,
                                                                  indent)
        elif self.isFlagType(param_member.type):
            param_member_contents += self.writeValidateInlineFlag(struct_command_name,
                                                                  command_name_variable,
                                                                  param_member.type,
                                                                  param_member.name,
                                                                  prefixed_param_member_name,
                                                                  is_pointer,
                                                                  param_member.is_optional,
                                                                  is_command,
                                                                  indent)
        elif "void" not in param_member.type:
            if param_member.is_null_terminated:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// NOTE: Can\'t validate "VUID-%s-%s-parameter" null-termination\n' % (struct_command_name,
                                                                                                                param_member.name)
            elif param_member.pointer_count > 0:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// NOTE: Can\'t validate "VUID-%s-%s-parameter" type\n' % (struct_command_name,
                                                                                                     param_member.name)
            elif param_member.is_static_array and "char" in param_member.type:
                # Fixed-size char array: make sure the string fits.
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'if (%s < std::strlen(%s)) {\n' % (
                    param_member.static_array_sizes[0], prefixed_param_member_name)
                indent = indent + 1
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (
                    instance_info_variable, struct_command_name, param_member.name)
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '                    objects_info,\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '                    '
                if is_command:
                    param_member_contents += '"Command %s param %s' % (
                        struct_command_name, param_member.name)
                else:
                    param_member_contents += '"Structure %s member %s' % (
                        struct_command_name, param_member.name)
                param_member_contents += ' length is too long.");\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                indent = indent - 1
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
        # Close the per-element loop if one was opened.
        if is_loop:
            indent = indent - 1
        if wrote_loop:
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '}\n'
        return param_member_contents
    # Write the validation function for every struct we know about.
    # self the ValidationSourceOutputGenerator object
    def writeValidateStructFuncs(self):
        """Emit one C++ ValidateXrStruct() overload per typed OpenXR structure.

        Returns the generated C++ source as a single string.  Structs without a
        'type' member are skipped.  For a relation-group "base" struct, the
        generated overload only dispatches to the matching child struct's
        validator (based on value->type) instead of validating members itself.
        """
        struct_check = ''
        # Now write out the actual functions
        for xr_struct in self.api_structures:
            if xr_struct.name in self.structs_with_no_type:
                continue
            indent = 1
            is_relation_group = False
            relation_group = None
            # Platform-specific structs get wrapped in their #if protect macro.
            if xr_struct.protect_value:
                struct_check += '#if %s\n' % xr_struct.protect_string
            struct_check += 'XrResult ValidateXrStruct(GenValidUsageXrInstanceInfo *instance_info, const std::string &command_name,\n'
            struct_check += '                          std::vector<GenValidUsageXrObjectInfo>& objects_info, bool check_members,\n'
            struct_check += '                          const %s* value) {\n' % xr_struct.name
            setup_bail = False
            struct_check += '    XrResult xr_result = XR_SUCCESS;\n'
            # Check to see if this struct is the base of a relation group
            for cur_rel_group in self.struct_relation_groups:
                if cur_rel_group.generic_struct_name == xr_struct.name:
                    relation_group = cur_rel_group
                    is_relation_group = True
                    break
            # If this struct is the base of a relation group, check to see if this call really should go to any one of
            # it's children instead of itself.
            if is_relation_group:
                for member in xr_struct.members:
                    if member.name == 'next':
                        struct_check += self.writeIndent(indent)
                        struct_check += '// NOTE: Can\'t validate "VUID-%s-next-next" because it is a base structure\n' % xr_struct.name
                    else:
                        struct_check += self.writeIndent(indent)
                        struct_check += '// NOTE: Can\'t validate "VUID-%s-%s-parameter" because it is a base structure\n' % (
                            xr_struct.name, member.name)
                # Emit a dispatch branch per child struct: cast and forward to
                # the child's own ValidateXrStruct overload.
                for child in relation_group.child_struct_names:
                    child_struct = self.getStruct(child)
                    if child_struct.protect_value:
                        struct_check += '#if %s\n' % child_struct.protect_string
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (value->type == %s) {\n' % self.genXrStructureType(
                        child)
                    indent += 1
                    struct_check += self.writeIndent(indent)
                    struct_check += 'const %s* new_value = reinterpret_cast<const %s*>(value);\n' % (
                        child, child)
                    # A child defined by a (non-core) extension is only valid
                    # when that extension has actually been enabled.
                    if child_struct.ext_name and not self.isCoreExtensionName(child_struct.ext_name):
                        struct_check += self.writeIndent(indent)
                        struct_check += 'if (nullptr != instance_info && !ExtensionEnabled(instance_info->enabled_extensions, "%s")) {\n' % child_struct.ext_name
                        indent += 1
                        struct_check += self.writeIndent(indent)
                        struct_check += 'std::string error_str = "%s being used with child struct type ";\n' % xr_struct.name
                        struct_check += self.writeIndent(indent)
                        struct_check += 'error_str += "\\"%s\\"";\n' % self.genXrStructureType(
                            child)
                        struct_check += self.writeIndent(indent)
                        struct_check += 'error_str += " which requires extension \\"%s\\" to be enabled, but it is not enabled";\n' % child_struct.ext_name
                        struct_check += self.writeIndent(indent)
                        struct_check += 'CoreValidLogMessage(instance_info, "VUID-%s-type-type",\n' % (
                            xr_struct.name)
                        struct_check += self.writeIndent(indent)
                        struct_check += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                        struct_check += self.writeIndent(indent)
                        struct_check += '                    objects_info, error_str);\n'
                        struct_check += self.writeIndent(indent)
                        struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                        indent -= 1
                        struct_check += self.writeIndent(indent)
                        struct_check += '}\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'return ValidateXrStruct(instance_info, command_name, objects_info, check_members, new_value);\n'
                    indent -= 1
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                    if child_struct.protect_value:
                        struct_check += '#endif // %s\n' % child_struct.protect_string
                # No child matched: log the unrecognized XrStructureType and fail.
                struct_check += self.writeIndent(indent)
                struct_check += 'std::ostringstream oss_type;\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'oss_type << std::hex << value->type;\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'std::string error_str = "%s has an invalid XrStructureType 0x";\n' % xr_struct.name
                struct_check += self.writeIndent(indent)
                struct_check += 'error_str += oss_type.str();\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'CoreValidLogMessage(instance_info, "VUID-%s-type-type",\n' % (
                    xr_struct.name)
                struct_check += self.writeIndent(indent)
                struct_check += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                struct_check += self.writeIndent(indent)
                struct_check += '                    objects_info, error_str);\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                struct_check += '}\n\n'
                continue
            first_member_handle_tuple = None
            first_member_handle = None
            count = 0
            wrote_handle_check_proto = False
            has_enable_extension_count = False
            has_enable_extension_names = False
            for member in xr_struct.members:
                # If we're not supposed to check this, then skip it
                if member.no_auto_validity:
                    continue
                if member.name == 'type':
                    struct_check += self.writeIndent(indent)
                    struct_check += '// Make sure the structure type is correct\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (value->type != %s) {\n' % self.genXrStructureType(
                        xr_struct.name)
                    indent = indent + 1
                    struct_check += self.writeIndent(indent)
                    struct_check += 'std::ostringstream oss_type;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'oss_type << std::hex << value->type;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'std::string error_str = "%s has an invalid XrStructureType 0x";\n' % xr_struct.name
                    struct_check += self.writeIndent(indent)
                    struct_check += 'error_str += oss_type.str();\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'error_str += ", expected %s";\n' % self.genXrStructureType(
                        xr_struct.name)
                    struct_check += self.writeIndent(indent)
                    struct_check += 'CoreValidLogMessage(instance_info, "VUID-%s-%s-type",\n' % (
                        xr_struct.name, member.name)
                    struct_check += self.writeIndent(indent)
                    struct_check += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '                    objects_info, error_str);\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
                    indent = indent - 1
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                    continue
                elif member.name == 'next':
                    struct_check += self.writeValidateStructNextCheck(
                        xr_struct.name, 'value', member, indent)
                elif member.name == 'enabledExtensionCount':
                    has_enable_extension_count = True
                elif member.name == 'enabledExtensionNames':
                    has_enable_extension_names = True
                elif not setup_bail:
                    # Before the first "real" member check, emit the early-out
                    # used when the caller only wants type/next validated.
                    struct_check += self.writeIndent(indent)
                    struct_check += '// If we are not to check the rest of the members, just return here.\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (!check_members || XR_SUCCESS != xr_result) {\n'
                    struct_check += self.writeIndent(indent + 1)
                    struct_check += 'return xr_result;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                    setup_bail = True
                struct_check += self.outputParamMemberContents(False, xr_struct.name, member, 'value->',
                                                               "instance_info", "command_name",
                                                               count == 0,
                                                               first_member_handle,
                                                               first_member_handle,
                                                               first_member_handle_tuple,
                                                               wrote_handle_check_proto,
                                                               indent)
                if member.is_handle:
                    wrote_handle_check_proto = True
                count = count + 1
            # We only have extensions to check if both the count and enable fields are there
            if has_enable_extension_count and has_enable_extension_names:
                # This is create instance, so check all instance extensions
                struct_check += self.writeIndent(indent)
                struct_check += 'std::vector<std::string> enabled_extension_vec;\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'for (uint32_t extension = 0; extension < value->enabledExtensionCount; ++extension) {\n'
                struct_check += self.writeIndent(indent + 1)
                struct_check += 'enabled_extension_vec.push_back(value->enabledExtensionNames[extension]);\n'
                struct_check += self.writeIndent(indent)
                struct_check += '}\n'
                if xr_struct.name == 'XrInstanceCreateInfo':
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (!ValidateInstanceExtensionDependencies(nullptr, command_name, "%s",\n' % xr_struct.name
                    struct_check += self.writeIndent(indent)
                    struct_check += '                                           objects_info, enabled_extension_vec)) {\n'
                    struct_check += self.writeIndent(indent + 1)
                    struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                else:
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (!ValidateSystemExtensionDependencies(instance_info, command_name, "%s",\n' % xr_struct.name
                    struct_check += self.writeIndent(indent)
                    struct_check += '                                         objects_info, enabled_extension_vec)) {\n'
                    struct_check += self.writeIndent(indent + 1)
                    struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
            struct_check += self.writeIndent(indent)
            struct_check += '// Everything checked out properly\n'
            struct_check += self.writeIndent(indent)
            struct_check += 'return xr_result;\n'
            struct_check += '}\n\n'
            if xr_struct.protect_value:
                struct_check += '#endif // %s\n' % xr_struct.protect_string
            struct_check += '\n'
        return struct_check
    # Write an inline validation check for handle parents
    # self the ValidationSourceOutputGenerator object
    # instance_info_string string used to identify the variable associated with the instance information struct.
    # first_handle_tuple the handle tuple associated with the type of the first handle
    # first_handle_mem_param the member/param of the first handle
    # first_handle_desc_name the descriptive name of the first handle
    # cur_handle_tuple the handle tuple associated with the type of the current handle
    # cur_handle_mem_param the member/param of the current handle
    # cur_handle_desc_name the descriptive name of the current handle
    # vuid_name the VUID identifier to associate this check and member/param name with
    # cmd_name_param the parameter containing the associated command name
    # indent the number of tab-stops to indent the current inline strings
    def writeInlineParentCheckCall(self, instance_info_string, first_handle_tuple, first_handle_mem_param, first_handle_desc_name,
                                   cur_handle_tuple, cur_handle_mem_param, cur_handle_desc_name, vuid_name,
                                   cmd_name_param, indent):
        """Emit an inline C++ check that two handles share the required ancestry.

        Generates a VerifyXrParent() call (skipped for XR_NULL_HANDLE when the
        second handle is optional), plus the error-message construction,
        CoreValidLogMessage() call and early return used on failure.  Returns
        the generated C++ snippet as a string.
        """
        parent_check_string = ''
        # Use the member-specific "<name>-parent" VUID when one handle is the
        # direct parent of the other; otherwise fall back to "commonparent".
        parent_id = 'commonparent'
        if (first_handle_tuple.name == cur_handle_tuple.parent or
                cur_handle_tuple.name == first_handle_tuple.parent):
            parent_id = '%s-parent' % cur_handle_mem_param.name
        parent_check_string += self.writeIndent(indent)
        pointer_deref = ''
        if cur_handle_mem_param.pointer_count > 0:
            pointer_deref = '*'
        # compare_flag becomes "false" when both handles share a type, since two
        # handles of the same type can never be parent/child of each other.
        compare_flag = 'true'
        if first_handle_mem_param.type == cur_handle_mem_param.type:
            compare_flag = 'false'
        if cur_handle_mem_param.is_optional:
            parent_check_string += '// If the second handle is optional, only check for a common parent if\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += '// it is not XR_NULL_HANDLE\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'if (!CHECK_FOR_NULL_HANDLE(%s) && !VerifyXrParent(%s, CONVERT_HANDLE_TO_GENERIC(%s),\n' % (
                cur_handle_desc_name,
                self.genXrObjectType(first_handle_mem_param.type),
                first_handle_desc_name)
            parent_check_string += '                    %s, CONVERT_HANDLE_TO_GENERIC(%s%s), %s)) {\n' % (
                self.genXrObjectType(cur_handle_mem_param.type),
                pointer_deref,
                cur_handle_desc_name,
                compare_flag)
        else:
            parent_check_string += '// Verify that the handles share a common ancestry\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'if (!VerifyXrParent(%s, CONVERT_HANDLE_TO_GENERIC(%s),\n' % (
                self.genXrObjectType(first_handle_mem_param.type), first_handle_desc_name)
            parent_check_string += '                    %s, CONVERT_HANDLE_TO_GENERIC(%s%s), %s)) {\n' % (
                self.genXrObjectType(cur_handle_mem_param.type), pointer_deref, cur_handle_desc_name, compare_flag)
        indent = indent + 1
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'std::ostringstream oss_handle_1;\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'oss_handle_1 << std::hex << reinterpret_cast<const void*>(%s);\n' % first_handle_desc_name
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'std::ostringstream oss_handle_2;\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'oss_handle_2 << std::hex << reinterpret_cast<const void*>(%s%s);\n' % (
            pointer_deref, cur_handle_desc_name)
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'std::string error_str = "%s ";\n' % first_handle_mem_param.type
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'error_str += oss_handle_1.str();\n'
        # Tailor the error text to the specific relationship that failed.
        if first_handle_tuple.name == cur_handle_tuple.parent:
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " must be a parent to %s ";\n' % cur_handle_mem_param.type
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += oss_handle_2.str();\n'
        elif cur_handle_tuple.name == first_handle_tuple.parent:
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " must be a child of %s ";\n' % cur_handle_mem_param.type
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += oss_handle_2.str();\n'
        else:
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " and %s ";\n' % cur_handle_mem_param.type
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += oss_handle_2.str();\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " must share a parent";\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'CoreValidLogMessage(%s, "VUID-%s-%s",\n' % (instance_info_string,
                                                                            vuid_name,
                                                                            parent_id)
        parent_check_string += self.writeIndent(indent)
        parent_check_string += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
        parent_check_string += self.writeIndent(indent)
        parent_check_string += '                    objects_info, error_str);\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'return XR_ERROR_VALIDATION_FAILURE;\n'
        indent = indent - 1
        parent_check_string += self.writeIndent(indent)
        parent_check_string += '}\n'
        return parent_check_string
# Generate C++ code to validate the inputs of the current command.
# self the ValidationSourceOutputGenerator object
# cur_command the command generated in automatic_source_generator.py to validate
def genValidateInputsFunc(self, cur_command):
pre_validate_func = ''
pre_validate_func += 'XrResult %s(' % cur_command.name.replace("xr",
"GenValidUsageInputsXr")
pre_validate_func += '\n'
count = 0
for param in cur_command.params:
if count > 0:
pre_validate_func += ',\n'
pre_validate_func += ' '
pre_validate_func += param.cdecl.strip()
count = count + 1
pre_validate_func += ') {\n'
wrote_handle_check_proto = False
is_first_param_handle = cur_command.params[0].is_handle
first_param_handle_tuple = self.getHandle(cur_command.params[0].type)
# If the first parameter is a handle and we either have to validate that handle, or check
# for extension information, then we will need the instance information.
indent = 1
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'try {\n'
indent = indent + 1
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'XrResult xr_result = XR_SUCCESS;\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'std::vector<GenValidUsageXrObjectInfo> objects_info;\n'
if first_param_handle_tuple != None:
handle_param = cur_command.params[0]
first_handle_name = self.getFirstHandleName(handle_param)
obj_type = self.genXrObjectType(handle_param.type)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'objects_info.emplace_back(%s, %s);\n\n'% (first_handle_name, obj_type)
lower_handle_name = first_param_handle_tuple.name[2:].lower()
if first_param_handle_tuple.name == 'XrInstance':
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'GenValidUsageXrInstanceInfo *gen_instance_info = g_instance_info.get(%s);\n' % first_handle_name
else:
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'auto info_with_instance = %s.getWithInstanceInfo(%s);\n' % (
self.makeInfoName(handle_type_name=handle_param.type), first_handle_name)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'GenValidUsageXrHandleInfo *gen_%s_info = info_with_instance.first;\n' % lower_handle_name
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'GenValidUsageXrInstanceInfo *gen_instance_info = info_with_instance.second;\n'
# If any of the associated handles has validation state tracking, get the
# appropriate struct setup for validation later in the function
valid_type_list = []
if cur_command.checks_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
command_param_of_type = ''
for param in cur_command.params:
if param.type == cur_state.type:
command_param_of_type = param.name
break
if (len(command_param_of_type) > 0) and cur_state.type not in valid_type_list:
valid_type_list.append(cur_state.type)
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'auto %s_valid = g_%s_valid_states[%s];\n' % (
cur_state.type[2:].lower(), cur_state.type[2:].lower(), command_param_of_type)
for additional_ext in cur_command.required_exts:
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '// Check to make sure that the extension this command is in has been enabled\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'if (!ExtensionEnabled(gen_instance_info->enabled_extensions, "%s")) {\n' % additional_ext
pre_validate_func += self.writeIndent(indent + 1)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '}\n'
instance_info_variable = 'gen_instance_info' if first_param_handle_tuple else 'nullptr'
# Check for non-optional null pointers
count = 0
for param in cur_command.params:
# TODO use_pointer_deref never gets used?
use_pointer_deref = False
if len(param.array_count_var) != 0 or len(param.pointer_count_var) != 0:
if ((len(param.array_count_var) != 0 and param.pointer_count > 0) or
(len(param.pointer_count_var) != 0 and param.pointer_count > 1)):
use_pointer_deref = True
elif param.pointer_count > 0:
use_pointer_deref = True
if count > 0 and param.is_handle and not param.pointer_count > 0:
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'objects_info.emplace_back(%s, %s);\n' % (param.name, self.genXrObjectType(
param.type))
if not param.no_auto_validity:
command_name_string = '"%s"' % cur_command.name
pre_validate_func += self.outputParamMemberContents(True, cur_command.name, param, '',
instance_info_variable,
command_name_string,
count == 0,
cur_command.params[0],
cur_command.params[0].name,
first_param_handle_tuple,
wrote_handle_check_proto,
indent)
wrote_handle_check_proto = True
count = count + 1
base_handle_name = cur_command.params[0].type[2:].lower()
# If this command needs to be checked to ensure that it is executing between
# a "begin" and an "end" command, do so.
if cur_command.checks_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
for param in cur_command.params:
if param.type == cur_state.type:
break
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// Validate that this command is called at the proper time between the\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// appropriate commands\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'if (!%s_valid->%s) {\n' % (
cur_state.type[2:].lower(), cur_state.variable)
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'std::string error_msg = "%s is required to be called between successful calls to ";\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'error_msg += "'
cur_count = 0
for begin_command in cur_state.begin_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % begin_command
pre_validate_func += ' and '
cur_count = 0
for end_command in cur_state.end_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % end_command
pre_validate_func += ' commands";\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-checkstate",\n' % (
instance_info_variable, cur_command.name, cur_state.state)
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' error_msg);\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '}\n'
# If this command needs to indicate that a validation state has begun, do so.
if cur_command.begins_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
for param in cur_command.params:
if param.type == cur_state.type:
break
# First, make sure we're not calling two (or more) "begins" in a row
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// Validate that this command is called first or only after the corresponding\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// "completion" commands\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'if (%s_valid->%s) {\n' % (
cur_state.type[2:].lower(), cur_state.variable)
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'std::string error_msg = "%s is called again without first successfully calling ";\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'error_msg += "'
cur_count = 0
for end_command in cur_state.end_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % end_command
pre_validate_func += '";\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-beginstate",\n' % (
instance_info_variable, cur_command.name, cur_state.state)
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' error_msg);\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '}\n'
# Begin the appropriate state
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// Begin the %s state\n' % cur_state.state
pre_validate_func += self.writeIndent(2)
pre_validate_func += '%s_valid->%s = true;\n' % (
cur_state.type[2:].lower(), cur_state.variable)
# If this command needs to indicate an end of a validation state, do so.
if cur_command.ends_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
for param in cur_command.params:
if param.type == cur_state.type:
break
# First, make sure we're not calling two (or more) "ends" in a row (or before a "begin")
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// Validate that this command is called after the corresponding\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// "begin" commands\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'if (!%s_valid->%s) {\n' % (
cur_state.type[2:].lower(), cur_state.variable)
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'std::string error_msg = "%s is called again without first successfully calling ";\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'error_msg += "'
cur_count = 0
for begin_command in cur_state.begin_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % begin_command
pre_validate_func += '";\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-endstate",\n' % (
instance_info_variable, cur_command.name, cur_state.state)
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' error_msg);\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '}\n'
# End the appropriate state
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// End the %s state\n' % cur_state.state
pre_validate_func += self.writeIndent(2)
pre_validate_func += '%s_valid->%s = false;\n' % (
cur_state.type[2:].lower(), cur_state.variable)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'return XR_SUCCESS;\n'
indent = indent - 1
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '} catch (...) {\n'
pre_validate_func += self.writeIndent(indent + 1)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '}\n'
pre_validate_func += '}\n\n'
return pre_validate_func
    # Generate C++ code to call down to the next layer/loader terminator/runtime
    # self the ValidationSourceOutputGenerator object
    # cur_command the command generated in automatic_source_generator.py to validate
    # has_return Boolean indicating that the command must return a value (usually XrResult)
    # is_create Boolean indicating that the command is a create command
    # is_destroy Boolean indicating that the command is a destroy command
    # is_sempath_query Boolean indicating that this is a semantic path query (treat similar to a create)
    def genNextValidateFunc(self, cur_command, has_return, is_create, is_destroy, is_sempath_query):
        """Generate the C++ GenValidUsageNextXr* pass-through for one command.

        The generated function looks up the dispatch table via the first handle
        parameter, calls the next layer/runtime, and for create/destroy
        commands also inserts or erases the handle-info (and any validation
        state) bookkeeping.  Returns the generated C++ source as a string;
        empty for xrCreateInstance, which is special-cased elsewhere.
        """
        next_validate_func = ''
        # Note: We don't make a "next" call for xrCreateInstance in a layer because we
        # actually have to call xrCreateApiLayerInstance. Also, we have to setup the first
        # entry into the dispatch table so it's a special case all around.
        if 'xrCreateInstance' in cur_command.name:
            return ''
        prototype = cur_command.cdecl
        prototype = prototype.replace(" xr", " GenValidUsageNextXr")
        prototype = prototype.replace("API_ATTR ", "")
        prototype = prototype.replace("XRAPI_CALL ", "")
        prototype = prototype.replace(";", " {")
        next_validate_func += '%s\n' % (prototype)
        if has_return:
            # Declare the result variable; only XrResult gets a defined initial value.
            return_prefix = '    '
            return_prefix += cur_command.return_type.text
            return_prefix += ' result'
            if cur_command.return_type.text == 'XrResult':
                return_prefix += ' = XR_SUCCESS;\n'
            else:
                return_prefix += ';\n'
            next_validate_func += return_prefix
        next_validate_func += '    try {\n'
        # Next, we have to call down to the next implementation of this command in the call chain.
        # Before we can do that, we have to figure out what the dispatch table is
        base_handle_name = cur_command.params[0].type[2:].lower()
        if cur_command.params[0].is_handle:
            handle_tuple = self.getHandle(cur_command.params[0].type)
            first_handle_name = self.getFirstHandleName(cur_command.params[0])
            if handle_tuple.name == 'XrInstance':
                next_validate_func += '        GenValidUsageXrInstanceInfo *gen_instance_info = g_instance_info.get(%s);\n' % first_handle_name
            else:
                next_validate_func += '        GenValidUsageXrHandleInfo *gen_%s_info = ' % base_handle_name
                next_validate_func += 'g_%s_info.get(%s);\n' % (
                    base_handle_name, first_handle_name)
                next_validate_func += '        GenValidUsageXrInstanceInfo *gen_instance_info = gen_%s_info->instance_info;\n' % base_handle_name
        else:
            # A command whose first parameter is not a handle cannot resolve a
            # dispatch table; emit a compile-time error into the generated code.
            next_validate_func += '#error("Bug")\n'
        # Call down, looking for the returned result if required.
        next_validate_func += '        '
        if has_return:
            next_validate_func += 'result = '
        next_validate_func += 'gen_instance_info->dispatch_table->%s(' % cur_command.name[2:]
        count = 0
        for param in cur_command.params:
            if count > 0:
                next_validate_func += ', '
            next_validate_func += param.name
            count = count + 1
        next_validate_func += ');\n'
        # If this is a create command, we have to create an entry in the appropriate
        # unordered_map pointing to the correct dispatch table for the newly created
        # object. Likewise, if it's a delete command, we have to remove the entry
        # for the dispatch table from the unordered_map
        last_name = ''
        last_lower_type = ''
        if cur_command.params[-1].is_handle:
            last_handle_tuple = self.getHandle(cur_command.params[-1].type)
            last_lower_type = last_handle_tuple.name[2:].lower()
            last_name = cur_command.params[-1].name
            if is_create:
                assert(last_handle_tuple.name != 'XrInstance')
                next_validate_func += '        if (XR_SUCCESS == result && nullptr != %s) {\n' % last_name
                next_validate_func += '            std::unique_ptr<GenValidUsageXrHandleInfo> handle_info(new GenValidUsageXrHandleInfo());\n'
                next_validate_func += '            handle_info->instance_info = gen_instance_info;\n'
                next_validate_func += '            handle_info->direct_parent_type = %s;\n' % self.genXrObjectType(
                    cur_command.params[0].type)
                next_validate_func += '            handle_info->direct_parent_handle = CONVERT_HANDLE_TO_GENERIC(%s);\n' % cur_command.params[
                    0].name
                next_validate_func += '            %s.insert(*%s, std::move(handle_info));\n' % (self.makeInfoName(last_handle_tuple), last_name)
                # If this object contains a state that needs tracking, allocate it
                valid_type_list = []
                for cur_state in self.api_states:
                    if last_handle_tuple.name == cur_state.type and cur_state.type not in valid_type_list:
                        valid_type_list.append(cur_state.type)
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '// Check to see if this object that has been created has a validation\n'
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '// state structure that needs to be created as well.\n'
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '%sValidationStates *%s_valid_state = new %sValidationStates;\n' % (
                            cur_state.type, cur_state.type[2:].lower(), cur_state.type)
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '(*%s_valid_state) = {};\n' % cur_state.type[2:].lower()
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += 'g_%s_valid_states[(*%s)] = %s_valid_state;\n' % (
                            cur_state.type[2:].lower(), last_name, cur_state.type[2:].lower())
                next_validate_func += '        }\n'
            elif is_destroy:
                if cur_command.params[-1].type == 'XrSession':
                    next_validate_func += '\n        // Clean up any labels associated with this session\n'
                    next_validate_func += '        CoreValidationDeleteSessionLabels(session);\n\n'
                # Only remove the handle from our map if the runtime returned success
                next_validate_func += '        if (XR_SUCCEEDED(result)) {\n'
                # If this object contains a state that needs tracking, free it
                valid_type_list = []
                for cur_state in self.api_states:
                    if last_handle_tuple.name == cur_state.type and cur_state.type not in valid_type_list:
                        valid_type_list.append(cur_state.type)
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '// Check to see if this object that is about to be destroyed has a\n'
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '// validation state structure that needs to be cleaned up.\n'
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '%sValidationStates *%s_valid_state = g_%s_valid_states[%s];\n' % (
                            cur_state.type, cur_state.type[2:].lower(), cur_state.type[2:].lower(), last_name)
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += 'if (nullptr != %s_valid_state) {\n' % cur_state.type[2:].lower(
                        )
                        next_validate_func += self.writeIndent(4)
                        next_validate_func += 'delete %s_valid_state;\n' % cur_state.type[2:].lower(
                        )
                        next_validate_func += self.writeIndent(4)
                        next_validate_func += 'g_%s_valid_states.erase(%s);\n' % (
                            cur_state.type[2:].lower(), last_name)
                        next_validate_func += self.writeIndent(3)
                        next_validate_func += '}\n'
                next_validate_func += '            g_%s_info.erase(%s);\n' % (last_lower_type, last_name)
                next_validate_func += '        }\n'
        if 'xrDestroyInstance' in cur_command.name:
            next_validate_func += '        GenValidUsageCleanUpMaps(gen_instance_info);\n'
        # Catch any exceptions that may have occurred.  If any occurred between any of the
        # valid mutex lock/unlock statements, perform the unlock now.  Notice that a create can
        # also allocate items, so we want to special case catching the failure of the allocation.
        if is_create or is_sempath_query:
            next_validate_func += '    } catch (std::bad_alloc&) {\n'
            next_validate_func += '        result = XR_ERROR_OUT_OF_MEMORY;\n'
        next_validate_func += '    } catch (...) {\n'
        if has_return:
            next_validate_func += '        result = XR_ERROR_VALIDATION_FAILURE;\n'
        next_validate_func += '    }\n'
        if has_return:
            next_validate_func += '    return result;\n'
        next_validate_func += '}\n\n'
        return next_validate_func
# Generate a top-level automatic C++ validation function which will be used until
# a manual function is defined.
# self the ValidationSourceOutputGenerator object
# cur_command the command generated in automatic_source_generator.py to validate
# has_return Boolean indicating that the command must return a value (usually XrResult)
def genAutoValidateFunc(self, cur_command, has_return):
auto_validate_func = ''
prototype = cur_command.cdecl
prototype = prototype.replace(" xr", " GenValidUsageXr")
prototype = prototype.replace("API_ATTR ", "")
prototype = prototype.replace("XRAPI_CALL ", "")
prototype = prototype.replace(";", " {")
auto_validate_func += '%s\n' % (prototype)
auto_validate_func += self.writeIndent(1)
if has_return:
auto_validate_func += '%s test_result = ' % cur_command.return_type.text
# Define the pre-validate call
auto_validate_func += '%s(' % cur_command.name.replace("xr",
"GenValidUsageInputsXr")
count = 0
for param in cur_command.params:
if count > 0:
auto_validate_func += ', '
count = count + 1
auto_validate_func += param.name
auto_validate_func += ');\n'
if has_return and cur_command.return_type.text == 'XrResult':
auto_validate_func += self.writeIndent(1)
auto_validate_func += 'if (XR_SUCCESS != test_result) {\n'
auto_validate_func += self.writeIndent(2)
auto_validate_func += 'return test_result;\n'
auto_validate_func += self.writeIndent(1)
auto_validate_func += '}\n'
# Make the calldown to the next layer
auto_validate_func += self.writeIndent(1)
if has_return:
auto_validate_func += 'return '
auto_validate_func += '%s(' % cur_command.name.replace("xr",
"GenValidUsageNextXr")
count = 0
for param in cur_command.params:
if count > 0:
auto_validate_func += ', '
count = count + 1
auto_validate_func += param.name
auto_validate_func += ');\n'
auto_validate_func += '}\n\n'
return auto_validate_func
# Implementation for generated validation commands
# self the ValidationSourceOutputGenerator object
def outputValidationSourceFuncs(self):
commands = []
validation_source_funcs = ''
cur_extension_name = ''
# First, output the mapping and mutex items
validation_source_funcs += '// Unordered Map associating pointer to a vector of session label information to a session\'s handle\n'
validation_source_funcs += 'std::unordered_map<XrSession, std::vector<GenValidUsageXrInternalSessionLabel*>*> g_xr_session_labels;\n\n'
validation_source_funcs += self.outputInfoMapDeclarations(extern=False)
validation_source_funcs += '\n'
validation_source_funcs += self.outputValidationInternalProtos()
validation_source_funcs += '// Function used to clean up any residual map values that point to an instance prior to that\n'
validation_source_funcs += '// instance being deleted.\n'
validation_source_funcs += 'void GenValidUsageCleanUpMaps(GenValidUsageXrInstanceInfo *instance_info) {\n'
for handle in self.api_handles:
base_handle_name = handle.name[2:].lower()
if handle.protect_value:
validation_source_funcs += '#if %s\n' % handle.protect_string
if handle.name == 'XrInstance':
validation_source_funcs += ' EraseAllInstanceTableMapElements(instance_info);\n'
else:
validation_source_funcs += ' g_%s_info.removeHandlesForInstance(instance_info);\n' % base_handle_name
if handle.protect_value:
validation_source_funcs += '#endif // %s\n' % handle.protect_string
validation_source_funcs += '}\n'
validation_source_funcs += '\n'
validation_source_funcs += '// Function to convert XrObjectType to string\n'
validation_source_funcs += 'std::string GenValidUsageXrObjectTypeToString(const XrObjectType& type) {\n'
validation_source_funcs += ' std::string object_string;\n'
count = 0
for object_type in self.api_object_types:
object_string = object_type.name.replace("XR_OBJECT_TYPE_", "")
object_string = object_string.replace("_", "")
if object_string == "UNKNOWN":
if count == 0:
validation_source_funcs += ' if '
else:
validation_source_funcs += ' } else if '
validation_source_funcs += '(type == XR_OBJECT_TYPE_UNKNOWN) {\n'
validation_source_funcs += ' object_string = "Unknown XR Object";\n'
else:
for handle in self.api_handles:
handle_name = handle.name[2:].upper()
if handle_name != object_string:
continue
if object_type.protect_value:
validation_source_funcs += '#if %s\n' % object_type.protect_string
if count == 0:
validation_source_funcs += ' if '
else:
validation_source_funcs += ' } else if '
validation_source_funcs += '(type == %s) {\n' % object_type.name
validation_source_funcs += ' object_string = "%s";\n' % handle.name
if object_type.protect_value:
validation_source_funcs += '#endif // %s\n' % object_type.protect_string
count = count + 1
validation_source_funcs += ' }\n'
validation_source_funcs += ' return object_string;\n'
validation_source_funcs += '}\n\n'
validation_source_funcs += self.outputValidationStateCheckStructs()
validation_source_funcs += self.outputValidationSourceNextChainProtos()
validation_source_funcs += self.outputValidationSourceFlagBitValues()
validation_source_funcs += self.outputValidationSourceEnumValues()
validation_source_funcs += self.writeVerifyExtensions()
validation_source_funcs += self.writeValidateHandleChecks()
validation_source_funcs += self.writeValidateHandleParent()
validation_source_funcs += self.writeValidateStructFuncs()
validation_source_funcs += self.outputValidationSourceNextChainFunc()
for x in range(0, 2):
if x == 0:
commands = self.core_commands
else:
commands = self.ext_commands
for cur_cmd in commands:
if cur_cmd.ext_name != cur_extension_name:
if 'XR_VERSION_' in cur_cmd.ext_name:
validation_source_funcs += '\n// ---- Core %s commands\n' % cur_cmd.ext_name[11:].replace(
"_", ".")
else:
validation_source_funcs += '\n// ---- %s extension commands\n' % cur_cmd.ext_name
cur_extension_name = cur_cmd.ext_name
if cur_cmd.name in VALID_USAGE_DONT_GEN:
continue
# We fill in the GetInstanceProcAddr manually at the end
if cur_cmd.name == 'xrGetInstanceProcAddr':
continue
if cur_cmd.protect_value:
validation_source_funcs += '#if %s\n' % cur_cmd.protect_string
validation_source_funcs += '\n'
is_create = False
is_destroy = False
has_return = False
is_sempath_query = False
if ('xrCreate' in cur_cmd.name or 'xrConnect' in cur_cmd.name) and cur_cmd.params[-1].is_handle:
is_create = True
has_return = True
elif ('xrDestroy' in cur_cmd.name or 'xrDisconnect' in cur_cmd.name) and cur_cmd.params[-1].is_handle:
is_destroy = True
has_return = True
elif (cur_cmd.return_type != None):
has_return = True
validation_source_funcs += self.genValidateInputsFunc(cur_cmd)
validation_source_funcs += self.genNextValidateFunc(
cur_cmd, has_return, is_create, is_destroy, is_sempath_query)
if not cur_cmd.name in VALID_USAGE_MANUALLY_DEFINED:
validation_source_funcs += self.genAutoValidateFunc(
cur_cmd, has_return)
if cur_cmd.protect_value:
validation_source_funcs += '#endif // %s\n' % cur_cmd.protect_string
validation_source_funcs += '\n'
validation_source_funcs += '\n// API Layer\'s xrGetInstanceProcAddr\n'
validation_source_funcs += 'XrResult GenValidUsageXrGetInstanceProcAddr(\n'
validation_source_funcs += ' XrInstance instance,\n'
validation_source_funcs += ' const char* name,\n'
validation_source_funcs += ' PFN_xrVoidFunction* function) {\n'
validation_source_funcs += ' try {\n'
validation_source_funcs += ' std::string func_name = name;\n'
validation_source_funcs += ' std::vector<GenValidUsageXrObjectInfo> objects;\n'
validation_source_funcs += ' if (g_instance_info.verifyHandle(&instance) == VALIDATE_XR_HANDLE_INVALID) {\n'
validation_source_funcs += ' // Make sure the instance is valid if it is not XR_NULL_HANDLE\n'
validation_source_funcs += ' std::vector<GenValidUsageXrObjectInfo> objects;\n'
validation_source_funcs += ' objects.resize(1);\n'
validation_source_funcs += ' objects[0].handle = CONVERT_HANDLE_TO_GENERIC(instance);\n'
validation_source_funcs += ' objects[0].type = XR_OBJECT_TYPE_INSTANCE;\n'
validation_source_funcs += ' CoreValidLogMessage(nullptr, "VUID-xrGetInstanceProcAddr-instance-parameter",\n'
validation_source_funcs += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "xrGetInstanceProcAddr", objects,\n'
validation_source_funcs += ' "Invalid instance handle provided.");\n'
validation_source_funcs += ' }\n'
validation_source_funcs += ' // NOTE: Can\'t validate "VUID-xrGetInstanceProcAddr-name-parameter" null-termination\n'
validation_source_funcs += ' // If we setup the function, just return\n'
validation_source_funcs += ' if (function == nullptr) {\n'
validation_source_funcs += ' CoreValidLogMessage(nullptr, "VUID-xrGetInstanceProcAddr-function-parameter",\n'
validation_source_funcs += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "xrGetInstanceProcAddr", objects,\n'
validation_source_funcs += ' "function is NULL");\n'
validation_source_funcs += ' return XR_ERROR_VALIDATION_FAILURE;\n'
validation_source_funcs += ' }\n'
count = 0
for x in range(0, 2):
if x == 0:
commands = self.core_commands
else:
commands = self.ext_commands
for cur_cmd in commands:
if cur_cmd.ext_name != cur_extension_name:
if 'XR_VERSION_' in cur_cmd.ext_name:
validation_source_funcs += '\n // ---- Core %s commands\n' % cur_cmd.ext_name[11:].replace(
"_", ".")
else:
validation_source_funcs += '\n // ---- %s extension commands\n' % cur_cmd.ext_name
cur_extension_name = cur_cmd.ext_name
if cur_cmd.name in VALID_USAGE_DONT_GEN:
continue
has_return = False
if (cur_cmd.return_type != None):
has_return = True
if cur_cmd.name in VALID_USAGE_MANUALLY_DEFINED:
# Remove 'xr' from proto name and use manual name
layer_command_name = cur_cmd.name.replace(
"xr", "CoreValidationXr")
else:
# Remove 'xr' from proto name and use generated name
layer_command_name = cur_cmd.name.replace(
"xr", "GenValidUsageXr")
if cur_cmd.protect_value:
validation_source_funcs += '#if %s\n' % cur_cmd.protect_string
if count == 0:
validation_source_funcs += ' if (func_name == "%s") {\n' % cur_cmd.name
else:
validation_source_funcs += ' } else if (func_name == "%s") {\n' % cur_cmd.name
count = count + 1
validation_source_funcs += ' *function = reinterpret_cast<PFN_xrVoidFunction>(%s);\n' % layer_command_name
if cur_cmd.protect_value:
validation_source_funcs += '#endif // %s\n' % cur_cmd.protect_string
validation_source_funcs += ' }\n'
validation_source_funcs += ' // If we setup the function, just return\n'
validation_source_funcs += ' if (*function != nullptr) {\n'
validation_source_funcs += ' return XR_SUCCESS;\n'
validation_source_funcs += ' }\n'
validation_source_funcs += ' // We have not found it, so pass it down to the next layer/runtime\n'
validation_source_funcs += ' GenValidUsageXrInstanceInfo* instance_valid_usage_info = g_instance_info.get(instance);\n'
validation_source_funcs += ' if (nullptr == instance_valid_usage_info) {\n'
validation_source_funcs += ' return XR_ERROR_HANDLE_INVALID;\n'
validation_source_funcs += ' }\n'
validation_source_funcs += ' return instance_valid_usage_info->dispatch_table->GetInstanceProcAddr(instance, name, function);\n'
validation_source_funcs += ' } catch (...) {\n'
validation_source_funcs += ' return XR_ERROR_VALIDATION_FAILURE;\n'
validation_source_funcs += ' }\n'
validation_source_funcs += '}\n'
return validation_source_funcs
validation: simplify genValidateInputsFunc
#!/usr/bin/python3 -i
#
# Copyright (c) 2017 The Khronos Group Inc.
# Copyright (c) 2017 Valve Corporation
# Copyright (c) 2017 LunarG, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mark Young <marky@lunarg.com>
import re
import sys
from automatic_source_generator import (AutomaticSourceGeneratorOptions,
AutomaticSourceOutputGenerator,
regSortFeatures, write)
# The following commands should not be generated for the layer.
# outputValidationSourceFuncs skips them entirely (no validation function
# and no entry in the generated xrGetInstanceProcAddr dispatch chain).
VALID_USAGE_DONT_GEN = [
    'xrEnumerateApiLayerProperties',
    'xrEnumerateInstanceExtensionProperties',
]
# The following commands have a manually defined component to them.
# They still get generated input/next validation helpers, but no automatic
# top-level wrapper, and the generated xrGetInstanceProcAddr dispatches them
# to CoreValidationXr* instead of GenValidUsageXr*.
VALID_USAGE_MANUALLY_DEFINED = [
    'xrCreateInstance',
    'xrDestroyInstance',
    'xrCreateSession',
    # We manually implement some of the XR_EXT_debug_utils entry-points so that we
    # can return validation messages to known debug utils messengers
    'xrSetDebugUtilsObjectNameEXT',
    'xrCreateDebugUtilsMessengerEXT',
    'xrDestroyDebugUtilsMessengerEXT',
    'xrSessionBeginDebugUtilsLabelRegionEXT',
    'xrSessionEndDebugUtilsLabelRegionEXT',
    'xrSessionInsertDebugUtilsLabelEXT',
]
# ValidationSourceGeneratorOptions - subclass of AutomaticSourceGeneratorOptions.
class ValidationSourceGeneratorOptions(AutomaticSourceGeneratorOptions):
    """Option holder for the validation-layer source generator.

    Accepts the full generator option set for signature compatibility, but
    forwards only the registry-selection options to the base class; the
    remaining keyword arguments are accepted and ignored.
    """

    def __init__(self,
                 filename=None,
                 directory='.',
                 apiname=None,
                 profile=None,
                 versions='.*',
                 emitversions='.*',
                 defaultExtensions=None,
                 addExtensions=None,
                 removeExtensions=None,
                 emitExtensions=None,
                 sortProcedure=regSortFeatures,
                 prefixText="",
                 genFuncPointers=True,
                 protectFile=True,
                 protectFeature=True,
                 protectProto=None,
                 protectProtoStr=None,
                 apicall='',
                 apientry='',
                 apientryp='',
                 indentFuncProto=True,
                 indentFuncPointer=False,
                 alignFuncParam=0,
                 genEnumBeginEndRange=False):
        super().__init__(filename, directory, apiname, profile,
                         versions, emitversions, defaultExtensions,
                         addExtensions, removeExtensions,
                         emitExtensions, sortProcedure)
# ValidationSourceOutputGenerator - subclass of AutomaticSourceOutputGenerator.
class ValidationSourceOutputGenerator(AutomaticSourceOutputGenerator):
"""Generate core validation layer source using XML element attributes from registry"""
def __init__(self,
errFile=sys.stderr,
warnFile=sys.stderr,
diagFile=sys.stdout):
AutomaticSourceOutputGenerator.__init__(
self, errFile, warnFile, diagFile)
# Override the base class header warning so the comment indicates this file.
# self the ValidationSourceOutputGenerator object
def outputGeneratedHeaderWarning(self):
generated_warning = '// *********** THIS FILE IS GENERATED - DO NOT EDIT ***********\n'
generated_warning += '// See validation_layer_generator.py for modifications\n'
generated_warning += '// ************************************************************\n'
write(generated_warning, file=self.outFile)
# Call the base class to properly begin the file, and then add
# the file-specific header information.
# self the ValidationSourceOutputGenerator object
# gen_opts the ValidationSourceGeneratorOptions object
def beginFile(self, genOpts):
AutomaticSourceOutputGenerator.beginFile(self, genOpts)
preamble = ''
if self.genOpts.filename == 'xr_generated_core_validation.hpp':
preamble += '#pragma once\n'
preamble += '#include <vector>\n'
preamble += '#include <string>\n'
preamble += '#include <unordered_map>\n'
preamble += '#include <thread>\n'
preamble += '#include <mutex>\n\n'
preamble += '#include "api_layer_platform_defines.h"\n'
preamble += '#include <openxr/openxr.h>\n'
preamble += '#include <openxr/openxr_platform.h>\n\n'
preamble += '#include "xr_generated_dispatch_table.h"\n'
preamble += '#include "validation_utils.h"\n'
elif self.genOpts.filename == 'xr_generated_core_validation.cpp':
preamble += '#include <sstream>\n'
preamble += '#include <cstring>\n'
preamble += '#include <algorithm>\n\n'
preamble += '#include "xr_generated_core_validation.hpp"\n'
write(preamble, file=self.outFile)
# Write out all the information for the appropriate file,
# and then call down to the base class to wrap everything up.
# self the ValidationSourceOutputGenerator object
def endFile(self):
file_data = ''
if self.genOpts.filename == 'xr_generated_core_validation.hpp':
file_data += self.outputValidationHeaderInfo()
elif self.genOpts.filename == 'xr_generated_core_validation.cpp':
file_data += self.outputCommonTypesForValidation()
file_data += self.outputValidationSourceFuncs()
write(file_data, file=self.outFile)
# Finish processing in superclass
AutomaticSourceOutputGenerator.endFile(self)
def makeInfoName(self, handle_type=None, handle_type_name=None):
if not handle_type_name:
handle_type_name = handle_type.name
base_handle_name = handle_type_name[2:].lower()
return 'g_%s_info' % base_handle_name
def outputInfoMapDeclarations(self, extern):
lines = []
extern_keyword = 'extern ' if extern else ''
for handle in self.api_handles:
handle_name = handle.name
if handle.protect_value:
lines.append('#if %s' % handle.protect_string)
if handle.name == 'XrInstance':
info_type = "InstanceHandleInfo"
else:
info_type = 'HandleInfo<%s>' % handle_name
lines.append('%s%s %s;' % (extern_keyword,
info_type, self.makeInfoName(handle)))
if handle.protect_value:
lines.append('#endif // %s" % handle.protect_string')
return '\n'.join(lines)
# Write out common internal types for validation
# self the ValidationSourceOutputGenerator object
def outputCommonTypesForValidation(self):
common_validation_types = ''
common_validation_types += '// Structure used for indicating status of \'flags\' test.\n'
common_validation_types += 'enum ValidateXrFlagsResult {\n'
common_validation_types += ' VALIDATE_XR_FLAGS_ZERO,\n'
common_validation_types += ' VALIDATE_XR_FLAGS_INVALID,\n'
common_validation_types += ' VALIDATE_XR_FLAGS_SUCCESS,\n'
common_validation_types += '};\n\n'
return common_validation_types
# Generate C++ structures and maps used for validating the states identified
# in the specification.
# self the ValidationSourceOutputGenerator object
def outputValidationStateCheckStructs(self):
validation_state_checks = '// Structure used for state validation.\n'
active_structures = dict()
for cur_state in self.api_states:
type_name = '%s' % cur_state.type
cur_list = []
if active_structures.get(type_name) is not None:
cur_list = active_structures.get(type_name)
cur_list.append(cur_state.variable)
active_structures[type_name] = cur_list
for type_name, variable_list in active_structures.items():
validation_state_checks += 'struct %sValidationStates {\n' % type_name
for variable in variable_list:
validation_state_checks += ' bool %s;\n' % variable
validation_state_checks += '};\n'
validation_state_checks += 'std::unordered_map<%s, %sValidationStates*> g_%s_valid_states;\n' % (
type_name, type_name, type_name[2:].lower())
validation_state_checks += '\n'
return validation_state_checks
# Generate C++ structure and utility function prototypes for validating
# the 'next' chains in structures.
# self the ValidationSourceOutputGenerator object
def outputValidationSourceNextChainProtos(self):
next_chain_info = ''
next_chain_info += '// Result return value for next chain validation\n'
next_chain_info += 'enum NextChainResult {\n'
next_chain_info += ' NEXT_CHAIN_RESULT_VALID = 0,\n'
next_chain_info += ' NEXT_CHAIN_RESULT_ERROR = -1,\n'
next_chain_info += ' NEXT_CHAIN_RESULT_DUPLICATE_STRUCT = -2,\n'
next_chain_info += '};\n\n'
next_chain_info += '// Prototype for validateNextChain command (it uses the validate structure commands so add it after\n'
next_chain_info += 'NextChainResult ValidateNextChain(GenValidUsageXrInstanceInfo *instance_info,\n'
next_chain_info += ' const std::string &command_name,\n'
next_chain_info += ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
next_chain_info += ' const void* next,\n'
next_chain_info += ' std::vector<XrStructureType>& valid_ext_structs,\n'
next_chain_info += ' std::vector<XrStructureType>& encountered_structs,\n'
next_chain_info += ' std::vector<XrStructureType>& duplicate_structs);\n\n'
return next_chain_info
# Generate C++ enum and utility function prototypes for validating
# the flags in structures.
# self the ValidationSourceOutputGenerator object
    def outputValidationSourceFlagBitValues(self):
        """Generate one C++ ValidateXr<FlagName> function per flag type.

        Each generated function returns VALIDATE_XR_FLAGS_ZERO for 0,
        VALIDATE_XR_FLAGS_INVALID when unknown bits remain, and
        VALIDATE_XR_FLAGS_SUCCESS otherwise.
        """
        flag_value_validate = ''
        for flag_tuple in self.api_flags:
            if flag_tuple.protect_value:
                flag_value_validate += '#if %s\n' % flag_tuple.protect_string
            flag_value_validate += '// Function to validate %s flags\n' % flag_tuple.name
            flag_value_validate += 'ValidateXrFlagsResult ValidateXr%s(const %s value) {\n' % (
                flag_tuple.name[2:], flag_tuple.type)
            # We need to return a value indicating that the value is zero because in some
            # circumstances, 0 is ok.  However, in other cases, 0 is disallowed.  So, leave
            # it up to the calling function to decide what is correct.
            flag_value_validate += '    if (0 == value) {\n'
            flag_value_validate += '        return VALIDATE_XR_FLAGS_ZERO;\n'
            flag_value_validate += '    }\n'
            # If the flag has no values defined for this flag, then anything other than
            # zero generates an error.
            if flag_tuple.valid_flags is None:
                flag_value_validate += '    return VALIDATE_XR_FLAGS_INVALID;\n'
            else:
                # This flag has values set. So, check (and remove) each valid value.  Once that's done
                # anything left over would be invalid.
                flag_value_validate += '    %s int_value = value;\n' % flag_tuple.type
                for mask_tuple in self.api_bitmasks:
                    if mask_tuple.name == flag_tuple.valid_flags:
                        for cur_value in mask_tuple.values:
                            # Only guard an individual bit when its protect macro
                            # differs from the enclosing flag type's guard.
                            if cur_value.protect_value and flag_tuple.protect_value != cur_value.protect_value:
                                flag_value_validate += '#if %s\n' % cur_value.protect_string
                            flag_value_validate += '    if ((int_value & %s) != 0) {\n' % cur_value.name
                            flag_value_validate += '        // Clear the value %s since it is valid\n' % cur_value.name
                            flag_value_validate += '        int_value &= ~%s;\n' % cur_value.name
                            flag_value_validate += '    }\n'
                            if cur_value.protect_value and flag_tuple.protect_value != cur_value.protect_value:
                                flag_value_validate += '#endif // %s\n' % cur_value.protect_string
                        # Only one bitmask can match this flag type.
                        break
                flag_value_validate += '    if (int_value != 0) {\n'
                flag_value_validate += '        // Something is left, it must be invalid\n'
                flag_value_validate += '        return VALIDATE_XR_FLAGS_INVALID;\n'
                flag_value_validate += '    }\n'
                flag_value_validate += '    return VALIDATE_XR_FLAGS_SUCCESS;\n'
            flag_value_validate += '}\n\n'
            if flag_tuple.protect_value:
                flag_value_validate += '#endif // %s\n' % flag_tuple.protect_string
        return flag_value_validate
# Generate C++ functions for validating enums.
# self the ValidationSourceOutputGenerator object
    def outputValidationSourceEnumValues(self):
        """Generate a C++ ValidateXrEnum overload per enum type.

        Each generated function switches over the enum's defined values,
        returning true for valid values and false otherwise.  Enums (or
        individual values) contributed by a non-core extension additionally
        get a runtime check that the extension is enabled, logging a VUID
        message and returning false when it is not.
        `indent` tracks the current generated-C++ nesting level; writeIndent
        presumably converts it to leading spaces (defined in the base
        generator — confirm there).
        """
        enum_value_validate = ''
        for enum_tuple in self.api_enums:
            if enum_tuple.protect_value:
                enum_value_validate += '#if %s\n' % enum_tuple.protect_string
            enum_value_validate += '// Function to validate %s enum\n' % enum_tuple.name
            enum_value_validate += 'bool ValidateXrEnum(GenValidUsageXrInstanceInfo *instance_info,\n'
            enum_value_validate += '                    const std::string &command_name,\n'
            enum_value_validate += '                    const std::string &validation_name,\n'
            enum_value_validate += '                    const std::string &item_name,\n'
            enum_value_validate += '                    std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
            enum_value_validate += '                    const %s value) {\n' % enum_tuple.name
            indent = 1
            checked_extension = ''
            if enum_tuple.ext_name and not self.isCoreExtensionName(enum_tuple.ext_name):
                # The whole enum type comes from an extension: emit a check that
                # the extension is enabled before looking at the value at all.
                checked_extension = enum_tuple.ext_name
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += '// Enum requires extension %s, so check that it is enabled\n' % enum_tuple.ext_name
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'if (nullptr != instance_info && !ExtensionEnabled(instance_info->enabled_extensions, "%s")) {\n' % enum_tuple.ext_name
                indent += 1
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'std::string vuid = "VUID-";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += validation_name;\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += "-";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += item_name;\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'vuid += "-parameter";\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'std::string error_str = "%s requires extension ";\n' % enum_tuple.name
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'error_str += " \\"%s\\" to be enabled, but it is not enabled";\n' % enum_tuple.ext_name
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'CoreValidLogMessage(instance_info, vuid,\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += '                    objects_info, error_str);\n'
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'return false;\n'
                indent -= 1
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += '}\n'
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'switch (value) {\n'
            indent += 1
            for cur_value in enum_tuple.values:
                # Guard individual values only when their protect macro differs
                # from the enclosing enum's guard.
                if cur_value.protect_value and enum_tuple.protect_value != cur_value.protect_value:
                    enum_value_validate += '#if %s\n' % cur_value.protect_string
                enum_value_validate += self.writeIndent(indent)
                enum_value_validate += 'case %s:\n' % cur_value.name
                if cur_value.protect_value and enum_tuple.protect_value != cur_value.protect_value:
                    enum_value_validate += '#endif // %s\n' % cur_value.protect_string
                if cur_value.ext_name and cur_value.ext_name != checked_extension and not self.isCoreExtensionName(cur_value.ext_name):
                    # Value added by a different (non-core) extension than the
                    # one already checked above: verify it is enabled.
                    indent += 1
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += '// Enum value %s requires extension %s, so check that it is enabled\n' % (
                        cur_value.name, cur_value.ext_name)
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'if (nullptr != instance_info && !ExtensionEnabled(instance_info->enabled_extensions, "%s")) {\n' % cur_value.ext_name
                    indent += 1
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'std::string vuid = "VUID-";\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'vuid += validation_name;\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'vuid += "-";\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'vuid += item_name;\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'vuid += "-parameter";\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'std::string error_str = "%s value \\"%s\\"";\n' % (
                        enum_tuple.name, cur_value.name)
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'error_str += " being used, which requires extension ";\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'error_str += " \\"%s\\" to be enabled, but it is not enabled";\n' % cur_value.ext_name
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'CoreValidLogMessage(instance_info, vuid,\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += '                    objects_info, error_str);\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'return false;\n'
                    indent -= 1
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += '}\n'
                    enum_value_validate += self.writeIndent(indent)
                    enum_value_validate += 'return true;\n'
                    indent -= 1
                elif cur_value.name == 'XR_TYPE_UNKNOWN':
                    enum_value_validate += self.writeIndent(indent + 1)
                    enum_value_validate += 'return false; // Invalid XrStructureType \n'
                else:
                    enum_value_validate += self.writeIndent(indent + 1)
                    enum_value_validate += 'return true;\n'
            # Close the value list with a default case that rejects anything
            # not covered above.
            indent -= 1
            enum_value_validate += self.writeIndent(indent)
            enum_value_validate += 'default:\n'
            enum_value_validate += self.writeIndent(indent + 1)
            enum_value_validate += 'return false;\n'
            indent -= 1
            enum_value_validate += '}\n'
            enum_value_validate += '}\n\n'
            if enum_tuple.protect_value:
                enum_value_validate += '#endif // %s\n' % enum_tuple.protect_string
        return enum_value_validate
# Generate prototypes for functions used internal to the source file so other functions can use them
# self the ValidationSourceOutputGenerator object
def outputValidationInternalProtos(self):
validation_internal_protos = ''
for handle in self.api_handles:
if handle.protect_value:
validation_internal_protos += '#if %s\n' % handle.protect_string
validation_internal_protos += 'ValidateXrHandleResult Verify%sHandle(const %s* handle_to_check);\n' % (
handle.name, handle.name)
if handle.protect_value:
validation_internal_protos += '#endif // %s\n' % handle.protect_string
validation_internal_protos += '\n// Write out prototypes for handle parent verification functions\n'
validation_internal_protos += 'bool VerifyXrParent(XrObjectType handle1_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle1,\n'
validation_internal_protos += ' XrObjectType handle2_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle2,\n'
validation_internal_protos += ' bool check_this);\n'
validation_internal_protos += '\n// Function to check if an extension has been enabled\n'
validation_internal_protos += 'bool ExtensionEnabled(std::vector<std::string> &extensions, const char* const check_extension_name);\n'
validation_internal_protos += '\n// Functions to validate structures\n'
for xr_struct in self.api_structures:
if xr_struct.protect_value:
validation_internal_protos += '#if %s\n' % xr_struct.protect_string
validation_internal_protos += 'XrResult ValidateXrStruct(GenValidUsageXrInstanceInfo *instance_info, const std::string &command_name,\n'
validation_internal_protos += ' std::vector<GenValidUsageXrObjectInfo>& objects_info, bool check_members,\n'
validation_internal_protos += ' const %s* value);\n' % xr_struct.name
if xr_struct.protect_value:
validation_internal_protos += '#endif // %s\n' % xr_struct.protect_string
return validation_internal_protos
# Generate C++ functions for validating 'next' chains in a structure.
# self the ValidationSourceOutputGenerator object
    def outputValidationSourceNextChainFunc(self):
        """Generate the C++ ValidateNextChain() helper function.

        The emitted function walks one link of a structure's 'next' pointer
        chain: it checks the chained structure's XrStructureType against the
        caller-supplied list of valid extension struct types, records any
        duplicate types encountered, validates the chained structure itself,
        and then recurses down the remainder of the chain.  It returns
        NEXT_CHAIN_RESULT_VALID, NEXT_CHAIN_RESULT_DUPLICATE_STRUCT, or
        NEXT_CHAIN_RESULT_ERROR.

        Returns the generated C++ source as a single string.
        """
        next_chain_info = ''
        next_chain_info += 'NextChainResult ValidateNextChain(GenValidUsageXrInstanceInfo *instance_info,\n'
        next_chain_info += '                                  const std::string &command_name,\n'
        next_chain_info += '                                  std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
        next_chain_info += '                                  const void* next,\n'
        next_chain_info += '                                  std::vector<XrStructureType>& valid_ext_structs,\n'
        next_chain_info += '                                  std::vector<XrStructureType>& encountered_structs,\n'
        next_chain_info += '                                  std::vector<XrStructureType>& duplicate_structs) {\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'NextChainResult return_result = NEXT_CHAIN_RESULT_VALID;\n'
        # A NULL 'next' pointer is always acceptable and ends the recursion.
        next_chain_info += self.writeIndent(1)
        next_chain_info += '// NULL is valid\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (nullptr == next) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return return_result;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '// Non-NULL is not valid if there is no valid extension structs\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (nullptr != next && 0 == valid_ext_structs.size()) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        # Every chainable struct begins with an XrBaseInStructure-compatible
        # header, so the type can be read before knowing the concrete struct.
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'const XrBaseInStructure* next_header = reinterpret_cast<const XrBaseInStructure*>(next);\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'auto valid_ext = std::find(valid_ext_structs.begin(), valid_ext_structs.end(), next_header->type);\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (valid_ext == valid_ext_structs.end()) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += '// Not a valid extension structure type for this next chain.\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '} else {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += '// Check to see if we\'ve already encountered this structure.\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'auto already_encountered_ext = std::find(encountered_structs.begin(), encountered_structs.end(), next_header->type);\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'if (already_encountered_ext != encountered_structs.end()) {\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += '// Make sure we only put in unique types into our duplicate list.\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'auto already_duplicate = std::find(duplicate_structs.begin(), duplicate_structs.end(), next_header->type);\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'if (already_duplicate == duplicate_structs.end()) {\n'
        next_chain_info += self.writeIndent(4)
        next_chain_info += 'duplicate_structs.push_back(next_header->type);\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'return_result = NEXT_CHAIN_RESULT_DUPLICATE_STRUCT;\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += '}\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        # Validate the rest of this struct
        # Emit a switch over XrStructureType; each known chainable struct is
        # dispatched to its generated ValidateXrStruct overload.
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'switch (next_header->type) {\n'
        for enum_tuple in self.api_enums:
            if enum_tuple.name == 'XrStructureType':
                if enum_tuple.protect_value:
                    next_chain_info += '#if %s\n' % enum_tuple.protect_string
                for cur_value in enum_tuple.values:
                    # Only enum values that map to a structure name get a case.
                    struct_define_name = self.genXrStructureName(
                        cur_value.name)
                    if len(struct_define_name) > 0:
                        struct_tuple = self.getStruct(struct_define_name)
                        if struct_tuple.protect_value:
                            next_chain_info += '#if %s\n' % struct_tuple.protect_string
                        next_chain_info += self.writeIndent(2)
                        next_chain_info += 'case %s:\n' % cur_value.name
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += 'if (XR_SUCCESS != ValidateXrStruct(instance_info, command_name, objects_info, false,\n'
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += '                                   reinterpret_cast<const %s*>(next))) {\n' % struct_define_name
                        next_chain_info += self.writeIndent(4)
                        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += '}\n'
                        next_chain_info += self.writeIndent(3)
                        next_chain_info += 'break;\n'
                        if struct_tuple.protect_value:
                            next_chain_info += '#endif // %s\n' % struct_tuple.protect_string
                if enum_tuple.protect_value:
                    # NOTE(review): no space after '//' here, unlike the other
                    # '#endif // %s' emissions — cosmetic inconsistency in the
                    # generated file; confirm before normalizing.
                    next_chain_info += '#endif //%s\n' % enum_tuple.protect_string
                break
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'default:\n'
        next_chain_info += self.writeIndent(3)
        next_chain_info += 'return NEXT_CHAIN_RESULT_ERROR;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        # Validate any chained structs
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'NextChainResult next_result = ValidateNextChain(instance_info, command_name,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                objects_info, next_header->next,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                valid_ext_structs,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                encountered_structs,\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '                                                duplicate_structs);\n'
        # Preserve a duplicate-struct result found at this level unless the
        # recursion reported something itself.
        next_chain_info += self.writeIndent(1)
        next_chain_info += 'if (NEXT_CHAIN_RESULT_VALID == next_result && NEXT_CHAIN_RESULT_VALID != return_result) {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return return_result;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '} else {\n'
        next_chain_info += self.writeIndent(2)
        next_chain_info += 'return next_result;\n'
        next_chain_info += self.writeIndent(1)
        next_chain_info += '}\n'
        next_chain_info += '}\n\n'
        return next_chain_info
# Generate C++ header information containing functionality used in both
# the generated and manual code.
# - Structures used to store validation information on a per-handle basis.
# - Unordered_map and mutexes used for storing the structure information on a per handle basis.
# self the ValidationSourceOutputGenerator object
def outputValidationHeaderInfo(self):
commands = []
validation_header_info = ''
cur_extension_name = ''
validation_header_info += '// Unordered Map associating pointer to a vector of session label information to a session\'s handle\n'
validation_header_info += 'extern std::unordered_map<XrSession, std::vector<GenValidUsageXrInternalSessionLabel*>*> g_xr_session_labels;\n\n'
for x in range(0, 2):
if x == 0:
commands = self.core_commands
else:
commands = self.ext_commands
for cur_cmd in commands:
if cur_cmd.ext_name != cur_extension_name:
if 'XR_VERSION_' in cur_cmd.ext_name:
validation_header_info += '\n// ---- Core %s commands\n' % cur_cmd.ext_name[11:].replace(
"_", ".")
else:
validation_header_info += '\n// ---- %s extension commands\n' % cur_cmd.ext_name
cur_extension_name = cur_cmd.ext_name
prototype = cur_cmd.cdecl.replace("API_ATTR ", "")
prototype = prototype.replace("XRAPI_CALL ", "")
# We need to always export xrGetInstanceProcAddr, even though we automatically generate it.
# Also, we really only need the core function, not the others.
if 'xrGetInstanceProcAddr' in cur_cmd.name:
validation_header_info += '%s\n' % prototype.replace(
" xr", " GenValidUsageXr")
continue
elif cur_cmd.name in VALID_USAGE_DONT_GEN or not cur_cmd.name in VALID_USAGE_MANUALLY_DEFINED:
continue
if cur_cmd.protect_value:
validation_header_info += '#if %s\n' % cur_cmd.protect_string
# Core call, for us to make from here into the manually implemented code
validation_header_info += '%s\n' % prototype.replace(
" xr", " CoreValidationXr")
# Validate Inputs and Next calls for the validation to make
validation_header_info += 'XrResult %s(' % cur_cmd.name.replace(
"xr", "GenValidUsageInputsXr")
count = 0
for param in cur_cmd.params:
if count > 0:
validation_header_info += ', '
count = count + 1
validation_header_info += param.cdecl.strip()
validation_header_info += ');\n'
validation_header_info += '%s\n' % prototype.replace(
" xr", " GenValidUsageNextXr")
if cur_cmd.protect_value:
validation_header_info += '#endif // %s\n' % cur_cmd.protect_string
validation_header_info += '\n// Current API version of the Core Validation API Layer\n#define XR_CORE_VALIDATION_API_VERSION '
validation_header_info += self.api_version_define
validation_header_info += '\n'
validation_header_info += '\n// Externs for Core Validation\n'
validation_header_info += self.outputInfoMapDeclarations(extern=True)
validation_header_info += 'void GenValidUsageCleanUpMaps(GenValidUsageXrInstanceInfo *instance_info);\n\n'
validation_header_info += '\n// Function to convert XrObjectType to string\n'
validation_header_info += 'std::string GenValidUsageXrObjectTypeToString(const XrObjectType& type);\n\n'
validation_header_info += '// Function to record all the core validation information\n'
validation_header_info += 'extern void CoreValidLogMessage(GenValidUsageXrInstanceInfo *instance_info, const std::string &message_id,\n'
validation_header_info += ' GenValidUsageDebugSeverity message_severity, const std::string &command_name,\n'
validation_header_info += ' std::vector<GenValidUsageXrObjectInfo> objects_info, const std::string &message);\n'
return validation_header_info
# Generate C++ utility functions to verify that all the required extensions have been enabled.
# self the ValidationSourceOutputGenerator object
def writeVerifyExtensions(self):
verify_extensions = 'bool ExtensionEnabled(std::vector<std::string> &extensions, const char* const check_extension_name) {\n'
verify_extensions += self.writeIndent(1)
verify_extensions += 'for (auto enabled_extension: extensions) {\n'
verify_extensions += self.writeIndent(2)
verify_extensions += 'if (enabled_extension == check_extension_name) {\n'
verify_extensions += self.writeIndent(3)
verify_extensions += 'return true;\n'
verify_extensions += self.writeIndent(2)
verify_extensions += '}\n'
verify_extensions += self.writeIndent(1)
verify_extensions += '}\n'
verify_extensions += self.writeIndent(1)
verify_extensions += 'return false;\n'
verify_extensions += '}\n\n'
number_of_instance_extensions = 0
number_of_system_extensions = 0
for extension in self.extensions:
if extension.type == 'instance':
number_of_instance_extensions += 1
elif extension.type == 'system':
number_of_system_extensions += 1
verify_extensions += 'bool ValidateInstanceExtensionDependencies(GenValidUsageXrInstanceInfo *gen_instance_info,\n'
verify_extensions += ' const std::string &command,\n'
verify_extensions += ' const std::string &struct_name,\n'
verify_extensions += ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
verify_extensions += ' std::vector<std::string> &extensions) {\n'
indent = 1
if number_of_instance_extensions > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t cur_index = 0; cur_index < extensions.size(); ++cur_index) {\n'
indent += 1
for extension in self.extensions:
number_of_required = len(extension.required_exts) - 1
if extension.type == 'instance' and number_of_required > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (extensions[cur_index] == "%s") {\n' % extension.name
current_count = 0
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t check_index = 0; check_index < extensions.size(); ++check_index) {\n'
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (cur_index == check_index) {\n'
verify_extensions += self.writeIndent(indent + 1)
verify_extensions += 'continue;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count = 0
for required_ext in extension.required_exts:
if current_count > 0:
found = False
for extension_look in self.extensions:
if extension_look.name == required_ext:
found = True
if extension_look.type != 'instance':
verify_extensions += self.printCodeGenErrorMessage('Instance extension "%s" requires non-instance extension "%s" which is not allowed' % (
self.currentExtension, required_ext))
if not found:
verify_extensions += self.printCodeGenErrorMessage('Instance extension "%s" lists extension "%s" as a requirement, but'
' it is not defined in the registry.' % (
self.currentExtension, required_ext))
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (!ExtensionEnabled(extensions, "%s")) {\n' % required_ext
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (nullptr != gen_instance_info) {\n'
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'std::string vuid = "VUID-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += command;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += struct_name;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-parameter";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'CoreValidLogMessage(gen_instance_info, vuid, VALID_USAGE_DEBUG_SEVERITY_ERROR,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' command, objects_info,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "Missing extension dependency \\"%s\\" (required by extension" \\\n' % required_ext
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "\\"%s\\") from enabled extension list");\n' % extension.name
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return false;\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count += 1
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
else:
verify_extensions += self.writeIndent(indent)
verify_extensions += '// No instance extensions to check dependencies for\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return true;\n'
verify_extensions += '}\n\n'
verify_extensions += 'bool ValidateSystemExtensionDependencies(GenValidUsageXrInstanceInfo *gen_instance_info,\n'
verify_extensions += ' const std::string &command,\n'
verify_extensions += ' const std::string &struct_name,\n'
verify_extensions += ' std::vector<GenValidUsageXrObjectInfo>& objects_info,\n'
verify_extensions += ' std::vector<std::string> &extensions) {\n'
indent = 1
if number_of_system_extensions > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t cur_index = 0; cur_index < extensions.size(); ++cur_index) {\n'
indent += 1
for extension in self.extensions:
number_of_required = len(self.required_exts) - 1
if extension.type == 'system' and number_of_required > 0:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (extensions[cur_index] == "%s") {\n' % extension.name
current_count = 0
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'for (uint32_t check_index = 0; check_index < extensions.size(); ++check_index) {\n'
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (cur_index == check_index) {\n'
verify_extensions += self.writeIndent(indent + 1)
verify_extensions += 'continue;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count = 0
for required_ext in extension.required_exts:
if current_count > 0:
found = False
is_instance = False
for extension_look in self.extensions:
if extension_look.name == required_ext:
found = True
if extension_look.type == 'instance':
is_instance = True
if not is_instance and extension_look.type != 'system':
verify_extensions += self.printCodeGenErrorMessage('System extension "%s" has an extension dependency on extension "%s" '
'which is of an invalid type.' % (
self.currentExtension, required_ext))
if not found:
verify_extensions += self.printCodeGenErrorMessage('System extension "%s" lists extension "%s" as a requirement, but'
' it is not defined in the registry.' % (
self.currentExtension, required_ext))
if is_instance:
verify_extensions += self.writeIndent(indent)
verify_extensions += '// This is an instance extension dependency, so make sure it is enabled in the instance\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (!ExtensionEnabled(gen_instance_info->enabled_extensions, "%s") {\n' % required_ext
else:
verify_extensions += self.writeIndent(indent)
verify_extensions += 'if (!ExtensionEnabled(extensions, "%s")) {\n' % required_ext
indent += 1
verify_extensions += self.writeIndent(indent)
verify_extensions += 'std::string vuid = "VUID-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += command;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += struct_name;\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'vuid += "-parameter";\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'CoreValidLogMessage(gen_instance_info, vuid, VALID_USAGE_DEBUG_SEVERITY_ERROR,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' command, objects_info,\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "Missing extension dependency \\"%s\\" (required by extension" \\' % required_ext
verify_extensions += self.writeIndent(indent)
verify_extensions += ' "\\"%s\\") from enabled extension list");\n' % extension.name
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return false;\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
current_count += 1
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
indent -= 1
verify_extensions += self.writeIndent(indent)
verify_extensions += '}\n'
else:
verify_extensions += self.writeIndent(indent)
verify_extensions += '// No system extensions to check dependencies for\n'
verify_extensions += self.writeIndent(indent)
verify_extensions += 'return true;\n'
verify_extensions += '}\n\n'
return verify_extensions
# Generate C++ enum and utility functions for verify that handles are valid.
# self the ValidationSourceOutputGenerator object
def writeValidateHandleChecks(self):
verify_handle = ''
for handle in self.api_handles:
if handle.protect_value:
verify_handle += '#if %s\n' % handle.protect_string
indent = 1
lower_handle_name = handle.name[2:].lower()
verify_handle += 'ValidateXrHandleResult Verify%sHandle(const %s* handle_to_check) {\n' % (
handle.name, handle.name)
verify_handle += self.writeIndent(indent)
verify_handle += 'return %s.verifyHandle(handle_to_check);\n' % self.makeInfoName(handle)
verify_handle += '}\n\n'
if handle.protect_value:
verify_handle += '#endif // %s\n' % handle.protect_string
return verify_handle
# Generate C++ utility functions for verify that handles share a parent.
# self the ValidationSourceOutputGenerator object
def writeValidateHandleParent(self):
verify_parent = '// Implementation function to get parent handle information\n'
verify_parent += 'bool GetXrParent(const XrObjectType inhandle_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE inhandle,\n'
verify_parent += ' XrObjectType& outhandle_type, XR_VALIDATION_GENERIC_HANDLE_TYPE& outhandle) {\n'
indent = 1
for handle in self.api_handles:
if handle.name == 'XrInstance':
verify_parent += self.writeIndent(indent)
verify_parent += 'if (inhandle_type == XR_OBJECT_TYPE_INSTANCE) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
else:
handle_info = '%s.get(CONVERT_GENERIC_TO_HANDLE(%s, inhandle))' % (self.makeInfoName(handle), handle.name)
verify_parent += self.writeIndent(indent)
verify_parent += 'if (inhandle_type == %s) {\n' % self.genXrObjectType(
handle.name)
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += '// Get the object and parent of the handle\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'GenValidUsageXrHandleInfo *handle_info = %s;\n' % handle_info
verify_parent += self.writeIndent(indent)
verify_parent += 'outhandle_type = handle_info->direct_parent_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'outhandle = handle_info->direct_parent_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return true;\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += ' return false;\n'
verify_parent += '}\n\n'
verify_parent += '// Implementation of VerifyXrParent function\n'
verify_parent += 'bool VerifyXrParent(XrObjectType handle1_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle1,\n'
verify_parent += ' XrObjectType handle2_type, const XR_VALIDATION_GENERIC_HANDLE_TYPE handle2,\n'
verify_parent += ' bool check_this) {\n'
indent = 1
verify_parent += self.writeIndent(indent)
verify_parent += 'if (CHECK_FOR_NULL_HANDLE(handle1) || CHECK_FOR_NULL_HANDLE(handle2)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '} else if (check_this && handle1_type == handle2_type) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return (handle1 == handle2);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (handle1_type == XR_OBJECT_TYPE_INSTANCE && handle2_type != XR_OBJECT_TYPE_INSTANCE) {\n'
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle2_type, handle2, parent_type, parent_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return VerifyXrParent(handle1_type, handle1, parent_type, parent_handle, true);\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '} else if (handle2_type == XR_OBJECT_TYPE_INSTANCE && handle1_type != XR_OBJECT_TYPE_INSTANCE) {\n'
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle1_type, handle1, parent_type, parent_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return VerifyXrParent(parent_type, parent_handle, handle2_type, handle2, true);\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '} else {\n'
indent += 1
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent1_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent1_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += self.writeIndent(indent)
verify_parent += 'XrObjectType parent2_type;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'XR_VALIDATION_GENERIC_HANDLE_TYPE parent2_handle;\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle1_type, handle1, parent1_type, parent1_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (!GetXrParent(handle2_type, handle2, parent2_type, parent2_handle)) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return false;\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'if (parent1_type == handle2_type) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return (parent1_handle == handle2);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '} else if (handle1_type == parent2_type) {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return (handle1 == parent2_handle);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '} else {\n'
verify_parent += self.writeIndent(indent + 1)
verify_parent += 'return VerifyXrParent(parent1_type, parent1_handle, parent2_type, parent2_handle, true);\n'
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
indent -= 1
verify_parent += self.writeIndent(indent)
verify_parent += '}\n'
verify_parent += self.writeIndent(indent)
verify_parent += 'return false;\n'
indent -= 1
verify_parent += '}\n\n'
return verify_parent
# Generate inline C++ code to check if a 'next' chain is valid for the current structure.
# self the ValidationSourceOutputGenerator object
# struct_type the name of the type of structure performing the validation check
# member the member generated in automatic_source_generator.py to validate
# indent the number of "tabs" to space in for the resulting C+ code.
    def writeValidateStructNextCheck(self, struct_type, struct_name, member, indent):
        """Generate inline C++ that validates a structure's 'next' chain.

        struct_type -- name of the structure type being validated (used in VUIDs)
        struct_name -- C++ variable name of the structure instance
        member      -- 'next' member info; supplies valid_extension_structs
        indent      -- indent level ("tabs") for the emitted C++ code

        Returns the generated C++ source as a string.
        """
        validate_struct_next = self.writeIndent(indent)
        validate_struct_next += 'std::vector<XrStructureType> valid_ext_structs;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'std::vector<XrStructureType> duplicate_ext_structs;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'std::vector<XrStructureType> encountered_structs;\n'
        # Seed the list of extension struct types this struct may legally chain.
        if member.valid_extension_structs and len(member.valid_extension_structs) > 0:
            for valid_struct in member.valid_extension_structs:
                validate_struct_next += self.writeIndent(indent)
                validate_struct_next += 'valid_ext_structs.push_back(%s);\n' % self.genXrStructureType(
                    valid_struct)
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'NextChainResult next_result = ValidateNextChain(instance_info, command_name, objects_info,\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '                                                %s->%s, valid_ext_structs,\n' % (
            struct_name, member.name)
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '                                                encountered_structs,\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '                                                duplicate_ext_structs);\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '// No valid extension structs for this \'next\'.  Therefore, must be NULL\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '// or only contain a list of valid extension structures.\n'
        # Emit error logging for an invalid chain.
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += 'if (NEXT_CHAIN_RESULT_ERROR == next_result) {\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'CoreValidLogMessage(instance_info, "VUID-%s-%s-next",\n' % (struct_type,
                                                                                            member.name)
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    objects_info, "Invalid structure(s) in \\"next\\" chain for %s struct \\"%s\\"");\n' % (struct_type,
                                                                                                                                            member.name)
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
        # Emit duplicate-struct handling: build a readable list of the
        # duplicated structure type names before logging.
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '} else if (NEXT_CHAIN_RESULT_DUPLICATE_STRUCT == next_result) {\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'char struct_type_buffer[XR_MAX_STRUCTURE_NAME_SIZE];\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'std::string error_message = "Multiple structures of the same type(s) in \\"next\\" chain for ";\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'error_message += "%s : ";\n' % struct_type
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'if (nullptr != instance_info) {\n'
        validate_struct_next += self.writeIndent(indent + 2)
        validate_struct_next += 'bool wrote_struct = false;\n'
        validate_struct_next += self.writeIndent(indent + 2)
        validate_struct_next += 'for (uint32_t dup = 0; dup < duplicate_ext_structs.size(); ++dup) {\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += 'if (XR_SUCCESS == instance_info->dispatch_table->StructureTypeToString(instance_info->instance,\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += '                                                                      duplicate_ext_structs[dup],\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += '                                                                      struct_type_buffer)) {\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += 'if (wrote_struct) {\n'
        validate_struct_next += self.writeIndent(indent + 5)
        validate_struct_next += 'error_message += ", ";\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += '} else {\n'
        validate_struct_next += self.writeIndent(indent + 5)
        validate_struct_next += 'wrote_struct = true;\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 4)
        validate_struct_next += 'error_message += struct_type_buffer;\n'
        validate_struct_next += self.writeIndent(indent + 3)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 2)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '}\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'CoreValidLogMessage(instance_info, "VUID-%s-next-unique",\n' % struct_type
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '                    objects_info,\n'
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += '"Multiple structures of the same type(s) in \\"next\\" chain for %s struct");\n' % struct_type
        validate_struct_next += self.writeIndent(indent + 1)
        validate_struct_next += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
        validate_struct_next += self.writeIndent(indent)
        validate_struct_next += '}\n'
        return validate_struct_next
# Generate inline C++ code to check if a pointer to a variable or array is valid.
# self the ValidationSourceOutputGenerator object
# cmd_struct_name the name of the structure or command generating this validation check.
# member_param_name the name of the member or parameter getting validated
# member_param_type the type of the member or parameter getting validated
# pointer_to_check the full name of the pointer to check (usually cmd_struct_name +
# member_param_name in some fashion)
# full_count_var the full name of the array count variable (if this is an array), or None
# short_count_var the short name of the array count variable (if this is an array), or None
# is_in_cmd Boolean indicating that this is being called directly from inside a command
# indent the number of "tabs" to space in for the resulting C+ code.
def writeValidatePointerArrayNonNull(self, cmd_struct_name, member_param_name, member_param_type,
pointer_to_check, full_count_var, short_count_var, is_in_cmd,
indent):
array_check = self.writeIndent(indent)
instance_info_string = 'instance_info'
command_string = 'command_name'
error_prefix = ''
if is_in_cmd:
if cmd_struct_name == 'xrCreateInstance':
instance_info_string = 'nullptr'
else:
instance_info_string = 'gen_instance_info'
command_string = '"%s"' % cmd_struct_name
error_prefix = 'Invalid NULL for'
else:
error_prefix = '%s contains invalid NULL for' % cmd_struct_name
if full_count_var is None or len(full_count_var) == 0:
array_check += '// Non-optional pointer/array variable that needs to not be NULL\n'
array_check += self.writeIndent(indent)
array_check += 'if (nullptr == %s) {\n' % pointer_to_check
indent = indent + 1
array_check += self.writeIndent(indent)
array_check += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
cmd_struct_name,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s, objects_info,\n' % command_string
array_check += self.writeIndent(indent)
array_check += ' "%s %s \\"%s\\" which is not "\n' % (error_prefix,
member_param_type,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' "optional and must be non-NULL");\n'
else:
array_check += '// Pointer/array variable with a length variable. Make sure that\n'
array_check += self.writeIndent(indent)
array_check += '// if length variable is non-zero that the pointer is not NULL\n'
array_check += self.writeIndent(indent)
array_check += 'if (nullptr == %s && 0 != %s) {\n' % (
pointer_to_check, full_count_var)
indent = indent + 1
array_check += self.writeIndent(indent)
array_check += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
cmd_struct_name,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s, objects_info,\n' % command_string
array_check += self.writeIndent(indent)
array_check += ' "%s %s \\"%s\\" is which not "\n' % (error_prefix,
member_param_type,
member_param_name)
array_check += self.writeIndent(indent)
array_check += ' "optional since \\"%s\\" is set and must be non-NULL");\n' % short_count_var
array_check += self.writeIndent(indent)
array_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
indent = indent - 1
array_check += self.writeIndent(indent)
array_check += '}\n'
return array_check
# Write an inline check to make sure an Enum is valid
# self the ValidationSourceOutputGenerator object
# cmd_struct_name the name of the structure or command generating this validation check.
# cmd_name_param the name of the parameter containing the command name
# param_type the type of enum to validate getting validated
# param_name the name of the parameter to validate
# full_param_name the full name of the parameter to check (usually cmd_struct_name +
# member_param_name in some fashion)
# param_is_pointer Boolean indicate that the parameter is a pointer
# is_in_cmd Boolean indicating that this is being called directly from inside a command
    # indent              the number of "tabs" to space in for the resulting C++ code.
def writeValidateInlineEnum(self, cmd_struct_name, cmd_name_param, param_type, param_name, full_param_name,
param_is_pointer, is_in_cmd, indent):
int_indent = indent
inline_enum_str = self.writeIndent(int_indent)
inline_enum_str += '// Make sure the enum type %s value is valid\n' % param_type
inline_enum_str += self.writeIndent(int_indent)
pointer_string = ''
if param_is_pointer:
pointer_string = '*'
instance_info_string = 'instance_info'
error_prefix = ''
if is_in_cmd:
if cmd_struct_name == 'xrCreateInstance':
instance_info_string = 'nullptr'
else:
instance_info_string = 'gen_instance_info'
error_prefix = 'Invalid'
else:
error_prefix = '%s contains invalid' % cmd_struct_name
inline_enum_str += 'if (!ValidateXrEnum(%s, %s, "%s", "%s", objects_info, %s%s)) {\n' % (
instance_info_string, cmd_name_param, cmd_struct_name, param_name, pointer_string, full_param_name)
int_indent = int_indent + 1
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'std::ostringstream oss_enum;\n'
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'oss_enum << std::hex << static_cast<int32_t>(%s%s);\n' % (pointer_string,
full_param_name)
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'std::string error_str = "%s %s \\"%s\\" enum value 0x";\n' % (error_prefix,
param_type,
param_name)
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'error_str += oss_enum.str();\n'
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
cmd_struct_name,
param_name)
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += ' objects_info, error_str);\n'
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
int_indent = int_indent - 1
inline_enum_str += self.writeIndent(int_indent)
inline_enum_str += '}\n'
return inline_enum_str
# Write an inline check to make sure a flag is valid
# self the ValidationSourceOutputGenerator object
# cmd_struct_name the name of the structure or command generating this validation check.
# cmd_name_param the name of the parameter containing the command name
# param_type the type of flag to validate getting validated
# param_name the name of the parameter to validate
# full_param_name the full name of the parameter to check (usually cmd_struct_name +
# member_param_name in some fashion)
# param_is_pointer Boolean indicating that the parameter is a pointer
# is_optional Boolean indicating that the parameter is optional
# is_in_cmd Boolean indicating that this is being called directly from inside a command
    # indent              the number of "tabs" to space in for the resulting C++ code.
    def writeValidateInlineFlag(self, cmd_struct_name, cmd_name_param, param_type, param_name, full_param_name,
                                param_is_pointer, is_optional, is_in_cmd, indent):
        """Generate inline C++ code validating a flags (bitmask) value.

        The generated code calls ValidateXr<FlagType>() and then branches on
        whether the flag type has any valid bits defined:
        - valid bits and non-optional: value must be non-zero AND legal
        - valid bits and optional: value must only contain legal bits
        - no valid bits defined: value must be zero
        On failure the generated code logs a CoreValidLogMessage and returns
        XR_ERROR_VALIDATION_FAILURE.  Returns the C++ snippet as a string.
        """
        int_indent = indent
        inline_flag_str = self.writeIndent(int_indent)
        # Build the C++ result-variable name from the flag type name:
        # add underscore between a lowercase/digit and an uppercase letter
        result_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', param_type)
        # Drop the leading "Xr_" prefix and change to lowercase
        result_name = result_name[3:].lower()
        result_name += '_result'
        pointer_string = ''
        if param_is_pointer:
            pointer_string = '*'
        instance_info_string = 'instance_info'
        error_prefix = ''
        if is_in_cmd:
            # xrCreateInstance has no instance info yet, so pass nullptr.
            if cmd_struct_name == 'xrCreateInstance':
                instance_info_string = 'nullptr'
            else:
                instance_info_string = 'gen_instance_info'
            error_prefix = 'Invalid'
        else:
            error_prefix = '%s invalid member' % cmd_struct_name
        inline_flag_str += 'ValidateXrFlagsResult %s = ValidateXr%s(%s%s);\n' % (result_name,
                                                                                 param_type[2:],
                                                                                 pointer_string,
                                                                                 full_param_name)
        if self.flagHasValidValues(param_type):
            if not is_optional:
                # Must be non-zero
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += '// Flags must be non-zero in this case.\n'
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'if (VALIDATE_XR_FLAGS_ZERO == %s) {\n' % result_name
                int_indent = int_indent + 1
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-requiredbitmask",\n' % (instance_info_string,
                                                                                                cmd_struct_name,
                                                                                                param_name)
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += ' objects_info, "%s \\"%s\\" flag must be non-zero");\n' % (param_type,
                                                                                               param_name)
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                int_indent = int_indent - 1
                inline_flag_str += self.writeIndent(int_indent)
                # Falls through to the shared "illegal bit" body emitted below.
                inline_flag_str += '} else if (VALIDATE_XR_FLAGS_SUCCESS != %s) {\n' % result_name
            else:
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += '// Valid flags available, so it must be invalid to fail.\n'
                inline_flag_str += self.writeIndent(int_indent)
                inline_flag_str += 'if (VALIDATE_XR_FLAGS_INVALID == %s) {\n' % result_name
            # Shared body: report the offending value in hex with an "illegal bit" message.
            int_indent = int_indent + 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '// Otherwise, flags must be valid.\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'std::ostringstream oss_enum;\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'oss_enum << std::hex << static_cast<int32_t>(%s%s);\n' % (pointer_string,
                                                                                          full_param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'std::string error_str = "%s %s \\"%s\\" flag value 0x";\n' % (error_prefix,
                                                                                              param_type,
                                                                                              param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'error_str += oss_enum.str();\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'error_str += " contains illegal bit";\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_string,
                                                                                      cmd_struct_name,
                                                                                      param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += ' objects_info, error_str);\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
            int_indent = int_indent - 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '}\n'
        else:
            # Must be zero
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '// Flags must be zero in this case.\n'
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'if (VALIDATE_XR_FLAGS_ZERO != %s) {\n' % result_name
            int_indent = int_indent + 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'CoreValidLogMessage(%s, "VUID-%s-%s-zerobitmask",\n' % (instance_info_string,
                                                                                        cmd_struct_name,
                                                                                        param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += ' objects_info, "%s \\"%s\\" flag must be zero");\n' % (param_type,
                                                                                       param_name)
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += 'return XR_ERROR_VALIDATION_FAILURE;\n'
            int_indent = int_indent - 1
            inline_flag_str += self.writeIndent(int_indent)
            inline_flag_str += '}\n'
        return inline_flag_str
# Write an inline check to make sure a handle is valid
# self the ValidationSourceOutputGenerator object
# cmd_name the name of the command generating this validation check.
# vuid_name the name of the structure or command to put in the VUID
# member_param the member or parameter generated in automatic_source_generator.py to validate
# mem_par_desc_name Descriptive name of parameter
# output_result_type Boolean indicating we need to output the handle result type (since it hasn't
# been defined in the C++ code yet).
# return_on_null Boolean indicating we need to return immediately if we encounter a NULL
# instance_info_name Name of the parameter storing the instance information
# element_in_array This is a single element in an array
    # indent              the number of "tabs" to space in for the resulting C++ code.
    def writeValidateInlineHandleValidation(self, cmd_name, vuid_name, member_param, mem_par_desc_name,
                                            output_result_type, return_on_null, instance_info_name,
                                            element_in_array, indent):
        """Generate inline C++ code verifying that a handle value is valid.

        The generated code calls Verify<Type>Handle() and, depending on the
        member's optionality and no_auto_validity setting, emits an early
        XR_SUCCESS return for an optional NULL handle and/or an error path that
        logs and returns XR_ERROR_HANDLE_INVALID for a bad handle.
        Returns the C++ snippet as a string.

        NOTE(review): output_result_type is not referenced in this body —
        confirm whether it is still needed at the call sites.
        """
        inline_validate_handle = ''
        adjust_to_pointer = ''
        # Prepend '&' when the expression is not already a pointer: a plain
        # (non-array) handle value, or a single element of an array of handles.
        if (not element_in_array and member_param.pointer_count == 0) or (element_in_array and member_param.pointer_count == 1):
            adjust_to_pointer = '&'
        inline_validate_handle += self.writeIndent(indent)
        inline_validate_handle += '{\n'
        indent += 1
        inline_validate_handle += self.writeIndent(indent) + "// writeValidateInlineHandleValidation\n"
        inline_validate_handle += self.writeIndent(indent)
        inline_validate_handle += 'ValidateXrHandleResult handle_result = Verify%sHandle(%s%s);\n' % (member_param.type, adjust_to_pointer,
                                                                                                      mem_par_desc_name)
        # Tracks whether the optional-NULL "if" was emitted, so the invalid-handle
        # check below can chain onto it with an "else".
        wrote_first_if = False
        if member_param.is_optional:
            # If we have to return on a Handle that has a value of XR_NULL_HANDLE, do so.
            if return_on_null:
                wrote_first_if = True
                inline_validate_handle += self.writeIndent(indent)
                inline_validate_handle += 'if (handle_result == VALIDATE_XR_HANDLE_NULL) {\n'
                inline_validate_handle += self.writeIndent(indent + 1)
                inline_validate_handle += '// Handle is optional so NULL is valid. But we can\'t do anything else, either.\n'
                inline_validate_handle += self.writeIndent(indent + 1)
                inline_validate_handle += 'return XR_SUCCESS;\n'
                inline_validate_handle += self.writeIndent(indent)
                # NOTE: no trailing newline — an " else " may be appended below.
                inline_validate_handle += '}'
        # Otherwise, catch the non-success case. If we catch the NULL handle above, we add an "else" to
        # the if below.
        if not member_param.no_auto_validity:
            if wrote_first_if:
                inline_validate_handle += ' else '
            else:
                inline_validate_handle += self.writeIndent(indent)
            indent = indent + 1
            if member_param.is_optional:
                inline_validate_handle += 'if (handle_result == VALIDATE_XR_HANDLE_INVALID) {\n'
                inline_validate_handle += self.writeIndent(indent)
                inline_validate_handle += '// Not a valid handle\n'
            else:
                inline_validate_handle += 'if (handle_result != VALIDATE_XR_HANDLE_SUCCESS) {\n'
                inline_validate_handle += self.writeIndent(indent)
                inline_validate_handle += '// Not a valid handle or NULL (which is not valid in this case)\n'
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'std::ostringstream oss;\n'
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'oss << "Invalid %s handle \\"%s\\" 0x";\n' % (member_param.type,
                                                                                    member_param.name)
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'oss << std::hex << reinterpret_cast<const void*>(%s);\n' % mem_par_desc_name
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_name,
                                                                                             vuid_name,
                                                                                             member_param.name)
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += ' objects_info, oss.str());\n'
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += 'return XR_ERROR_HANDLE_INVALID;\n'
            indent = indent - 1
            inline_validate_handle += self.writeIndent(indent)
            inline_validate_handle += '}\n'
        else:
            # No auto-validity check: terminate the dangling '}' from the optional-NULL block.
            inline_validate_handle += '\n'
        indent -= 1
        inline_validate_handle += self.writeIndent(indent)
        inline_validate_handle += '}\n'
        return inline_validate_handle
    def outputParamMemberContents(self, is_command, struct_command_name, param_member, param_member_prefix, instance_info_variable,
                                  command_name_variable, is_first_param, primary_handle, primary_handle_desc_name, primary_handle_tuple,
                                  wrote_handle_proto, indent):
        """Generate the inline C++ validation code for one command parameter or struct member.

        Dispatches on the member/parameter kind (handle, struct, enum, flags,
        char array, ...) and emits NULL checks, array-length checks, and a
        per-element validation loop where needed.

        Args:
            is_command: True when validating a command parameter, False for a
                struct member (changes message wording and the check_members arg).
            struct_command_name: name of the owning command/structure (used in
                VUIDs and error messages).
            param_member: the parameter/member description object to validate.
            param_member_prefix: C++ expression prefix used to reach the member.
            instance_info_variable: name of the C++ instance-info variable.
            command_name_variable: name of the C++ command-name variable.
            is_first_param: NOTE(review): not referenced in this body.
            primary_handle, primary_handle_desc_name, primary_handle_tuple:
                first handle encountered, used for parent/ancestor checks.
            wrote_handle_proto: True once the first handle check has been written.
            indent: number of "tabs" to indent the generated C++ code.

        Returns:
            The generated C++ snippet as a string.
        """
        param_member_contents = ''
        is_loop = False
        is_pointer = False
        is_array = param_member.is_array
        check_pointer_array_null = False
        loop_string = ''
        wrote_loop = False
        prefixed_param_member_name = param_member_prefix
        prefixed_param_member_name += param_member.name
        # Keep the un-indexed name around; the prefixed name may gain "[i]" below.
        pre_loop_prefixed_param_member_name = prefixed_param_member_name
        loop_param_name = 'value_'
        loop_param_name += param_member.name.lower()
        loop_param_name += '_inc'
        # Classify the member as array and/or pointer from its count variables
        # and pointer depth.
        if len(param_member.array_count_var) != 0:
            is_array = True
            if param_member.pointer_count > 0:
                is_pointer = True
        elif len(param_member.pointer_count_var) != 0:
            is_array = True
            if param_member.pointer_count > 1:
                is_pointer = True
        elif param_member.pointer_count > 0:
            is_pointer = True
        if is_array or is_pointer:
            check_pointer_array_null = not param_member.is_optional and not param_member.is_static_array
        short_count_var = None
        full_count_var = None
        if is_array:
            # Resolve the short (declared) and long (fully-prefixed) count names.
            long_count_name = param_member_prefix
            if param_member.is_static_array:
                short_count_var = param_member.static_array_sizes[0]
                long_count_name = param_member.static_array_sizes[0]
            elif len(param_member.array_count_var) != 0:
                short_count_var = param_member.array_count_var
                # All-uppercase count names are constants and take no prefix.
                if self.isAllUpperCase(param_member.array_count_var):
                    long_count_name = param_member.array_count_var
                else:
                    long_count_name += param_member.array_count_var
            else:
                short_count_var = param_member.pointer_count_var
                if self.isAllUpperCase(param_member.pointer_count_var):
                    long_count_name = param_member.pointer_count_var
                else:
                    long_count_name += param_member.pointer_count_var
            if check_pointer_array_null:
                full_count_var = long_count_name
                param_member_contents += self.writeValidatePointerArrayNonNull(struct_command_name,
                                                                               param_member.name,
                                                                               param_member.type,
                                                                               prefixed_param_member_name,
                                                                               full_count_var,
                                                                               short_count_var,
                                                                               is_command,
                                                                               indent)
            # Element types that need per-element validation get a C++ for-loop;
            # the loop header is emitted later, just before it is needed.
            if (param_member.is_handle or self.isEnumType(param_member.type) or
                    (self.isStruct(param_member.type) and not self.isStructAlwaysValid(param_member.type))):
                loop_string += self.writeIndent(indent)
                loop_string += 'for (uint32_t %s = 0; %s < %s; ++%s) {\n' % (loop_param_name,
                                                                             loop_param_name,
                                                                             long_count_name,
                                                                             loop_param_name)
                indent = indent + 1
                prefixed_param_member_name = '%s[%s]' % (
                    prefixed_param_member_name, loop_param_name)
                is_loop = True
        elif check_pointer_array_null:
            param_member_contents += self.writeValidatePointerArrayNonNull(struct_command_name,
                                                                           param_member.name,
                                                                           param_member.type,
                                                                           prefixed_param_member_name,
                                                                           None,
                                                                           None,
                                                                           is_command,
                                                                           indent)
        # If this member is the length for another array, validate the
        # count/array pairing.
        if not param_member.is_static_array and len(param_member.array_length_for) > 0:
            if param_member.is_optional:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// Optional array must be non-NULL when %s is non-zero\n' % prefixed_param_member_name
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'if (0 != %s && nullptr == %s%s) {\n' % (
                    prefixed_param_member_name, param_member_prefix, param_member.array_length_for)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (instance_info_variable,
                                                                                                struct_command_name,
                                                                                                param_member.array_length_for)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += ' objects_info,\n'
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += ' '
                if is_command:
                    param_member_contents += '"Command %s param %s' % (
                        struct_command_name, param_member.array_length_for)
                else:
                    param_member_contents += '"Structure %s member %s' % (
                        struct_command_name, param_member.name)
                param_member_contents += ' is NULL, but %s is greater than 0");\n' % prefixed_param_member_name
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
            else:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// Non-optional array length must be non-zero\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'if (0 >= %s && nullptr != %s%s) {\n' % (
                    prefixed_param_member_name, param_member_prefix, param_member.array_length_for)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-arraylength",\n' % (instance_info_variable,
                                                                                                  struct_command_name,
                                                                                                  param_member.name)
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += ' objects_info,\n'
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += ' '
                if is_command:
                    param_member_contents += '"Command %s param %s' % (
                        struct_command_name, param_member.name)
                else:
                    param_member_contents += '"Structure %s member %s' % (
                        struct_command_name, param_member.name)
                param_member_contents += ' is non-optional and must be greater than 0");\n'
                param_member_contents += self.writeIndent(indent + 1)
                param_member_contents += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
        first_time_handle_check = not wrote_handle_proto
        # --- Handle members ---
        if param_member.is_handle:
            if param_member.pointer_count == 0:
                param_member_contents += self.writeValidateInlineHandleValidation(command_name_variable,
                                                                                  struct_command_name,
                                                                                  param_member,
                                                                                  prefixed_param_member_name,
                                                                                  first_time_handle_check,
                                                                                  is_command,
                                                                                  instance_info_variable,
                                                                                  False,
                                                                                  indent)
                # If the first item is a handle, and this is a different handle, we need to verify that
                # one is either the parent of the other, or that they share a common ancestor.
                if primary_handle_tuple is not None and not first_time_handle_check:
                    current_handle_tuple = self.getHandle(param_member.type)
                    param_member_contents += self.writeInlineParentCheckCall(instance_info_variable,
                                                                             primary_handle_tuple,
                                                                             primary_handle,
                                                                             primary_handle_desc_name,
                                                                             current_handle_tuple,
                                                                             param_member,
                                                                             prefixed_param_member_name,
                                                                             struct_command_name,
                                                                             command_name_variable,
                                                                             indent)
                elif not is_command:
                    # First handle seen in a struct: remember it for later parent checks.
                    primary_handle_tuple = self.getHandle(param_member.type)
                    primary_handle = param_member
                    primary_handle_desc_name = prefixed_param_member_name
            elif is_array:
                param_member_contents += loop_string
                wrote_loop = True
                param_member_contents += self.writeValidateInlineHandleValidation(command_name_variable,
                                                                                  struct_command_name,
                                                                                  param_member,
                                                                                  prefixed_param_member_name,
                                                                                  first_time_handle_check,
                                                                                  is_command,
                                                                                  instance_info_variable,
                                                                                  True,
                                                                                  indent)
        # --- Struct members needing validation ---
        elif self.isStruct(param_member.type) and not self.isStructAlwaysValid(param_member.type):
            param_member_contents += loop_string
            wrote_loop = True
            is_relation_group = False
            relation_group = None
            # Check to see if this struct is the base of a relation group
            for cur_rel_group in self.struct_relation_groups:
                if cur_rel_group.generic_struct_name == param_member.type:
                    relation_group = cur_rel_group
                    is_relation_group = True
                    break
            # If this struct is the base of a relation group, check to see if this call really should go to any one of
            # its children instead of itself.
            if is_relation_group:
                for child in relation_group.child_struct_names:
                    child_struct = self.getStruct(child)
                    if child_struct.protect_value:
                        param_member_contents += '#if %s\n' % child_struct.protect_string
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '// Validate if %s is a child structure of type %s and it is valid\n' % (
                        param_member.type, child)
                    param_member_contents += self.writeIndent(indent)
                    base_child_struct_name = child[2:].lower()
                    if is_pointer or is_array:
                        # Re-use the member's own declaration with the child type
                        # substituted to form the cast target type.
                        new_type_info = param_member.cdecl.replace(
                            param_member.type, child)
                        new_type_info = new_type_info.replace(
                            param_member.name, "")
                        new_type_info = new_type_info.strip().rstrip()
                        param_member_contents += '%s new_%s_value = reinterpret_cast<%s>(%s);\n' % (
                            new_type_info, base_child_struct_name, new_type_info, pre_loop_prefixed_param_member_name)
                        param_member_contents += self.writeIndent(indent)
                        deref_string = '->' if is_pointer else '.'
                        if is_array:
                            param_member_contents += 'if (new_%s_value[%s]%stype == %s) {\n' % (
                                base_child_struct_name, loop_param_name, deref_string, self.genXrStructureType(child))
                        else:
                            param_member_contents += 'if (new_%s_value%stype == %s) {\n' % (
                                base_child_struct_name, deref_string, self.genXrStructureType(child))
                    else:
                        param_member_contents += 'const %s* new_%s_value = reinterpret_cast<const %s*>(&%s);\n' % (
                            child, base_child_struct_name, child, pre_loop_prefixed_param_member_name)
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'if (new_%s_value->type == %s) {\n' % (
                            base_child_struct_name, self.genXrStructureType(child))
                    indent = indent + 1
                    if param_member.is_optional:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'if (nullptr != new_%s_value) {\n' % base_child_struct_name
                        indent = indent + 1
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'xr_result = ValidateXrStruct(%s, %s,\n' % (
                            instance_info_variable, command_name_variable)
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += ' objects_info,'
                        if is_command:
                            param_member_contents += ' false,'
                        else:
                            param_member_contents += ' check_members,'
                        if is_array:
                            if is_pointer:
                                param_member_contents += ' new_%s_value[%s]);\n' % (
                                    base_child_struct_name, loop_param_name)
                            else:
                                param_member_contents += ' &new_%s_value[%s]);\n' % (
                                    base_child_struct_name, loop_param_name)
                        else:
                            param_member_contents += ' new_%s_value);\n' % base_child_struct_name
                    else:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'xr_result = ValidateXrStruct(%s, %s,\n' % (
                            instance_info_variable, command_name_variable)
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += ' objects_info,'
                        if is_command:
                            param_member_contents += 'false,'
                        else:
                            param_member_contents += ' check_members,'
                        if is_array:
                            param_member_contents += ' new_%s_value[%s]);\n' % (
                                base_child_struct_name, loop_param_name)
                        else:
                            param_member_contents += ' new_%s_value);\n' % base_child_struct_name
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'if (XR_SUCCESS != xr_result) {\n'
                    indent = indent + 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'std::string error_message = "'
                    if is_command:
                        param_member_contents += 'Command %s param %s";\n' % (
                            struct_command_name, param_member.name)
                    else:
                        param_member_contents += 'Structure %s member %s";\n' % (
                            struct_command_name, param_member.name)
                    if is_array:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'error_message += "[";\n'
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'error_message += std::to_string(%s);\n' % loop_param_name
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'error_message += "]";\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'error_message += " is invalid";\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (
                        instance_info_variable, struct_command_name, param_member.name)
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += ' objects_info,\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += ' error_message);\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    if is_array:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += 'break;\n'
                    param_member_contents += self.writeIndent(indent - 1)
                    param_member_contents += '} else {\n'
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'continue;\n'
                    if param_member.is_optional:
                        param_member_contents += self.writeIndent(indent)
                        param_member_contents += '}\n'
                        indent = indent - 1
                    indent = indent - 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '}\n'
                    indent = indent - 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += '}\n'
                    if child_struct.protect_value:
                        param_member_contents += '#endif // %s\n' % child_struct.protect_string
            # No child matched (or not a relation group): validate as the base type.
            param_member_contents += self.writeIndent(indent)
            if is_relation_group:
                param_member_contents += '// Validate that the base-structure %s is valid\n' % (
                    param_member.type)
            else:
                param_member_contents += '// Validate that the structure %s is valid\n' % (
                    param_member.type)
            param_member_contents += self.writeIndent(indent)
            if is_pointer:
                if param_member.is_optional:
                    param_member_contents += 'if (nullptr != %s) {\n' % prefixed_param_member_name
                    indent = indent + 1
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += 'xr_result = ValidateXrStruct(%s, %s,\n' % (
                        instance_info_variable, command_name_variable)
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += ' objects_info,'
                    if is_command:
                        param_member_contents += ' false,'
                    else:
                        param_member_contents += ' check_members,'
                    param_member_contents += ' %s);\n' % prefixed_param_member_name
                else:
                    param_member_contents += 'xr_result = ValidateXrStruct(%s, %s, objects_info,\n' % (
                        instance_info_variable, command_name_variable)
                    param_member_contents += self.writeIndent(indent)
                    param_member_contents += ' '
                    if is_command:
                        if param_member.is_const:
                            param_member_contents += 'true,'
                        else:
                            param_member_contents += 'false,'
                    else:
                        param_member_contents += 'check_members,'
                    param_member_contents += ' %s);\n' % prefixed_param_member_name
            else:
                param_member_contents += 'xr_result = ValidateXrStruct(%s, %s, objects_info,\n' % (
                    instance_info_variable, command_name_variable)
                param_member_contents += self.writeIndent(indent)
                param_member_contents += ' '
                if is_command:
                    param_member_contents += 'true,'
                else:
                    param_member_contents += 'check_members,'
                param_member_contents += ' &%s);\n' % prefixed_param_member_name
            param_member_contents += self.writeIndent(indent)
            param_member_contents += 'if (XR_SUCCESS != xr_result) {\n'
            indent = indent + 1
            param_member_contents += self.writeIndent(indent)
            param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (
                instance_info_variable, struct_command_name, param_member.name)
            param_member_contents += self.writeIndent(indent)
            param_member_contents += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
            param_member_contents += self.writeIndent(indent)
            param_member_contents += ' objects_info,\n'
            param_member_contents += self.writeIndent(indent)
            param_member_contents += ' '
            if is_command:
                param_member_contents += '"Command %s param %s' % (
                    struct_command_name, param_member.name)
            else:
                param_member_contents += '"Structure %s member %s' % (
                    struct_command_name, param_member.name)
            param_member_contents += ' is invalid");\n'
            param_member_contents += self.writeIndent(indent)
            param_member_contents += 'return xr_result;\n'
            indent = indent - 1
            if is_pointer and param_member.is_optional:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
                indent = indent - 1
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '}\n'
        # --- Enum members ---
        elif self.isEnumType(param_member.type):
            if is_array:
                param_member_contents += loop_string
                wrote_loop = True
            param_member_contents += self.writeValidateInlineEnum(struct_command_name,
                                                                  command_name_variable,
                                                                  param_member.type,
                                                                  param_member.name,
                                                                  prefixed_param_member_name,
                                                                  is_pointer,
                                                                  is_command,
                                                                  indent)
        # --- Flag (bitmask) members ---
        elif self.isFlagType(param_member.type):
            param_member_contents += self.writeValidateInlineFlag(struct_command_name,
                                                                  command_name_variable,
                                                                  param_member.type,
                                                                  param_member.name,
                                                                  prefixed_param_member_name,
                                                                  is_pointer,
                                                                  param_member.is_optional,
                                                                  is_command,
                                                                  indent)
        # --- Everything else except void ---
        elif "void" not in param_member.type:
            if param_member.is_null_terminated:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// NOTE: Can\'t validate "VUID-%s-%s-parameter" null-termination\n' % (struct_command_name,
                                                                                                                param_member.name)
            elif param_member.pointer_count > 0:
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '// NOTE: Can\'t validate "VUID-%s-%s-parameter" type\n' % (struct_command_name,
                                                                                                     param_member.name)
            elif param_member.is_static_array and "char" in param_member.type:
                # Fixed-size char array: make sure the contents fit.
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'if (%s < std::strlen(%s)) {\n' % (
                    param_member.static_array_sizes[0], prefixed_param_member_name)
                indent = indent + 1
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'CoreValidLogMessage(%s, "VUID-%s-%s-parameter",\n' % (
                    instance_info_variable, struct_command_name, param_member.name)
                param_member_contents += self.writeIndent(indent)
                param_member_contents += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % command_name_variable
                param_member_contents += self.writeIndent(indent)
                param_member_contents += ' objects_info,\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += ' '
                if is_command:
                    param_member_contents += '"Command %s param %s' % (
                        struct_command_name, param_member.name)
                else:
                    param_member_contents += '"Structure %s member %s' % (
                        struct_command_name, param_member.name)
                param_member_contents += ' length is too long.");\n'
                param_member_contents += self.writeIndent(indent)
                param_member_contents += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                indent = indent - 1
                param_member_contents += self.writeIndent(indent)
                param_member_contents += '}\n'
        # Close the per-element loop if one was emitted.
        if is_loop:
            indent = indent - 1
        if wrote_loop:
            param_member_contents += self.writeIndent(indent)
            param_member_contents += '}\n'
        return param_member_contents
# Write the validation function for every struct we know about.
# self the ValidationSourceOutputGenerator object
    def writeValidateStructFuncs(self):
        """Generate the C++ ValidateXrStruct() overload for every known structure.

        For each structure (skipping those without a 'type' member) this emits a
        function that validates the 'type' and 'next' fields, dispatches relation-
        group base structures to the proper child validator, validates the
        remaining members, and — when the struct carries both
        enabledExtensionCount and enabledExtensionNames — checks extension
        dependencies.  Returns the generated source as one string.
        """
        struct_check = ''
        # Now write out the actual functions
        for xr_struct in self.api_structures:
            # Structures without a 'type' member get no generated validator.
            if xr_struct.name in self.structs_with_no_type:
                continue
            indent = 1
            is_relation_group = False
            relation_group = None
            # Wrap platform-specific structs in their protection macro.
            if xr_struct.protect_value:
                struct_check += '#if %s\n' % xr_struct.protect_string
            struct_check += 'XrResult ValidateXrStruct(GenValidUsageXrInstanceInfo *instance_info, const std::string &command_name,\n'
            struct_check += '                          std::vector<GenValidUsageXrObjectInfo>& objects_info, bool check_members,\n'
            struct_check += '                          const %s* value) {\n' % xr_struct.name
            # setup_bail tracks whether the early-return for !check_members has
            # already been emitted (it is written once, before the first
            # "ordinary" member check).
            setup_bail = False
            struct_check += '    XrResult xr_result = XR_SUCCESS;\n'
            # Check to see if this struct is the base of a relation group
            for cur_rel_group in self.struct_relation_groups:
                if cur_rel_group.generic_struct_name == xr_struct.name:
                    relation_group = cur_rel_group
                    is_relation_group = True
                    break
            # If this struct is the base of a relation group, check to see if this call really should go to any one of
            # its children instead of itself.
            if is_relation_group:
                for member in xr_struct.members:
                    if member.name == 'next':
                        struct_check += self.writeIndent(indent)
                        struct_check += '// NOTE: Can\'t validate "VUID-%s-next-next" because it is a base structure\n' % xr_struct.name
                    else:
                        struct_check += self.writeIndent(indent)
                        struct_check += '// NOTE: Can\'t validate "VUID-%s-%s-parameter" because it is a base structure\n' % (
                            xr_struct.name, member.name)
                # Emit one type-dispatch branch per child structure; each branch
                # casts and recurses into the child's own validator.
                for child in relation_group.child_struct_names:
                    child_struct = self.getStruct(child)
                    if child_struct.protect_value:
                        struct_check += '#if %s\n' % child_struct.protect_string
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (value->type == %s) {\n' % self.genXrStructureType(
                        child)
                    indent += 1
                    struct_check += self.writeIndent(indent)
                    struct_check += 'const %s* new_value = reinterpret_cast<const %s*>(value);\n' % (
                        child, child)
                    # A child introduced by a non-core extension may only be
                    # used when that extension is enabled on the instance.
                    if child_struct.ext_name and not self.isCoreExtensionName(child_struct.ext_name):
                        struct_check += self.writeIndent(indent)
                        struct_check += 'if (nullptr != instance_info && !ExtensionEnabled(instance_info->enabled_extensions, "%s")) {\n' % child_struct.ext_name
                        indent += 1
                        struct_check += self.writeIndent(indent)
                        struct_check += 'std::string error_str = "%s being used with child struct type ";\n' % xr_struct.name
                        struct_check += self.writeIndent(indent)
                        struct_check += 'error_str += "\\"%s\\"";\n' % self.genXrStructureType(
                            child)
                        struct_check += self.writeIndent(indent)
                        struct_check += 'error_str += " which requires extension \\"%s\\" to be enabled, but it is not enabled";\n' % child_struct.ext_name
                        struct_check += self.writeIndent(indent)
                        struct_check += 'CoreValidLogMessage(instance_info, "VUID-%s-type-type",\n' % (
                            xr_struct.name)
                        struct_check += self.writeIndent(indent)
                        struct_check += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                        struct_check += self.writeIndent(indent)
                        struct_check += '                    objects_info, error_str);\n'
                        struct_check += self.writeIndent(indent)
                        struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                        indent -= 1
                        struct_check += self.writeIndent(indent)
                        struct_check += '}\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'return ValidateXrStruct(instance_info, command_name, objects_info, check_members, new_value);\n'
                    indent -= 1
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                    if child_struct.protect_value:
                        struct_check += '#endif // %s\n' % child_struct.protect_string
                # No child matched: the base struct carries an unknown type.
                struct_check += self.writeIndent(indent)
                struct_check += 'std::ostringstream oss_type;\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'oss_type << std::hex << value->type;\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'std::string error_str = "%s has an invalid XrStructureType 0x";\n' % xr_struct.name
                struct_check += self.writeIndent(indent)
                struct_check += 'error_str += oss_type.str();\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'CoreValidLogMessage(instance_info, "VUID-%s-type-type",\n' % (
                    xr_struct.name)
                struct_check += self.writeIndent(indent)
                struct_check += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                struct_check += self.writeIndent(indent)
                struct_check += '                    objects_info, error_str);\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                struct_check += '}\n\n'
                continue
            # NOTE(review): first_member_handle_tuple/first_member_handle are
            # never reassigned below, so the outputParamMemberContents call
            # always receives None for them (and first_member_handle is passed
            # twice) — compare with the analogous call in genValidateInputsFunc,
            # which passes params[0] and params[0].name; confirm intent.
            first_member_handle_tuple = None
            first_member_handle = None
            count = 0
            wrote_handle_check_proto = False
            has_enable_extension_count = False
            has_enable_extension_names = False
            for member in xr_struct.members:
                # If we're not supposed to check this, then skip it
                if member.no_auto_validity:
                    continue
                if member.name == 'type':
                    struct_check += self.writeIndent(indent)
                    struct_check += '// Make sure the structure type is correct\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (value->type != %s) {\n' % self.genXrStructureType(
                        xr_struct.name)
                    indent = indent + 1
                    struct_check += self.writeIndent(indent)
                    struct_check += 'std::ostringstream oss_type;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'oss_type << std::hex << value->type;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'std::string error_str = "%s has an invalid XrStructureType 0x";\n' % xr_struct.name
                    struct_check += self.writeIndent(indent)
                    struct_check += 'error_str += oss_type.str();\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'error_str += ", expected %s";\n' % self.genXrStructureType(
                        xr_struct.name)
                    struct_check += self.writeIndent(indent)
                    struct_check += 'CoreValidLogMessage(instance_info, "VUID-%s-%s-type",\n' % (
                        xr_struct.name, member.name)
                    struct_check += self.writeIndent(indent)
                    struct_check += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, command_name,\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '                    objects_info, error_str);\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'xr_result = XR_ERROR_VALIDATION_FAILURE;\n'
                    indent = indent - 1
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                    continue
                elif member.name == 'next':
                    struct_check += self.writeValidateStructNextCheck(
                        xr_struct.name, 'value', member, indent)
                elif member.name == 'enabledExtensionCount':
                    has_enable_extension_count = True
                elif member.name == 'enabledExtensionNames':
                    has_enable_extension_names = True
                elif not setup_bail:
                    struct_check += self.writeIndent(indent)
                    struct_check += '// If we are not to check the rest of the members, just return here.\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (!check_members || XR_SUCCESS != xr_result) {\n'
                    struct_check += self.writeIndent(indent + 1)
                    struct_check += 'return xr_result;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                    setup_bail = True
                struct_check += self.outputParamMemberContents(False, xr_struct.name, member, 'value->',
                                                               "instance_info", "command_name",
                                                               count == 0,
                                                               first_member_handle,
                                                               first_member_handle,
                                                               first_member_handle_tuple,
                                                               wrote_handle_check_proto,
                                                               indent)
                if member.is_handle:
                    wrote_handle_check_proto = True
                count = count + 1
            # We only have extensions to check if both the count and enable fields are there
            if has_enable_extension_count and has_enable_extension_names:
                # This is create instance, so check all instance extensions
                struct_check += self.writeIndent(indent)
                struct_check += 'std::vector<std::string> enabled_extension_vec;\n'
                struct_check += self.writeIndent(indent)
                struct_check += 'for (uint32_t extension = 0; extension < value->enabledExtensionCount; ++extension) {\n'
                struct_check += self.writeIndent(indent + 1)
                struct_check += 'enabled_extension_vec.push_back(value->enabledExtensionNames[extension]);\n'
                struct_check += self.writeIndent(indent)
                struct_check += '}\n'
                if xr_struct.name == 'XrInstanceCreateInfo':
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (!ValidateInstanceExtensionDependencies(nullptr, command_name, "%s",\n' % xr_struct.name
                    struct_check += self.writeIndent(indent)
                    struct_check += '                                           objects_info, enabled_extension_vec)) {\n'
                    struct_check += self.writeIndent(indent + 1)
                    struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
                else:
                    struct_check += self.writeIndent(indent)
                    struct_check += 'if (!ValidateSystemExtensionDependencies(instance_info, command_name, "%s",\n' % xr_struct.name
                    struct_check += self.writeIndent(indent)
                    struct_check += '                                         objects_info, enabled_extension_vec)) {\n'
                    struct_check += self.writeIndent(indent + 1)
                    struct_check += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    struct_check += self.writeIndent(indent)
                    struct_check += '}\n'
            struct_check += self.writeIndent(indent)
            struct_check += '// Everything checked out properly\n'
            struct_check += self.writeIndent(indent)
            struct_check += 'return xr_result;\n'
            struct_check += '}\n\n'
            if xr_struct.protect_value:
                struct_check += '#endif // %s\n' % xr_struct.protect_string
            struct_check += '\n'
        return struct_check
# Write an inline validation check for handle parents
# self the ValidationSourceOutputGenerator object
# instance_info_string string used to identify the variable associated with the instance information struct.
# first_handle_tuple the handle tuple associated with the type of the first handle
# first_handle_mem_param the member/param of the first handle
# first_handle_desc_name the descriptive name of the first handle
# cur_handle_tuple the handle tuple associated with the type of the current handle
# cur_handle_mem_param the member/param of the current handle
# cur_handle_desc_name the descriptive name of the current handle
# vuid_name the VUID identifier to associate this check and member/param name with
# cmd_name_param the parameter containing the associated command name
# indent the number of tab-stops to indent the current inline strings
    def writeInlineParentCheckCall(self, instance_info_string, first_handle_tuple, first_handle_mem_param, first_handle_desc_name,
                                   cur_handle_tuple, cur_handle_mem_param, cur_handle_desc_name, vuid_name,
                                   cmd_name_param, indent):
        """Emit an inline C++ check that two handles share the required ancestry.

        Generates a VerifyXrParent() call (skipped for an optional handle that
        is XR_NULL_HANDLE) plus the error logging/return on failure.  The VUID
        suffix is '<name>-parent' when one handle type is the direct parent of
        the other, otherwise 'commonparent'.  Returns the generated C++ source
        as a string.  (Parameter meanings are described in the comment block
        preceding this method.)
        """
        parent_check_string = ''
        parent_id = 'commonparent'
        if (first_handle_tuple.name == cur_handle_tuple.parent or
                cur_handle_tuple.name == first_handle_tuple.parent):
            parent_id = '%s-parent' % cur_handle_mem_param.name
        parent_check_string += self.writeIndent(indent)
        # Dereference once when the current handle is passed by pointer.
        pointer_deref = ''
        if cur_handle_mem_param.pointer_count > 0:
            pointer_deref = '*'
        # compare_flag is the VerifyXrParent 'check_this' argument: 'false'
        # when both handles are the same type (same-type handles compare
        # ancestry differently).
        compare_flag = 'true'
        if first_handle_mem_param.type == cur_handle_mem_param.type:
            compare_flag = 'false'
        if cur_handle_mem_param.is_optional:
            parent_check_string += '// If the second handle is optional, only check for a common parent if\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += '// it is not XR_NULL_HANDLE\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'if (!CHECK_FOR_NULL_HANDLE(%s) && !VerifyXrParent(%s, CONVERT_HANDLE_TO_GENERIC(%s),\n' % (
                cur_handle_desc_name,
                self.genXrObjectType(first_handle_mem_param.type),
                first_handle_desc_name)
            parent_check_string += '                    %s, CONVERT_HANDLE_TO_GENERIC(%s%s), %s)) {\n' % (
                self.genXrObjectType(cur_handle_mem_param.type),
                pointer_deref,
                cur_handle_desc_name,
                compare_flag)
        else:
            parent_check_string += '// Verify that the handles share a common ancestry\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'if (!VerifyXrParent(%s, CONVERT_HANDLE_TO_GENERIC(%s),\n' % (
                self.genXrObjectType(first_handle_mem_param.type), first_handle_desc_name)
            parent_check_string += '                    %s, CONVERT_HANDLE_TO_GENERIC(%s%s), %s)) {\n' % (
                self.genXrObjectType(cur_handle_mem_param.type), pointer_deref, cur_handle_desc_name, compare_flag)
        indent = indent + 1
        # Failure body: build a descriptive message with both handle values.
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'std::ostringstream oss_handle_1;\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'oss_handle_1 << std::hex << reinterpret_cast<const void*>(%s);\n' % first_handle_desc_name
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'std::ostringstream oss_handle_2;\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'oss_handle_2 << std::hex << reinterpret_cast<const void*>(%s%s);\n' % (
            pointer_deref, cur_handle_desc_name)
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'std::string error_str = "%s ";\n' % first_handle_mem_param.type
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'error_str += oss_handle_1.str();\n'
        # Word the message according to the actual parent/child relationship.
        if first_handle_tuple.name == cur_handle_tuple.parent:
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " must be a parent to %s ";\n' % cur_handle_mem_param.type
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += oss_handle_2.str();\n'
        elif cur_handle_tuple.name == first_handle_tuple.parent:
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " must be a child of %s ";\n' % cur_handle_mem_param.type
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += oss_handle_2.str();\n'
        else:
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " and %s ";\n' % cur_handle_mem_param.type
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += oss_handle_2.str();\n'
            parent_check_string += self.writeIndent(indent)
            parent_check_string += 'error_str += " must share a parent";\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'CoreValidLogMessage(%s, "VUID-%s-%s",\n' % (instance_info_string,
                                                                            vuid_name,
                                                                            parent_id)
        parent_check_string += self.writeIndent(indent)
        parent_check_string += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
        parent_check_string += self.writeIndent(indent)
        parent_check_string += '                    objects_info, error_str);\n'
        parent_check_string += self.writeIndent(indent)
        parent_check_string += 'return XR_ERROR_VALIDATION_FAILURE;\n'
        indent = indent - 1
        parent_check_string += self.writeIndent(indent)
        parent_check_string += '}\n'
        return parent_check_string
# Generate C++ code to validate the inputs of the current command.
# self the ValidationSourceOutputGenerator object
# cur_command the command generated in automatic_source_generator.py to validate
    def genValidateInputsFunc(self, cur_command):
        """Generate the C++ GenValidUsageInputsXr* function for one command.

        The generated function records object info for every handle parameter,
        looks up instance/handle tracking info, checks required extensions,
        validates each parameter, and enforces begin/check/end validation-state
        transitions declared in self.api_states.  Everything runs inside a
        try block that converts any exception into XR_ERROR_VALIDATION_FAILURE.
        Returns the generated source as a string.
        """
        pre_validate_func = ''
        pre_validate_func += 'XrResult %s(' % cur_command.name.replace("xr",
                                                                       "GenValidUsageInputsXr")
        pre_validate_func += '\n'
        pre_validate_func += ',\n'.join((param.cdecl.strip() for param in cur_command.params))
        pre_validate_func += ') {\n'
        wrote_handle_check_proto = False
        # NOTE(review): is_first_param_handle is assigned but never read below.
        is_first_param_handle = cur_command.params[0].is_handle
        first_param_handle_tuple = self.getHandle(cur_command.params[0].type)
        command_name_string = '"%s"' % cur_command.name
        # If the first parameter is a handle and we either have to validate that handle, or check
        # for extension information, then we will need the instance information.
        indent = 1
        pre_validate_func += self.writeIndent(indent)
        pre_validate_func += 'try {\n'
        indent = indent + 1
        pre_validate_func += self.writeIndent(indent)
        pre_validate_func += 'XrResult xr_result = XR_SUCCESS;\n'
        pre_validate_func += self.writeIndent(indent)
        pre_validate_func += 'std::vector<GenValidUsageXrObjectInfo> objects_info;\n'
        if first_param_handle_tuple != None:
            handle_param = cur_command.params[0]
            first_handle_name = self.getFirstHandleName(handle_param)
            obj_type = self.genXrObjectType(handle_param.type)
            pre_validate_func += self.writeIndent(indent)
            pre_validate_func += 'objects_info.emplace_back(%s, %s);\n\n'% (first_handle_name, obj_type)
            lower_handle_name = first_param_handle_tuple.name[2:].lower()
            # XrInstance has its own info map; other handles look up both
            # handle info and the owning instance info in one call.
            if first_param_handle_tuple.name == 'XrInstance':
                pre_validate_func += self.writeIndent(indent)
                pre_validate_func += 'GenValidUsageXrInstanceInfo *gen_instance_info = g_instance_info.get(%s);\n' % first_handle_name
            else:
                pre_validate_func += self.writeIndent(indent)
                pre_validate_func += 'auto info_with_instance = %s.getWithInstanceInfo(%s);\n' % (
                    self.makeInfoName(handle_type_name=handle_param.type), first_handle_name)
                pre_validate_func += self.writeIndent(indent)
                pre_validate_func += 'GenValidUsageXrHandleInfo *gen_%s_info = info_with_instance.first;\n' % lower_handle_name
                pre_validate_func += self.writeIndent(indent)
                pre_validate_func += 'GenValidUsageXrInstanceInfo *gen_instance_info = info_with_instance.second;\n'
        # If any of the associated handles has validation state tracking, get the
        # appropriate struct setup for validation later in the function
        valid_type_list = []
        if cur_command.checks_state:
            for cur_state in self.api_states:
                if cur_command.name in cur_state.check_commands:
                    command_param_of_type = ''
                    for param in cur_command.params:
                        if param.type == cur_state.type:
                            command_param_of_type = param.name
                            break
                    if (len(command_param_of_type) > 0) and cur_state.type not in valid_type_list:
                        valid_type_list.append(cur_state.type)
                        pre_validate_func += self.writeIndent(2)
                        pre_validate_func += 'auto %s_valid = g_%s_valid_states[%s];\n' % (
                            cur_state.type[2:].lower(), cur_state.type[2:].lower(), command_param_of_type)
        for additional_ext in cur_command.required_exts:
            pre_validate_func += self.writeIndent(indent)
            pre_validate_func += '// Check to make sure that the extension this command is in has been enabled\n'
            pre_validate_func += self.writeIndent(indent)
            pre_validate_func += 'if (!ExtensionEnabled(gen_instance_info->enabled_extensions, "%s")) {\n' % additional_ext
            pre_validate_func += self.writeIndent(indent + 1)
            pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
            pre_validate_func += self.writeIndent(indent)
            pre_validate_func += '}\n'
        instance_info_variable = 'gen_instance_info' if first_param_handle_tuple else 'nullptr'
        # Check for non-optional null pointers
        for count, param in enumerate(cur_command.params):
            is_first = (count == 0)
            # TODO use_pointer_deref never gets used?
            use_pointer_deref = False
            if len(param.array_count_var) != 0 or len(param.pointer_count_var) != 0:
                if ((len(param.array_count_var) != 0 and param.pointer_count > 0) or
                        (len(param.pointer_count_var) != 0 and param.pointer_count > 1)):
                    use_pointer_deref = True
            elif param.pointer_count > 0:
                use_pointer_deref = True
            # Record object info for every non-first, by-value handle param.
            if not is_first and param.is_handle and not param.pointer_count > 0:
                pre_validate_func += self.writeIndent(indent)
                pre_validate_func += 'objects_info.emplace_back(%s, %s);\n' % (param.name, self.genXrObjectType(
                    param.type))
            if not param.no_auto_validity:
                pre_validate_func += self.outputParamMemberContents(True, cur_command.name, param, '',
                                                                    instance_info_variable,
                                                                    command_name_string,
                                                                    is_first,
                                                                    cur_command.params[0],
                                                                    cur_command.params[0].name,
                                                                    first_param_handle_tuple,
                                                                    wrote_handle_check_proto,
                                                                    indent)
                wrote_handle_check_proto = True
            # NOTE(review): dead statement — enumerate() rebinds count on each
            # iteration, so this increment has no effect.
            count = count + 1
        # NOTE(review): base_handle_name is not referenced in the remainder of
        # this method.
        base_handle_name = cur_command.params[0].type[2:].lower()
        # If this command needs to be checked to ensure that it is executing between
        # a "begin" and an "end" command, do so.
        if cur_command.checks_state:
            for cur_state in self.api_states:
                if cur_command.name in cur_state.check_commands:
                    for param in cur_command.params:
                        if param.type == cur_state.type:
                            break
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// Validate that this command is called at the proper time between the\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// appropriate commands\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += 'if (!%s_valid->%s) {\n' % (
                        cur_state.type[2:].lower(), cur_state.variable)
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'std::string error_msg = "%s is required to be called between successful calls to ";\n' % cur_command.name
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'error_msg += "'
                    # List the begin commands, '/'-separated.
                    cur_count = 0
                    for begin_command in cur_state.begin_commands:
                        if cur_count > 0:
                            pre_validate_func += '/'
                        cur_count += 1
                        pre_validate_func += '%s' % begin_command
                    pre_validate_func += ' and '
                    # Then the end commands, also '/'-separated.
                    cur_count = 0
                    for end_command in cur_state.end_commands:
                        if cur_count > 0:
                            pre_validate_func += '/'
                        cur_count += 1
                        pre_validate_func += '%s' % end_command
                    pre_validate_func += ' commands";\n'
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-checkstate",\n' % (
                        instance_info_variable, cur_command.name, cur_state.state)
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += '                    error_msg);\n'
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '}\n'
        # If this command needs to indicate that a validation state has begun, do so.
        if cur_command.begins_state:
            for cur_state in self.api_states:
                if cur_command.name in cur_state.check_commands:
                    for param in cur_command.params:
                        if param.type == cur_state.type:
                            break
                    # First, make sure we're not calling two (or more) "begins" in a row
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// Validate that this command is called first or only after the corresponding\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// "completion" commands\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += 'if (%s_valid->%s) {\n' % (
                        cur_state.type[2:].lower(), cur_state.variable)
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'std::string error_msg = "%s is called again without first successfully calling ";\n' % cur_command.name
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'error_msg += "'
                    cur_count = 0
                    for end_command in cur_state.end_commands:
                        if cur_count > 0:
                            pre_validate_func += '/'
                        cur_count += 1
                        pre_validate_func += '%s' % end_command
                    pre_validate_func += '";\n'
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-beginstate",\n' % (
                        instance_info_variable, cur_command.name, cur_state.state)
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += '                    error_msg);\n'
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '}\n'
                    # Begin the appropriate state
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// Begin the %s state\n' % cur_state.state
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '%s_valid->%s = true;\n' % (
                        cur_state.type[2:].lower(), cur_state.variable)
        # If this command needs to indicate an end of a validation state, do so.
        if cur_command.ends_state:
            for cur_state in self.api_states:
                if cur_command.name in cur_state.check_commands:
                    for param in cur_command.params:
                        if param.type == cur_state.type:
                            break
                    # First, make sure we're not calling two (or more) "ends" in a row (or before a "begin")
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// Validate that this command is called after the corresponding\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// "begin" commands\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += 'if (!%s_valid->%s) {\n' % (
                        cur_state.type[2:].lower(), cur_state.variable)
                    pre_validate_func += self.writeIndent(3)
                    # NOTE(review): this end-state message says "is called
                    # again", matching the begin-state wording — looks like a
                    # copy/paste; "is called without first..." may have been
                    # intended.  Left unchanged (runtime string).
                    pre_validate_func += 'std::string error_msg = "%s is called again without first successfully calling ";\n' % cur_command.name
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'error_msg += "'
                    cur_count = 0
                    for begin_command in cur_state.begin_commands:
                        if cur_count > 0:
                            pre_validate_func += '/'
                        cur_count += 1
                        pre_validate_func += '%s' % begin_command
                    pre_validate_func += '";\n'
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-endstate",\n' % (
                        instance_info_variable, cur_command.name, cur_state.state)
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += '                    VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += '                    error_msg);\n'
                    pre_validate_func += self.writeIndent(3)
                    pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '}\n'
                    # End the appropriate state
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '// End the %s state\n' % cur_state.state
                    pre_validate_func += self.writeIndent(2)
                    pre_validate_func += '%s_valid->%s = false;\n' % (
                        cur_state.type[2:].lower(), cur_state.variable)
        pre_validate_func += self.writeIndent(indent)
        pre_validate_func += 'return XR_SUCCESS;\n'
        indent = indent - 1
        pre_validate_func += self.writeIndent(indent)
        # Any exception during validation is reported as a validation failure.
        pre_validate_func += '} catch (...) {\n'
        pre_validate_func += self.writeIndent(indent + 1)
        pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
        pre_validate_func += self.writeIndent(indent)
        pre_validate_func += '}\n'
        pre_validate_func += '}\n\n'
        return pre_validate_func
# Generate C++ code to call down to the next layer/loader terminator/runtime
# self the ValidationSourceOutputGenerator object
# cur_command the command generated in automatic_source_generator.py to validate
# has_return Boolean indicating that the command must return a value (usually XrResult)
# is_create Boolean indicating that the command is a create command
# is_destroy Boolean indicating that the command is a destroy command
# is_sempath_query Boolean indicating that this is a semantic path query (treat similar to a create)
    def genNextValidateFunc(self, cur_command, has_return, is_create, is_destroy, is_sempath_query):
        """Generate the C++ GenValidUsageNextXr* pass-through for one command.

        The generated function looks up the dispatch table from the first
        handle parameter, calls the next layer, and on create/destroy commands
        inserts/erases the handle-info (and any validation-state) map entries.
        Returns '' for xrCreateInstance (handled specially by the layer);
        otherwise returns the generated source as a string.  (Parameter
        meanings are described in the comment block preceding this method.)
        """
        next_validate_func = ''
        # Note: We don't make a "next" call for xrCreateInstance in a layer because we
        # actually have to call xrCreateApiLayerInstance. Also, we have to setup the first
        # entry into the dispatch table so it's a special case all around.
        if 'xrCreateInstance' in cur_command.name:
            return ''
        prototype = cur_command.cdecl
        prototype = prototype.replace(" xr", " GenValidUsageNextXr")
        prototype = prototype.replace("API_ATTR ", "")
        prototype = prototype.replace("XRAPI_CALL ", "")
        prototype = prototype.replace(";", " {")
        next_validate_func += '%s\n' % (prototype)
        if has_return:
            # Declare the result variable (default-initialized to XR_SUCCESS
            # only when the return type is XrResult).
            return_prefix = '    '
            return_prefix += cur_command.return_type.text
            return_prefix += ' result'
            if cur_command.return_type.text == 'XrResult':
                return_prefix += ' = XR_SUCCESS;\n'
            else:
                return_prefix += ';\n'
            next_validate_func += return_prefix
        next_validate_func += '    try {\n'
        # Next, we have to call down to the next implementation of this command in the call chain.
        # Before we can do that, we have to figure out what the dispatch table is
        base_handle_name = cur_command.params[0].type[2:].lower()
        if cur_command.params[0].is_handle:
            handle_tuple = self.getHandle(cur_command.params[0].type)
            first_handle_name = self.getFirstHandleName(cur_command.params[0])
            if handle_tuple.name == 'XrInstance':
                next_validate_func += '        GenValidUsageXrInstanceInfo *gen_instance_info = g_instance_info.get(%s);\n' % first_handle_name
            else:
                next_validate_func += '        GenValidUsageXrHandleInfo *gen_%s_info = ' % base_handle_name
                next_validate_func += 'g_%s_info.get(%s);\n' % (
                    base_handle_name, first_handle_name)
                next_validate_func += '        GenValidUsageXrInstanceInfo *gen_instance_info = gen_%s_info->instance_info;\n' % base_handle_name
        else:
            # A command whose first parameter is not a handle has no dispatch
            # table to find — emit a deliberate compile error in the output.
            next_validate_func += '#error("Bug")\n'
        # Call down, looking for the returned result if required.
        next_validate_func += '        '
        if has_return:
            next_validate_func += 'result = '
        next_validate_func += 'gen_instance_info->dispatch_table->%s(' % cur_command.name[2:]
        count = 0
        for param in cur_command.params:
            if count > 0:
                next_validate_func += ', '
            next_validate_func += param.name
            count = count + 1
        next_validate_func += ');\n'
        # If this is a create command, we have to create an entry in the appropriate
        # unordered_map pointing to the correct dispatch table for the newly created
        # object. Likewise, if it's a delete command, we have to remove the entry
        # for the dispatch table from the unordered_map
        last_name = ''
        last_lower_type = ''
        # NOTE(review): last_handle_tuple is only bound when the last param is
        # a handle, yet the is_create/is_destroy branches below read it
        # unconditionally — a create/destroy command with a non-handle last
        # param would raise NameError here; presumably that never occurs.
        if cur_command.params[-1].is_handle:
            last_handle_tuple = self.getHandle(cur_command.params[-1].type)
            last_lower_type = last_handle_tuple.name[2:].lower()
            last_name = cur_command.params[-1].name
        if is_create:
            assert(last_handle_tuple.name != 'XrInstance')
            next_validate_func += '        if (XR_SUCCESS == result && nullptr != %s) {\n' % last_name
            next_validate_func += '            std::unique_ptr<GenValidUsageXrHandleInfo> handle_info(new GenValidUsageXrHandleInfo());\n'
            next_validate_func += '            handle_info->instance_info = gen_instance_info;\n'
            next_validate_func += '            handle_info->direct_parent_type = %s;\n' % self.genXrObjectType(
                cur_command.params[0].type)
            next_validate_func += '            handle_info->direct_parent_handle = CONVERT_HANDLE_TO_GENERIC(%s);\n' % cur_command.params[
                0].name
            next_validate_func += '            %s.insert(*%s, std::move(handle_info));\n' % (self.makeInfoName(last_handle_tuple), last_name)
            # If this object contains a state that needs tracking, allocate it
            valid_type_list = []
            for cur_state in self.api_states:
                if last_handle_tuple.name == cur_state.type and cur_state.type not in valid_type_list:
                    valid_type_list.append(cur_state.type)
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '// Check to see if this object that has been created has a validation\n'
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '// state structure that needs to be created as well.\n'
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '%sValidationStates *%s_valid_state = new %sValidationStates;\n' % (
                        cur_state.type, cur_state.type[2:].lower(), cur_state.type)
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '(*%s_valid_state) = {};\n' % cur_state.type[2:].lower()
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += 'g_%s_valid_states[(*%s)] = %s_valid_state;\n' % (
                        cur_state.type[2:].lower(), last_name, cur_state.type[2:].lower())
            next_validate_func += '        }\n'
        elif is_destroy:
            if cur_command.params[-1].type == 'XrSession':
                next_validate_func += '\n        // Clean up any labels associated with this session\n'
                next_validate_func += '        CoreValidationDeleteSessionLabels(session);\n\n'
            # Only remove the handle from our map if the runtime returned success
            next_validate_func += '        if (XR_SUCCEEDED(result)) {\n'
            # If this object contains a state that needs tracking, free it
            valid_type_list = []
            for cur_state in self.api_states:
                if last_handle_tuple.name == cur_state.type and cur_state.type not in valid_type_list:
                    valid_type_list.append(cur_state.type)
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '// Check to see if this object that is about to be destroyed has a\n'
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '// validation state structure that needs to be cleaned up.\n'
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '%sValidationStates *%s_valid_state = g_%s_valid_states[%s];\n' % (
                        cur_state.type, cur_state.type[2:].lower(), cur_state.type[2:].lower(), last_name)
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += 'if (nullptr != %s_valid_state) {\n' % cur_state.type[2:].lower(
                    )
                    next_validate_func += self.writeIndent(4)
                    next_validate_func += 'delete %s_valid_state;\n' % cur_state.type[2:].lower(
                    )
                    next_validate_func += self.writeIndent(4)
                    next_validate_func += 'g_%s_valid_states.erase(%s);\n' % (
                        cur_state.type[2:].lower(), last_name)
                    next_validate_func += self.writeIndent(3)
                    next_validate_func += '}\n'
            next_validate_func += '            g_%s_info.erase(%s);\n' % (last_lower_type, last_name)
            next_validate_func += '        }\n'
        if 'xrDestroyInstance' in cur_command.name:
            next_validate_func += '            GenValidUsageCleanUpMaps(gen_instance_info);\n'
        # Catch any exceptions that may have occurred. If any occurred between any of the
        # valid mutex lock/unlock statements, perform the unlock now. Notice that a create can
        # also allocate items, so we want to special case catching the failure of the allocation.
        if is_create or is_sempath_query:
            next_validate_func += '    } catch (std::bad_alloc&) {\n'
            next_validate_func += '        result = XR_ERROR_OUT_OF_MEMORY;\n'
        next_validate_func += '    } catch (...) {\n'
        if has_return:
            next_validate_func += '        result = XR_ERROR_VALIDATION_FAILURE;\n'
        next_validate_func += '    }\n'
        if has_return:
            next_validate_func += '    return result;\n'
        next_validate_func += '}\n\n'
        return next_validate_func
# Generate a top-level automatic C++ validation function which will be used until
# a manual function is defined.
#   self         the ValidationSourceOutputGenerator object
#   cur_command  the command generated in automatic_source_generator.py to validate
#   has_return   Boolean indicating that the command must return a value (usually XrResult)
def genAutoValidateFunc(self, cur_command, has_return):
    # Build the C prototype of the generated entry point by rewriting the
    # command's declaration in-place.
    prototype = cur_command.cdecl
    prototype = prototype.replace(" xr", " GenValidUsageXr")
    prototype = prototype.replace("API_ATTR ", "")
    prototype = prototype.replace("XRAPI_CALL ", "")
    prototype = prototype.replace(";", " {")
    # The same comma-separated argument list is used for both generated calls.
    arg_list = ', '.join(param.name for param in cur_command.params)
    pieces = ['%s\n' % prototype]
    pieces.append(self.writeIndent(1))
    if has_return:
        pieces.append('%s test_result = ' % cur_command.return_type.text)
    # First call the pre-validation ("Inputs") entry point.
    pieces.append('%s(%s);\n' % (
        cur_command.name.replace("xr", "GenValidUsageInputsXr"), arg_list))
    if has_return and cur_command.return_type.text == 'XrResult':
        # Bail out before the calldown if validation already failed.
        pieces.append(self.writeIndent(1))
        pieces.append('if (XR_SUCCESS != test_result) {\n')
        pieces.append(self.writeIndent(2))
        pieces.append('return test_result;\n')
        pieces.append(self.writeIndent(1))
        pieces.append('}\n')
    # Make the calldown to the next layer.
    pieces.append(self.writeIndent(1))
    if has_return:
        pieces.append('return ')
    pieces.append('%s(%s);\n' % (
        cur_command.name.replace("xr", "GenValidUsageNextXr"), arg_list))
    pieces.append('}\n\n')
    return ''.join(pieces)
# Implementation for generated validation commands
# self the ValidationSourceOutputGenerator object
def outputValidationSourceFuncs(self):
    """Emit the C++ source for the validation layer.

    Returns one big string containing (in order): the global maps, the
    map-cleanup helper, the object-type-to-string helper, the per-command
    validation functions, and finally the layer's xrGetInstanceProcAddr.
    """
    commands = []
    validation_source_funcs = ''
    cur_extension_name = ''
    # First, output the mapping and mutex items
    validation_source_funcs += '// Unordered Map associating pointer to a vector of session label information to a session\'s handle\n'
    validation_source_funcs += 'std::unordered_map<XrSession, std::vector<GenValidUsageXrInternalSessionLabel*>*> g_xr_session_labels;\n\n'
    validation_source_funcs += self.outputInfoMapDeclarations(extern=False)
    validation_source_funcs += '\n'
    validation_source_funcs += self.outputValidationInternalProtos()
    validation_source_funcs += '// Function used to clean up any residual map values that point to an instance prior to that\n'
    validation_source_funcs += '// instance being deleted.\n'
    validation_source_funcs += 'void GenValidUsageCleanUpMaps(GenValidUsageXrInstanceInfo *instance_info) {\n'
    # One erase/remove statement per handle type, wrapped in the handle's
    # preprocessor guard when it has one.
    for handle in self.api_handles:
        base_handle_name = handle.name[2:].lower()
        if handle.protect_value:
            validation_source_funcs += '#if %s\n' % handle.protect_string
        if handle.name == 'XrInstance':
            validation_source_funcs += '    EraseAllInstanceTableMapElements(instance_info);\n'
        else:
            validation_source_funcs += '    g_%s_info.removeHandlesForInstance(instance_info);\n' % base_handle_name
        if handle.protect_value:
            validation_source_funcs += '#endif // %s\n' % handle.protect_string
    validation_source_funcs += '}\n'
    validation_source_funcs += '\n'
    validation_source_funcs += '// Function to convert XrObjectType to string\n'
    validation_source_funcs += 'std::string GenValidUsageXrObjectTypeToString(const XrObjectType& type) {\n'
    validation_source_funcs += '    std::string object_string;\n'
    # `count` tracks whether we have emitted the first branch yet, so we know
    # when to write "if" versus "} else if".
    count = 0
    for object_type in self.api_object_types:
        object_string = object_type.name.replace("XR_OBJECT_TYPE_", "")
        object_string = object_string.replace("_", "")
        if object_string == "UNKNOWN":
            if count == 0:
                validation_source_funcs += '    if '
            else:
                validation_source_funcs += '    } else if '
            validation_source_funcs += '(type == XR_OBJECT_TYPE_UNKNOWN) {\n'
            validation_source_funcs += '        object_string = "Unknown XR Object";\n'
        else:
            # Only emit a branch for object types that match a known handle.
            for handle in self.api_handles:
                handle_name = handle.name[2:].upper()
                if handle_name != object_string:
                    continue
                if object_type.protect_value:
                    validation_source_funcs += '#if %s\n' % object_type.protect_string
                if count == 0:
                    validation_source_funcs += '    if '
                else:
                    validation_source_funcs += '    } else if '
                validation_source_funcs += '(type == %s) {\n' % object_type.name
                validation_source_funcs += '        object_string = "%s";\n' % handle.name
                if object_type.protect_value:
                    validation_source_funcs += '#endif // %s\n' % object_type.protect_string
        count = count + 1
    validation_source_funcs += '    }\n'
    validation_source_funcs += '    return object_string;\n'
    validation_source_funcs += '}\n\n'
    validation_source_funcs += self.outputValidationStateCheckStructs()
    validation_source_funcs += self.outputValidationSourceNextChainProtos()
    validation_source_funcs += self.outputValidationSourceFlagBitValues()
    validation_source_funcs += self.outputValidationSourceEnumValues()
    validation_source_funcs += self.writeVerifyExtensions()
    validation_source_funcs += self.writeValidateHandleChecks()
    validation_source_funcs += self.writeValidateHandleParent()
    validation_source_funcs += self.writeValidateStructFuncs()
    validation_source_funcs += self.outputValidationSourceNextChainFunc()
    # Pass 1: emit the validation functions themselves, core commands first,
    # then extension commands.
    for x in range(0, 2):
        if x == 0:
            commands = self.core_commands
        else:
            commands = self.ext_commands
        for cur_cmd in commands:
            if cur_cmd.ext_name != cur_extension_name:
                if 'XR_VERSION_' in cur_cmd.ext_name:
                    validation_source_funcs += '\n// ---- Core %s commands\n' % cur_cmd.ext_name[11:].replace(
                        "_", ".")
                else:
                    validation_source_funcs += '\n// ---- %s extension commands\n' % cur_cmd.ext_name
                cur_extension_name = cur_cmd.ext_name
            if cur_cmd.name in VALID_USAGE_DONT_GEN:
                continue
            # We fill in the GetInstanceProcAddr manually at the end
            if cur_cmd.name == 'xrGetInstanceProcAddr':
                continue
            if cur_cmd.protect_value:
                validation_source_funcs += '#if %s\n' % cur_cmd.protect_string
            validation_source_funcs += '\n'
            # Classify the command: create/destroy commands manage the handle
            # maps and always produce an XrResult.
            is_create = False
            is_destroy = False
            has_return = False
            is_sempath_query = False
            if ('xrCreate' in cur_cmd.name or 'xrConnect' in cur_cmd.name) and cur_cmd.params[-1].is_handle:
                is_create = True
                has_return = True
            elif ('xrDestroy' in cur_cmd.name or 'xrDisconnect' in cur_cmd.name) and cur_cmd.params[-1].is_handle:
                is_destroy = True
                has_return = True
            elif (cur_cmd.return_type != None):
                has_return = True
            validation_source_funcs += self.genValidateInputsFunc(cur_cmd)
            validation_source_funcs += self.genNextValidateFunc(
                cur_cmd, has_return, is_create, is_destroy, is_sempath_query)
            if not cur_cmd.name in VALID_USAGE_MANUALLY_DEFINED:
                validation_source_funcs += self.genAutoValidateFunc(
                    cur_cmd, has_return)
            if cur_cmd.protect_value:
                validation_source_funcs += '#endif // %s\n' % cur_cmd.protect_string
            validation_source_funcs += '\n'
    validation_source_funcs += '\n// API Layer\'s xrGetInstanceProcAddr\n'
    validation_source_funcs += 'XrResult GenValidUsageXrGetInstanceProcAddr(\n'
    validation_source_funcs += '    XrInstance instance,\n'
    validation_source_funcs += '    const char* name,\n'
    validation_source_funcs += '    PFN_xrVoidFunction* function) {\n'
    validation_source_funcs += '    try {\n'
    validation_source_funcs += '        std::string func_name = name;\n'
    validation_source_funcs += '        std::vector<GenValidUsageXrObjectInfo> objects;\n'
    validation_source_funcs += '        if (g_instance_info.verifyHandle(&instance) == VALIDATE_XR_HANDLE_INVALID) {\n'
    validation_source_funcs += '            // Make sure the instance is valid if it is not XR_NULL_HANDLE\n'
    validation_source_funcs += '            std::vector<GenValidUsageXrObjectInfo> objects;\n'
    validation_source_funcs += '            objects.resize(1);\n'
    validation_source_funcs += '            objects[0].handle = CONVERT_HANDLE_TO_GENERIC(instance);\n'
    validation_source_funcs += '            objects[0].type = XR_OBJECT_TYPE_INSTANCE;\n'
    validation_source_funcs += '            CoreValidLogMessage(nullptr, "VUID-xrGetInstanceProcAddr-instance-parameter",\n'
    validation_source_funcs += '                                VALID_USAGE_DEBUG_SEVERITY_ERROR, "xrGetInstanceProcAddr", objects,\n'
    validation_source_funcs += '                                "Invalid instance handle provided.");\n'
    validation_source_funcs += '        }\n'
    validation_source_funcs += '        // NOTE: Can\'t validate "VUID-xrGetInstanceProcAddr-name-parameter" null-termination\n'
    validation_source_funcs += '        // If we setup the function, just return\n'
    validation_source_funcs += '        if (function == nullptr) {\n'
    validation_source_funcs += '            CoreValidLogMessage(nullptr, "VUID-xrGetInstanceProcAddr-function-parameter",\n'
    validation_source_funcs += '                                VALID_USAGE_DEBUG_SEVERITY_ERROR, "xrGetInstanceProcAddr", objects,\n'
    validation_source_funcs += '                                "function is NULL");\n'
    validation_source_funcs += '            return XR_ERROR_VALIDATION_FAILURE;\n'
    validation_source_funcs += '        }\n'
    # Pass 2: emit the name-to-function-pointer dispatch chain, again core
    # commands first, then extensions.
    count = 0
    for x in range(0, 2):
        if x == 0:
            commands = self.core_commands
        else:
            commands = self.ext_commands
        for cur_cmd in commands:
            if cur_cmd.ext_name != cur_extension_name:
                if 'XR_VERSION_' in cur_cmd.ext_name:
                    validation_source_funcs += '\n        // ---- Core %s commands\n' % cur_cmd.ext_name[11:].replace(
                        "_", ".")
                else:
                    validation_source_funcs += '\n        // ---- %s extension commands\n' % cur_cmd.ext_name
                cur_extension_name = cur_cmd.ext_name
            if cur_cmd.name in VALID_USAGE_DONT_GEN:
                continue
            has_return = False
            if (cur_cmd.return_type != None):
                has_return = True
            if cur_cmd.name in VALID_USAGE_MANUALLY_DEFINED:
                # Remove 'xr' from proto name and use manual name
                layer_command_name = cur_cmd.name.replace(
                    "xr", "CoreValidationXr")
            else:
                # Remove 'xr' from proto name and use generated name
                layer_command_name = cur_cmd.name.replace(
                    "xr", "GenValidUsageXr")
            if cur_cmd.protect_value:
                validation_source_funcs += '#if %s\n' % cur_cmd.protect_string
            if count == 0:
                validation_source_funcs += '        if (func_name == "%s") {\n' % cur_cmd.name
            else:
                validation_source_funcs += '        } else if (func_name == "%s") {\n' % cur_cmd.name
            count = count + 1
            validation_source_funcs += '            *function = reinterpret_cast<PFN_xrVoidFunction>(%s);\n' % layer_command_name
            if cur_cmd.protect_value:
                validation_source_funcs += '#endif // %s\n' % cur_cmd.protect_string
    validation_source_funcs += '        }\n'
    validation_source_funcs += '        // If we setup the function, just return\n'
    validation_source_funcs += '        if (*function != nullptr) {\n'
    validation_source_funcs += '            return XR_SUCCESS;\n'
    validation_source_funcs += '        }\n'
    validation_source_funcs += '        // We have not found it, so pass it down to the next layer/runtime\n'
    validation_source_funcs += '        GenValidUsageXrInstanceInfo* instance_valid_usage_info = g_instance_info.get(instance);\n'
    validation_source_funcs += '        if (nullptr == instance_valid_usage_info) {\n'
    validation_source_funcs += '            return XR_ERROR_HANDLE_INVALID;\n'
    validation_source_funcs += '        }\n'
    validation_source_funcs += '        return instance_valid_usage_info->dispatch_table->GetInstanceProcAddr(instance, name, function);\n'
    validation_source_funcs += '    } catch (...) {\n'
    validation_source_funcs += '        return XR_ERROR_VALIDATION_FAILURE;\n'
    validation_source_funcs += '    }\n'
    validation_source_funcs += '}\n'
    return validation_source_funcs
|
from django.shortcuts import get_object_or_404
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import truncatechars
from django import forms
from django import http
from django.views.generic import TemplateView
from django_cradmin.viewhelpers import objecttable
from django_cradmin.viewhelpers import create
from django_cradmin.viewhelpers import update
from django_cradmin.viewhelpers import delete
from django_cradmin.viewhelpers import multiselect
from django_cradmin import crispylayouts
from django_cradmin import crapp
from django.core.urlresolvers import reverse
from crispy_forms import layout
from django_cradmin.acemarkdown.widgets import AceMarkdownWidget
from trix.trix_core import models as trix_models
from trix.trix_core import multiassignment_serialize
from trix.trix_admin import formfields
class TitleColumn(objecttable.MultiActionColumn):
    """Column showing the assignment title with per-row action buttons."""
    modelfield = 'title'

    def get_buttons(self, assignment):
        """Return Edit/Preview/Delete buttons, plus a View button linking to
        the student page when the assignment carries the active period tag."""
        buttons = [
            objecttable.Button(
                label=_('Edit'),
                url=self.reverse_appurl('edit', args=[assignment.id])),
            objecttable.PagePreviewButton(
                label=_('Preview'),
                url=self.reverse_appurl('preview', args=[assignment.id])),
            objecttable.Button(
                label=_('Delete'),
                url=self.reverse_appurl('delete', args=[assignment.id]),
                buttonclass="danger"),
        ]
        course = self.view.request.cradmin_role
        tags = set([tag.tag for tag in assignment.tags.all()])
        if course.active_period.tag in tags:
            # Strip the course-level tags so the student-page link filters on
            # the assignment's own tags only.
            # Use discard() rather than remove(): the course tag is not
            # guaranteed to be present, and remove() would raise KeyError.
            tags.discard(course.course_tag.tag)
            tags.discard(course.active_period.tag)
            view_url = u'{url}?tags={tags}#assignment-{assignmentid}'.format(
                url=reverse('trix_student_course', kwargs={'course_id': course.id}),
                tags=u','.join(tags),
                assignmentid=assignment.id)
            buttons.insert(2, objecttable.Button(
                label=_('View'),
                url=view_url))
        return buttons
class TextIntroColumn(objecttable.PlainTextColumn):
    """Column showing a short intro snippet of the assignment text."""
    modelfield = 'text'

    def render_value(self, assignment):
        # Keep the listing compact: at most 50 characters of the text.
        return truncatechars(assignment.text, 50)
class LastUpdateDatetimeColumn(objecttable.PlainTextColumn):
    """Column showing the last-update timestamp in the localized short format."""
    modelfield = 'lastupdate_datetime'

    def render_value(self, obj):
        formatted = defaultfilters.date(obj.lastupdate_datetime, 'SHORT_DATETIME_FORMAT')
        return formatted
class TagsColumn(objecttable.PlainTextColumn):
    """Column listing all tags on the assignment, comma separated."""
    modelfield = 'tags'

    def is_sortable(self):
        # A many-to-many relation has no meaningful sort order.
        return False

    def render_value(self, assignment):
        tag_names = [tag.tag for tag in assignment.tags.all()]
        return ', '.join(tag_names)
class AssignmentQuerysetForRoleMixin(object):
    """Restricts the queryset to assignments tagged with the current course."""

    def get_queryset_for_role(self, course):
        queryset = self.model.objects.filter(tags=course.course_tag)
        return queryset.prefetch_related('tags')
class AssignmentListView(AssignmentQuerysetForRoleMixin, objecttable.ObjectTableView):
    """Table listing of all assignments within the current course."""
    model = trix_models.Assignment
    enable_previews = True
    columns = [
        TitleColumn,
        TagsColumn,
        TextIntroColumn,
        LastUpdateDatetimeColumn,
    ]
    searchfields = [
        'title',
        'tags__tag',
        'text',
        'solution',
    ]

    def get_buttons(self):
        # Global buttons shown above the table.
        app = self.request.cradmin_app
        course = self.request.cradmin_role
        student_url = reverse('trix_student_course', kwargs={'course_id': course.id})
        return [
            objecttable.Button(_('Create'), url=app.reverse_appurl('create')),
            objecttable.Button(_('Show on website'), url=student_url),
        ]

    def get_multiselect_actions(self):
        # Actions applied to the set of checked rows.
        app = self.request.cradmin_app
        return [
            objecttable.MultiSelectAction(
                label=_('Edit'),
                url=app.reverse_appurl('multiedit')),
        ]
class AssignmentCreateUpdateMixin(object):
    """Shared form configuration for the assignment create and update views."""
    model = trix_models.Assignment

    def get_preview_url(self):
        return self.request.cradmin_app.reverse_appurl('preview')

    def get_field_layout(self):
        return [
            layout.Div('title', css_class="cradmin-focusfield cradmin-focusfield-lg"),
            layout.Fieldset(_('Organize'), 'tags'),
            layout.Div('text', css_class="cradmin-focusfield"),
            layout.Div('solution', css_class="cradmin-focusfield"),
        ]

    def get_form(self, *args, **kwargs):
        """Swap in the tag input field and markdown editors for the text fields."""
        form = super(AssignmentCreateUpdateMixin, self).get_form(*args, **kwargs)
        form.fields['tags'] = formfields.ManyToManyTagInputField(required=False)
        for fieldname in ('text', 'solution'):
            form.fields[fieldname].widget = AceMarkdownWidget()
        return form

    def save_object(self, form, commit=True):
        """Save the assignment; when committing, replace its tags with the
        tags from the form."""
        assignment = super(AssignmentCreateUpdateMixin, self).save_object(form, commit=commit)
        if commit:
            assignment.tags.clear()
            for tag in form.cleaned_data['tags']:
                assignment.tags.add(tag)
        return assignment

    def form_saved(self, assignment):
        # Always keep the tag of the current course on the assignment.
        course = self.request.cradmin_role
        if not assignment.tags.filter(tag=course.course_tag).exists():
            assignment.tags.add(course.course_tag)
class AssignmentCreateView(AssignmentCreateUpdateMixin, create.CreateView):
    """Create a new assignment within the current course."""
class AssignmentUpdateView(AssignmentQuerysetForRoleMixin, AssignmentCreateUpdateMixin, update.UpdateView):
    """Edit an existing assignment within the current course."""
class AssignmentMultiEditForm(forms.Form):
    """Form holding the YAML serialization of the selected assignments."""
    data = forms.CharField(
        label=_('Assignment YAML'),
        required=True,
        widget=forms.Textarea)
class AssignmentMultiEditView(AssignmentQuerysetForRoleMixin, multiselect.MultiSelectFormView):
    """
    Edit multiple assignments at once via their YAML serialization.
    Supports both updating and creating assignments.
    """
    form_class = AssignmentMultiEditForm
    model = trix_models.Assignment
    template_name = 'trix_admin/assignments/multiedit.django.html'

    def form_valid(self, form):
        """Deserialize and sync the submitted YAML, re-rendering with errors
        on failure."""
        course = self.request.cradmin_role
        try:
            multiassignment_serialize.Deserializer(
                serialized_assignments=form.cleaned_data['data'],
                course_tag=course.course_tag.tag).sync()
        except multiassignment_serialize.DeserializerValidationErrors as e:
            context = self.get_context_data(
                form=form, deserializer_validationerrors=e.errors)
            return self.render_to_response(context)
        except multiassignment_serialize.DeserializerError as e:
            context = self.get_context_data(form=form, deserializererror=e)
            return self.render_to_response(context)
        return http.HttpResponseRedirect(self.request.cradmin_app.reverse_appindexurl())

    def get_initial(self):
        # Pre-fill the textarea with the serialization of the selection.
        return {'data': multiassignment_serialize.serialize(self.selected_objects)}

    def get_buttons(self):
        return [crispylayouts.PrimarySubmit('submit-save', _('Save'))]

    def get_field_layout(self):
        return [
            layout.Div('data', css_class="cradmin-focusfield cradmin-focusfield-screenheight"),
        ]
class AssignmentDeleteView(AssignmentQuerysetForRoleMixin, delete.DeleteView):
    """Confirm and delete an existing assignment."""
    model = trix_models.Assignment
class PreviewAssignmentView(TemplateView):
    """Render a preview of an assignment — stored, or not yet created."""
    template_name = 'trix_admin/assignments/preview.django.html'

    def __get_page(self):
        pk = self.kwargs['pk']
        if pk is None:
            # Previewing an unsaved assignment from the create form.
            return AssignmentCreateView.get_preview_data(self.request)
        # NOTE: The queryset ensures only admins on the current site gains access.
        course = self.request.cradmin_role
        queryset = trix_models.Assignment.objects.filter(tags=course.course_tag).distinct()
        return get_object_or_404(queryset, pk=pk)

    def get_context_data(self, **kwargs):
        context = super(PreviewAssignmentView, self).get_context_data(**kwargs)
        context['assignment'] = self.__get_page()
        return context
class App(crapp.App):
    # URL routes for the assignment admin app. Patterns are matched top to
    # bottom; the preview pk is optional so the create form can preview
    # unsaved assignments.
    appurls = [
        crapp.Url(
            r'^$',
            AssignmentListView.as_view(),
            name=crapp.INDEXVIEW_NAME),
        crapp.Url(
            r'^create$',
            AssignmentCreateView.as_view(),
            name="create"),
        crapp.Url(
            r'^edit/(?P<pk>\d+)$',
            AssignmentUpdateView.as_view(),
            name="edit"),
        crapp.Url(
            r'^preview/(?P<pk>\d+)?$',
            PreviewAssignmentView.as_view(),
            name="preview"),
        crapp.Url(
            r'^multiedit$',
            AssignmentMultiEditView.as_view(),
            name="multiedit"),
        crapp.Url(
            r'^delete/(?P<pk>\d+)$',
            AssignmentDeleteView.as_view(),
            name="delete")
    ]
Added bulk add and edit of tags on assignments.
from django.shortcuts import get_object_or_404
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import truncatechars
from django import forms
from django import http
from django.views.generic import TemplateView
from django_cradmin.viewhelpers import objecttable
from django_cradmin.viewhelpers import create
from django_cradmin.viewhelpers import update
from django_cradmin.viewhelpers import delete
from django_cradmin.viewhelpers import multiselect
from django_cradmin import crispylayouts
from django_cradmin import crapp
from django.core.urlresolvers import reverse
from crispy_forms import layout
from django_cradmin.acemarkdown.widgets import AceMarkdownWidget
from trix.trix_core import models as trix_models
from trix.trix_core import multiassignment_serialize
from trix.trix_admin import formfields
class TitleColumn(objecttable.MultiActionColumn):
    """Column showing the assignment title with per-row action buttons."""
    modelfield = 'title'

    def get_buttons(self, assignment):
        """Return Edit/Preview/Delete buttons, plus a View button linking to
        the student page when the assignment carries the active period tag."""
        buttons = [
            objecttable.Button(
                label=_('Edit'),
                url=self.reverse_appurl('edit', args=[assignment.id])),
            objecttable.PagePreviewButton(
                label=_('Preview'),
                url=self.reverse_appurl('preview', args=[assignment.id])),
            objecttable.Button(
                label=_('Delete'),
                url=self.reverse_appurl('delete', args=[assignment.id]),
                buttonclass="danger"),
        ]
        course = self.view.request.cradmin_role
        tags = set([tag.tag for tag in assignment.tags.all()])
        if course.active_period.tag in tags:
            # Strip the course-level tags so the student-page link filters on
            # the assignment's own tags only.
            # Use discard() rather than remove(): the course tag is not
            # guaranteed to be present, and remove() would raise KeyError.
            tags.discard(course.course_tag.tag)
            tags.discard(course.active_period.tag)
            view_url = u'{url}?tags={tags}#assignment-{assignmentid}'.format(
                url=reverse('trix_student_course', kwargs={'course_id': course.id}),
                tags=u','.join(tags),
                assignmentid=assignment.id)
            buttons.insert(2, objecttable.Button(
                label=_('View'),
                url=view_url))
        return buttons
class TextIntroColumn(objecttable.PlainTextColumn):
    """Column showing a short intro snippet of the assignment text."""
    modelfield = 'text'

    def render_value(self, assignment):
        # Keep the listing compact: at most 50 characters of the text.
        return truncatechars(assignment.text, 50)
class LastUpdateDatetimeColumn(objecttable.PlainTextColumn):
    """Column showing the last-update timestamp in the localized short format."""
    modelfield = 'lastupdate_datetime'

    def render_value(self, obj):
        formatted = defaultfilters.date(obj.lastupdate_datetime, 'SHORT_DATETIME_FORMAT')
        return formatted
class TagsColumn(objecttable.PlainTextColumn):
    """Column listing all tags on the assignment, comma separated."""
    modelfield = 'tags'

    def is_sortable(self):
        # A many-to-many relation has no meaningful sort order.
        return False

    def render_value(self, assignment):
        tag_names = [tag.tag for tag in assignment.tags.all()]
        return ', '.join(tag_names)
class AssignmentQuerysetForRoleMixin(object):
    """Restricts the queryset to assignments tagged with the current course."""

    def get_queryset_for_role(self, course):
        queryset = self.model.objects.filter(tags=course.course_tag)
        return queryset.prefetch_related('tags')
class AssignmentListView(AssignmentQuerysetForRoleMixin, objecttable.ObjectTableView):
    """Table listing of all assignments within the current course."""
    model = trix_models.Assignment
    enable_previews = True
    columns = [
        TitleColumn,
        TagsColumn,
        TextIntroColumn,
        LastUpdateDatetimeColumn,
    ]
    searchfields = [
        'title',
        'tags__tag',
        'text',
        'solution',
    ]

    def get_buttons(self):
        # Global buttons shown above the table.
        app = self.request.cradmin_app
        course = self.request.cradmin_role
        student_url = reverse('trix_student_course', kwargs={'course_id': course.id})
        return [
            objecttable.Button(_('Create'), url=app.reverse_appurl('create')),
            objecttable.Button(_('Show on website'), url=student_url),
        ]

    def get_multiselect_actions(self):
        # Actions applied to the set of checked rows.
        app = self.request.cradmin_app
        return [
            objecttable.MultiSelectAction(
                label=_('Edit'),
                url=app.reverse_appurl('multiedit')),
            objecttable.MultiSelectAction(
                label=_('Add tag'),
                url=app.reverse_appurl('multiadd-tag')),
            objecttable.MultiSelectAction(
                label=_('Remove tag'),
                url=app.reverse_appurl('multiremove-tag')),
        ]
class AssignmentCreateUpdateMixin(object):
    """Shared form configuration for the assignment create and update views."""
    model = trix_models.Assignment

    def get_preview_url(self):
        return self.request.cradmin_app.reverse_appurl('preview')

    def get_field_layout(self):
        return [
            layout.Div('title', css_class="cradmin-focusfield cradmin-focusfield-lg"),
            layout.Fieldset(_('Organize'), 'tags'),
            layout.Div('text', css_class="cradmin-focusfield"),
            layout.Div('solution', css_class="cradmin-focusfield"),
        ]

    def get_form(self, *args, **kwargs):
        """Swap in the tag input field and markdown editors for the text fields."""
        form = super(AssignmentCreateUpdateMixin, self).get_form(*args, **kwargs)
        form.fields['tags'] = formfields.ManyToManyTagInputField(required=False)
        for fieldname in ('text', 'solution'):
            form.fields[fieldname].widget = AceMarkdownWidget()
        return form

    def save_object(self, form, commit=True):
        """Save the assignment; when committing, replace its tags with the
        tags from the form."""
        assignment = super(AssignmentCreateUpdateMixin, self).save_object(form, commit=commit)
        if commit:
            assignment.tags.clear()
            for tag in form.cleaned_data['tags']:
                assignment.tags.add(tag)
        return assignment

    def form_saved(self, assignment):
        # Always keep the tag of the current course on the assignment.
        course = self.request.cradmin_role
        if not assignment.tags.filter(tag=course.course_tag).exists():
            assignment.tags.add(course.course_tag)
class AssignmentCreateView(AssignmentCreateUpdateMixin, create.CreateView):
    """Create a new assignment within the current course."""
class AssignmentUpdateView(AssignmentQuerysetForRoleMixin, AssignmentCreateUpdateMixin, update.UpdateView):
    """Edit an existing assignment within the current course."""
class AssignmentMultiEditForm(forms.Form):
    """Form holding the YAML serialization of the selected assignments."""
    data = forms.CharField(
        label=_('Assignment YAML'),
        required=True,
        widget=forms.Textarea)
class AssignmentMultiEditView(AssignmentQuerysetForRoleMixin, multiselect.MultiSelectFormView):
    """
    Edit multiple assignments at once via their YAML serialization.
    Supports both updating and creating assignments.
    """
    form_class = AssignmentMultiEditForm
    model = trix_models.Assignment
    template_name = 'trix_admin/assignments/multiedit.django.html'

    def form_valid(self, form):
        """Deserialize and sync the submitted YAML, re-rendering with errors
        on failure."""
        course = self.request.cradmin_role
        try:
            multiassignment_serialize.Deserializer(
                serialized_assignments=form.cleaned_data['data'],
                course_tag=course.course_tag.tag).sync()
        except multiassignment_serialize.DeserializerValidationErrors as e:
            context = self.get_context_data(
                form=form, deserializer_validationerrors=e.errors)
            return self.render_to_response(context)
        except multiassignment_serialize.DeserializerError as e:
            context = self.get_context_data(form=form, deserializererror=e)
            return self.render_to_response(context)
        return http.HttpResponseRedirect(self.request.cradmin_app.reverse_appindexurl())

    def get_initial(self):
        # Pre-fill the textarea with the serialization of the selection.
        return {'data': multiassignment_serialize.serialize(self.selected_objects)}

    def get_buttons(self):
        return [crispylayouts.PrimarySubmit('submit-save', _('Save'))]

    def get_field_layout(self):
        return [
            layout.Div('data', css_class="cradmin-focusfield cradmin-focusfield-screenheight"),
        ]
class AssignmentMultiTagForm(forms.Form):
    """Form asking for a single tag name."""
    tag = forms.CharField(
        label=_('Tag'),
        required=True)
class AssignmentMultiAddTagView(AssignmentQuerysetForRoleMixin, multiselect.MultiSelectFormView):
    """
    View used to add a tag to assignments.
    """
    form_class = AssignmentMultiTagForm
    model = trix_models.Assignment

    def form_valid(self, form):
        """Add the submitted tag to every selected assignment that does not
        already have it."""
        tagname = form.cleaned_data['tag']
        # Look up (or create) the Tag object once, outside the loop.
        # NOTE: get_or_create() takes keyword lookups and returns an
        # ``(object, created)`` tuple — the original passed the tag name
        # positionally and added the whole tuple to the m2m relation.
        tag_object, _created = trix_models.Tag.objects.get_or_create(tag=tagname)
        for assignment in self.selected_objects:
            if not assignment.tags.filter(tag=tagname).exists():
                assignment.tags.add(tag_object)
        return http.HttpResponseRedirect(self.request.cradmin_app.reverse_appindexurl())

    def get_pagetitle(self):
        return _('tags on selected assignments')

    def get_buttons(self):
        return [
            crispylayouts.PrimarySubmit('submit-save', _('Add tag'))
        ]

    def get_field_layout(self):
        return [
            layout.Fieldset(
                _('Type in the tag you want to add to the selected assignments'),
                'tag'
            )
        ]
class AssignmentMultiRemoveTagView(AssignmentQuerysetForRoleMixin, multiselect.MultiSelectFormView):
    """
    View used to remove a tag from assignments.
    """
    form_class = AssignmentMultiTagForm
    model = trix_models.Assignment

    def form_valid(self, form):
        """Remove the submitted tag from every selected assignment that has it."""
        tagname = form.cleaned_data['tag']
        for assignment in self.selected_objects:
            try:
                # Use a distinct name for the Tag object: the original rebound
                # the loop's ``tag`` variable, so from the second assignment on
                # it filtered against a Tag instance instead of the submitted
                # tag name.
                tag_object = assignment.tags.filter(tag=tagname).get()
            except trix_models.Tag.DoesNotExist:
                # This assignment does not carry the tag; nothing to remove.
                continue
            assignment.tags.remove(tag_object)
        return http.HttpResponseRedirect(self.request.cradmin_app.reverse_appindexurl())

    def get_pagetitle(self):
        return _('tags on selected assignments')

    def get_buttons(self):
        return [
            crispylayouts.PrimarySubmit('submit-save', _('Delete tag'))
        ]

    def get_field_layout(self):
        return [
            layout.Fieldset(
                _('Type in the tag you want to remove from the selected assignments'),
                'tag'
            )
        ]
class AssignmentDeleteView(AssignmentQuerysetForRoleMixin, delete.DeleteView):
    """Confirm and delete an existing assignment."""
    model = trix_models.Assignment
class PreviewAssignmentView(TemplateView):
    """Render a preview of an assignment — stored, or not yet created."""
    template_name = 'trix_admin/assignments/preview.django.html'

    def __get_page(self):
        pk = self.kwargs['pk']
        if pk is None:
            # Previewing an unsaved assignment from the create form.
            return AssignmentCreateView.get_preview_data(self.request)
        # NOTE: The queryset ensures only admins on the current site gains access.
        course = self.request.cradmin_role
        queryset = trix_models.Assignment.objects.filter(tags=course.course_tag).distinct()
        return get_object_or_404(queryset, pk=pk)

    def get_context_data(self, **kwargs):
        context = super(PreviewAssignmentView, self).get_context_data(**kwargs)
        context['assignment'] = self.__get_page()
        return context
class App(crapp.App):
    # URL routes for the assignment admin app. Patterns are matched top to
    # bottom; the preview pk is optional so the create form can preview
    # unsaved assignments.
    appurls = [
        crapp.Url(
            r'^$',
            AssignmentListView.as_view(),
            name=crapp.INDEXVIEW_NAME),
        crapp.Url(
            r'^create$',
            AssignmentCreateView.as_view(),
            name="create"),
        crapp.Url(
            r'^edit/(?P<pk>\d+)$',
            AssignmentUpdateView.as_view(),
            name="edit"),
        crapp.Url(
            r'^preview/(?P<pk>\d+)?$',
            PreviewAssignmentView.as_view(),
            name="preview"),
        crapp.Url(
            r'^multiedit$',
            AssignmentMultiEditView.as_view(),
            name="multiedit"),
        crapp.Url(
            r'^multiadd-tag$',
            AssignmentMultiAddTagView.as_view(),
            name="multiadd-tag"),
        crapp.Url(
            r'^multiremove-tag$',
            AssignmentMultiRemoveTagView.as_view(),
            name="multiremove-tag"),
        crapp.Url(
            r'^delete/(?P<pk>\d+)$',
            AssignmentDeleteView.as_view(),
            name="delete")
    ]
|
# Load in core dependencies
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
# Load in 3rd party dependencies
from jinja2 import Template
# Set up constants
__dir__ = os.path.dirname(os.path.abspath(__file__))
# TODO: Break this up into the core (split_sel, run_test) and unittest.TestCase
class TestCase(unittest.TestCase):
    """Base test case that runs each test through a generated Sublime plugin.

    Each test method returns a dict describing target selection, content and
    expectations; the wrapper renders that into a scratch plugin, invokes
    Sublime on it, and asserts on the result file it writes.
    Python 2 only (uses the ``print`` statement).
    """
    # TODO: It would be nice to pull directory location from Sublime but it isn't critical
    # Determine the scratch plugin directory
    scratch_dir = os.path.expanduser('~/.config/sublime-text-2/Packages/tmp-plugin-tests')
    output_dir = __dir__ + '/output'

    @classmethod
    def split_sel(cls, input):
        """ Break up input string with selection delimiters into selection and content. """
        # Create a placeholder selection
        # Each entry is a (start, end) index pair; for the single-char '|'
        # delimiter both indices are equal (a cursor, not a range).
        sel = []
        # Find all indications for selection
        while True:
            # Find the next matching selection
            # TODO: Robustify with multi-char selection and escaping
            match = re.search(r'\|', input)
            # If there was a match
            if match:
                # Save the selection
                start = match.start(0)
                sel.append((start, start))
                # Remove the match from the input
                input = input[:start] + input[match.end(0):]
            # Otherwise, break
            else:
                break
        # Return a selection and content
        return {
            'sel': sel,
            'content': input
        }

    @classmethod
    def ensure_scratch_dir(cls):
        # If the scratch plugins directory does not exist, create it
        if not os.path.exists(cls.scratch_dir):
            os.makedirs(cls.scratch_dir)

    @classmethod
    def ensure_launcher(cls):
        """Install/refresh the launcher plugin; return True when it is usable,
        False when Sublime must be restarted to pick up an update."""
        # Ensure the scratch directory exists
        cls.ensure_scratch_dir()
        # If command.py doesn't exist, copy it
        orig_command_path = __dir__ + '/lib/command.py'
        dest_command_path = cls.scratch_dir + '/command.py'
        if not os.path.exists(dest_command_path):
            shutil.copyfile(orig_command_path, dest_command_path)
        else:
            # Otherwise...
            # If there are updates for command.py
            expected_command = None
            with open(orig_command_path) as f:
                expected_command = f.read()
            actual_command = None
            with open(dest_command_path) as f:
                actual_command = f.read()
            if expected_command != actual_command:
                # Update the file
                shutil.copyfile(orig_command_path, dest_command_path)
                # and notify the user we must restart Sublime
                # TODO: We might want to make this even more loud
                print 'We had to update the test launcher plugin. You must close or restart Sublime to continue testing.'
                return False
        # Notify the user that the launcher exists
        return True

    @classmethod
    def ensure_output_dir(cls):
        # Create the directory for test result files if it is missing.
        if not os.path.exists(cls.output_dir):
            os.makedirs(cls.output_dir)

    def __call__(self, result=None):
        """Wrap every test method before delegating to unittest's runner."""
        # For each test
        loader = unittest.TestLoader()
        for test_name in loader.getTestCaseNames(self.__class__):
            # Wrap the function
            test_fn = getattr(self, test_name)
            wrapped_test = self._wrap_test(test_fn)
            setattr(self, test_name, wrapped_test)
        # Call the original function
        unittest.TestCase.__call__(self, result)

    def _wrap_test(self, test_fn):
        """Return a closure that renders, runs and verifies ``test_fn``'s
        test description via a generated Sublime plugin."""
        # Guarantee there is an output directory
        self.ensure_output_dir()
        # Generate a wrapped function
        def wrapped_fn():
            # Get the test info
            test = test_fn()
            # Reserve an output file
            output_file = tempfile.mkstemp()[1]
            # Template plugin
            # NOTE(review): this path is relative to the CWD, unlike the
            # __dir__-based paths above — presumably tests run from the test
            # directory; confirm.
            plugin = None
            with open('lib/plugin.template.py') as f:
                template = Template(f.read())
                plugin = template.render(target_sel=test['target_sel'],
                                         content=test['content'],
                                         expected_sel=test['expected_sel'],
                                         expected_content=test['expected_content'],
                                         output_file=output_file)
            # Output plugin to directory
            with open(self.scratch_dir + '/plugin.py', 'w') as f:
                f.write(plugin)
            # Force a delay to allow f.write changes to be picked up
            # TODO: If the delay becomes too significant, attempt batch write -> delay -> batch test
            time.sleep(0.5)
            # Start a subprocess to run the plugin
            # TODO: We might want a development mode (runs commands inside local sublime window) and a testing mode (calls out to Vagrant box)
            # TODO: or at least 2 plugin hooks, one for CLI based testing and one for internal dev
            subprocess.call(['sublime_text', '--command', 'tmp_test'])
            # TODO: How does this work if `tmp_test` is theoretically run in parallel
            # Read in the output
            with open(output_file) as f:
                # Read and parse the result
                # First line is the status; any remaining lines are the
                # failure explanation written by the plugin.
                result = f.read()
                result_lines = result.split('\n')
                success = result_lines[0] == 'SUCCESS'
                failure_reason = '\n'.join(result_lines[1:] or ['Test failed'])
                # Assert we were successful
                # TODO: Rather than asserting, move this function elsewhere and return a reason to assert against
                self.assertTrue(success, failure_reason)
        # Return the wrapped function
        return wrapped_fn
Going to look into the developer package-reload feature.
# Load in core dependencies
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
# Load in 3rd party dependencies
from jinja2 import Template
# Set up constants
__dir__ = os.path.dirname(os.path.abspath(__file__))  # absolute directory containing this file
# TODO: Break this up into the core (split_sel, run_test) and unittest.TestCase
class TestCase(unittest.TestCase):
    """unittest.TestCase subclass that runs its tests inside Sublime Text.

    Each test method returns a dict describing a buffer/selection scenario;
    __call__ wraps every test method so that it is rendered into a scratch
    plugin, executed by a `sublime_text` subprocess, and asserted on via a
    result file (see _wrap_test).
    """
    # TODO: It would be nice to pull directory location from Sublime but it isn't critical
    # Determine the scratch plugin directory
    scratch_dir = os.path.expanduser('~/.config/sublime-text-2/Packages/tmp-plugin-tests')
    # Directory where per-test output artifacts are collected
    output_dir = __dir__ + '/output'
    @classmethod
    def split_sel(cls, input):
        """ Break up input string with selection delimiters into selection and content. """
        # Create a placeholder selection
        sel = []
        # Find all indications for selection
        while True:
            # Find the next matching selection
            # TODO: Robustify with multi-char selection and escaping
            match = re.search(r'\|', input)
            # If there was a match
            if match:
                # Save the selection (zero-width: cursor position, not a range)
                start = match.start(0)
                sel.append((start, start))
                # Remove the match from the input
                input = input[:start] + input[match.end(0):]
            # Otherwise, break
            else:
                break
        # Return a selection and content
        return {
            'sel': sel,
            'content': input
        }
    @classmethod
    def ensure_scratch_dir(cls):
        """Create the scratch plugin directory if it does not yet exist."""
        # If the scratch plugins directory does not exist, create it
        if not os.path.exists(cls.scratch_dir):
            os.makedirs(cls.scratch_dir)
    @classmethod
    def ensure_launcher(cls):
        """Install or refresh the launcher plugin (command.py).

        Returns True when the launcher is present and current; False when it
        had to be updated, in which case Sublime must be restarted first.
        """
        # Ensure the scratch directory exists
        cls.ensure_scratch_dir()
        # If command.py doesn't exist, copy it
        orig_command_path = __dir__ + '/lib/command.py'
        dest_command_path = cls.scratch_dir + '/command.py'
        if not os.path.exists(dest_command_path):
            shutil.copyfile(orig_command_path, dest_command_path)
        else:
            # Otherwise...
            # If there are updates for command.py
            expected_command = None
            with open(orig_command_path) as f:
                expected_command = f.read()
            actual_command = None
            with open(dest_command_path) as f:
                actual_command = f.read()
            if expected_command != actual_command:
                # Update the file
                shutil.copyfile(orig_command_path, dest_command_path)
                # and notify the user we must restart Sublime
                # TODO: We might want to make this even more loud
                print 'We had to update the test launcher plugin. You must close or restart Sublime to continue testing.'
                return False
        # Notify the user that the launcher exists
        return True
    @classmethod
    def ensure_output_dir(cls):
        """Create the output directory if it does not yet exist."""
        if not os.path.exists(cls.output_dir):
            os.makedirs(cls.output_dir)
    def __call__(self, result=None):
        """Wrap every test method in-place, then delegate to unittest's runner."""
        # For each test
        loader = unittest.TestLoader()
        for test_name in loader.getTestCaseNames(self.__class__):
            # Wrap the function
            test_fn = getattr(self, test_name)
            wrapped_test = self._wrap_test(test_fn)
            setattr(self, test_name, wrapped_test)
        # Call the original function
        unittest.TestCase.__call__(self, result)
    def _wrap_test(self, test_fn):
        """Wrap `test_fn` so that calling it runs the full plugin round-trip.

        See the class docstring; `test_fn` returns the scenario dict that is
        rendered into the scratch plugin.
        """
        # Guarantee there is an output directory
        self.ensure_output_dir()
        # Generate a wrapped function
        def wrapped_fn():
            # Get the test info
            test = test_fn()
            # Reserve an output file
            output_file = tempfile.mkstemp()[1]
            # Template plugin
            plugin = None
            # NOTE(review): path is relative to the process CWD, unlike the
            # __dir__-anchored paths used elsewhere -- confirm intended.
            with open('lib/plugin.template.py') as f:
                template = Template(f.read())
                plugin = template.render(target_sel=test['target_sel'],
                                         content=test['content'],
                                         expected_sel=test['expected_sel'],
                                         expected_content=test['expected_content'],
                                         output_file=output_file)
            # Output plugin to directory
            with open(self.scratch_dir + '/plugin.py', 'w') as f:
                f.write(plugin)
            # Force a delay to allow f.write changes to be picked up
            # TODO: If the delay becomes too significant, attempt batch write -> delay -> batch test
            time.sleep(0.3)
            # Start a subprocess to run the plugin
            # TODO: We might want a development mode (runs commands inside local sublime window) and a testing mode (calls out to Vagrant box)
            # TODO: or at least 2 plugin hooks, one for CLI based testing and one for internal dev
            subprocess.call(['sublime_text', '--command', 'tmp_test'])
            # TODO: How does this work if `tmp_test` is theoretically run in parallel
            # Read in the output
            with open(output_file) as f:
                # Read and parse the result
                result = f.read()
                result_lines = result.split('\n')
                # First line is the status; any further lines explain a failure
                success = result_lines[0] == 'SUCCESS'
                failure_reason = '\n'.join(result_lines[1:] or ['Test failed'])
                # Assert we were successful
                # TODO: Rather than asserting, move this function elsewhere and return a reason to assert against
                self.assertTrue(success, failure_reason)
        # Return the wrapped function
        return wrapped_fn
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
nr_openSMILE.py
Run selected openSMILE scripts on files with noises replaced by various methods
(see `openSMILE_preprocessing/noise_replacement` for noise replacement
details).
Author:
– Jon Clucas, 2016–2017 (jon.clucas@childmind.org)
© 2017, Child Mind Institute, Apache v2.0 License
This script uses functions from mhealthx
( http://sage-bionetworks.github.io/mhealthx/ ).
Authors:
- Arno Klein, 2015–2017 (arno@sagebase.org) http://binarybottle.com
Copyright 2015–2017, Sage Bionetworks (http://sagebase.org) & Child Mind
Institute, Apache v2.0 License
"""
from ..openSMILE_runSM.mhealthx.mhealthx import extract as ex
from .. import iterate_ursis as iu
import os
# change this variable to your openSMILE installation location
openSMILE = '/home/jclucas/opensmile-2.3.0/inst/bin/SMILExtract'  # absolute path to the SMILExtract binary
def run_openSMILE(config_file, sound_file):
    """
    Function to run the openSMILE with a specified config_file on a specified
    sound file and save the results.

    Parameters
    ----------
    config_file : string
        the basename of the openSMILE config file that lives in `config`

    sound_file : string
        the absolute path to the soundfile

    Returns
    -------
    None.

    Outputs
    -------
    *soundfile*.csv : comma separated value file
        openSMILE output in arff-csv format with the original filename with
        the extension replaced, and in a subdirectory with a name matching
        the original location within an `openSMILE_output` directory at the
        relative level of original location
    """
    sub_dir = os.path.split(os.path.dirname(sound_file))
    out_path = os.path.join(sub_dir[0], 'openSMILE_output', sub_dir[1])
    if not os.path.exists(out_path):
        # 0o755 is the octal spelling accepted by both Python 2.7 and 3
        os.makedirs(out_path, 0o755)
    row = None
    # BUG FIX: the original referenced an undefined name `out_dir`; the
    # output-directory variable defined above is `out_path`.
    r_oS_args = (sound_file, openSMILE, '-I', '-C', '-O', ''.join(['config/',
                 config_file]), '', row, out_path, True)
    # str() each element: the argument tuple contains None and True, which
    # ' '.join() cannot handle directly.
    print(' '.join(str(arg) for arg in r_oS_args))
    # process the file
    try:
        # BUG FIX: unpack the argument tuple; passing the tuple itself would
        # hand mhealthx a single positional argument.
        row, table_path = ex.run_openSMILE(*r_oS_args)
    # if necessary, specify csvoutput
    except Exception:
        # NOTE(review): the original caught `cComponentException`, a name
        # never defined or imported here (it would itself raise NameError the
        # moment the except clause was evaluated).  We fall back on any
        # failure instead; TODO narrow this once the real exception type is
        # importable.
        row, table_path = ex.run_openSMILE(sound_file, openSMILE, '-I', '-C',
                          '-csvoutput', ''.join(['config/', config_file]), '',
                          row, out_path, True)
def main():
    """Run every configured openSMILE config over every condition's files."""
    # replace `top_dir`'s value with your top-level directory
    top_dir = '/home/jclucas/opensmile-2.3.0/test/noise_replacement_efficacy'
    # replace these with your noise-replacement conditions
    conditions = ['ambient_clip', 'clone_fill', 'no_beeps', 'sample_silenced',
                  'timeshifted']
    # replace these with your openSMILE config files
    config_files = ['emobase.conf', 'ComParE_2016.conf']
    # collect the sound files for every condition
    sound_files = []
    for condition in conditions:
        sound_files.extend(iu.i_ursi(top_dir, condition))
    # run each config over each collected file
    for wav in sound_files:
        for config in config_files:
            run_openSMILE(config, wav)
# ============================================================================
if __name__ == '__main__' and __package__ is None:
    # PEP 366 shim so the explicit relative imports above resolve when this
    # module is executed directly as a script.
    # NOTE(review): "expected.package.name" looks like a placeholder -- it
    # should be this module's real parent package path; confirm.
    __package__ = "expected.package.name"
    main()
Correct the relative import paths.
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
nr_openSMILE.py
Run selected openSMILE scripts on files with noises replaced by various methods
(see `openSMILE_preprocessing/noise_replacement` for noise replacement
details).
Author:
– Jon Clucas, 2016–2017 (jon.clucas@childmind.org)
© 2017, Child Mind Institute, Apache v2.0 License
This script uses functions from mhealthx
( http://sage-bionetworks.github.io/mhealthx/ ).
Authors:
- Arno Klein, 2015–2017 (arno@sagebase.org) http://binarybottle.com
Copyright 2015–2017, Sage Bionetworks (http://sagebase.org) & Child Mind
Institute, Apache v2.0 License
"""
from ...openSMILE_runSM.mhealthx.mhealthx import extract as ex
from ... import iterate_ursis as iu
import os
# change this variable to your openSMILE installation location
openSMILE = '/home/jclucas/opensmile-2.3.0/inst/bin/SMILExtract'  # absolute path to the SMILExtract binary
def run_openSMILE(config_file, sound_file):
    """
    Function to run the openSMILE with a specified config_file on a specified
    sound file and save the results.

    Parameters
    ----------
    config_file : string
        the basename of the openSMILE config file that lives in `config`

    sound_file : string
        the absolute path to the soundfile

    Returns
    -------
    None.

    Outputs
    -------
    *soundfile*.csv : comma separated value file
        openSMILE output in arff-csv format with the original filename with
        the extension replaced, and in a subdirectory with a name matching
        the original location within an `openSMILE_output` directory at the
        relative level of original location
    """
    sub_dir = os.path.split(os.path.dirname(sound_file))
    out_path = os.path.join(sub_dir[0], 'openSMILE_output', sub_dir[1])
    if not os.path.exists(out_path):
        # 0o755 is the octal spelling accepted by both Python 2.7 and 3
        os.makedirs(out_path, 0o755)
    row = None
    # BUG FIX: the original referenced an undefined name `out_dir`; the
    # output-directory variable defined above is `out_path`.
    r_oS_args = (sound_file, openSMILE, '-I', '-C', '-O', ''.join(['config/',
                 config_file]), '', row, out_path, True)
    # str() each element: the argument tuple contains None and True, which
    # ' '.join() cannot handle directly.
    print(' '.join(str(arg) for arg in r_oS_args))
    # process the file
    try:
        # BUG FIX: unpack the argument tuple; passing the tuple itself would
        # hand mhealthx a single positional argument.
        row, table_path = ex.run_openSMILE(*r_oS_args)
    # if necessary, specify csvoutput
    except Exception:
        # NOTE(review): the original caught `cComponentException`, a name
        # never defined or imported here (it would itself raise NameError the
        # moment the except clause was evaluated).  We fall back on any
        # failure instead; TODO narrow this once the real exception type is
        # importable.
        row, table_path = ex.run_openSMILE(sound_file, openSMILE, '-I', '-C',
                          '-csvoutput', ''.join(['config/', config_file]), '',
                          row, out_path, True)
def main():
    """Run every configured openSMILE config over every condition's files."""
    # replace `top_dir`'s value with your top-level directory
    top_dir = '/home/jclucas/opensmile-2.3.0/test/noise_replacement_efficacy'
    # replace these with your noise-replacement conditions
    conditions = ['ambient_clip', 'clone_fill', 'no_beeps', 'sample_silenced',
                  'timeshifted']
    # replace these with your openSMILE config files
    config_files = ['emobase.conf', 'ComParE_2016.conf']
    # collect the sound files for every condition
    sound_files = []
    for condition in conditions:
        sound_files.extend(iu.i_ursi(top_dir, condition))
    # run each config over each collected file
    for wav in sound_files:
        for config in config_files:
            run_openSMILE(config, wav)
# ============================================================================
if __name__ == '__main__' and __package__ is None:
    # PEP 366 shim so the explicit relative imports above resolve when this
    # module is executed directly as a script.
    # NOTE(review): "expected.package.name" looks like a placeholder -- it
    # should be this module's real parent package path; confirm.
    __package__ = "expected.package.name"
    main()
|
from __future__ import print_function
import os, sys
import subprocess
import time
import tempfile
import threading
debug = True  # when True, the subprocess wrappers below trace their arguments to stderr
def _merge_snapshots(list1, list2):
"""
Given a list of snapshots, return a list of
common snapshots (sorted by creation time).
The return list is simply an array of names.
N.B.: Snapshots are assumed to be the same if
they have the same name!
"""
rv = []
if list2:
dict2 = dict((el["Name"], True) for el in list2)
for snapname in [x["Name"] for x in list1]:
if snapname in dict2:
rv.append(snapname)
else:
pass;
return rv
def CHECK_OUTPUT(*args, **kwargs):
    """Traced pass-through to subprocess.check_output."""
    if debug:
        trace = "CHECK_OUTPUT({}, {})".format(args, kwargs)
        print(trace, file=sys.stderr)
    return subprocess.check_output(*args, **kwargs)
def CALL(*args, **kwargs):
    """Traced pass-through to subprocess.call; returns the child's exit status."""
    if debug:
        # BUG FIX: `file=sys.stderr` was previously passed to str.format()
        # (it sat inside format's parentheses), raising TypeError whenever
        # debug tracing was enabled; it belongs to print().
        print("CALL({}, {})".format(args, kwargs), file=sys.stderr)
    return subprocess.call(*args, **kwargs)
def CHECK_CALL(*args, **kwargs):
    """Traced pass-through to subprocess.check_call."""
    if debug:
        trace = "CHECK_CALL({}, {})".format(args, kwargs)
        print(trace, file=sys.stderr)
    return subprocess.check_call(*args, **kwargs)
def POPEN(*args, **kwargs):
    """Traced pass-through to subprocess.Popen."""
    if debug:
        trace = "POPEN({}, {})".format(args, kwargs)
        print(trace, file=sys.stderr)
    return subprocess.Popen(*args, **kwargs)
def _get_snapshots(ds):
    """
    Return a list of snapshots for the given dataset, as dicts with
    "Name" (the part after '@') and "CreationTime" (unix seconds).
    This only works for local ZFS pools, obviously.
    It relies on /sbin/zfs sorting by creation time, rather than
    sorting itself.
    """
    zfs_cmd = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
               "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
               ds]
    if debug:
        print("get_snapshots: {}".format(" ".join(zfs_cmd)), file=sys.stderr)
    try:
        listing = CHECK_OUTPUT(zfs_cmd).split("\n")
    except subprocess.CalledProcessError:
        # We'll assume the failure means there are no snapshots.
        return []
    result = []
    for line in listing:
        if not line:
            continue
        full_name, ctime = line.rstrip().split()
        # Keep only the snapshot part of dataset@snapshot.
        result.append({"Name": full_name.split('@')[1],
                       "CreationTime": int(ctime)})
    return result
class ZFSBackupError(ValueError):
    """Raised for any backup/replication failure in this module."""
    pass
class ZFSBackupFilter(object):
    """
    Base class for ZFS backup filters: a do-nothing ("null") filter.

    Filters expose a few informational properties plus start_backup()
    and start_restore().  The start_* methods take a source (generally
    a pipe) and return the stream the next stage should read.  Real
    filters should do their work in a subprocess or thread -- being the
    terminus of the pipeline is the only safe exception (anything else
    risks deadlock).
    """
    def __init__(self):
        pass

    @property
    def error_output(self):
        # The null filter never produces error output.
        return None

    @error_output.setter
    def error_output(self, e):
        # Deliberately discarded: nothing to redirect for the null filter.
        return

    @property
    def name(self):
        return "Null Filter"

    @property
    def backup_command(self):
        # No external command is run.
        return []

    @property
    def restore_command(self):
        # No external command is run.
        return []

    def start_backup(self, source):
        """
        Start the filter for a backup.  For a compression filter this
        would launch (e.g.) gzip in a subprocess; the null filter just
        hands the source straight back.
        """
        return source

    def start_restore(self, source):
        """
        Start the filter for a restore.  For a compression filter this
        would launch (e.g.) gzcat in a subprocess; the null filter just
        hands the source straight back.
        """
        return source
class ZFSBackupFilterThread(ZFSBackupFilter, threading.Thread):
    """
    Base class for a thread-based filter.  Either it should be
    subclassed (see ZFSBackupFilterCounter below), or it should
    be called with a callable object as the "process=" parameter.
    The process method may need to check ZFSBackupFilterThread.mode
    to decide if it is backing up or restoring.
    """
    def __init__(self, process=None, name="Thread Filter"):
        # NOTE(review): super().__init__() reaches ZFSBackupFilter.__init__
        # via the MRO (which does not chain onward), so threading.Thread is
        # then initialized explicitly -- confirm this double-init pattern is
        # intentional.
        super(ZFSBackupFilterThread, self).__init__()
        threading.Thread.__init__(self)
        # One OS pipe per filter: os.pipe() returns (read_fd, write_fd);
        # run() writes processed bytes into output_pipe, consumers read
        # them back via input_pipe.
        (self.input_pipe, self.output_pipe) = os.pipe()
        self._source = None
        # Set once run() has drained its source (see the count-style users).
        self._done = threading.Event()
        self._done.clear()
        self._process = process
        if self._process is None:
            self._name = "Null Thread Filter"
        else:
            self._name = name
    @property
    def backup_command(self):
        # Thread filters run no external command.
        return ["<thread>"]
    @property
    def restore_command(self):
        # Thread filters run no external command.
        return ["<thread>"]
    @property
    def input_pipe(self):
        # Read end (fd) of the filter's pipe.
        return self._input
    @input_pipe.setter
    def input_pipe(self, p):
        self._input = p
    @property
    def output_pipe(self):
        # Write end (fd) of the filter's pipe.
        return self._output
    @output_pipe.setter
    def output_pipe(self, p):
        self._output = p
    @property
    def source(self):
        # File-like object this filter reads from (set by start_*).
        return self._source
    @property
    def mode(self):
        # "backup" or "restore"; set by start_backup/start_restore.
        return self._mode
    def process(self, buf):
        # Subclasses should do any processing here
        if self._process:
            return self._process(buf)
    def run(self):
        """Thread body: pump source -> process() -> output_pipe until EOF."""
        while True:
            b = self.source.read(1024*1024)
            if b:
                os.write(self.output_pipe, self.process(b))
            else:
                break
        # Signal completion before closing the write end so readers see EOF.
        self._done.set()
        os.close(self.output_pipe)
    def start_backup(self, source):
        """Start filtering `source` for a backup; returns a readable file object."""
        self._mode = "backup"
        self._source = source
        self._py_output = os.fdopen(self.input_pipe, "rb")
        self.start()
        return self._py_output
    def start_restore(self, source):
        """Start filtering `source` for a restore; returns a readable file object."""
        self._mode = "restore"
        self._source = source
        rv = os.fdopen(self.input_pipe, "rb")
        self.start()
        return rv
class ZFSBackupFilterCommand(ZFSBackupFilter):
    """
    Derived class for backup filters based on commands.
    This adds a couple properties, and starts the appropriate commands
    in a Popen instance.  The error parameter in the constructor is
    used to indicate where stderr should go; by default, it goes to
    /dev/null
    If restore_command is None, then backup_command will be used.
    """
    def __init__(self, backup_command=["/bin/cat"], restore_command=None,
                 error=None):
        super(ZFSBackupFilterCommand, self).__init__()
        self._backup_command=backup_command
        self._restore_command=restore_command
        self.error = error
        # BUG FIX: initialize the backing attribute so the error_output
        # property can be read before it is ever assigned (previously that
        # raised AttributeError).
        self._error_output = None
    @property
    def backup_command(self):
        # Argument vector used when backing up.
        return self._backup_command
    @property
    def restore_command(self):
        # Falls back to the backup command when no restore command was given.
        return self._restore_command or self.backup_command
    @property
    def error_output(self):
        return self._error_output
    @error_output.setter
    def error_output(self, where):
        # NOTE(review): setting error_output closes any previously-opened
        # self.error stream (e.g. the lazily-opened /dev/null) -- confirm
        # that this side effect is intended.
        if self.error:
            self.error.close()
        self._error_output = where
    def start_restore(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source as stdin, and
        subprocess.PIPE as stdout, and return popen.stdout.
        If error is None, we open /dev/null for writing and
        use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = POPEN(self.restore_command,
                  bufsize=1024 * 1024,
                  stdin=source,
                  stdout=subprocess.PIPE,
                  stderr=self.error)
        return p.stdout
    def start_backup(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, and setting source up as stdin,
        and subprocess.PIPE as output, and return
        popen.stdout.
        If error is None, we open /dev/null for writing
        and use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = POPEN(self.backup_command,
                  bufsize=1024 * 1024,
                  stderr=self.error,
                  stdin=source,
                  stdout=subprocess.PIPE)
        return p.stdout
class ZFSBackupFilterCompressed(ZFSBackupFilterCommand):
    """
    A sample command filter, for compressing.
    One optional parameter: pigz (use parallel gzip instead of stock gzip).
    """
    def __init__(self, pigz=False):
        # Pick the (de)compression executables for this filter.
        if pigz:
            compress, decompress = ("/usr/local/bin/pigz",
                                    "/usr/local/bin/unpigz")
        else:
            compress, decompress = ("/usr/bin/gzip", "/usr/bin/gunzip")
        super(ZFSBackupFilterCompressed, self).__init__(
            backup_command=[compress],
            restore_command=[decompress])
class ZFSBackupFilterCounter(ZFSBackupFilterThread):
    """
    A sample thread filter.  All this does is count the
    bytes that come in to be processed; data passes through unchanged.
    """
    def __init__(self, handler=None):
        super(ZFSBackupFilterCounter, self).__init__()
        self._count = 0
        self.handler = handler
    @property
    def name(self):
        # CONSISTENCY FIX: the base classes expose `name` as a property; the
        # original defined a plain method here, so `.name` yielded a bound
        # method instead of a string.
        return "ZFS Count Filter"
    def process(self, b):
        # Accumulate the byte count; pass the data through untouched.
        self._count += len(b)
        return b
    @property
    def handler(self):
        # Optional callable invoked with the final count (see `count`).
        return self._handler
    @handler.setter
    def handler(self, h):
        self._handler = h
    @property
    def count(self):
        # Wait for the filter thread to finish so the count is final.
        self._done.wait()
        # BUG FIX: the original called the undefined name `iscallable`,
        # which raised NameError whenever a handler was set; the builtin
        # is `callable`.
        if self.handler and callable(self.handler):
            self.handler(self._count)
        return self._count
class ZFSBackup(object):
    """
    Base class for doing ZFS backups.
    Backups are done using snapshots -- zfs send is used -- not using files.
    Every backup must have a source and a target, although subclasses
    can change how they are interpreted.  Backups can be recursive.
    One ZFSBackup object should be created for each <source, target>, but
    not for each snapshot.  That is, you would use
    backup = ZFSBackup("/tank/Media", "/backup/tank/Media", recursive=True)
    <do backup>
    backup = ZFSBackup("/tank/Documents", "/backup/tank/Documents")
    <do backup>
    instead of creating a ZFSBackup object for each snapshot.
    In general, backups and restores are simply inverses of each other.
    In order to perform backups, it is necessary to get a list of snapshots
    on both the source and target.  An empty list on the target will mean
    a full backup is being done; an empty list on the source is a failure.
    Backups can have filters applied to them.  This is not used in the base
    class (since it only implements ZFS->ZFS), but subclasses may wish to
    add filters for compression, encryption, or accounting.  Some sample
    filter classes are provided.
    Some notes on how replication works:
    * source is the full path to the dataset.  *Or* it can be the entire pool.
    * target is the dataset to which the replication should go.
    * If source is the full pool, then the target will have all of the files
    at the root of the source pool.
    * If source is NOT the full pool, then the target will end up with only the
    dataset(s) being replicated -- but any intervening datasets will be created.
    What this means:
    * tank -> backup/tank means we end up with backup/tank as a copy of tank.
    * tank/usr/home -> backup/home means we end up with backup/home/usr/home.
    * When getting snapshots for the destination, we need to add the path for
    source, *minus* the pool name.
    * UNLESS we are replicating the full pool.
    What *that* means:
    * tank -> backup/tank means getting snapshots from backup/tank
    * tank/usr/home -> backup/home means getting snapshots from backup/home/usr/home
    """
    def __init__(self, source, target, recursive=False):
        """
        Parameters:
        source - (str) a ZFS pool or dataset to be backed up.
        target - (str) a ZFS dataset to be backed up.
        recursive - (bool) Indicate whether the backup is to be recursive or not.
        The only thing the base class does is run some validation tests
        on the source and target.
        """
        self.target = target
        self.source = source
        self.recursive = recursive
        # Snapshot lists are cached lazily by the *_snapshots properties.
        self._source_snapshots = None
        self._target_snapshots = None
        self._filters = []
        self.validate()
    @property
    def target(self):
        # Destination dataset for the backup.
        return self._dest
    @target.setter
    def target(self, t):
        self._dest = t
    @property
    def source(self):
        # Pool or dataset being backed up.
        return self._source
    @source.setter
    def source(self, s):
        self._source = s
    @property
    def recursive(self):
        # Whether `zfs send -R` is used.
        return self._recursive
    @recursive.setter
    def recursive(self, b):
        self._recursive = b
    def AddFilter(self, filter):
        """
        Add a filter.  The filter is set up during the backup and
        restore methods.  The filter needs to be an instance of
        ZFSFilter -- at least, it needs to have the start_backup and
        start_restore methods.
        """
        if not callable(getattr(filter, "start_backup", None)) and \
           not callable(getattr(filter, "start_restore", None)):
            raise ValueError("Incorrect type passed for filter")
        self._filters.append(filter)
    def _filter_backup(self, source, error=None):
        # Private method, to stitch the backup filters together.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_backup(input)
        return input
    def _filter_restore(self, source, error=None):
        # Private method, to stitch the restore filters together.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_restore(input)
        return input
    def __repr__(self):
        return "{}(source={}, target={})".format(self.__class__.__name__, self.source, self.target)
    @property
    def source_snapshots(self):
        """
        Return a list of snapshots on the source.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
        Name -- (str) Snapshot name.  The part that goes after the '@'
        CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if the recursive is true, this _only_ lists the snapshots for the
        source (recursive requires that the same snapshot exist on the descendents,
        or it doesn't get backed up).
        We cache this so we don't have to keep doing a list.
        """
        if not self._source_snapshots:
            self._source_snapshots = _get_snapshots(self.source)
        return self._source_snapshots
    @property
    def target_snapshots(self):
        """
        Return a list of snapshots on the target.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
        Name -- (str) Snapshot name.  The part that goes after the '@'
        CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if the recursive is true, this _only_ lists the snapshots for the
        target dataset.
        We cache this so we don't have to keep doing a list.
        """
        if not self._target_snapshots:
            # See the long discussion above about snapshots.
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            self._target_snapshots = _get_snapshots(target_path)
        return self._target_snapshots
    def validate(self):
        """
        Ensure the destination exists.  Derived classes will want
        to override this (probably).
        """
        command = ["/sbin/zfs", "list", "-H", self.target]
        try:
            with open("/dev/null", "w") as devnull:
                CHECK_CALL(command, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("Target {} does not exist".format(self.target))
        if not self.source_snapshots:
            # A source with no snapshots cannot be backed up
            raise ZFSBackupError("Source {} does not have snapshots".format(self.source))
        return
    def backup_handler(self, stream, **kwargs):
        """
        Method called to write the backup to the target.  In the base class,
        this simply creates the necessary datasets on the target, and then
        creates a Popen subprocess for 'zfs recv' with the appropriate arguments,
        and sets its stdin to stream.
        Subclasses will probably want to replace this method.
        """
        # First we create the intervening dataset paths.  That is, the
        # equivalent of 'mkdir -p ${target}/${source}'.
        # We don't care if it fails.
        full_path = self.target
        with open("/dev/null", "w+") as devnull:
            for d in self.source.split("/")[1:]:
                full_path = os.path.join(full_path, d)
                command = ["/sbin/zfs", "create", "-o", "readonly=on", full_path]
                if debug:
                    print("Running command {}".format(" ".join(command)), file=sys.stderr)
                try:
                    CALL(command, stdout=devnull, stderr=devnull)
                except Exception:
                    # Narrowed from a bare `except:` so ^C still interrupts;
                    # dataset creation remains deliberately best-effort.
                    pass
        # Now we just send the data to zfs recv.
        # Do we need -p too?
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            fobj = stream
            try:
                CHECK_CALL(command, stdin=fobj,
                           stderr=error_output)
            except subprocess.CalledProcessError:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        return
    def backup(self, snapname=None, force_full=False):
        """
        Back up the source to the target.
        If snapname is given, then that will be the snapshot used for the backup,
        otherwise it will be the most recent snapshot.  If snapname is given and
        does not exist, an exception is raised.
        By default, it will first find a list of snapshots in common with the
        source and target, ordered chronologically (based on the source).
        If force_full is True, then the snapshot chosen will be sent in its entirety,
        rather than trying to find a common ancestor for an incremental snapshot.
        This is the main driver of the backup process, and subclasses should be okay
        with using it.
        """
        # First, if snapname is given, let's make sure that it exists on the source.
        if snapname:
            # If snapname has the dataset in it, let's remove it
            if '@' in snapname:
                (_, snapname) = snapname.split("@")
            snap_index = None
            for indx, d in enumerate(self.source_snapshots):
                if d["Name"] == snapname:
                    snap_index = indx
                    break
            if snap_index is None:
                # BUG FIX: was ".foramt", which raised AttributeError instead
                # of producing the intended error message.
                raise ZFSBackupError("Specified snapshot {} does not exist".format(snapname))
            # We want to remove everything in source_snapshots up to the given one
            source_snapshots = self.source_snapshots[0:snap_index+1]
        else:
            source_snapshots = self.source_snapshots
        # BUG FIX: assign last_snapshot before the debug print references it
        # (the original printed first, raising NameError whenever debug was on).
        last_snapshot = source_snapshots[-1]
        if debug:
            print("last_snapshot = {}".format(last_snapshot), file=sys.stderr)
        last_common_snapshot = None
        if force_full:
            common_snapshots = []
        else:
            common_snapshots = _merge_snapshots(source_snapshots, self.target_snapshots)
        # At this point, common_snapshots has a list of snapshot names on both.
        # If there are no common snapshots, then we back up everything up to last_snapshot
        if debug:
            print("ZFSBackup: last_snapshot = {}, common_snapshots = {}".format(last_snapshot,
                                                                                common_snapshots),
                  file=sys.stderr)
        if last_snapshot["Name"] not in common_snapshots:
            if debug:
                print("We have to do some sends/receives", file=sys.stderr)
            # We need to do incremental snapshots from the last common snapshot to
            # last_snapshot.
            if common_snapshots:
                # Don't bother doing this if we have no snapshots in common
                last_common_snapshot = common_snapshots[-1]
                if debug:
                    print("Last common snapshot = {}".format(last_common_snapshot), file=sys.stderr)
                for indx, snap in enumerate(source_snapshots):
                    if snap["Name"] == last_common_snapshot:
                        break
                snapshot_list = source_snapshots[indx:]
            else:
                # Either it's been deleted on the remote end, or it's newer than the list.
                # So we start at a full dump from last_snapshot
                snapshot_list = [last_snapshot]
        else:
            snapshot_list = [last_snapshot]
        # There are two approaches that could be done here.
        # One is to do incremental sends for every snapshot; the other
        # is simply to do a send -I.  I'm choosing the latter.
        # If we have a last common snapshot, we can do an incremental from it to
        # the last snapshot; if we don't, we'll need to do a full send.
        command = ["/sbin/zfs", "send"]
        if self.recursive:
            command.append("-R")
        backup_dict = {}
        if last_common_snapshot:
            command.extend(["-I", "{}".format(last_common_snapshot)])
            backup_dict["incremental"] = True
            backup_dict["parent"] = last_common_snapshot
        else:
            backup_dict["incremental"] = False
        backup_dict["CreationTime"] = last_snapshot["CreationTime"]
        command.append("{}@{}".format(self.source, last_snapshot["Name"]))
        if debug:
            print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                send_proc = POPEN(command,
                                  bufsize=mByte,
                                  stdin=devnull,
                                  stderr=error_output,
                                  stdout=subprocess.PIPE)
                self.backup_handler(send_proc.stdout, **backup_dict)
                # BUG FIX: Popen.returncode is None until the child has been
                # reaped; wait() so the error check below is meaningful.
                send_proc.wait()
                if send_proc.returncode:
                    error_output.seek(0)
                    raise ZFSBackupError(error_output.read())
        return
    def replicate(self, source, snapname, previous=None, date=None):
        """
        Replicate from source.  source must be an object that supports
        read().  If date is not given, we will use the current time, so
        it should really be set.  The full snapshot name from the source
        would be dataset@snapname.  If previous is set, it indicates this
        is an incremental snapshot.
        The snapname, previous, and date parameters are for informational purposes only;
        the base class doesn't use them, but derived classes may.
        """
        # BUG FIX: the old default `date=int(time.time())` was evaluated once
        # at import time; evaluate at call time instead.
        if date is None:
            date = int(time.time())
        # (The original also built os.path.join(self.target, self.dataset)
        # into an unused local; `self.dataset` is never defined on this class
        # and raised AttributeError, so that line has been removed.)
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            # fobj = self._filter(source, error=error_output)
            fobj = source
            try:
                CHECK_CALL(command, stdin=fobj, stderr=error_output)
            except subprocess.CalledProcessError:
                # BUG FIX: `self.dataset` does not exist; identify the failed
                # snapshot by the source dataset instead.
                name = "{}@{}".format(self.source, snapname)
                error_output.seek(0)
                if debug:
                    print("`{}` failed: {}".format(" ".join(command), error_output.read()),
                          file=sys.stderr)
                raise ZFSBackupError("Could not replicate {} to target {}".format(name, self.target))
        return
    @property
    def snapshots(self):
        """
        Return an array of snapshots for the destination.
        Each entry in the array is a dictonary with at least
        two keys -- Name and CreationTime.  CreationTime is
        an integer (unix seconds).  The array is sorted by
        creation time (oldest first).  If there are no snapshots,
        an empty array is returned.
        This would be better with libzfs.
        """
        command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
                   "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
                   self.target]
        try:
            # NOTE(review): unlike _get_snapshots(), this keeps the full
            # dataset@snap name and bypasses the CHECK_OUTPUT tracing
            # wrapper -- confirm whether that asymmetry is intentional.
            output = subprocess.check_output(command).split("\n")
        except subprocess.CalledProcessError:
            # We'll assume this is because there are no snapshots
            return []
        snapshots = []
        for snapshot in output:
            if not snapshot:
                continue
            (name, ctime) = snapshot.rstrip().split()
            snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
        return snapshots
class ZFSBackupSSH(ZFSBackup):
"""
Replicate to a remote host using ssh.
This runs all of the commands the base class does, but via ssh
to another host.
When running a command on a remote host, we have the following
options:
1) We don't care about input or output, only the return value.
2) We stream to it, or from it.
(1) is mostly for validation -- ensure the target exists, and
we can connect to it.
For (2), we stream to it (writing to stdin), and don't care about
the output until after, for backup.
For (2), we stream _from_ it (reading from its stdout) when getting
a list of snapshots, and when doing a restore.
"""
    def __init__(self, source, target, remote_host,
                 remote_user=None,
                 ssh_opts=[],
                 recursive=False):
        """Set up an ssh-based backup.

        source/target/recursive are as in ZFSBackup; remote_host is the
        machine holding the target, remote_user (optional) is the account
        to ssh in as, and ssh_opts are extra arguments for the ssh
        command line.
        """
        self._user = remote_user
        self._host = remote_host
        # Copy so later mutation of the caller's list (or of the shared
        # default) cannot affect this instance.
        self._ssh_opts = ssh_opts[:]
        super(ZFSBackupSSH, self).__init__(source, target, recursive)
    @property
    def user(self):
        # Remote account name, or None to use the default ssh identity.
        return self._user
    @property
    def host(self):
        # Remote host name or address.
        return self._host
    @property
    def ssh_options(self):
        # Extra command-line options handed to ssh.
        return self._ssh_opts
def _build_command(self, cmd, *args):
# First set up ssh.
command = ["/usr/bin/ssh"]
if self.ssh_options:
command.extend(self.ssh_options)
if self.user:
command.append("{}@{}".format(self.user, self.host))
else:
command.append(self.host)
# Then goes the rest of the command
command.append(cmd)
if args:
command.extend(args)
return command
def _run_cmd(self, cmd, *args, **kwargs):
"""
This implements running a command and not caring about
the output. If stdout or stderr are given, those will
be file-like objects that the output and error are written
to. If the command exists with a non-0 value, we raise an
exception.
"""
command = self._build_command(cmd, *args)
try:
CHECK_CALL(command, **kwargs)
except subprocess.CalledProcessError:
raise ZFSBackupError("`{}` failed".format(command))
def _remote_stream(self, cmd, *args, **kwargs):
"""
Run a command on the remote host, but we want to write to or read
from it. We return a subprocess.Popen object, so the caller
needs to specify stdin=subprocess.PIPE, or stdout. Both can't be pipes.
This should only be called by _remote_write or remote_stream
"""
command = self._build_command(cmd, *args)
return POPEN(cmd[0], *cmd[1:], **kwargs)
def _remote_write(self, cmd, *args, **kwargs):
"""
Run a command on the remote host, writing to it via stdin.
"""
# First remove stdin=, if it's there.
kwargs["stdin"] = subprocess.PIPE
return self._remote_stream(cmd, *args, **kwargs)
def _remote_read(self, cmd, *args, **kwargs):
"""
Run a command on the remote host, reading its stdout.
"""
# First remove stdout=, if it's there.
kwargs["stdout"] = subprocess.PIPE
return self._remote_stream(cmd, *args, **kwargs)
def validate(self):
"""
Do a couple of validations.
"""
# See if we can connect to the remote host
with tempfile.TemporaryFile() as error_output:
try:
self._run_cmd("/usr/bin/true", stderr=error_output)
except ZFSBackupError:
error_output.seek(0)
raise ZFSBackupError("Unable to connect to remote host: {}".format(error_output.read()))
# See if the target exists
with open("/dev/null", "w+") as devnull:
try:
self._run_cmd("/sbin/zfs", "list", "-H", self.target,
stdout=devnull, stderr=devnull, stdin=devnull)
except ZFSBackupError:
raise ZFSBackupError("Target {} does not exist on remote host".format(self.target))
return
def backup_handler(self, stream, **kwargs):
"""
Implement the replication.
This is not right yet: we need to decompress and decrypt and dewhatever else
and do it by creating a pipeline on the remote end.
"""
# First, we create the intervening dataset pats. See the base class' method.
full_path = self.target
with open("/dev/null", "w+") as devnull:
for d in self.source.split("/")[1:]:
full_path = os.path.join(full_path, d)
command = self._build_command("/sbin/zfs", "create", "-o", "readonly=on", full_path)
try:
CALL(command, stdout=devnull, stderr=devnull, stdin=devnull)
except:
pass
# Here's where we would have to go through the filters, if any, and undo them.
# But some of the possible filters aren't needed, so I need a way to indicate that.
# For now, I'll simply assume uncompressed, unencrypted, etc.
command = self._build_command("/sbin/zfs", "receive", "-d", "-F", self.target)
with tempfile.TemporaryFile() as error_output:
# See above
fobj = stream
try:
CHECK_CALL(command, stdin=fobj, stderr=error_output)
except subprocess.CalledProcessError:
error_output.seek(0)
raise ZFSBackupError(error_output.read())
return
@property
def target_snapshots(self):
if not self._target_snapshots:
(src_pool, _, src_ds) = self.source.partition("/")
if src_ds:
target_path = "{}/{}".format(self.target, src_ds)
else:
target_path = "{}/{}".format(self.target, src_pool)
command = self._build_command("/sbin/zfs", "list", "-H", "-p",
"-o", "name,creation", "-r",
"-d", "1", "-t", "snapshot", "-s",
"creation", target_path)
snapshots = []
try:
output = CHECK_OUTPUT(command).split("\n")
for snapshot in output:
if not snapshot:
continue
(name, ctime) = snapshot.rstrip().split()
name = name.split('@')[1]
snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
except subprocess.CalledProcessError:
# We'll assume this is because there are no snapshots
pass
return snapshots
class ZFSBackupCount(ZFSBackup):
    """
    A backup "target" that discards the replication stream and simply
    counts how many bytes came through the (filtered) pipeline.
    Useful for sizing a backup without writing anything.
    """
    def __init__(self, source, recursive=False):
        # There is no real target, so pass an empty string; validate()
        # below is a no-op, so the base class accepts it.
        super(ZFSBackupCount, self).__init__(source, "", recursive)
        self._count = 0

    def __repr__(self):
        return "{}(source={}, recursive={})".format(self.__class__.__name__,
                                                    self.source,
                                                    self.recursive)

    def validate(self):
        """Nothing to validate for a counting target."""
        return

    def backup_handler(self, stream, **kwargs):
        """Drain the filtered stream, tallying the bytes read."""
        chunk_size = 1024 * 1024
        fobj = self._filter_backup(stream)
        total = 0
        while True:
            data = fobj.read(chunk_size)
            if not data:
                break
            total += len(data)
        self._count = total

    @property
    def target_snapshots(self):
        # A counter has no target, hence no snapshots.
        return []

    @property
    def count(self):
        """Number of bytes consumed by the last backup run."""
        return self._count
def main():
    """
    Command-line driver: parse options, build the requested replicator
    (zfs / ssh / counter), and run a single backup.
    """
    global debug
    import argparse

    def to_bool(s):
        """Parse a human-friendly boolean string ('yes', '1', 'true', ...)."""
        return s.lower() in ("yes", "1", "true", "t", "y")

    parser = argparse.ArgumentParser(description='ZFS snapshot replictor')
    parser.register('type', 'bool', to_bool)
    parser.add_argument("--debug", dest='debug',
                        action='store_true', default=False,
                        help='Turn on debugging')
    parser.add_argument("--verbose", dest='verbose', action='store_true',
                        default=False, help='Be verbose')
    # Bug fix: this used type=bool, which treats ANY non-empty string
    # (including "false") as True.  Use the registered 'bool' converter.
    parser.add_argument('--recursive', '-R', dest='recursive',
                        type='bool',
                        default=False,
                        help='Recursively replicate')
    parser.add_argument('--snapshot', '-S', dest='snapshot_name',
                        default=None,
                        help='Snapshot to replicate')
    parser.add_argument("--compressed", "-C", dest='compressed',
                        action='store_true', default=False,
                        help='Compress snapshots')
    parser.add_argument('--pigz', action='store_true',
                        dest='use_pigz', default=False,
                        help='Use pigz to compress')
    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
    # We have a sub parser for each type of replication.
    zfs_parser = subparsers.add_parser('zfs',
                                       help='Replicate to local ZFS dataset')
    zfs_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    counter_parser = subparsers.add_parser('counter',
                                           help='Count replication bytes')
    # ssh parser has a lot more options
    ssh_parser = subparsers.add_parser("ssh",
                                       help="Replicate to a remote ZFS server")
    ssh_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    ssh_parser.add_argument('--host', '-H', dest='remote_host',
                            required=True,
                            help='Remote hostname')
    ssh_parser.add_argument("--user", '-U', dest='remote_user',
                            help='Remote user (defaults to current user)')

    args = parser.parse_args()
    debug = args.debug
    if debug:
        print("args = {}".format(args), file=sys.stderr)
    # Bug fix: without this check, a missing --snapshot crashed with an
    # AttributeError on None.split instead of a usable error message.
    if not args.snapshot_name:
        print("A snapshot name is required (--snapshot dataset@snap)", file=sys.stderr)
        sys.exit(1)
    try:
        (dataset, snapname) = args.snapshot_name.split('@')
    except ValueError:
        print("Invalid snapshot name {}".format(args.snapshot_name), file=sys.stderr)
        sys.exit(1)
    if args.subcommand is None:
        print("No replication type method.  Valid types are zfs, counter", file=sys.stderr)
        sys.exit(1)
    elif args.subcommand == 'counter':
        backup = ZFSBackupCount(dataset, recursive=args.recursive)
    elif args.subcommand == 'zfs':
        backup = ZFSBackup(dataset, args.destination, recursive=args.recursive)
    elif args.subcommand == 'ssh':
        backup = ZFSBackupSSH(dataset, args.destination, args.remote_host,
                              remote_user=args.remote_user,
                              recursive=args.recursive)
    else:
        print("Unknown replicator {}".format(args.subcommand), file=sys.stderr)
        sys.exit(1)
    if args.compressed:
        backup.AddFilter(ZFSBackupFilterCompressed(pigz=args.use_pigz))
    if args.verbose:
        print("Starting backup of {}".format(dataset))
    # backup() strips the leading "dataset@" itself, so passing the full
    # name is fine.
    backup.backup(snapname=args.snapshot_name)
    if args.verbose:
        print("Done with backup")
    if isinstance(backup, ZFSBackupCount):
        print("{} bytes".format(backup.count))

if __name__ == "__main__":
    main()
Fix up errors discovered during a test.
from __future__ import print_function
import os, sys
import subprocess
import time
import tempfile
import threading
debug = True
def _merge_snapshots(list1, list2):
"""
Given a list of snapshots, return a list of
common snapshots (sorted by creation time).
The return list is simply an array of names.
N.B.: Snapshots are assumed to be the same if
they have the same name!
"""
rv = []
if list2:
dict2 = dict((el["Name"], True) for el in list2)
for snapname in [x["Name"] for x in list1]:
if snapname in dict2:
rv.append(snapname)
else:
pass;
return rv
def CHECK_OUTPUT(*args, **kwargs):
    """Tracing wrapper: log the call (when debug) then run subprocess.check_output."""
    if debug:
        msg = "CHECK_OUTPUT({}, {})".format(args, kwargs)
        print(msg, file=sys.stderr)
    return subprocess.check_output(*args, **kwargs)
def CALL(*args, **kwargs):
    """Tracing wrapper: log the call (when debug) then run subprocess.call."""
    if debug:
        # Bug fix: file=sys.stderr was previously passed to str.format()
        # (which silently ignores it), so the trace went to stdout,
        # unlike every sibling wrapper.
        print("CALL({}, {})".format(args, kwargs), file=sys.stderr)
    return subprocess.call(*args, **kwargs)
def CHECK_CALL(*args, **kwargs):
    """Tracing wrapper: log the call (when debug) then run subprocess.check_call."""
    if debug:
        msg = "CHECK_CALL({}, {})".format(args, kwargs)
        print(msg, file=sys.stderr)
    return subprocess.check_call(*args, **kwargs)
def POPEN(*args, **kwargs):
    """Tracing wrapper: log the call (when debug) then construct subprocess.Popen."""
    if debug:
        msg = "POPEN({}, {})".format(args, kwargs)
        print(msg, file=sys.stderr)
    return subprocess.Popen(*args, **kwargs)
def _get_snapshots(ds):
    """
    Return the snapshots of a local ZFS dataset, oldest first.
    Each entry is {"Name": <snapshot part after '@'>, "CreationTime": <int>}.
    This only works for local ZFS pools, obviously.  It relies on
    /sbin/zfs doing the sorting (-s creation) rather than sorting here.
    """
    command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
               "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
               ds]
    if debug:
        print("get_snapshots: {}".format(" ".join(command)), file=sys.stderr)
    try:
        output = CHECK_OUTPUT(command).split("\n")
    except subprocess.CalledProcessError:
        # Assume a failure simply means there are no snapshots.
        return []
    snapshots = []
    for line in output:
        if not line:
            continue
        (full_name, ctime) = line.rstrip().split()
        snapshots.append({"Name": full_name.split('@')[1],
                          "CreationTime": int(ctime)})
    return snapshots
class ZFSBackupError(ValueError):
    """Raised for any failure during ZFS backup/replication operations."""
    pass
class ZFSBackupFilter(object):
    """
    Base class for ZFS backup filters.
    Filters expose a few informational properties plus start_backup()
    and start_restore() methods.  The start_* methods take a source,
    which should be a pipe.  In general, filters should use a
    subprocess or thread, unless they are the terminus of the pipeline
    (doing otherwise risks deadlock).
    This base class is the identity ("null") filter.
    """
    def __init__(self):
        pass

    @property
    def error_output(self):
        # The null filter has no error stream.
        return None

    @error_output.setter
    def error_output(self, e):
        # Deliberately discarded: there is nowhere for errors to go.
        return

    @property
    def name(self):
        """Human-readable filter name."""
        return "Null Filter"

    @property
    def backup_command(self):
        """argv used during backup; empty for the null filter."""
        return []

    @property
    def restore_command(self):
        """argv used during restore; empty for the null filter."""
        return []

    def start_backup(self, source):
        """
        Start the filter for a backup.  The null filter simply hands
        its input straight back to the caller.
        """
        return source

    def start_restore(self, source):
        """
        Start the filter for a restore.  The null filter simply hands
        its input straight back to the caller.
        """
        return source
class ZFSBackupFilterThread(ZFSBackupFilter, threading.Thread):
    """
    Base class for a thread-based filter.  Either it should be
    subclassed (see ZFSBackupFilterCounter below), or it should
    be called with a callable object as the "process=" parameter.
    The process method may need to check ZFSBackupFilterThread.mode
    to decide if it is backing up or restoring.
    Data flow: run() reads from self.source and writes processed bytes
    into an os.pipe(); the caller reads the other end of that pipe.
    """
    def __init__(self, process=None, name="Thread Filter"):
        # ZFSBackupFilter.__init__ does not chain to Thread, so Thread
        # must be initialized explicitly as well.
        super(ZFSBackupFilterThread, self).__init__()
        threading.Thread.__init__(self)
        # os.pipe() returns (read_fd, write_fd): input_pipe is the READ
        # end handed to the consumer, output_pipe is the WRITE end used
        # by run().
        (self.input_pipe, self.output_pipe) = os.pipe()
        self._source = None
        # Signalled by run() when the stream is exhausted.
        self._done = threading.Event()
        self._done.clear()
        self._process = process
        if self._process is None:
            self._name = "Null Thread Filter"
        else:
            self._name = name
    @property
    def backup_command(self):
        # Placeholder: thread filters have no external command.
        return ["<thread>"]
    @property
    def restore_command(self):
        return ["<thread>"]
    @property
    def input_pipe(self):
        # Read end of the pipe (a raw file descriptor).
        return self._input
    @input_pipe.setter
    def input_pipe(self, p):
        self._input = p
    @property
    def output_pipe(self):
        # Write end of the pipe (a raw file descriptor).
        return self._output
    @output_pipe.setter
    def output_pipe(self, p):
        self._output = p
    @property
    def source(self):
        # The upstream file-like object set by start_backup/start_restore.
        return self._source
    @property
    def mode(self):
        # "backup" or "restore"; only set once start_* has run.
        return self._mode
    def process(self, buf):
        # Subclasses should do any processing here
        # NOTE(review): if _process is None this returns None, and run()
        # would then pass None to os.write() -- the null thread filter
        # appears unusable as-is; confirm before relying on it.
        if self._process:
            return self._process(buf)
    def run(self):
        # Thread body: pump source -> process() -> write end of the pipe,
        # 1 MiB at a time, until EOF.
        while True:
            b = self.source.read(1024*1024)
            if b:
                os.write(self.output_pipe, self.process(b))
            else:
                break
        self._done.set()
        # Close the write end so the reader sees EOF.
        os.close(self.output_pipe)
    def start_backup(self, source):
        """Begin filtering for a backup; returns the readable end as a file object."""
        self._mode = "backup"
        self._source = source
        self._py_output = os.fdopen(self.input_pipe, "rb")
        self.start()
        return self._py_output
    def start_restore(self, source):
        """Begin filtering for a restore; returns the readable end as a file object."""
        self._mode = "restore"
        self._source = source
        rv = os.fdopen(self.input_pipe, "rb")
        self.start()
        return rv
class ZFSBackupFilterCommand(ZFSBackupFilter):
    """
    Derived class for backup filters based on commands.
    This adds a couple of properties, and starts the appropriate commands
    in a Popen instance.  The error parameter in the constructor is
    used to indicate where stderr should go; by default, it goes to
    /dev/null.
    If restore_command is None, then backup_command will be used.
    """
    def __init__(self, backup_command=["/bin/cat"], restore_command=None,
                 error=None):
        """
        Parameters:
        backup_command - (list) argv run while backing up.
        restore_command - (list) argv run while restoring; falls back to
            backup_command when None.
        error - (file-like) destination for the command's stderr.
        """
        super(ZFSBackupFilterCommand, self).__init__()
        self._backup_command = backup_command
        self._restore_command = restore_command
        self.error = error
        # Bug fix: _error_output was never initialized, so reading the
        # error_output property before assigning it raised AttributeError.
        self._error_output = None

    @property
    def backup_command(self):
        return self._backup_command

    @property
    def restore_command(self):
        return self._restore_command or self.backup_command

    @property
    def error_output(self):
        return self._error_output

    @error_output.setter
    def error_output(self, where):
        # NOTE(review): closing self.error when redirecting looks
        # intentional (drop the old sink) but is unusual -- confirm.
        if self.error:
            self.error.close()
        self._error_output = where

    def start_restore(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source as stdin, and
        subprocess.PIPE as stdout, and return popen.stdout.
        If error is None, we open /dev/null for writing and
        use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = POPEN(self.restore_command,
                  bufsize=1024 * 1024,
                  stdin=source,
                  stdout=subprocess.PIPE,
                  stderr=self.error)
        return p.stdout

    def start_backup(self, source):
        """
        source is a file-like object, usually a pipe.
        We run Popen, setting source up as stdin,
        and subprocess.PIPE as output, and return
        popen.stdout.
        If error is None, we open /dev/null for writing
        and use that.
        """
        if self.error is None:
            self.error = open("/dev/null", "w+")
        p = POPEN(self.backup_command,
                  bufsize=1024 * 1024,
                  stderr=self.error,
                  stdin=source,
                  stdout=subprocess.PIPE)
        return p.stdout
class ZFSBackupFilterCompressed(ZFSBackupFilterCommand):
    """
    A sample command filter, for compressing.
    One optional parameter, pigz, selects the parallel gzip
    implementation instead of the stock one.
    """
    def __init__(self, pigz=False):
        if pigz:
            (compress, decompress) = ("/usr/local/bin/pigz",
                                      "/usr/local/bin/unpigz")
        else:
            (compress, decompress) = ("/usr/bin/gzip",
                                      "/usr/bin/gunzip")
        super(ZFSBackupFilterCompressed, self).__init__(
            backup_command=[compress],
            restore_command=[decompress])
class ZFSBackupFilterCounter(ZFSBackupFilterThread):
    """
    A sample thread filter.  All this does is count the
    bytes that come in to be processed, passing them through
    unchanged.
    """
    def __init__(self, handler=None):
        """
        Parameters:
        handler - optional callable invoked with the final byte count
            when the count property is read.
        """
        super(ZFSBackupFilterCounter, self).__init__()
        self._count = 0
        self.handler = handler

    @property
    def name(self):
        # Fix: was a plain method, shadowing the base class' `name`
        # property and returning a bound method to callers.
        return "ZFS Count Filter"

    def process(self, b):
        # Tally and pass the buffer through unchanged.
        self._count += len(b)
        return b

    @property
    def handler(self):
        return self._handler

    @handler.setter
    def handler(self, h):
        self._handler = h

    @property
    def count(self):
        """Final byte count; blocks until the worker thread finishes."""
        self._done.wait()
        # Bug fix: `iscallable` is not a builtin and raised NameError
        # whenever a handler was set; the builtin is callable().
        if self.handler and callable(self.handler):
            self.handler(self._count)
        return self._count
class ZFSBackup(object):
    """
    Base class for doing ZFS backups.
    Backups are done using snapshots -- zfs send is used -- not using files.
    Every backup must have a source and a target, although subclasses
    can change how they are interpreted.  Backups can be recursive.
    One ZFSBackup object should be created for each <source, target>, but
    not for each snapshot.  That is, you would use
        backup = ZFSBackup("/tank/Media", "/backup/tank/Media", recursive=True)
        <do backup>
        backup = ZFSBackup("/tank/Documents", "/backup/tank/Documents")
        <do backup>
    instead of creating a ZFSBackup object for each snapshot.
    In general, backups and restores are simply inverses of each other.
    In order to perform backups, it is necessary to get a list of snapshots
    on both the source and target.  An empty list on the target will mean
    a full backup is being done; an empty list on the source is a failure.
    Backups can have filters applied to them.  This is not used in the base
    class (since it only implements ZFS->ZFS), but subclasses may wish to
    add filters for compression, encryption, or accounting.  Some sample
    filter classes are provided.
    Some notes on how replication works:
    * source is the full path to the dataset.  *Or* it can be the entire pool.
    * target is the dataset to which the replication should go.
    * If source is the full pool, then the target will have all of the files
    at the root of the source pool.
    * If source is NOT the full pool, then the target will end up with only the
    dataset(s) being replicated -- but any intervening datasets will be created.
    What this means:
    * tank -> backup/tank means we end up with backup/tank as a copy of tank.
    * tank/usr/home -> backup/home means we end up with backup/home/usr/home.
    * When getting snapshots for the destination, we need to add the path for
    source, *minus* the pool name.
    * UNLESS we are replicating the full pool.
    What *that* means:
    * tank -> backup/tank means getting snapshots from backup/tank
    * tank/usr/home -> backup/home means getting snapshots from backup/home/usr/home
    """
    def __init__(self, source, target, recursive=False):
        """
        Parameters:
        source - (str) a ZFS pool or dataset to be backed up.
        target - (str) a ZFS dataset to be backed up.
        recursive - (bool) Indicate whether the backup is to be recursive or not.
        The only thing the base class does is run some validation tests
        on the source and target.
        """
        self.target = target
        self.source = source
        self.recursive = recursive
        # Snapshot lists are queried lazily and then cached.
        self._source_snapshots = None
        self._target_snapshots = None
        # Optional filter pipeline (compression, accounting, ...).
        self._filters = []
        self.validate()
    @property
    def target(self):
        # Destination dataset (or whatever the subclass interprets it as).
        return self._dest
    @target.setter
    def target(self, t):
        self._dest = t
    @property
    def source(self):
        # Source pool or dataset being backed up.
        return self._source
    @source.setter
    def source(self, s):
        self._source = s
    @property
    def recursive(self):
        # Whether `zfs send -R` is used.
        return self._recursive
    @recursive.setter
    def recursive(self, b):
        self._recursive = b
    def AddFilter(self, filter):
        """
        Add a filter.  The filter is set up during the backup and
        restore methods.  The filter needs to be an instance of
        ZFSFilter -- at least, it needs to have the start_backup and
        start_restore methods.
        """
        if not callable(getattr(filter, "start_backup", None)) and \
           not callable(getattr(filter, "start_restore", None)):
            raise ValueError("Incorrect type passed for filter")
        self._filters.append(filter)
    def _filter_backup(self, source, error=None):
        # Private method, to stitch the backup filters together.
        # Each filter's output becomes the next filter's input.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_backup(input)
        return input
    def _filter_restore(self, source, error=None):
        # Private method, to stitch the restore filters together.
        # NOTE(review): filters are applied in the same order as for
        # backup, not reversed -- confirm that is intended.
        input = source
        for f in self._filters:
            f.error_output = error
            input = f.start_restore(input)
        return input
    def __repr__(self):
        return "{}(source={}, target={})".format(self.__class__.__name__, self.source, self.target)
    @property
    def source_snapshots(self):
        """
        Return a list of snapshots on the source.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if the recursive is true, this _only_ lists the snapshots for the
        source (recursive requires that the same snapshot exist on the descendents,
        or it doesn't get backed up).
        We cache this so we don't have to keep doing a list.
        """
        if not self._source_snapshots:
            self._source_snapshots = _get_snapshots(self.source)
        return self._source_snapshots
    @property
    def target_snapshots(self):
        """
        Return a list of snapshots on the target.  The return value is
        an array of dictionaries; the dictionaries have, at minimum, two
        elements:
            Name -- (str) Snapshot name.  The part that goes after the '@'
            CreationTime -- (int) Time (in unix epoch seconds) the snapshot was created.
        Even if the recursive is true, this _only_ lists the snapshots for the
        target dataset.
        We cache this so we don't have to keep doing a list.
        """
        if not self._target_snapshots:
            # See the long discussion above about snapshots.
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            self._target_snapshots = _get_snapshots(target_path)
        return self._target_snapshots
    def validate(self):
        """
        Ensure the destination exists.  Derived classes will want
        to override this (probably).
        Raises ZFSBackupError if the target is missing or the source
        has no snapshots.
        """
        command = ["/sbin/zfs", "list", "-H", self.target]
        try:
            with open("/dev/null", "w") as devnull:
                CHECK_CALL(command, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("Target {} does not exist".format(self.target))
        if not self.source_snapshots:
            # A source with no snapshots cannot be backed up
            raise ZFSBackupError("Source {} does not have snapshots".format(self.source))
        return
    def backup_handler(self, stream, **kwargs):
        """
        Method called to write the backup to the target.  In the base class,
        this simply creates the necessary datasets on the target, and then
        creates a Popen subprocess for 'zfs recv' with the appropriate arguments,
        and sets its stdin to stream.
        Subclasses will probably want to replace this method.
        """
        # First we create the intervening dataset paths.  That is, the
        # equivalent of 'mkdir -p ${target}/${source}'.
        # We don't care if it fails.
        full_path = self.target
        with open("/dev/null", "w+") as devnull:
            for d in self.source.split("/")[1:]:
                full_path = os.path.join(full_path, d)
                command = ["/sbin/zfs", "create", "-o", "readonly=on", full_path]
                if debug:
                    print("Running command {}".format(" ".join(command)), file=sys.stderr)
                try:
                    CALL(command, stdout=devnull, stderr=devnull)
                except:
                    # Best-effort: the dataset may already exist.
                    pass
        # Now we just send the data to zfs recv.
        # Do we need -p too?
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            fobj = stream
            try:
                CHECK_CALL(command, stdin=fobj,
                           stderr=error_output)
            except subprocess.CalledProcessError:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        return
    def backup(self, snapname=None, force_full=False):
        """
        Back up the source to the target.
        If snapname is given, then that will be the snapshot used for the backup,
        otherwise it will be the most recent snapshot.  If snapname is given and
        does not exist, an exception is raised.
        By default, it will first find a list of snapshots in common with the
        source and target, ordered chronologically (based on the source).
        If force_full is True, then the snapshot chosen will be sent in its entirety,
        rather than trying to find a common ancestor for an incremental snapshot.
        This is the main driver of the backup process, and subclasses should be okay
        with using it.
        """
        # First, if snapname is given, let's make sure that it exists on the source.
        if snapname:
            # If snapname has the dataset in it, let's remove it
            if '@' in snapname:
                (_, snapname) = snapname.split("@")
            snap_index = None
            for indx, d in enumerate(self.source_snapshots):
                if d["Name"] == snapname:
                    snap_index = indx
                    break
            if snap_index is None:
                raise ZFSBackupError("Specified snapshot {} does not exist".format(snapname))
            # We want to remove everything in source_snapshots up to the given one
            source_snapshots = self.source_snapshots[0:snap_index+1]
        else:
            source_snapshots = self.source_snapshots
        last_snapshot = source_snapshots[-1]
        if debug:
            print("last_snapshot = {}".format(last_snapshot), file=sys.stderr)
        last_common_snapshot = None
        if force_full:
            common_snapshots = []
        else:
            common_snapshots = _merge_snapshots(source_snapshots, self.target_snapshots)
        # At this point, common_snapshots has a list of snapshot names on both.
        # If there are no common snapshots, then we back up everything up to last_snapshot
        if debug:
            print("ZFSBackup: last_snapshot = {}, common_snapshots = {}".format(last_snapshot,
                                                                                common_snapshots),
                  file=sys.stderr)
        if last_snapshot["Name"] not in common_snapshots:
            if debug:
                print("We have to do some sends/receives", file=sys.stderr)
            # We need to do incremental snapshots from the last common snapshot to
            # last_snapshot.
            if common_snapshots:
                # Don't bother doing this if we have no snapshots in common
                last_common_snapshot = common_snapshots[-1]
                if debug:
                    print("Last common snapshot = {}".format(last_common_snapshot), file=sys.stderr)
                for indx, snap in enumerate(source_snapshots):
                    if snap["Name"] == last_common_snapshot:
                        break
                snapshot_list = source_snapshots[indx:]
            else:
                # Either it's been deleted on the remote end, or it's newer than the list.
                # So we start at a full dump from last_snapshot
                snapshot_list = [last_snapshot]
        else:
            snapshot_list = [last_snapshot]
        # There are two approaches that could be done here.
        # One is to do incremental sends for every snapshot; the other
        # is simply to do a send -I.  I'm choosing the latter.
        # If we have a last common snapshot, we can do an incremental from it to
        # the last snapshot; if we don't, we'll need to do a full send.
        command = ["/sbin/zfs", "send"]
        if self.recursive:
            command.append("-R")
        backup_dict = {}
        if last_common_snapshot:
            command.extend(["-I", "{}".format(last_common_snapshot)])
            backup_dict["incremental"] = True
            backup_dict["parent"] = last_common_snapshot
        else:
            backup_dict["incremental"] = False
        backup_dict["CreationTime"] = last_snapshot["CreationTime"]
        command.append("{}@{}".format(self.source, last_snapshot["Name"]))
        if debug:
            print(" ".join(command), file=sys.stderr)
        with tempfile.TemporaryFile() as error_output:
            with open("/dev/null", "w+") as devnull:
                mByte = 1024 * 1024
                send_proc = POPEN(command,
                                  bufsize=mByte,
                                  stdin=devnull,
                                  stderr=error_output,
                                  stdout=subprocess.PIPE)
                # NOTE(review): returncode is only populated after wait()
                # or poll(); without one of those this check may read None
                # even if zfs send failed -- confirm.
                self.backup_handler(send_proc.stdout, **backup_dict)
                if send_proc.returncode:
                    error_output.seek(0)
                    raise ZFSBackupError(error_output.read())
        return
    def replicate(self, source, snapname, previous=None, date=int(time.time())):
        """
        Replicate from source.  source must be an object that supports
        read().  If date is not given, we will use the current time, so
        it should really be set.  The full snapshot name from the source
        would be dataset@snapname.  If previous is set, it indicates this
        is an incremental snapshot.
        The snapname, previous, and date parameters are for informational purposes only;
        the base class doesn't use them, but derived classes may.
        NOTE(review): the `date` default is evaluated once at class
        definition time, not per call -- callers should always pass it.
        """
        # NOTE(review): `self.dataset` is not defined anywhere in this
        # class (here and in the except clause below); this looks like a
        # leftover from a rename -- confirm before using replicate().
        destination = os.path.join(self.target, self.dataset)
        command = ["/sbin/zfs", "receive", "-d", "-F", self.target]
        with tempfile.TemporaryFile() as error_output:
            # ZFS->ZFS replication doesn't use filters.
            # fobj = self._filter(source, error=error_output)
            fobj = source
            try:
                CHECK_CALL(command, stdin=fobj, stderr=error_output)
            except subprocess.CalledProcessError:
                name = "{}@{}".format(self.dataset, snapname)
                error_output.seek(0)
                if debug:
                    print("`{}` failed: {}".format(" ".join(command), error_output.read()),
                          file=sys.stderr)
                raise ZFSBackupError("Could not replicate {} to target {}".format(name, self.target))
        return
    @property
    def snapshots(self):
        """
        Return an array of snapshots for the destination.
        Each entry in the array is a dictionary with at least
        two keys -- Name and CreationTime.  CreationTime is
        an integer (unix seconds).  The array is sorted by
        creation time (oldest first).  If there are no snapshots,
        an empty array is returned.
        This would be better with libzfs.
        NOTE(review): unlike _get_snapshots(), this keeps the full
        "dataset@snap" string in Name and calls subprocess directly
        (bypassing the CHECK_OUTPUT debug tracing) -- confirm whether
        that difference is intentional.
        """
        command = ["/sbin/zfs", "list", "-H", "-p", "-o", "name,creation",
                   "-r", "-d", "1", "-t", "snapshot", "-s", "creation",
                   self.target]
        try:
            output = subprocess.check_output(command).split("\n")
        except subprocess.CalledProcessError:
            # We'll assume this is because there are no snapshots
            return []
        snapshots = []
        for snapshot in output:
            if not snapshot:
                continue
            (name, ctime) = snapshot.rstrip().split()
            snapshots.append({"Name" : name, "CreationTime" : int(ctime) })
        return snapshots
class ZFSBackupSSH(ZFSBackup):
    """
    Replicate to a remote host using ssh.
    This runs all of the commands the base class does, but via ssh
    to another host.
    When running a command on a remote host, we have the following
    options:
    1) We don't care about input or output, only the return value.
    2) We stream to it, or from it.
    (1) is mostly for validation -- ensure the target exists, and
    we can connect to it.
    For (2), we stream to it (writing to stdin), and don't care about
    the output until after, for backup.
    For (2), we stream _from_ it (reading from its stdout) when getting
    a list of snapshots, and when doing a restore.
    """
    def __init__(self, source, target, remote_host,
                 remote_user=None,
                 ssh_opts=None,
                 recursive=False):
        """
        Parameters:
        source - (str) ZFS pool or dataset to back up.
        target - (str) dataset on the remote host to receive into.
        remote_host - (str) host to ssh to.
        remote_user - (str) optional remote user (current user if None).
        ssh_opts - (list) extra command-line options handed to ssh.
        recursive - (bool) whether replication is recursive.
        """
        self._user = remote_user
        self._host = remote_host
        # Copy so we never alias (or mutate) the caller's list; a None
        # default avoids the shared-mutable-default-argument pitfall.
        self._ssh_opts = list(ssh_opts) if ssh_opts else []
        super(ZFSBackupSSH, self).__init__(source, target, recursive)

    @property
    def user(self):
        return self._user

    @property
    def host(self):
        return self._host

    @property
    def ssh_options(self):
        return self._ssh_opts

    def _build_command(self, cmd, *args):
        """Return the full local argv that runs cmd (plus args) over ssh."""
        # First set up ssh.
        command = ["/usr/bin/ssh"]
        if self.ssh_options:
            command.extend(self.ssh_options)
        if self.user:
            command.append("{}@{}".format(self.user, self.host))
        else:
            command.append(self.host)
        # Then goes the rest of the command
        command.append(cmd)
        if args:
            command.extend(args)
        return command

    def _run_cmd(self, cmd, *args, **kwargs):
        """
        This implements running a command and not caring about
        the output.  If stdout or stderr are given, those will
        be file-like objects that the output and error are written
        to.  If the command exits with a non-0 value, we raise an
        exception.
        """
        command = self._build_command(cmd, *args)
        try:
            CHECK_CALL(command, **kwargs)
        except subprocess.CalledProcessError:
            raise ZFSBackupError("`{}` failed".format(command))

    def _remote_stream(self, cmd, *args, **kwargs):
        """
        Run a command on the remote host, but we want to write to or read
        from it.  We return a subprocess.Popen object, so the caller
        needs to specify stdin=subprocess.PIPE, or stdout.  Both can't be pipes.
        This should only be called by _remote_write or _remote_read.
        """
        command = self._build_command(cmd, *args)
        # Bug fix: this used to run POPEN(cmd[0], *cmd[1:], ...), which
        # both ignored the ssh wrapper just built and, since cmd is a
        # string, exploded it into individual characters.
        return POPEN(command, **kwargs)

    def _remote_write(self, cmd, *args, **kwargs):
        """
        Run a command on the remote host, writing to it via stdin.
        """
        # Force stdin to be a pipe, overriding any caller-supplied value.
        kwargs["stdin"] = subprocess.PIPE
        return self._remote_stream(cmd, *args, **kwargs)

    def _remote_read(self, cmd, *args, **kwargs):
        """
        Run a command on the remote host, reading its stdout.
        """
        # Force stdout to be a pipe, overriding any caller-supplied value.
        kwargs["stdout"] = subprocess.PIPE
        return self._remote_stream(cmd, *args, **kwargs)

    def validate(self):
        """
        Do a couple of validations: that we can ssh to the remote host,
        and that the target dataset exists there.
        """
        # See if we can connect to the remote host
        with tempfile.TemporaryFile() as error_output:
            try:
                self._run_cmd("/usr/bin/true", stderr=error_output)
            except ZFSBackupError:
                error_output.seek(0)
                raise ZFSBackupError("Unable to connect to remote host: {}".format(error_output.read()))
        # See if the target exists
        with open("/dev/null", "w+") as devnull:
            try:
                self._run_cmd("/sbin/zfs", "list", "-H", self.target,
                              stdout=devnull, stderr=devnull, stdin=devnull)
            except ZFSBackupError:
                raise ZFSBackupError("Target {} does not exist on remote host".format(self.target))
        return

    def backup_handler(self, stream, **kwargs):
        """
        Implement the replication.
        This is not right yet: we need to decompress and decrypt and dewhatever else
        and do it by creating a pipeline on the remote end.
        """
        # First, we create the intervening dataset paths.  See the base
        # class' method.  Failures are ignored (datasets may already exist).
        full_path = self.target
        with open("/dev/null", "w+") as devnull:
            for d in self.source.split("/")[1:]:
                full_path = os.path.join(full_path, d)
                command = self._build_command("/sbin/zfs", "create", "-o", "readonly=on", full_path)
                try:
                    CALL(command, stdout=devnull, stderr=devnull, stdin=devnull)
                except subprocess.SubprocessError:
                    pass
        # Here's where we would have to go through the filters, if any, and undo them.
        # But some of the possible filters aren't needed, so I need a way to indicate that.
        # For now, I'll simply assume uncompressed, unencrypted, etc.
        command = self._build_command("/sbin/zfs", "receive", "-d", "-F", self.target)
        with tempfile.TemporaryFile() as error_output:
            # See above
            fobj = stream
            try:
                CHECK_CALL(command, stdin=fobj, stderr=error_output)
            except subprocess.CalledProcessError:
                error_output.seek(0)
                raise ZFSBackupError(error_output.read())
        return

    @property
    def target_snapshots(self):
        """Snapshots on the remote target path; cached after the first query."""
        if not self._target_snapshots:
            # See the base class for the path-mapping discussion.
            (src_pool, _, src_ds) = self.source.partition("/")
            if src_ds:
                target_path = "{}/{}".format(self.target, src_ds)
            else:
                target_path = "{}/{}".format(self.target, src_pool)
            command = self._build_command("/sbin/zfs", "list", "-H", "-p",
                                          "-o", "name,creation", "-r",
                                          "-d", "1", "-t", "snapshot", "-s",
                                          "creation", target_path)
            snapshots = []
            try:
                output = CHECK_OUTPUT(command).split("\n")
                for snapshot in output:
                    if not snapshot:
                        continue
                    (name, ctime) = snapshot.rstrip().split()
                    name = name.split('@')[1]
                    snapshots.append({"Name": name, "CreationTime": int(ctime)})
            except subprocess.CalledProcessError:
                # We'll assume this is because there are no snapshots
                pass
            # Bug fix: cache the result.  Previously nothing was ever
            # assigned to self._target_snapshots, and if it ever had been,
            # the return below would have hit an unbound local.
            self._target_snapshots = snapshots
        return self._target_snapshots
class ZFSBackupCount(ZFSBackup):
    """
    A pseudo-backup target that simply counts the bytes in the stream.

    Useful for measuring how large a backup would be (after any
    filters) without writing it anywhere.
    """

    def __init__(self, source, recursive=False):
        # No real target -- pass an empty string to the base class.
        super(ZFSBackupCount, self).__init__(source, "", recursive)
        self._count = 0

    def __repr__(self):
        return "{}(source={}, recursive={})".format(
            self.__class__.__name__, self.source, self.recursive)

    def validate(self):
        # Nothing to validate: there is no destination.
        return

    def backup_handler(self, stream, **kwargs):
        """Consume the (filtered) stream, tallying its size in bytes."""
        chunk_size = 1024 * 1024
        total = 0
        fobj = self._filter_backup(stream)
        chunk = fobj.read(chunk_size)
        while chunk:
            total += len(chunk)
            chunk = fobj.read(chunk_size)
        self._count = total

    @property
    def target_snapshots(self):
        return []

    @property
    def count(self):
        return self._count
def main():
    """
    Command-line entry point: parse arguments and run one replication.

    Exits non-zero when no snapshot or no replication subcommand is
    given, or when the subcommand is unknown.
    """
    global debug
    import argparse

    def to_bool(s):
        # argparse 'bool' converter: accept the usual truthy spellings.
        if s.lower() in ("yes", "1", "true", "t", "y"):
            return True
        return False

    parser = argparse.ArgumentParser(description='ZFS snapshot replicator')
    parser.register('type', 'bool', to_bool)
    parser.add_argument("--debug", dest='debug',
                        action='store_true', default=False,
                        help='Turn on debugging')
    parser.add_argument("--verbose", dest='verbose', action='store_true',
                        default=False, help='Be verbose')
    # BUG FIX: must be the registered 'bool' converter, not the builtin
    # bool -- bool("False") is True, so the original type=bool made ANY
    # non-empty value count as recursive.
    parser.add_argument('--recursive', '-R', dest='recursive',
                        type='bool',
                        default=False,
                        help='Recursively replicate')
    parser.add_argument('--snapshot', '-S', dest='snapshot_name',
                        default=None,
                        help='Snapshot to replicate')
    parser.add_argument("--compressed", "-C", dest='compressed',
                        action='store_true', default=False,
                        help='Compress snapshots')
    parser.add_argument('--pigz', action='store_true',
                        dest='use_pigz', default=False,
                        help='Use pigz to compress')
    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
    # We have a sub parser for each type of replication
    # Currently just ZFS, SSH and Counter
    zfs_parser = subparsers.add_parser('zfs',
                                       help='Replicate to local ZFS dataset')
    zfs_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    counter_parser = subparsers.add_parser('counter',
                                           help='Count replication bytes')
    # ssh parser has a lot more options
    ssh_parser = subparsers.add_parser("ssh",
                                       help="Replicate to a remote ZFS server")
    ssh_parser.add_argument('--dest', '-D', dest='destination',
                            required=True,
                            help='Pool/dataset target for replication')
    ssh_parser.add_argument('--host', '-H', dest='remote_host',
                            required=True,
                            help='Remote hostname')
    ssh_parser.add_argument("--user", '-U', dest='remote_user',
                            help='Remote user (defaults to current user)')
    args = parser.parse_args()
    debug = args.debug
    if debug:
        print("args = {}".format(args), file=sys.stderr)
    # BUG FIX: --snapshot may be omitted; the original crashed with an
    # AttributeError on None.split('@') instead of reporting the problem.
    if args.snapshot_name is None:
        print("No snapshot specified", file=sys.stderr)
        sys.exit(1)
    try:
        (dataset, snapname) = args.snapshot_name.split('@')
    except ValueError:
        print("Invalid snapshot name {}".format(args.snapshot_name), file=sys.stderr)
        dataset = args.snapshot_name
        snapname = None
    if args.subcommand is None:
        print("No replication type method. Valid types are zfs, counter", file=sys.stderr)
        sys.exit(1)
    elif args.subcommand == 'counter':
        backup = ZFSBackupCount(dataset, recursive=args.recursive)
    elif args.subcommand == 'zfs':
        backup = ZFSBackup(dataset, args.destination, recursive=args.recursive)
    elif args.subcommand == 'ssh':
        backup = ZFSBackupSSH(dataset, args.destination, args.remote_host,
                              remote_user=args.remote_user,
                              recursive=args.recursive)
    else:
        print("Unknown replicator {}".format(args.subcommand), file=sys.stderr)
        sys.exit(1)
    if args.compressed:
        backup.AddFilter(ZFSBackupFilterCompressed(pigz=args.use_pigz))
    if args.verbose:
        print("Starting backup of {}".format(dataset))
    backup.backup(snapname=snapname)
    if args.verbose:
        print("Done with backup")
    if isinstance(backup, ZFSBackupCount):
        print("{} bytes".format(backup.count))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""
Utilities for administering elasticsearch
These can be run locally when connected to the VPN
"""
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from collections import namedtuple
import json
import sys
from elasticsearch import Elasticsearch
from elasticsearch.client import ClusterClient, NodesClient, CatClient, IndicesClient
def pprint(data):
print json.dumps(data, indent=4)
def confirm(msg):
if raw_input(msg + "\n(y/n)") != 'y':
sys.exit()
Node = namedtuple("Node", "name node_id docs settings")
def get_nodes_info(es):
nc = NodesClient(es)
stats = nc.stats(metric="indices", index_metric="docs")
info = nc.info()
return [
Node(
name=data['name'],
node_id=node_id,
docs=data['indices']['docs'],
settings=info['nodes'][node_id]['settings'],
)
for node_id, data in stats['nodes'].items()
]
def cluster_status(es):
cluster = ClusterClient(es)
print "\nCLUSTER HEALTH"
pprint(cluster.health())
print "\nPENDING TASKS"
pprint(cluster.pending_tasks())
print "\nNODES"
for node in get_nodes_info(es):
print node.name, node.docs
print "\nSHARD ALLOCATION"
cat = CatClient(es)
print cat.allocation(v=True)
def shard_status(es):
cat = CatClient(es)
print cat.shards(v=True)
def cluster_settings(es):
cluster = ClusterClient(es)
pprint(cluster.get_settings())
def index_settings(es):
indices = IndicesClient(es)
pprint(indices.get_settings(flat_settings=True))
def create_replica_shards(es):
# https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices-update-settings.html
indices = IndicesClient(es)
pprint(indices.put_settings({"index.number_of_replicas": 1}, "_all"))
def cancel_replica_shards(es):
indices = IndicesClient(es)
pprint(indices.put_settings({"index.number_of_replicas": 0}, "_all"))
def decommission_node(es):
cluster = ClusterClient(es)
print "The nodes are:"
nodes = get_nodes_info(es)
for node in nodes:
print node.name, node.docs
confirm("Are you sure you want to decommission a node?")
node_name = raw_input("Which one would you like to decommission?\nname:")
names = [node.name for node in nodes]
if node_name not in names:
print "You must enter one of {}".format(", ".join(names))
return
confirm("This will remove all shards from {}, okay?".format(node_name))
cmd = {"transient": {"cluster.routing.allocation.exclude._name": node_name}}
pprint(cluster.put_settings(cmd))
print "The node is now being decommissioned."
def force_zone_replicas(es):
cluster = ClusterClient(es)
print "NODE SETTINGS:"
for node in get_nodes_info(es):
pprint(node.settings)
zones = raw_input("\nEnter the zone names, separated by a comma\n")
confirm("Are you sure these zones exist?")
cmd = {"persistent": {"cluster.routing.allocation.awareness.force.zone.values": zones,
"cluster.routing.allocation.awareness.attributes": "zone"}}
print "This will add the following settings"
pprint(cmd)
confirm("Okay?")
pprint(cluster.put_settings(cmd))
print "Finished"
def clear_zone_replicas(es):
# There doesn't appear to be a proper way to unset settings
# https://github.com/elastic/elasticsearch/issues/6732
cluster = ClusterClient(es)
cmd = {"persistent": {"cluster.routing.allocation.awareness.force.zone.values": "",
"cluster.routing.allocation.awareness.attributes": ""}}
confirm("Remove the allocation awareness settings from the cluster?")
pprint(cluster.put_settings(cmd))
print "Cleared"
commands = {
'cluster_status': cluster_status,
'cluster_settings': cluster_settings,
'index_settings': index_settings,
'decommission_node': decommission_node,
'shard_status': shard_status,
'force_zone_replicas': force_zone_replicas,
'clear_zone_replicas': clear_zone_replicas,
}
def main():
parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('host_url')
parser.add_argument('command', choices=commands.keys())
args = parser.parse_args()
es = Elasticsearch([{'host': args.host_url, 'port': 9200}])
commands[args.command](es)
if __name__ == "__main__":
main()
Use better names for zone aware replication
[ci skip]
#!/usr/bin/env python
"""
Utilities for administering elasticsearch
These can be run locally when connected to the VPN
"""
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from collections import namedtuple
import json
import sys
from elasticsearch import Elasticsearch
from elasticsearch.client import ClusterClient, NodesClient, CatClient, IndicesClient
def pprint(data):
    """Pretty-print *data* as indented JSON on stdout."""
    print json.dumps(data, indent=4)
def confirm(msg):
    """Show *msg* and exit the program unless the user answers 'y'."""
    if raw_input(msg + "\n(y/n)") != 'y':
        sys.exit()
Node = namedtuple("Node", "name node_id docs settings")
def get_nodes_info(es):
    """
    Return a list of Node tuples (name, node_id, docs, settings),
    one per cluster node, combining the nodes-stats and nodes-info APIs.
    """
    nc = NodesClient(es)
    stats = nc.stats(metric="indices", index_metric="docs")
    info = nc.info()
    return [
        Node(
            name=data['name'],
            node_id=node_id,
            docs=data['indices']['docs'],
            settings=info['nodes'][node_id]['settings'],
        )
        for node_id, data in stats['nodes'].items()
    ]
def cluster_status(es):
    """Print cluster health, pending tasks, per-node doc counts, and shard allocation."""
    cluster = ClusterClient(es)
    print "\nCLUSTER HEALTH"
    pprint(cluster.health())
    print "\nPENDING TASKS"
    pprint(cluster.pending_tasks())
    print "\nNODES"
    for node in get_nodes_info(es):
        print node.name, node.docs
    print "\nSHARD ALLOCATION"
    cat = CatClient(es)
    print cat.allocation(v=True)
def shard_status(es):
    """Print the per-shard allocation table (cat API, with headers)."""
    cat = CatClient(es)
    print cat.shards(v=True)
def cluster_settings(es):
    """Print the cluster's persistent and transient settings."""
    cluster = ClusterClient(es)
    pprint(cluster.get_settings())
def index_settings(es):
    """Print every index's settings with flattened dotted keys."""
    indices = IndicesClient(es)
    pprint(indices.get_settings(flat_settings=True))
def create_replica_shards(es):
    """Set number_of_replicas=1 on all indices (starts replica allocation)."""
    # https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices-update-settings.html
    indices = IndicesClient(es)
    pprint(indices.put_settings({"index.number_of_replicas": 1}, "_all"))
def cancel_replica_shards(es):
    """Set number_of_replicas=0 on all indices (drops existing replicas)."""
    indices = IndicesClient(es)
    pprint(indices.put_settings({"index.number_of_replicas": 0}, "_all"))
def decommission_node(es):
    """
    Interactively pick a node and exclude it from shard allocation,
    which drains its shards onto the rest of the cluster.
    """
    cluster = ClusterClient(es)
    print "The nodes are:"
    nodes = get_nodes_info(es)
    for node in nodes:
        print node.name, node.docs
    confirm("Are you sure you want to decommission a node?")
    node_name = raw_input("Which one would you like to decommission?\nname:")
    names = [node.name for node in nodes]
    if node_name not in names:
        print "You must enter one of {}".format(", ".join(names))
        return
    confirm("This will remove all shards from {}, okay?".format(node_name))
    # Transient: the exclusion disappears after a full cluster restart.
    cmd = {"transient": {"cluster.routing.allocation.exclude._name": node_name}}
    pprint(cluster.put_settings(cmd))
    print "The node is now being decommissioned."
def force_zone_awareness(es):
    """
    Interactively enable forced zone-aware shard allocation for the
    given comma-separated zone names (persistent cluster setting).
    """
    cluster = ClusterClient(es)
    print "NODE SETTINGS:"
    for node in get_nodes_info(es):
        pprint(node.settings)
    zones = raw_input("\nEnter the zone names, separated by a comma\n")
    confirm("Are you sure these zones exist?")
    cmd = {"persistent": {"cluster.routing.allocation.awareness.force.zone.values": zones,
                          "cluster.routing.allocation.awareness.attributes": "zone"}}
    print "This will add the following settings"
    pprint(cmd)
    confirm("Okay?")
    pprint(cluster.put_settings(cmd))
    print "Finished"
def clear_zone_awareness(es):
    """Blank out the zone-awareness allocation settings."""
    # There doesn't appear to be a proper way to unset settings
    # https://github.com/elastic/elasticsearch/issues/6732
    cluster = ClusterClient(es)
    cmd = {"persistent": {"cluster.routing.allocation.awareness.force.zone.values": "",
                          "cluster.routing.allocation.awareness.attributes": ""}}
    confirm("Remove the allocation awareness settings from the cluster?")
    pprint(cluster.put_settings(cmd))
    print "Cleared"
# Dispatch table: CLI command name -> handler taking an Elasticsearch client.
commands = {
    'cluster_status': cluster_status,
    'cluster_settings': cluster_settings,
    'index_settings': index_settings,
    'decommission_node': decommission_node,
    'shard_status': shard_status,
    'create_replica_shards': create_replica_shards,
    'cancel_replica_shards': cancel_replica_shards,
    'force_zone_awareness': force_zone_awareness,
    'clear_zone_awareness': clear_zone_awareness,
}
def main():
    """Parse the host and command from argv and dispatch to a handler."""
    parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('host_url')
    parser.add_argument('command', choices=commands.keys())
    args = parser.parse_args()
    es = Elasticsearch([{'host': args.host_url, 'port': 9200}])
    commands[args.command](es)
if __name__ == "__main__":
main()
|
import json
import os
import numpy
import sklearn
import pandas
from elephant.estimator import Estimator
def main(data_set_name):
with open(os.path.join('../specs', data_set_name + '.json')) as specs_file:
specs = json.load(specs_file)
data_set = pandas.read_csv(os.path.join('../data', specs['file']), sep=specs['separator'], engine=specs['engine'],
skiprows=specs['header_rows'])
print(data_set.head())
with open(os.path.join(os.path.dirname(__file__), 'neural-net.json')) as config_file:
config = json.load(config_file)
x = data_set.ix[:, :2].values
estimator = Estimator(config, x)
y = data_set.ix[:, 2].values.reshape(-1, 1)
if specs['scaling']:
y = sklearn.preprocessing.MaxAbsScaler().fit_transform(numpy.log(y))
print('testing_error =', estimator.estimate(y, config['batch_size'], specs['test_size'], specs['metric']))
if __name__ == '__main__':
recommendation_data = ['movie-lens-100k', 'movie-lens-1m', 'e-pinions', 'movie-tweeting', ]
graph_data = ['airport', 'collaboration', 'forum', ]
main('forum')
rename data directory to resources directory
import json
import os
import numpy
import sklearn
import pandas
from elephant.estimator import Estimator
def main(data_set_name):
    """
    Train/evaluate the Estimator on the named data set.

    Reads ../specs/<data_set_name>.json for data-set metadata, loads
    the CSV from ../resources, and prints the testing error.
    """
    # Local import: a bare `import sklearn` is not guaranteed to expose
    # the `preprocessing` submodule; import it explicitly.
    from sklearn.preprocessing import MaxAbsScaler

    with open(os.path.join('../specs', data_set_name + '.json')) as specs_file:
        specs = json.load(specs_file)
    data_set = pandas.read_csv(os.path.join('../resources', specs['file']),
                               sep=specs['separator'], engine=specs['engine'],
                               skiprows=specs['header_rows'])
    print(data_set.head())
    with open(os.path.join(os.path.dirname(__file__), 'neural-net.json')) as config_file:
        config = json.load(config_file)
    # .ix was removed from pandas; .iloc is the positional equivalent
    # (first two columns as features, third as the target).
    x = data_set.iloc[:, :2].values
    estimator = Estimator(config, x)
    y = data_set.iloc[:, 2].values.reshape(-1, 1)
    if specs['scaling']:
        # Log-scale then scale to [-1, 1]; assumes y > 0 -- TODO confirm.
        y = MaxAbsScaler().fit_transform(numpy.log(y))
    print('testing_error =', estimator.estimate(y, config['batch_size'], specs['test_size'], specs['metric']))
if __name__ == '__main__':
recommendation_data = ['movie-lens-100k', 'movie-lens-1m', 'e-pinions', 'movie-tweeting', ]
graph_data = ['airport', 'collaboration', 'forum', ]
main('forum')
|
from __future__ import print_function
try:
# python 2
from Queue import Empty
except:
# python 3
from queue import Empty
import platform
from time import sleep
import logging
import os
try:
# IPython 3
from IPython.nbformat import NotebookNode
except ImportError:
from IPython.nbformat.current import NotebookNode
from IPython.kernel import KernelManager
class NotebookError(Exception):
pass
class NotebookRunner(object):
# The kernel communicates with mime-types while the notebook
# uses short labels for different cell types. We'll use this to
# map from kernel types to notebook format types.
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
def __init__(
self,
nb,
pylab=False,
mpl_inline=False,
profile_dir=None,
working_dir=None):
self.km = KernelManager()
args = []
if pylab:
args.append('--pylab=inline')
logging.warn(
'--pylab is deprecated and will be removed in a future version'
)
elif mpl_inline:
args.append('--matplotlib=inline')
logging.warn(
'--matplotlib is deprecated and' +
' will be removed in a future version'
)
if profile_dir:
args.append('--profile-dir=%s' % os.path.abspath(profile_dir))
cwd = os.getcwd()
if working_dir:
os.chdir(working_dir)
self.km.start_kernel(extra_arguments=args)
os.chdir(cwd)
if platform.system() == 'Darwin':
# There is sometimes a race condition where the first
# execute command hits the kernel before it's ready.
# It appears to happen only on Darwin (Mac OS) and an
# easy (but clumsy) way to mitigate it is to sleep
# for a second.
sleep(1)
self.kc = self.km.client()
self.kc.start_channels()
try:
self.kc.wait_for_ready()
except AttributeError:
# IPython < 3
self._wait_for_ready_backport()
self.nb = nb
def shutdown_kernel(self):
logging.info('Shutdown kernel')
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
def _wait_for_ready_backport(self):
"""Backport BlockingKernelClient.wait_for_ready from IPython 3"""
# Wait for kernel info reply on shell channel
self.kc.kernel_info()
while True:
msg = self.kc.get_shell_msg(block=True, timeout=30)
if msg['msg_type'] == 'kernel_info_reply':
break
# Flush IOPub channel
while True:
try:
msg = self.kc.get_iopub_msg(block=True, timeout=0.2)
except Empty:
break
def run_cell(self, cell):
"""
Run a notebook cell and update the output of that cell in-place.
"""
logging.info('Running cell:\n%s\n', cell.input)
self.kc.execute(cell.input)
reply = self.kc.get_shell_msg()
status = reply['content']['status']
traceback_text = ''
if status == 'error':
traceback_text = 'Cell raised uncaught exception: \n' + \
'\n'.join(reply['content']['traceback'])
logging.info(traceback_text)
else:
logging.info('Cell returned')
outs = list()
while True:
try:
msg = self.kc.get_iopub_msg(timeout=1)
if msg['msg_type'] == 'status':
if msg['content']['execution_state'] == 'idle':
break
except Empty:
# execution state should return to idle
# before the queue becomes empty,
# if it doesn't, something bad has happened
raise
content = msg['content']
msg_type = msg['msg_type']
# IPython 3.0.0-dev writes pyerr/pyout in the notebook format
# but uses error/execute_result in the message spec. This does the
# translation needed for tests to pass with IPython 3.0.0-dev
notebook3_format_conversions = {
'error': 'pyerr',
'execute_result': 'pyout'
}
msg_type = notebook3_format_conversions.get(msg_type, msg_type)
out = NotebookNode(output_type=msg_type)
if 'execution_count' in content:
cell['prompt_number'] = content['execution_count']
out.prompt_number = content['execution_count']
if msg_type in ('status', 'pyin', 'execute_input'):
continue
elif msg_type == 'stream':
out.stream = content['name']
# in msgspec 5, this is name, text
# in msgspec 4, this is name, data
if 'text' in content:
out.text = content['text']
else:
out.text = content['data']
#print(out.text, end='')
elif msg_type in ('display_data', 'pyout'):
for mime, data in content['data'].items():
try:
attr = self.MIME_MAP[mime]
except KeyError:
raise NotImplementedError(
'unhandled mime type: %s' % mime
)
setattr(out, attr, data)
#print(data, end='')
elif msg_type == 'pyerr':
out.ename = content['ename']
out.evalue = content['evalue']
out.traceback = content['traceback']
#logging.error('\n'.join(content['traceback']))
elif msg_type == 'clear_output':
outs = list()
continue
else:
raise NotImplementedError(
'unhandled iopub message: %s' % msg_type
)
outs.append(out)
cell['outputs'] = outs
if status == 'error':
raise NotebookError(traceback_text)
def iter_code_cells(self):
"""
Iterate over the notebook cells containing code.
"""
for ws in self.nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
yield cell
def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the cells of a notebook in order and update
the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
"""
for i, cell in enumerate(self.iter_code_cells()):
try:
self.run_cell(cell)
except NotebookError:
if not skip_exceptions:
raise
if progress_callback:
progress_callback(i)
def count_code_cells(self):
"""
Return the number of code cells in the notebook
"""
return sum(1 for _ in self.iter_code_cells())
runipy/notebook_runner.py: Put one line docstring on one line. (pep257: D200)
from __future__ import print_function
# Import the queue-empty exception under both Python 2 and 3.
# Catch only ImportError: the original bare `except:` would also have
# masked unrelated failures (e.g. KeyboardInterrupt during import).
try:
    # python 2
    from Queue import Empty
except ImportError:
    # python 3
    from queue import Empty
import platform
from time import sleep
import logging
import os
try:
# IPython 3
from IPython.nbformat import NotebookNode
except ImportError:
from IPython.nbformat.current import NotebookNode
from IPython.kernel import KernelManager
class NotebookError(Exception):
    """Raised when executing a notebook cell fails."""
    pass
class NotebookRunner(object):
    """
    Execute the code cells of an IPython notebook against a freshly
    started kernel, updating each cell's outputs in-place.
    """

    # The kernel communicates with mime-types while the notebook
    # uses short labels for different cell types. We'll use this to
    # map from kernel types to notebook format types.
    MIME_MAP = {
        'image/jpeg': 'jpeg',
        'image/png': 'png',
        'text/plain': 'text',
        'text/html': 'html',
        'text/latex': 'latex',
        'application/javascript': 'html',
        'image/svg+xml': 'svg',
    }

    def __init__(
            self,
            nb,
            pylab=False,
            mpl_inline=False,
            profile_dir=None,
            working_dir=None):
        """
        Start a kernel (optionally in *working_dir*) and attach a client.

        nb -- the notebook object whose cells will be run.
        pylab / mpl_inline -- deprecated matplotlib inline switches.
        profile_dir -- IPython profile directory to pass to the kernel.
        working_dir -- directory the kernel process starts in.
        """
        self.km = KernelManager()
        args = []
        if pylab:
            args.append('--pylab=inline')
            logging.warn(
                '--pylab is deprecated and will be removed in a future version'
            )
        elif mpl_inline:
            args.append('--matplotlib=inline')
            logging.warn(
                '--matplotlib is deprecated and' +
                ' will be removed in a future version'
            )
        if profile_dir:
            args.append('--profile-dir=%s' % os.path.abspath(profile_dir))
        # Start the kernel from working_dir, then restore our own cwd.
        cwd = os.getcwd()
        if working_dir:
            os.chdir(working_dir)
        self.km.start_kernel(extra_arguments=args)
        os.chdir(cwd)
        if platform.system() == 'Darwin':
            # There is sometimes a race condition where the first
            # execute command hits the kernel before it's ready.
            # It appears to happen only on Darwin (Mac OS) and an
            # easy (but clumsy) way to mitigate it is to sleep
            # for a second.
            sleep(1)
        self.kc = self.km.client()
        self.kc.start_channels()
        try:
            self.kc.wait_for_ready()
        except AttributeError:
            # IPython < 3
            self._wait_for_ready_backport()
        self.nb = nb

    def shutdown_kernel(self):
        """Stop the client channels and kill the kernel immediately."""
        logging.info('Shutdown kernel')
        self.kc.stop_channels()
        self.km.shutdown_kernel(now=True)

    def _wait_for_ready_backport(self):
        """Backport BlockingKernelClient.wait_for_ready from IPython 3."""
        # Wait for kernel info reply on shell channel
        self.kc.kernel_info()
        while True:
            msg = self.kc.get_shell_msg(block=True, timeout=30)
            if msg['msg_type'] == 'kernel_info_reply':
                break
        # Flush IOPub channel
        while True:
            try:
                msg = self.kc.get_iopub_msg(block=True, timeout=0.2)
            except Empty:
                break

    def run_cell(self, cell):
        """Run a notebook cell and update the output of that cell in-place."""
        logging.info('Running cell:\n%s\n', cell.input)
        self.kc.execute(cell.input)
        reply = self.kc.get_shell_msg()
        status = reply['content']['status']
        traceback_text = ''
        if status == 'error':
            traceback_text = 'Cell raised uncaught exception: \n' + \
                '\n'.join(reply['content']['traceback'])
            logging.info(traceback_text)
        else:
            logging.info('Cell returned')
        outs = list()
        # Drain the IOPub channel until the kernel reports idle,
        # converting each message into a notebook output node.
        while True:
            try:
                msg = self.kc.get_iopub_msg(timeout=1)
                if msg['msg_type'] == 'status':
                    if msg['content']['execution_state'] == 'idle':
                        break
            except Empty:
                # execution state should return to idle
                # before the queue becomes empty,
                # if it doesn't, something bad has happened
                raise
            content = msg['content']
            msg_type = msg['msg_type']
            # IPython 3.0.0-dev writes pyerr/pyout in the notebook format
            # but uses error/execute_result in the message spec. This does the
            # translation needed for tests to pass with IPython 3.0.0-dev
            notebook3_format_conversions = {
                'error': 'pyerr',
                'execute_result': 'pyout'
            }
            msg_type = notebook3_format_conversions.get(msg_type, msg_type)
            out = NotebookNode(output_type=msg_type)
            if 'execution_count' in content:
                cell['prompt_number'] = content['execution_count']
                out.prompt_number = content['execution_count']
            if msg_type in ('status', 'pyin', 'execute_input'):
                continue
            elif msg_type == 'stream':
                out.stream = content['name']
                # in msgspec 5, this is name, text
                # in msgspec 4, this is name, data
                if 'text' in content:
                    out.text = content['text']
                else:
                    out.text = content['data']
            elif msg_type in ('display_data', 'pyout'):
                for mime, data in content['data'].items():
                    try:
                        attr = self.MIME_MAP[mime]
                    except KeyError:
                        raise NotImplementedError(
                            'unhandled mime type: %s' % mime
                        )
                    setattr(out, attr, data)
            elif msg_type == 'pyerr':
                out.ename = content['ename']
                out.evalue = content['evalue']
                out.traceback = content['traceback']
            elif msg_type == 'clear_output':
                # Discard everything collected so far, as the kernel asked.
                outs = list()
                continue
            else:
                raise NotImplementedError(
                    'unhandled iopub message: %s' % msg_type
                )
            outs.append(out)
        cell['outputs'] = outs
        if status == 'error':
            raise NotebookError(traceback_text)

    def iter_code_cells(self):
        """Iterate over the notebook cells containing code."""
        for ws in self.nb.worksheets:
            for cell in ws.cells:
                if cell.cell_type == 'code':
                    yield cell

    def run_notebook(self, skip_exceptions=False, progress_callback=None):
        """
        Run all the cells of a notebook in order and update
        the outputs in-place.

        If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
        subsequent cells are run (by default, the notebook execution stops).
        """
        for i, cell in enumerate(self.iter_code_cells()):
            try:
                self.run_cell(cell)
            except NotebookError:
                if not skip_exceptions:
                    raise
            if progress_callback:
                progress_callback(i)

    def count_code_cells(self):
        """Return the number of code cells in the notebook."""
        return sum(1 for _ in self.iter_code_cells())
|
#!/usr/bin/env python
# encoding: utf-8
from rpcudp.rpcserver import RPCServer, rpccall, rpccall_n
from utils import period_task
import hashlib
KBUCKET_SIZE = 20
TREE_HEIGHT = 160
ALPHA = 3
"""
implement rpc call in kademlia
"""
class KademliaRpc(RPCServer):
@rpccall
def ping(self, dest):
pass
@rpccall_n(timeout=1)
def store(self, dest, keypair):
pass
@rpccall_n(timeout=1)
def findnode(self, dest, key, node):
pass
@rpccall_n(timeout=1)
def findvalue(self, dest, key):
pass
class KServer(KademliaRpc):
def __init__(self, addr, peer=None):
super(KademliaRpc, self).__init__(DEBUG=True)
self.addr = addr[1]
self.id = int(addr[0], 16)
self.kbucket = [[]] * (TREE_HEIGHT + 1)
self.initserver(peer)
self.report_kbucket()
self.check_tree()
def dict(self):
return {"id": str(self.id), "address": self.addr}
@period_task(period=10)
def report_kbucket(self):
i = 0
for k in self.kbucket:
for n in k:
print("%d--%d--%s--%s" % (self.id, i, n['id'], n['address'][0]))
i = i + 1
@period_task(period=100)
def check_tree(self):
res = self.ping([n['address'] for b in self.kbucket for n in b])
for r,d in res:
if not r:
self.delnode(d)
def serve(self):
self.run(self.addr)
def initserver(self, peer):
self.addnode(self.dict())
if peer:
nodes = self.rpc_findnode(self.id,
{"id": str(int(peer[0],16)), "address": peer[1]})
self.nodelookup(self.id, nodes)
def findclosestk(self, key):
"""return the index of closest kbucket"""
distance = self.id ^ key
#find the first big i
for i in range(0, TREE_HEIGHT+1):
if distance < pow(2, i):
return i
def addnode(self, node):
k = self.findclosestk(int(node['id']))
if len(self.kbucket[k]) < KBUCKET_SIZE:
#check if already exist the node
for n in self.kbucket[k]:
if n['id'] == node['id']:
return
if self.kbucket[k] == []:
self.kbucket[k] = [node]
else:
self.kbucket[k].append(node)
def delnode(self, addr):
for b in self.kbucket:
for n in b:
if n['address'] == addr:
b.remove(n)
return
def rpc_ping(self):
return "PONG"
def rpc_findnode(self, key, node):
#add node to kbucket
self.addnode(node)
res = []
i = j = self.findclosestk(int(key))
res.extend(self.kbucket[i])
while len(res) < KBUCKET_SIZE:
i = i - 1
j = j + 1
if i >= 0:
res.extend(self.kbucket[i])
if j <= TREE_HEIGHT:
res.extend(self.kbucket[j])
if i < 0 and j > TREE_HEIGHT:
break
return res[:KBUCKET_SIZE]
def rpc_findvalue(self, key):
pass
def rpc_store(self, key, value):
pass
def nodelookup(self, key, nodes, checkednodes=[]):
newnode = []
checkednodes.extend(nodes)
res = self.findnode([x['address'] for x in nodes], str(key), self.dict())
newnode.extend([ n for r,d in res if r for n in r if n not in checkednodes])
newnode.sort(key=lambda node : int(node['id']) ^ key)
for node in newnode:
self.addnode(node)
if len(newnode) == 0:
return nodes
else:
self.nodelookup(key, newnode[:KBUCKET_SIZE], checkednodes)
fix bug: register ping with rpccall_n so it is dispatched like the other RPC stubs
#!/usr/bin/env python
# encoding: utf-8
from rpcudp.rpcserver import RPCServer, rpccall, rpccall_n
from utils import period_task
import hashlib
KBUCKET_SIZE = 20
TREE_HEIGHT = 160
ALPHA = 3
"""
implement rpc call in kademlia
"""
class KademliaRpc(RPCServer):
    """
    RPC stubs for the Kademlia protocol.

    The method bodies are empty: the rpccall/rpccall_n decorators
    presumably turn them into remote calls addressed to `dest`
    (handled on the receiving node by the matching rpc_* method) --
    TODO confirm against rpcudp.rpcserver.
    """

    @rpccall_n
    def ping(self, dest):
        pass

    @rpccall_n(timeout=1)
    def store(self, dest, keypair):
        pass

    @rpccall_n(timeout=1)
    def findnode(self, dest, key, node):
        pass

    @rpccall_n(timeout=1)
    def findvalue(self, dest, key):
        pass
class KServer(KademliaRpc):
    """
    A Kademlia node: maintains the k-bucket routing table and
    implements the rpc_* handlers plus the iterative node lookup.
    """

    def __init__(self, addr, peer=None):
        # NOTE(review): super(KademliaRpc, self) deliberately skips past
        # KademliaRpc in the MRO and initializes RPCServer directly.
        super(KademliaRpc, self).__init__(DEBUG=True)
        self.addr = addr[1]
        self.id = int(addr[0], 16)
        # One bucket per bit of XOR distance.  BUG FIX: the original
        # `[[]] * (TREE_HEIGHT + 1)` made every slot alias the SAME list
        # object; a comprehension gives each bucket its own list.
        self.kbucket = [[] for _ in range(TREE_HEIGHT + 1)]
        self.initserver(peer)
        self.report_kbucket()
        self.check_tree()

    def dict(self):
        """Return this node's contact info as sent over the wire."""
        return {"id": str(self.id), "address": self.addr}

    @period_task(period=10)
    def report_kbucket(self):
        # Periodically dump the routing table for debugging.
        i = 0
        for k in self.kbucket:
            for n in k:
                print("%d--%d--%s--%s" % (self.id, i, n['id'], n['address'][0]))
            i = i + 1

    @period_task(period=100)
    def check_tree(self):
        # Ping every known node; evict the ones that do not answer.
        res = self.ping([n['address'] for b in self.kbucket for n in b])
        for r, d in res:
            if not r:
                self.delnode(d)

    def serve(self):
        self.run(self.addr)

    def initserver(self, peer):
        """Seed the table with ourselves and optionally bootstrap via *peer*."""
        self.addnode(self.dict())
        if peer:
            nodes = self.rpc_findnode(self.id,
                                      {"id": str(int(peer[0], 16)), "address": peer[1]})
            self.nodelookup(self.id, nodes)

    def findclosestk(self, key):
        """Return the index of the closest kbucket (XOR metric)."""
        distance = self.id ^ key
        # Find the first i with distance < 2**i.
        for i in range(0, TREE_HEIGHT + 1):
            if distance < pow(2, i):
                return i

    def addnode(self, node):
        """Insert *node* into its bucket unless full or already present."""
        k = self.findclosestk(int(node['id']))
        bucket = self.kbucket[k]
        if len(bucket) < KBUCKET_SIZE:
            # Skip duplicates.
            for n in bucket:
                if n['id'] == node['id']:
                    return
            # Buckets are independent lists now, so a plain append is
            # safe (the original replaced empty buckets wholesale to
            # dodge the [[]]*n aliasing bug).
            bucket.append(node)

    def delnode(self, addr):
        """Remove the first node whose address matches *addr*."""
        for b in self.kbucket:
            for n in b:
                if n['address'] == addr:
                    b.remove(n)
                    return

    def rpc_ping(self):
        return "PONG"

    def rpc_findnode(self, key, node):
        """Return up to KBUCKET_SIZE nodes closest to *key*; learn *node*."""
        # Add the requesting node to our own table.
        self.addnode(node)
        res = []
        i = j = self.findclosestk(int(key))
        res.extend(self.kbucket[i])
        # Widen outward from the closest bucket until we have k nodes
        # or run off both ends of the table.
        while len(res) < KBUCKET_SIZE:
            i = i - 1
            j = j + 1
            if i >= 0:
                res.extend(self.kbucket[i])
            if j <= TREE_HEIGHT:
                res.extend(self.kbucket[j])
            if i < 0 and j > TREE_HEIGHT:
                break
        return res[:KBUCKET_SIZE]

    def rpc_findvalue(self, key):
        pass

    def rpc_store(self, key, value):
        pass

    def nodelookup(self, key, nodes, checkednodes=None):
        """
        Iterative Kademlia lookup: query *nodes* for contacts closer to
        *key*, recursing until no new nodes are learned.

        BUG FIXES vs. the original: the mutable default argument
        (checkednodes=[]) leaked state across top-level calls, and the
        recursive call's result was not returned, so callers on the
        recursive path got None.
        """
        if checkednodes is None:
            checkednodes = []
        newnode = []
        checkednodes.extend(nodes)
        res = self.findnode([x['address'] for x in nodes], str(key), self.dict())
        newnode.extend([n for r, d in res if r for n in r if n not in checkednodes])
        newnode.sort(key=lambda node: int(node['id']) ^ key)
        for node in newnode:
            self.addnode(node)
        if len(newnode) == 0:
            return nodes
        else:
            return self.nodelookup(key, newnode[:KBUCKET_SIZE], checkednodes)
|
from pyspark import SparkContext
import json
import time
print 'loading'
sc = SparkContext("spark://ec2-54-200-174-121.us-west-2.compute.amazonaws.com:7077", "Simple App")
# Replace `lay-k.json` with `*.json` to get a whole lot more data.
lay = sc.textFile('s3n://AKIAJFDTPC4XX2LVETGA:lJPMR8IqPw2rsVKmsSgniUd+cLhpItI42Z6DCFku@6885public/enron/lay-k.json')
json_lay = lay.map(lambda x: json.loads(x)).cache()
print 'json lay count', json_lay.count()
filtered_lay = json_lay.filter(lambda x: 'chairman' in x['text'].lower())
print 'lay filtered to chairman', filtered_lay.count()
to_list = json_lay.flatMap(lambda x: x['to'])
print 'to_list', to_list.count()
#grab senders
senders = json_lay.flatMap(lambda x: x['sender'])
collected = senders.collect().sort()
print 'sender_list', collected[0], collected[1], collected[2]
#terms = json_lay.filter(lambda x: _ in x['text'].lower())
#flat = terms.flatMap(lambda x: x)
counted_values = to_list.countByValue()
# Uncomment the next line to see a dictionary of every `to` mapped to
# the number of times it appeared.
#print 'counted_values', counted_values
# How to use a join to combine two datasets.
frequencies = sc.parallelize([('a', 2), ('the', 3)])
inverted_index = sc.parallelize([('a', ('doc1', 5)), ('the', ('doc1', 6)), ('cats', ('doc2', 1)), ('the', ('doc2', 2))])
# See also rightOuterJoin and leftOuterJoin.
join_result = frequencies.join(inverted_index)
# If you don't want to produce something as confusing as the next
# line's [1][1][0] nonesense, represent your data as dictionaries with
# named fields :).
multiplied_frequencies = join_result.map(lambda x: (x[0], x[1][1][0], x[1][0]*x[1][1][1]))
print 'term-document weighted frequencies', multiplied_frequencies.collect()
try to fix sender list
from pyspark import SparkContext
import json
import time

# Python 2 exploration script over the Enron email corpus on S3.
print 'loading'
sc = SparkContext("spark://ec2-54-200-174-121.us-west-2.compute.amazonaws.com:7077", "Simple App")
# Replace `lay-k.json` with `*.json` to get a whole lot more data.
# SECURITY NOTE(review): AWS credentials are embedded in this URL;
# they should live in configuration, not source, and be rotated.
lay = sc.textFile('s3n://AKIAJFDTPC4XX2LVETGA:lJPMR8IqPw2rsVKmsSgniUd+cLhpItI42Z6DCFku@6885public/enron/lay-k.json')
json_lay = lay.map(lambda x: json.loads(x)).cache()
print 'json lay count', json_lay.count()
filtered_lay = json_lay.filter(lambda x: 'chairman' in x['text'].lower())
print 'lay filtered to chairman', filtered_lay.count()
to_list = json_lay.flatMap(lambda x: x['to'])
print 'to_list', to_list.count()
#grab senders
senders = json_lay.flatMap(lambda x: x['sender'])
# NOTE(review): collect() preserves partition order, so the three senders
# printed below are arbitrary, not the lexicographically first ones.
collected = senders.collect()
print 'sender_list', collected[0], collected[1], collected[2]
#terms = json_lay.filter(lambda x: _ in x['text'].lower())
#flat = terms.flatMap(lambda x: x)
counted_values = to_list.countByValue()
# Uncomment the next line to see a dictionary of every `to` mapped to
# the number of times it appeared.
#print 'counted_values', counted_values
# How to use a join to combine two datasets.
frequencies = sc.parallelize([('a', 2), ('the', 3)])
inverted_index = sc.parallelize([('a', ('doc1', 5)), ('the', ('doc1', 6)), ('cats', ('doc2', 1)), ('the', ('doc2', 2))])
# See also rightOuterJoin and leftOuterJoin.
join_result = frequencies.join(inverted_index)
# If you don't want to produce something as confusing as the next
# line's [1][1][0] nonesense, represent your data as dictionaries with
# named fields :).
multiplied_frequencies = join_result.map(lambda x: (x[0], x[1][1][0], x[1][0]*x[1][1][1]))
print 'term-document weighted frequencies', multiplied_frequencies.collect()
import argparse
import os
import requests
import sys
import time
API_URL = "https://api.assemblyai.com/v2/"
def get_transcription(transcription_id):
    """Fetch the transcription resource for `transcription_id` from the
    API and return the parsed JSON payload."""
    endpoint = API_URL + "transcript/{}".format(transcription_id)
    # The API key is read from the ASSEMBLYAI_KEY environment variable.
    auth_headers = {"authorization": os.getenv('ASSEMBLYAI_KEY')}
    return requests.get(endpoint, headers=auth_headers).json()
if __name__ == "__main__":
    # Read the transcription id from the command line, poll the API once,
    # then print either the finished transcript or the current job status.
    parser = argparse.ArgumentParser()
    parser.add_argument("transcription_id")
    args = parser.parse_args()
    transcription_id = args.transcription_id
    response_json = get_transcription(transcription_id)
    if response_json['status'] == "completed":
        # Emit the transcript as one space-separated line.
        for word in response_json['words']:
            print(word['text'], end=" ")
    else:
        print("current status of transcription request: {}".format(
            response_json['status']))
tweak get transcription script
import argparse
import os
import requests
API_URL = "https://api.assemblyai.com/v2/"
def get_transcription(transcription_id):
    """Requests the transcription from the API and returns the JSON
    response."""
    endpoint = "".join([API_URL, "transcript/{}".format(transcription_id)])
    # API key is supplied via the ASSEMBLYAI_KEY environment variable.
    headers = {"authorization": os.getenv('ASSEMBLYAI_KEY')}
    response = requests.get(endpoint, headers=headers)
    return response.json()
if __name__ == "__main__":
    # Read the transcription id from the command line, poll the API once,
    # then print either the finished transcript or the current job status.
    parser = argparse.ArgumentParser()
    parser.add_argument("transcription_id")
    args = parser.parse_args()
    transcription_id = args.transcription_id
    response_json = get_transcription(transcription_id)
    if response_json['status'] == "completed":
        # Emit the transcript as one space-separated line.
        for word in response_json['words']:
            print(word['text'], end=" ")
    else:
        print("current status of transcription request: {}".format(
            response_json['status']))
|
from app import app
from flask import Flask, request, jsonify
import kairos
DEFAULT_GALLERY = 'default_gallery'
# App Logic
@app.route('/', methods=['GET'])
def index():
    # Simple liveness endpoint.
    return 'yo'
@app.route('/upload', methods=['POST'])
def upload():
    """Enroll a face image URL under a name in the default gallery.

    BUG FIX: the route declares no URL parameter, so the original
    `def upload(name)` raised a TypeError on every POST; the name comes
    from the request body instead.
    """
    # NOTE(review): request.data is the raw body bytes; indexing it with
    # string keys suggests request.get_json() or request.form was
    # intended -- confirm the client's content type.
    img_url = request.data['img_url']
    name = request.data['name']
    success = kairos.add_face_url(img_url, name, DEFAULT_GALLERY)
    return jsonify({'success': success})
@app.route('/verify', methods=['GET'])
def verify():
    # Identify the face at `img_url` against the default gallery; entry
    # is allowed only when Kairos recognizes a known person.
    img_url = request.args.get('img_url')
    name = kairos.identify_face_url(img_url, DEFAULT_GALLERY)
    allowed = name is not None
    # TODO: open the door.
    return jsonify({'allowed': allowed,
                    'name': name})
remove unnecessary arg
from app import app
from flask import Flask, request, jsonify
import kairos
DEFAULT_GALLERY = 'default_gallery'
# App Logic
@app.route('/', methods=['GET'])
def index():
    # Simple liveness endpoint.
    return 'yo'
@app.route('/upload', methods=['POST'])
def upload():
    """Enroll a face image URL under a name in the default gallery."""
    # NOTE(review): request.data is the raw body bytes; indexing it with
    # string keys suggests request.get_json() or request.form was
    # intended -- confirm the client's content type.
    img_url = request.data['img_url']
    name = request.data['name']
    success = kairos.add_face_url(img_url, name, DEFAULT_GALLERY)
    return jsonify({'success': success})
@app.route('/verify', methods=['GET'])
def verify():
    # Identify the face at `img_url` against the default gallery; entry
    # is allowed only when Kairos recognizes a known person.
    img_url = request.args.get('img_url')
    name = kairos.identify_face_url(img_url, DEFAULT_GALLERY)
    allowed = name is not None
    # TODO: open the door.
    return jsonify({'allowed': allowed,
                    'name': name})
|
# coding: utf-8
import hashlib
import json
import socket
import time
import uuid
from datetime import datetime, timedelta as td, timezone
from cronsim import CronSim
from django.conf import settings
from django.core.mail import mail_admins
from django.core.signing import TimestampSigner
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.text import slugify
from hc.accounts.models import Project
from hc.api import transports
from hc.lib import emails
from hc.lib.date import month_boundaries
from hc.lib.s3 import get_object, put_object, remove_objects
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
STATUSES = (("up", "Up"), ("down", "Down"), ("new", "New"), ("paused", "Paused"))
DEFAULT_TIMEOUT = td(days=1)
DEFAULT_GRACE = td(hours=1)
NEVER = datetime(3000, 1, 1, tzinfo=timezone.utc)
CHECK_KINDS = (("simple", "Simple"), ("cron", "Cron"))
# max time between start and ping where we will consider both events related:
MAX_DELTA = td(hours=24)
CHANNEL_KINDS = (
("email", "Email"),
("webhook", "Webhook"),
("hipchat", "HipChat"),
("slack", "Slack"),
("pd", "PagerDuty"),
("pagertree", "PagerTree"),
("pagerteam", "Pager Team"),
("po", "Pushover"),
("pushbullet", "Pushbullet"),
("opsgenie", "Opsgenie"),
("victorops", "Splunk On-Call"),
("discord", "Discord"),
("telegram", "Telegram"),
("sms", "SMS"),
("zendesk", "Zendesk"),
("trello", "Trello"),
("matrix", "Matrix"),
("whatsapp", "WhatsApp"),
("apprise", "Apprise"),
("mattermost", "Mattermost"),
("msteams", "Microsoft Teams"),
("shell", "Shell Command"),
("zulip", "Zulip"),
("spike", "Spike"),
("call", "Phone Call"),
("linenotify", "LINE Notify"),
("signal", "Signal"),
)
PO_PRIORITIES = {-2: "lowest", -1: "low", 0: "normal", 1: "high", 2: "emergency"}
def isostring(dt):
    """Convert the datetime to ISO 8601 format with no microseconds.

    Returns None when `dt` is falsy.
    """
    if not dt:
        return None
    return dt.replace(microsecond=0).isoformat()
class Check(models.Model):
name = models.CharField(max_length=100, blank=True)
slug = models.CharField(max_length=100, blank=True)
tags = models.CharField(max_length=500, blank=True)
code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
desc = models.TextField(blank=True)
project = models.ForeignKey(Project, models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
kind = models.CharField(max_length=10, default="simple", choices=CHECK_KINDS)
timeout = models.DurationField(default=DEFAULT_TIMEOUT)
grace = models.DurationField(default=DEFAULT_GRACE)
schedule = models.CharField(max_length=100, default="* * * * *")
tz = models.CharField(max_length=36, default="UTC")
subject = models.CharField(max_length=200, blank=True)
subject_fail = models.CharField(max_length=200, blank=True)
methods = models.CharField(max_length=30, blank=True)
manual_resume = models.BooleanField(default=False)
n_pings = models.IntegerField(default=0)
last_ping = models.DateTimeField(null=True, blank=True)
last_start = models.DateTimeField(null=True, blank=True)
last_duration = models.DurationField(null=True, blank=True)
last_ping_was_fail = models.BooleanField(default=False)
has_confirmation_link = models.BooleanField(default=False)
alert_after = models.DateTimeField(null=True, blank=True, editable=False)
status = models.CharField(max_length=6, choices=STATUSES, default="new")
class Meta:
indexes = [
# Index for the alert_after field. Excludes rows with status=down.
# Used in the sendalerts management command.
models.Index(
fields=["alert_after"],
name="api_check_aa_not_down",
condition=~models.Q(status="down"),
),
models.Index(fields=["project_id", "slug"], name="api_check_project_slug"),
]
def __str__(self):
return "%s (%d)" % (self.name or self.code, self.id)
def name_then_code(self):
if self.name:
return self.name
return str(self.code)
def url(self):
""" Return check's ping url in user's preferred style.
Note: this method reads self.project. If project is not loaded already,
this causes a SQL query.
"""
if self.project_id and self.project.show_slugs:
if not self.slug:
return None
# If ping_key is not set, use dummy placeholder
key = self.project.ping_key or "{ping_key}"
return settings.PING_ENDPOINT + key + "/" + self.slug
return settings.PING_ENDPOINT + str(self.code)
def details_url(self):
return settings.SITE_ROOT + reverse("hc-details", args=[self.code])
def cloaked_url(self):
return settings.SITE_ROOT + reverse("hc-uncloak", args=[self.unique_key])
def email(self):
return "%s@%s" % (self.code, settings.PING_EMAIL_DOMAIN)
def clamped_last_duration(self):
if self.last_duration and self.last_duration < MAX_DELTA:
return self.last_duration
def set_name_slug(self, name):
self.name = name
self.slug = slugify(name)
def get_grace_start(self, with_started=True):
""" Return the datetime when the grace period starts.
If the check is currently new, paused or down, return None.
"""
# NEVER is a constant sentinel value (year 3000).
# Using None instead would make the min() logic clunky.
result = NEVER
if self.kind == "simple" and self.status == "up":
result = self.last_ping + self.timeout
elif self.kind == "cron" and self.status == "up":
# The complex case, next ping is expected based on cron schedule.
# Don't convert to naive datetimes (and so avoid ambiguities around
# DST transitions). cronsim will handle the timezone-aware datetimes.
last_local = self.last_ping.astimezone(ZoneInfo(self.tz))
result = next(CronSim(self.schedule, last_local))
if with_started and self.last_start and self.status != "down":
result = min(result, self.last_start)
if result != NEVER:
return result
def going_down_after(self):
""" Return the datetime when the check goes down.
If the check is new or paused, and not currently running, return None.
If the check is already down, also return None.
"""
grace_start = self.get_grace_start()
if grace_start is not None:
return grace_start + self.grace
def get_status(self, with_started=False):
""" Return current status for display. """
frozen_now = now()
if self.last_start:
if frozen_now >= self.last_start + self.grace:
return "down"
elif with_started:
return "started"
if self.status in ("new", "paused", "down"):
return self.status
grace_start = self.get_grace_start(with_started=with_started)
grace_end = grace_start + self.grace
if frozen_now >= grace_end:
return "down"
if frozen_now >= grace_start:
return "grace"
return "up"
def assign_all_channels(self):
channels = Channel.objects.filter(project=self.project)
self.channel_set.set(channels)
def tags_list(self):
return [t.strip() for t in self.tags.split(" ") if t.strip()]
def matches_tag_set(self, tag_set):
return tag_set.issubset(self.tags_list())
def channels_str(self):
""" Return a comma-separated string of assigned channel codes. """
# self.channel_set may already be prefetched.
# Sort in python to make sure we do't run additional queries
codes = [str(channel.code) for channel in self.channel_set.all()]
return ",".join(sorted(codes))
@property
def unique_key(self):
code_half = self.code.hex[:16]
return hashlib.sha1(code_half.encode()).hexdigest()
def to_dict(self, readonly=False):
result = {
"name": self.name,
"slug": self.slug,
"tags": self.tags,
"desc": self.desc,
"grace": int(self.grace.total_seconds()),
"n_pings": self.n_pings,
"status": self.get_status(with_started=True),
"last_ping": isostring(self.last_ping),
"next_ping": isostring(self.get_grace_start()),
"manual_resume": self.manual_resume,
"methods": self.methods,
}
if self.last_duration:
result["last_duration"] = int(self.last_duration.total_seconds())
if readonly:
result["unique_key"] = self.unique_key
else:
update_rel_url = reverse("hc-api-single", args=[self.code])
pause_rel_url = reverse("hc-api-pause", args=[self.code])
result["ping_url"] = settings.PING_ENDPOINT + str(self.code)
result["update_url"] = settings.SITE_ROOT + update_rel_url
result["pause_url"] = settings.SITE_ROOT + pause_rel_url
result["channels"] = self.channels_str()
if self.kind == "simple":
result["timeout"] = int(self.timeout.total_seconds())
elif self.kind == "cron":
result["schedule"] = self.schedule
result["tz"] = self.tz
return result
def ping(self, remote_addr, scheme, method, ua, body, action, exitstatus=None):
frozen_now = now()
if self.status == "paused" and self.manual_resume:
action = "ign"
if action == "start":
self.last_start = frozen_now
# Don't update "last_ping" field.
elif action == "ign":
pass
else:
self.last_ping = frozen_now
if self.last_start:
self.last_duration = self.last_ping - self.last_start
self.last_start = None
else:
self.last_duration = None
new_status = "down" if action == "fail" else "up"
if self.status != new_status:
flip = Flip(owner=self)
flip.created = self.last_ping
flip.old_status = self.status
flip.new_status = new_status
flip.save()
self.status = new_status
self.alert_after = self.going_down_after()
self.n_pings = models.F("n_pings") + 1
self.has_confirmation_link = "confirm" in body.decode(errors="replace").lower()
self.save()
self.refresh_from_db()
ping = Ping(owner=self)
ping.n = self.n_pings
ping.created = frozen_now
if action in ("start", "fail", "ign"):
ping.kind = action
ping.remote_addr = remote_addr
ping.scheme = scheme
ping.method = method
# If User-Agent is longer than 200 characters, truncate it:
ping.ua = ua[:200]
if len(body) > 100 and settings.S3_BUCKET:
ping.object_size = len(body)
put_object(self.code, ping.n, body)
else:
ping.body_raw = body
ping.exitstatus = exitstatus
ping.save()
# Every 100 received pings, prune old pings and notifications:
if self.n_pings % 100 == 0:
self.prune()
def prune(self):
""" Remove old pings and notifications. """
threshold = self.n_pings - self.project.owner_profile.ping_log_limit
# Remove ping bodies from object storage
if settings.S3_BUCKET:
remove_objects(self.code, threshold)
# Remove ping objects from db
self.ping_set.filter(n__lte=threshold).delete()
try:
ping = self.ping_set.earliest("id")
self.notification_set.filter(created__lt=ping.created).delete()
except Ping.DoesNotExist:
pass
def downtimes(self, months):
""" Calculate the number of downtimes and downtime minutes per month.
Returns a list of (datetime, downtime_in_secs, number_of_outages) tuples.
"""
def monthkey(dt):
return dt.year, dt.month
# Datetimes of the first days of months we're interested in. Ascending order.
boundaries = month_boundaries(months=months)
# Will accumulate totals here.
# (year, month) -> [datetime, total_downtime, number_of_outages]
totals = {monthkey(b): [b, td(), 0] for b in boundaries}
# A list of flips and month boundaries
events = [(b, "---") for b in boundaries]
q = self.flip_set.filter(created__gt=min(boundaries))
for pair in q.values_list("created", "old_status"):
events.append(pair)
# Iterate through flips and month boundaries in reverse order,
# and for each "down" event increase the counters in `totals`.
dt, status = now(), self.status
for prev_dt, prev_status in sorted(events, reverse=True):
if status == "down":
delta = dt - prev_dt
totals[monthkey(prev_dt)][1] += delta
totals[monthkey(prev_dt)][2] += 1
dt = prev_dt
if prev_status != "---":
status = prev_status
# Set counters to None for months when the check didn't exist yet
for ym in totals:
if ym < monthkey(self.created):
totals[ym][1] = None
totals[ym][2] = None
return sorted(totals.values())
def past_downtimes(self):
""" Return downtime summary for two previous months. """
return self.downtimes(3)[:-1]
class Ping(models.Model):
id = models.BigAutoField(primary_key=True)
n = models.IntegerField(null=True)
owner = models.ForeignKey(Check, models.CASCADE)
created = models.DateTimeField(default=now)
kind = models.CharField(max_length=6, blank=True, null=True)
scheme = models.CharField(max_length=10, default="http")
remote_addr = models.GenericIPAddressField(blank=True, null=True)
method = models.CharField(max_length=10, blank=True)
ua = models.CharField(max_length=200, blank=True)
body = models.TextField(blank=True, null=True)
body_raw = models.BinaryField(null=True)
object_size = models.IntegerField(null=True)
exitstatus = models.SmallIntegerField(null=True)
def to_dict(self):
return {
"type": self.kind or "success",
"date": self.created.isoformat(),
"n": self.n,
"scheme": self.scheme,
"remote_addr": self.remote_addr,
"method": self.method,
"ua": self.ua,
}
def has_body(self):
if self.body or self.body_raw or self.object_size:
return True
return False
def get_body(self):
if self.body:
return self.body
if self.object_size:
body_raw = get_object(self.owner.code, self.n)
if body_raw:
return body_raw.decode(errors="replace")
if self.body_raw:
return bytes(self.body_raw).decode(errors="replace")
class Channel(models.Model):
name = models.CharField(max_length=100, blank=True)
code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
project = models.ForeignKey(Project, models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
kind = models.CharField(max_length=20, choices=CHANNEL_KINDS)
value = models.TextField(blank=True)
email_verified = models.BooleanField(default=False)
disabled = models.BooleanField(null=True)
last_notify = models.DateTimeField(null=True, blank=True)
last_error = models.CharField(max_length=200, blank=True)
checks = models.ManyToManyField(Check)
def __str__(self):
if self.name:
return self.name
if self.kind == "email":
return "Email to %s" % self.email_value
elif self.kind == "sms":
return "SMS to %s" % self.phone_number
elif self.kind == "slack":
return "Slack %s" % self.slack_channel
elif self.kind == "telegram":
return "Telegram %s" % self.telegram_name
elif self.kind == "zulip":
if self.zulip_type == "stream":
return "Zulip stream %s" % self.zulip_to
if self.zulip_type == "private":
return "Zulip user %s" % self.zulip_to
return self.get_kind_display()
def to_dict(self):
return {"id": str(self.code), "name": self.name, "kind": self.kind}
def is_editable(self):
return self.kind in ("email", "webhook", "sms", "signal", "whatsapp")
def assign_all_checks(self):
checks = Check.objects.filter(project=self.project)
self.checks.add(*checks)
def make_token(self):
seed = "%s%s" % (self.code, settings.SECRET_KEY)
seed = seed.encode()
return hashlib.sha1(seed).hexdigest()
def send_verify_link(self):
args = [self.code, self.make_token()]
verify_link = reverse("hc-verify-email", args=args)
verify_link = settings.SITE_ROOT + verify_link
emails.verify_email(self.email_value, {"verify_link": verify_link})
def get_unsub_link(self):
signer = TimestampSigner(salt="alerts")
signed_token = signer.sign(self.make_token())
args = [self.code, signed_token]
verify_link = reverse("hc-unsubscribe-alerts", args=args)
return settings.SITE_ROOT + verify_link
def send_signal_captcha_alert(self, challenge, message):
subject = "Signal CAPTCHA proof required"
message = f"Challenge token: {challenge}"
hostname = socket.gethostname()
url = settings.SITE_ROOT + reverse("hc-signal-captcha", args=[challenge])
html_message = f"""
Hostname: {hostname}<br>
Challenge: <code>{challenge}</code><br>
<a href="{url}">Solve CAPTCHA here</a><br>
Message from Signal:<br>
<pre>{message}</pre>
"""
mail_admins(subject, message, html_message=html_message)
@property
def transport(self):
if self.kind == "email":
return transports.Email(self)
elif self.kind == "webhook":
return transports.Webhook(self)
elif self.kind == "slack":
return transports.Slack(self)
elif self.kind == "mattermost":
return transports.Mattermost(self)
elif self.kind == "hipchat":
return transports.HipChat(self)
elif self.kind == "pd":
return transports.PagerDuty(self)
elif self.kind == "pagertree":
return transports.PagerTree(self)
elif self.kind == "pagerteam":
return transports.PagerTeam(self)
elif self.kind == "victorops":
return transports.VictorOps(self)
elif self.kind == "pushbullet":
return transports.Pushbullet(self)
elif self.kind == "po":
return transports.Pushover(self)
elif self.kind == "opsgenie":
return transports.Opsgenie(self)
elif self.kind == "discord":
return transports.Discord(self)
elif self.kind == "telegram":
return transports.Telegram(self)
elif self.kind == "sms":
return transports.Sms(self)
elif self.kind == "trello":
return transports.Trello(self)
elif self.kind == "matrix":
return transports.Matrix(self)
elif self.kind == "whatsapp":
return transports.WhatsApp(self)
elif self.kind == "apprise":
return transports.Apprise(self)
elif self.kind == "msteams":
return transports.MsTeams(self)
elif self.kind == "shell":
return transports.Shell(self)
elif self.kind == "zulip":
return transports.Zulip(self)
elif self.kind == "spike":
return transports.Spike(self)
elif self.kind == "call":
return transports.Call(self)
elif self.kind == "linenotify":
return transports.LineNotify(self)
elif self.kind == "signal":
return transports.Signal(self)
else:
raise NotImplementedError("Unknown channel kind: %s" % self.kind)
def notify(self, check, is_test=False):
if self.transport.is_noop(check):
return "no-op"
n = Notification(channel=self)
if is_test:
# When sending a test notification we leave the owner field null.
# (the passed check is a dummy, unsaved Check instance)
pass
else:
n.owner = check
n.check_status = check.status
n.error = "Sending"
n.save()
error, disabled = "", self.disabled
try:
self.transport.notify(check, notification=n)
except transports.TransportError as e:
disabled = True if e.permanent else disabled
error = e.message
Notification.objects.filter(id=n.id).update(error=error)
Channel.objects.filter(id=self.id).update(
last_notify=now(), last_error=error, disabled=disabled
)
return error
def icon_path(self):
return "img/integrations/%s.png" % self.kind
@property
def json(self):
return json.loads(self.value)
@property
def po_priority(self):
assert self.kind == "po"
parts = self.value.split("|")
prio = int(parts[1])
return PO_PRIORITIES[prio]
def webhook_spec(self, status):
assert self.kind == "webhook"
doc = json.loads(self.value)
if status == "down" and "method_down" in doc:
return {
"method": doc["method_down"],
"url": doc["url_down"],
"body": doc["body_down"],
"headers": doc["headers_down"],
}
elif status == "up" and "method_up" in doc:
return {
"method": doc["method_up"],
"url": doc["url_up"],
"body": doc["body_up"],
"headers": doc["headers_up"],
}
@property
def down_webhook_spec(self):
return self.webhook_spec("down")
@property
def up_webhook_spec(self):
return self.webhook_spec("up")
@property
def url_down(self):
return self.down_webhook_spec["url"]
@property
def url_up(self):
return self.up_webhook_spec["url"]
@property
def cmd_down(self):
assert self.kind == "shell"
return self.json["cmd_down"]
@property
def cmd_up(self):
assert self.kind == "shell"
return self.json["cmd_up"]
@property
def slack_team(self):
assert self.kind == "slack"
if not self.value.startswith("{"):
return None
doc = json.loads(self.value)
if "team_name" in doc:
return doc["team_name"]
if "team" in doc:
return doc["team"]["name"]
@property
def slack_channel(self):
assert self.kind == "slack"
if not self.value.startswith("{"):
return None
doc = json.loads(self.value)
return doc["incoming_webhook"]["channel"]
@property
def slack_webhook_url(self):
assert self.kind in ("slack", "mattermost")
if not self.value.startswith("{"):
return self.value
doc = json.loads(self.value)
return doc["incoming_webhook"]["url"]
@property
def discord_webhook_url(self):
assert self.kind == "discord"
url = self.json["webhook"]["url"]
# Discord migrated to discord.com,
# and is dropping support for discordapp.com on 7 November 2020
if url.startswith("https://discordapp.com/"):
url = "https://discord.com/" + url[23:]
return url
@property
def telegram_id(self):
assert self.kind == "telegram"
return self.json.get("id")
@property
def telegram_type(self):
assert self.kind == "telegram"
return self.json.get("type")
@property
def telegram_name(self):
assert self.kind == "telegram"
return self.json.get("name")
def update_telegram_id(self, new_chat_id) -> None:
doc = self.json
doc["id"] = new_chat_id
self.value = json.dumps(doc)
self.save()
@property
def pd_service_key(self):
assert self.kind == "pd"
if not self.value.startswith("{"):
return self.value
return self.json["service_key"]
@property
def pd_account(self):
assert self.kind == "pd"
if self.value.startswith("{"):
return self.json.get("account")
@property
def phone_number(self):
assert self.kind in ("call", "sms", "whatsapp", "signal")
if self.value.startswith("{"):
return self.json["value"]
return self.value
@property
def trello_token(self):
assert self.kind == "trello"
return self.json["token"]
@property
def trello_board_list(self):
assert self.kind == "trello"
doc = json.loads(self.value)
return doc["board_name"], doc["list_name"]
@property
def trello_list_id(self):
assert self.kind == "trello"
return self.json["list_id"]
@property
def email_value(self):
assert self.kind == "email"
if not self.value.startswith("{"):
return self.value
return self.json["value"]
@property
def email_notify_up(self):
assert self.kind == "email"
if not self.value.startswith("{"):
return True
return self.json.get("up")
@property
def email_notify_down(self):
assert self.kind == "email"
if not self.value.startswith("{"):
return True
return self.json.get("down")
@property
def whatsapp_notify_up(self):
assert self.kind == "whatsapp"
return self.json["up"]
@property
def whatsapp_notify_down(self):
assert self.kind == "whatsapp"
return self.json["down"]
@property
def signal_notify_up(self):
assert self.kind == "signal"
return self.json["up"]
@property
def signal_notify_down(self):
assert self.kind == "signal"
return self.json["down"]
@property
def sms_notify_up(self):
assert self.kind == "sms"
return self.json.get("up", False)
@property
def sms_notify_down(self):
assert self.kind == "sms"
return self.json.get("down", True)
@property
def opsgenie_key(self):
assert self.kind == "opsgenie"
if not self.value.startswith("{"):
return self.value
return self.json["key"]
@property
def opsgenie_region(self):
assert self.kind == "opsgenie"
if not self.value.startswith("{"):
return "us"
return self.json["region"]
@property
def zulip_bot_email(self):
assert self.kind == "zulip"
return self.json["bot_email"]
@property
def zulip_site(self):
assert self.kind == "zulip"
doc = json.loads(self.value)
if "site" in doc:
return doc["site"]
# Fallback if we don't have the site value:
# derive it from bot's email
_, domain = doc["bot_email"].split("@")
return "https://" + domain
@property
def zulip_api_key(self):
assert self.kind == "zulip"
return self.json["api_key"]
@property
def zulip_type(self):
assert self.kind == "zulip"
return self.json["mtype"]
@property
def zulip_to(self):
assert self.kind == "zulip"
return self.json["to"]
@property
def linenotify_token(self):
assert self.kind == "linenotify"
return self.value
class Notification(models.Model):
code = models.UUIDField(default=uuid.uuid4, null=True, editable=False)
owner = models.ForeignKey(Check, models.CASCADE, null=True)
check_status = models.CharField(max_length=6)
channel = models.ForeignKey(Channel, models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
error = models.CharField(max_length=200, blank=True)
class Meta:
get_latest_by = "created"
def status_url(self):
path = reverse("hc-api-notification-status", args=[self.code])
return settings.SITE_ROOT + path
class Flip(models.Model):
owner = models.ForeignKey(Check, models.CASCADE)
created = models.DateTimeField()
processed = models.DateTimeField(null=True, blank=True)
old_status = models.CharField(max_length=8, choices=STATUSES)
new_status = models.CharField(max_length=8, choices=STATUSES)
class Meta:
indexes = [
# For quickly looking up unprocessed flips.
# Used in the sendalerts management command.
models.Index(
fields=["processed"],
name="api_flip_not_processed",
condition=models.Q(processed=None),
)
]
def to_dict(self):
return {
"timestamp": isostring(self.created),
"up": 1 if self.new_status == "up" else 0,
}
def send_alerts(self):
"""Loop over the enabled channels, call notify() on each.
For each channel, yield a (channel, error, send_time) triple:
* channel is a Channel instance
* error is an empty string ("") on success, error message otherwise
* send_time is the send time in seconds (float)
"""
# Don't send alerts on new->up and paused->up transitions
if self.new_status == "up" and self.old_status in ("new", "paused"):
return
if self.new_status not in ("up", "down"):
raise NotImplementedError("Unexpected status: %s" % self.status)
for channel in self.owner.channel_set.exclude(disabled=True):
start = time.time()
error = channel.notify(self.owner)
if error == "no-op":
continue
yield (channel, error, time.time() - start)
class TokenBucket(models.Model):
    """Generic persistent rate limiter, one row per throttled key.

    ``tokens`` is the current fill level in [0.0, 1.0]. Each authorize()
    call costs 1/capacity tokens, and the bucket refills linearly back to
    full over ``refill_time_secs`` seconds.
    """

    # The throttled key, e.g. "tg-<chat id>" or a salted hash of an email.
    value = models.CharField(max_length=80, unique=True)
    # Current fill level; 1.0 means a full bucket.
    tokens = models.FloatField(default=1.0)
    # When `tokens` was last recalculated.
    updated = models.DateTimeField(default=now)

    @staticmethod
    def authorize(value, capacity, refill_time_secs):
        """Spend one token for `value`.

        Returns True if the action is allowed, False if the bucket
        (capacity actions per refill_time_secs) is exhausted.
        """
        frozen_now = now()
        obj, created = TokenBucket.objects.get_or_create(value=value)
        if not created:
            # Top up the bucket:
            delta_secs = (frozen_now - obj.updated).total_seconds()
            obj.tokens = min(1.0, obj.tokens + delta_secs / refill_time_secs)
        obj.tokens -= 1.0 / capacity
        if obj.tokens < 0:
            # Not enough tokens
            return False
        # Race condition: two concurrent authorize calls can overwrite each
        # other's changes. It's OK to be a little inexact here for the sake
        # of simplicity.
        obj.updated = frozen_now
        obj.save()
        return True

    @staticmethod
    def authorize_login_email(email):
        """Rate-limit login-link emails per normalized address."""
        # remove dots and alias:
        mailbox, domain = email.split("@")
        mailbox = mailbox.replace(".", "")
        mailbox = mailbox.split("+")[0]
        email = mailbox + "@" + domain
        # Hash with SECRET_KEY so raw emails are not stored in the table.
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "em-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 20 login attempts for a single email per hour:
        return TokenBucket.authorize(value, 20, 3600)

    @staticmethod
    def authorize_invite(user):
        value = "invite-%d" % user.id
        # 20 invites per day
        return TokenBucket.authorize(value, 20, 3600 * 24)

    @staticmethod
    def authorize_login_password(email):
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "pw-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 20 password attempts per day
        return TokenBucket.authorize(value, 20, 3600 * 24)

    @staticmethod
    def authorize_telegram(telegram_id):
        value = "tg-%s" % telegram_id
        # 6 messages for a single chat per minute:
        return TokenBucket.authorize(value, 6, 60)

    @staticmethod
    def authorize_signal(phone):
        salted_encoded = (phone + settings.SECRET_KEY).encode()
        value = "signal-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 6 messages for a single recipient per minute:
        return TokenBucket.authorize(value, 6, 60)

    @staticmethod
    def authorize_pushover(user_key):
        salted_encoded = (user_key + settings.SECRET_KEY).encode()
        value = "po-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 6 messages for a single user key per minute:
        return TokenBucket.authorize(value, 6, 60)

    @staticmethod
    def authorize_sudo_code(user):
        value = "sudo-%d" % user.id
        # 10 sudo attempts per day
        return TokenBucket.authorize(value, 10, 3600 * 24)

    @staticmethod
    def authorize_totp_attempt(user):
        value = "totp-%d" % user.id
        # 96 attempts per user per 24 hours
        # (or, on average, one attempt per 15 minutes)
        return TokenBucket.authorize(value, 96, 3600 * 24)

    @staticmethod
    def authorize_totp_code(user, code):
        value = "totpc-%d-%s" % (user.id, code)
        # A code has a validity period of 3 * 30 = 90 seconds.
        # During that period, allow the code to only be used once,
        # so an eavesdropping attacker cannot reuse a code.
        return TokenBucket.authorize(value, 1, 90)
Fix CAPTCHA email alert
# coding: utf-8
import hashlib
import json
import socket
import time
import uuid
from datetime import datetime, timedelta as td, timezone
from cronsim import CronSim
from django.conf import settings
from django.core.mail import mail_admins
from django.core.signing import TimestampSigner
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.text import slugify
from hc.accounts.models import Project
from hc.api import transports
from hc.lib import emails
from hc.lib.date import month_boundaries
from hc.lib.s3 import get_object, put_object, remove_objects
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
# Check lifecycle states (db value, display label).
STATUSES = (("up", "Up"), ("down", "Down"), ("new", "New"), ("paused", "Paused"))
DEFAULT_TIMEOUT = td(days=1)
DEFAULT_GRACE = td(hours=1)
# Sentinel "far future" datetime; see Check.get_grace_start().
NEVER = datetime(3000, 1, 1, tzinfo=timezone.utc)
CHECK_KINDS = (("simple", "Simple"), ("cron", "Cron"))
# max time between start and ping where we will consider both events related:
MAX_DELTA = td(hours=24)
# Supported integration kinds (db value, display label).
CHANNEL_KINDS = (
    ("email", "Email"),
    ("webhook", "Webhook"),
    ("hipchat", "HipChat"),
    ("slack", "Slack"),
    ("pd", "PagerDuty"),
    ("pagertree", "PagerTree"),
    ("pagerteam", "Pager Team"),
    ("po", "Pushover"),
    ("pushbullet", "Pushbullet"),
    ("opsgenie", "Opsgenie"),
    ("victorops", "Splunk On-Call"),
    ("discord", "Discord"),
    ("telegram", "Telegram"),
    ("sms", "SMS"),
    ("zendesk", "Zendesk"),
    ("trello", "Trello"),
    ("matrix", "Matrix"),
    ("whatsapp", "WhatsApp"),
    ("apprise", "Apprise"),
    ("mattermost", "Mattermost"),
    ("msteams", "Microsoft Teams"),
    ("shell", "Shell Command"),
    ("zulip", "Zulip"),
    ("spike", "Spike"),
    ("call", "Phone Call"),
    ("linenotify", "LINE Notify"),
    ("signal", "Signal"),
)
# Pushover numeric priority -> name, as used in its API.
PO_PRIORITIES = {-2: "lowest", -1: "low", 0: "normal", 1: "high", 2: "emergency"}
def isostring(dt):
    """Serialize *dt* as ISO 8601 without microseconds; None-safe."""
    return dt.replace(microsecond=0).isoformat() if dt else None
class Check(models.Model):
    """A monitored job: its schedule, notification config and latest state."""

    name = models.CharField(max_length=100, blank=True)
    slug = models.CharField(max_length=100, blank=True)
    # Space-separated tag list; see tags_list().
    tags = models.CharField(max_length=500, blank=True)
    code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    desc = models.TextField(blank=True)
    project = models.ForeignKey(Project, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    kind = models.CharField(max_length=10, default="simple", choices=CHECK_KINDS)
    # For kind="simple": expected max period between pings.
    timeout = models.DurationField(default=DEFAULT_TIMEOUT)
    grace = models.DurationField(default=DEFAULT_GRACE)
    # For kind="cron": the cron expression and its timezone.
    schedule = models.CharField(max_length=100, default="* * * * *")
    tz = models.CharField(max_length=36, default="UTC")
    subject = models.CharField(max_length=200, blank=True)
    subject_fail = models.CharField(max_length=200, blank=True)
    methods = models.CharField(max_length=30, blank=True)
    manual_resume = models.BooleanField(default=False)
    n_pings = models.IntegerField(default=0)
    last_ping = models.DateTimeField(null=True, blank=True)
    # Set while a "start" ping is pending; cleared by the closing ping.
    last_start = models.DateTimeField(null=True, blank=True)
    last_duration = models.DurationField(null=True, blank=True)
    last_ping_was_fail = models.BooleanField(default=False)
    has_confirmation_link = models.BooleanField(default=False)
    alert_after = models.DateTimeField(null=True, blank=True, editable=False)
    status = models.CharField(max_length=6, choices=STATUSES, default="new")

    class Meta:
        indexes = [
            # Index for the alert_after field. Excludes rows with status=down.
            # Used in the sendalerts management command.
            models.Index(
                fields=["alert_after"],
                name="api_check_aa_not_down",
                condition=~models.Q(status="down"),
            ),
            models.Index(fields=["project_id", "slug"], name="api_check_project_slug"),
        ]

    def __str__(self):
        return "%s (%d)" % (self.name or self.code, self.id)

    def name_then_code(self):
        """Return the display name, falling back to the UUID string."""
        if self.name:
            return self.name
        return str(self.code)

    def url(self):
        """ Return check's ping url in user's preferred style.

        Note: this method reads self.project. If project is not loaded already,
        this causes a SQL query.
        """
        if self.project_id and self.project.show_slugs:
            if not self.slug:
                return None
            # If ping_key is not set, use dummy placeholder
            key = self.project.ping_key or "{ping_key}"
            return settings.PING_ENDPOINT + key + "/" + self.slug
        return settings.PING_ENDPOINT + str(self.code)

    def details_url(self):
        """Absolute URL of the check's details page."""
        return settings.SITE_ROOT + reverse("hc-details", args=[self.code])

    def cloaked_url(self):
        """Absolute URL addressing this check by its hashed unique_key."""
        return settings.SITE_ROOT + reverse("hc-uncloak", args=[self.unique_key])

    def email(self):
        """The ping-by-email address for this check."""
        return "%s@%s" % (self.code, settings.PING_EMAIL_DOMAIN)

    def clamped_last_duration(self):
        # Durations above MAX_DELTA are treated as unrelated start/ping pairs.
        if self.last_duration and self.last_duration < MAX_DELTA:
            return self.last_duration

    def set_name_slug(self, name):
        """Set the name and keep the slug in sync with it."""
        self.name = name
        self.slug = slugify(name)

    def get_grace_start(self, with_started=True):
        """ Return the datetime when the grace period starts.

        If the check is currently new, paused or down, return None.
        """
        # NEVER is a constant sentinel value (year 3000).
        # Using None instead would make the min() logic clunky.
        result = NEVER
        if self.kind == "simple" and self.status == "up":
            result = self.last_ping + self.timeout
        elif self.kind == "cron" and self.status == "up":
            # The complex case, next ping is expected based on cron schedule.
            # Don't convert to naive datetimes (and so avoid ambiguities around
            # DST transitions). cronsim will handle the timezone-aware datetimes.
            last_local = self.last_ping.astimezone(ZoneInfo(self.tz))
            result = next(CronSim(self.schedule, last_local))
        if with_started and self.last_start and self.status != "down":
            result = min(result, self.last_start)
        if result != NEVER:
            return result

    def going_down_after(self):
        """ Return the datetime when the check goes down.

        If the check is new or paused, and not currently running, return None.
        If the check is already down, also return None.
        """
        grace_start = self.get_grace_start()
        if grace_start is not None:
            return grace_start + self.grace

    def get_status(self, with_started=False):
        """ Return current status for display. """
        frozen_now = now()
        if self.last_start:
            # A "start" ping is pending: down if it overran the grace period.
            if frozen_now >= self.last_start + self.grace:
                return "down"
            elif with_started:
                return "started"
        if self.status in ("new", "paused", "down"):
            return self.status
        grace_start = self.get_grace_start(with_started=with_started)
        grace_end = grace_start + self.grace
        if frozen_now >= grace_end:
            return "down"
        if frozen_now >= grace_start:
            return "grace"
        return "up"

    def assign_all_channels(self):
        """Subscribe this check to every channel in its project."""
        channels = Channel.objects.filter(project=self.project)
        self.channel_set.set(channels)

    def tags_list(self):
        """Return the tags field split into a list of non-empty tags."""
        return [t.strip() for t in self.tags.split(" ") if t.strip()]

    def matches_tag_set(self, tag_set):
        return tag_set.issubset(self.tags_list())

    def channels_str(self):
        """ Return a comma-separated string of assigned channel codes. """
        # self.channel_set may already be prefetched.
        # Sort in python to make sure we don't run additional queries
        codes = [str(channel.code) for channel in self.channel_set.all()]
        return ",".join(sorted(codes))

    @property
    def unique_key(self):
        # Stable, non-reversible identifier derived from half of the UUID.
        code_half = self.code.hex[:16]
        return hashlib.sha1(code_half.encode()).hexdigest()

    def to_dict(self, readonly=False):
        """Serialize the check for the API; omit write URLs when readonly."""
        result = {
            "name": self.name,
            "slug": self.slug,
            "tags": self.tags,
            "desc": self.desc,
            "grace": int(self.grace.total_seconds()),
            "n_pings": self.n_pings,
            "status": self.get_status(with_started=True),
            "last_ping": isostring(self.last_ping),
            "next_ping": isostring(self.get_grace_start()),
            "manual_resume": self.manual_resume,
            "methods": self.methods,
        }
        if self.last_duration:
            result["last_duration"] = int(self.last_duration.total_seconds())
        if readonly:
            result["unique_key"] = self.unique_key
        else:
            update_rel_url = reverse("hc-api-single", args=[self.code])
            pause_rel_url = reverse("hc-api-pause", args=[self.code])
            result["ping_url"] = settings.PING_ENDPOINT + str(self.code)
            result["update_url"] = settings.SITE_ROOT + update_rel_url
            result["pause_url"] = settings.SITE_ROOT + pause_rel_url
            result["channels"] = self.channels_str()
        if self.kind == "simple":
            result["timeout"] = int(self.timeout.total_seconds())
        elif self.kind == "cron":
            result["schedule"] = self.schedule
            result["tz"] = self.tz
        return result

    def ping(self, remote_addr, scheme, method, ua, body, action, exitstatus=None):
        """Record an incoming ping and update check state accordingly.

        `action` is one of "success", "start", "fail" or "ign"; a Flip row
        is created when the status changes.
        """
        frozen_now = now()
        if self.status == "paused" and self.manual_resume:
            action = "ign"
        if action == "start":
            self.last_start = frozen_now
            # Don't update "last_ping" field.
        elif action == "ign":
            pass
        else:
            self.last_ping = frozen_now
            if self.last_start:
                self.last_duration = self.last_ping - self.last_start
                self.last_start = None
            else:
                self.last_duration = None
            new_status = "down" if action == "fail" else "up"
            if self.status != new_status:
                flip = Flip(owner=self)
                flip.created = self.last_ping
                flip.old_status = self.status
                flip.new_status = new_status
                flip.save()
                self.status = new_status
        self.alert_after = self.going_down_after()
        # Increment atomically in the database, then re-read the real value.
        self.n_pings = models.F("n_pings") + 1
        self.has_confirmation_link = "confirm" in body.decode(errors="replace").lower()
        self.save()
        self.refresh_from_db()
        ping = Ping(owner=self)
        ping.n = self.n_pings
        ping.created = frozen_now
        if action in ("start", "fail", "ign"):
            ping.kind = action
        ping.remote_addr = remote_addr
        ping.scheme = scheme
        ping.method = method
        # If User-Agent is longer than 200 characters, truncate it:
        ping.ua = ua[:200]
        # Large bodies go to object storage, small ones stay in the db row.
        if len(body) > 100 and settings.S3_BUCKET:
            ping.object_size = len(body)
            put_object(self.code, ping.n, body)
        else:
            ping.body_raw = body
        ping.exitstatus = exitstatus
        ping.save()
        # Every 100 received pings, prune old pings and notifications:
        if self.n_pings % 100 == 0:
            self.prune()

    def prune(self):
        """ Remove old pings and notifications. """
        threshold = self.n_pings - self.project.owner_profile.ping_log_limit
        # Remove ping bodies from object storage
        if settings.S3_BUCKET:
            remove_objects(self.code, threshold)
        # Remove ping objects from db
        self.ping_set.filter(n__lte=threshold).delete()
        try:
            # Drop notifications older than the oldest surviving ping.
            ping = self.ping_set.earliest("id")
            self.notification_set.filter(created__lt=ping.created).delete()
        except Ping.DoesNotExist:
            pass

    def downtimes(self, months):
        """ Calculate the number of downtimes and downtime minutes per month.

        Returns a list of (datetime, downtime_in_secs, number_of_outages) tuples.
        """

        def monthkey(dt):
            return dt.year, dt.month

        # Datetimes of the first days of months we're interested in. Ascending order.
        boundaries = month_boundaries(months=months)
        # Will accumulate totals here.
        # (year, month) -> [datetime, total_downtime, number_of_outages]
        totals = {monthkey(b): [b, td(), 0] for b in boundaries}
        # A list of flips and month boundaries
        events = [(b, "---") for b in boundaries]
        q = self.flip_set.filter(created__gt=min(boundaries))
        for pair in q.values_list("created", "old_status"):
            events.append(pair)
        # Iterate through flips and month boundaries in reverse order,
        # and for each "down" event increase the counters in `totals`.
        dt, status = now(), self.status
        for prev_dt, prev_status in sorted(events, reverse=True):
            if status == "down":
                delta = dt - prev_dt
                totals[monthkey(prev_dt)][1] += delta
                totals[monthkey(prev_dt)][2] += 1
            dt = prev_dt
            if prev_status != "---":
                status = prev_status
        # Set counters to None for months when the check didn't exist yet
        for ym in totals:
            if ym < monthkey(self.created):
                totals[ym][1] = None
                totals[ym][2] = None
        return sorted(totals.values())

    def past_downtimes(self):
        """ Return downtime summary for two previous months. """
        return self.downtimes(3)[:-1]
class Ping(models.Model):
    """A single received ping event, with the metadata of its HTTP request."""

    id = models.BigAutoField(primary_key=True)
    n = models.IntegerField(null=True)
    owner = models.ForeignKey(Check, models.CASCADE)
    created = models.DateTimeField(default=now)
    kind = models.CharField(max_length=6, blank=True, null=True)
    scheme = models.CharField(max_length=10, default="http")
    remote_addr = models.GenericIPAddressField(blank=True, null=True)
    method = models.CharField(max_length=10, blank=True)
    ua = models.CharField(max_length=200, blank=True)
    body = models.TextField(blank=True, null=True)
    body_raw = models.BinaryField(null=True)
    object_size = models.IntegerField(null=True)
    exitstatus = models.SmallIntegerField(null=True)

    def to_dict(self):
        """Return a JSON-serializable summary of this ping event."""
        event_type = self.kind if self.kind else "success"
        return {
            "type": event_type,
            "date": self.created.isoformat(),
            "n": self.n,
            "scheme": self.scheme,
            "remote_addr": self.remote_addr,
            "method": self.method,
            "ua": self.ua,
        }

    def has_body(self):
        """True when a request body is stored in any of the three places."""
        return bool(self.body or self.body_raw or self.object_size)

    def get_body(self):
        """Return the ping body as text, fetching from object storage
        when it was offloaded there; None when there is no body."""
        if self.body:
            return self.body
        if self.object_size:
            blob = get_object(self.owner.code, self.n)
            if blob:
                return blob.decode(errors="replace")
        if self.body_raw:
            return bytes(self.body_raw).decode(errors="replace")
class Channel(models.Model):
    """A notification integration (email, Slack, SMS, ...) within a project.

    `value` stores kind-specific configuration, either as a plain string or
    as a JSON document; the properties below decode it per kind.
    """

    name = models.CharField(max_length=100, blank=True)
    code = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    project = models.ForeignKey(Project, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    kind = models.CharField(max_length=20, choices=CHANNEL_KINDS)
    # Kind-specific config: raw string or JSON, depending on kind/age.
    value = models.TextField(blank=True)
    email_verified = models.BooleanField(default=False)
    # Set True after a permanent delivery error; excluded from alerting.
    disabled = models.BooleanField(null=True)
    last_notify = models.DateTimeField(null=True, blank=True)
    last_error = models.CharField(max_length=200, blank=True)
    checks = models.ManyToManyField(Check)

    def __str__(self):
        # Prefer the user-given name, then a kind-specific description.
        if self.name:
            return self.name
        if self.kind == "email":
            return "Email to %s" % self.email_value
        elif self.kind == "sms":
            return "SMS to %s" % self.phone_number
        elif self.kind == "slack":
            return "Slack %s" % self.slack_channel
        elif self.kind == "telegram":
            return "Telegram %s" % self.telegram_name
        elif self.kind == "zulip":
            if self.zulip_type == "stream":
                return "Zulip stream %s" % self.zulip_to
            if self.zulip_type == "private":
                return "Zulip user %s" % self.zulip_to
        return self.get_kind_display()

    def to_dict(self):
        """Serialize for the API."""
        return {"id": str(self.code), "name": self.name, "kind": self.kind}

    def is_editable(self):
        # Only these kinds have an edit form in the UI.
        return self.kind in ("email", "webhook", "sms", "signal", "whatsapp")

    def assign_all_checks(self):
        """Subscribe every check in the project to this channel."""
        checks = Check.objects.filter(project=self.project)
        self.checks.add(*checks)

    def make_token(self):
        # Deterministic, unguessable token used in verify/unsubscribe links.
        seed = "%s%s" % (self.code, settings.SECRET_KEY)
        seed = seed.encode()
        return hashlib.sha1(seed).hexdigest()

    def send_verify_link(self):
        """Email a verification link to an email-kind channel's address."""
        args = [self.code, self.make_token()]
        verify_link = reverse("hc-verify-email", args=args)
        verify_link = settings.SITE_ROOT + verify_link
        emails.verify_email(self.email_value, {"verify_link": verify_link})

    def get_unsub_link(self):
        """Return a signed, expiring unsubscribe URL for this channel."""
        signer = TimestampSigner(salt="alerts")
        signed_token = signer.sign(self.make_token())
        args = [self.code, signed_token]
        verify_link = reverse("hc-unsubscribe-alerts", args=args)
        return settings.SITE_ROOT + verify_link

    def send_signal_captcha_alert(self, challenge, raw):
        """Email admins when Signal requires solving a CAPTCHA challenge."""
        subject = "Signal CAPTCHA proof required"
        message = f"Challenge token: {challenge}"
        hostname = socket.gethostname()
        url = settings.SITE_ROOT + reverse("hc-signal-captcha", args=[challenge])
        html_message = f"""
        Hostname: {hostname}<br>
        Challenge: <code>{challenge}</code><br>
        <a href="{url}">Solve CAPTCHA here</a><br>
        Message from Signal:<br>
        <pre>{raw}</pre>
        """
        mail_admins(subject, message, html_message=html_message)

    @property
    def transport(self):
        # Map `kind` to the transport implementation that does the delivery.
        if self.kind == "email":
            return transports.Email(self)
        elif self.kind == "webhook":
            return transports.Webhook(self)
        elif self.kind == "slack":
            return transports.Slack(self)
        elif self.kind == "mattermost":
            return transports.Mattermost(self)
        elif self.kind == "hipchat":
            return transports.HipChat(self)
        elif self.kind == "pd":
            return transports.PagerDuty(self)
        elif self.kind == "pagertree":
            return transports.PagerTree(self)
        elif self.kind == "pagerteam":
            return transports.PagerTeam(self)
        elif self.kind == "victorops":
            return transports.VictorOps(self)
        elif self.kind == "pushbullet":
            return transports.Pushbullet(self)
        elif self.kind == "po":
            return transports.Pushover(self)
        elif self.kind == "opsgenie":
            return transports.Opsgenie(self)
        elif self.kind == "discord":
            return transports.Discord(self)
        elif self.kind == "telegram":
            return transports.Telegram(self)
        elif self.kind == "sms":
            return transports.Sms(self)
        elif self.kind == "trello":
            return transports.Trello(self)
        elif self.kind == "matrix":
            return transports.Matrix(self)
        elif self.kind == "whatsapp":
            return transports.WhatsApp(self)
        elif self.kind == "apprise":
            return transports.Apprise(self)
        elif self.kind == "msteams":
            return transports.MsTeams(self)
        elif self.kind == "shell":
            return transports.Shell(self)
        elif self.kind == "zulip":
            return transports.Zulip(self)
        elif self.kind == "spike":
            return transports.Spike(self)
        elif self.kind == "call":
            return transports.Call(self)
        elif self.kind == "linenotify":
            return transports.LineNotify(self)
        elif self.kind == "signal":
            return transports.Signal(self)
        else:
            raise NotImplementedError("Unknown channel kind: %s" % self.kind)

    def notify(self, check, is_test=False):
        """Deliver a notification about `check` over this channel.

        Returns "" on success, "no-op" when the transport decided to skip,
        or the error message otherwise. Records a Notification row and
        updates last_notify/last_error/disabled on the channel.
        """
        if self.transport.is_noop(check):
            return "no-op"
        n = Notification(channel=self)
        if is_test:
            # When sending a test notification we leave the owner field null.
            # (the passed check is a dummy, unsaved Check instance)
            pass
        else:
            n.owner = check
        n.check_status = check.status
        n.error = "Sending"
        n.save()
        error, disabled = "", self.disabled
        try:
            self.transport.notify(check, notification=n)
        except transports.TransportError as e:
            # A permanent error (e.g. invalid address) disables the channel.
            disabled = True if e.permanent else disabled
            error = e.message
        Notification.objects.filter(id=n.id).update(error=error)
        Channel.objects.filter(id=self.id).update(
            last_notify=now(), last_error=error, disabled=disabled
        )
        return error

    def icon_path(self):
        """Static path of the integration's icon."""
        return "img/integrations/%s.png" % self.kind

    @property
    def json(self):
        # Decode `value` as JSON; callers must know the kind stores JSON.
        return json.loads(self.value)

    @property
    def po_priority(self):
        assert self.kind == "po"
        parts = self.value.split("|")
        prio = int(parts[1])
        return PO_PRIORITIES[prio]

    def webhook_spec(self, status):
        """Return the method/url/body/headers configured for `status`
        ("up" or "down"), or None when not configured."""
        assert self.kind == "webhook"
        doc = json.loads(self.value)
        if status == "down" and "method_down" in doc:
            return {
                "method": doc["method_down"],
                "url": doc["url_down"],
                "body": doc["body_down"],
                "headers": doc["headers_down"],
            }
        elif status == "up" and "method_up" in doc:
            return {
                "method": doc["method_up"],
                "url": doc["url_up"],
                "body": doc["body_up"],
                "headers": doc["headers_up"],
            }

    @property
    def down_webhook_spec(self):
        return self.webhook_spec("down")

    @property
    def up_webhook_spec(self):
        return self.webhook_spec("up")

    @property
    def url_down(self):
        return self.down_webhook_spec["url"]

    @property
    def url_up(self):
        return self.up_webhook_spec["url"]

    @property
    def cmd_down(self):
        assert self.kind == "shell"
        return self.json["cmd_down"]

    @property
    def cmd_up(self):
        assert self.kind == "shell"
        return self.json["cmd_up"]

    @property
    def slack_team(self):
        assert self.kind == "slack"
        # Legacy rows store a bare webhook URL instead of JSON.
        if not self.value.startswith("{"):
            return None
        doc = json.loads(self.value)
        if "team_name" in doc:
            return doc["team_name"]
        if "team" in doc:
            return doc["team"]["name"]

    @property
    def slack_channel(self):
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return None
        doc = json.loads(self.value)
        return doc["incoming_webhook"]["channel"]

    @property
    def slack_webhook_url(self):
        assert self.kind in ("slack", "mattermost")
        if not self.value.startswith("{"):
            return self.value
        doc = json.loads(self.value)
        return doc["incoming_webhook"]["url"]

    @property
    def discord_webhook_url(self):
        assert self.kind == "discord"
        url = self.json["webhook"]["url"]
        # Discord migrated to discord.com,
        # and is dropping support for discordapp.com on 7 November 2020
        if url.startswith("https://discordapp.com/"):
            url = "https://discord.com/" + url[23:]
        return url

    @property
    def telegram_id(self):
        assert self.kind == "telegram"
        return self.json.get("id")

    @property
    def telegram_type(self):
        assert self.kind == "telegram"
        return self.json.get("type")

    @property
    def telegram_name(self):
        assert self.kind == "telegram"
        return self.json.get("name")

    def update_telegram_id(self, new_chat_id) -> None:
        """Persist a migrated Telegram chat id into the JSON value."""
        doc = self.json
        doc["id"] = new_chat_id
        self.value = json.dumps(doc)
        self.save()

    @property
    def pd_service_key(self):
        assert self.kind == "pd"
        if not self.value.startswith("{"):
            return self.value
        return self.json["service_key"]

    @property
    def pd_account(self):
        assert self.kind == "pd"
        if self.value.startswith("{"):
            return self.json.get("account")

    @property
    def phone_number(self):
        assert self.kind in ("call", "sms", "whatsapp", "signal")
        if self.value.startswith("{"):
            return self.json["value"]
        return self.value

    @property
    def trello_token(self):
        assert self.kind == "trello"
        return self.json["token"]

    @property
    def trello_board_list(self):
        assert self.kind == "trello"
        doc = json.loads(self.value)
        return doc["board_name"], doc["list_name"]

    @property
    def trello_list_id(self):
        assert self.kind == "trello"
        return self.json["list_id"]

    @property
    def email_value(self):
        assert self.kind == "email"
        if not self.value.startswith("{"):
            return self.value
        return self.json["value"]

    @property
    def email_notify_up(self):
        assert self.kind == "email"
        # Legacy non-JSON rows notify on both transitions.
        if not self.value.startswith("{"):
            return True
        return self.json.get("up")

    @property
    def email_notify_down(self):
        assert self.kind == "email"
        if not self.value.startswith("{"):
            return True
        return self.json.get("down")

    @property
    def whatsapp_notify_up(self):
        assert self.kind == "whatsapp"
        return self.json["up"]

    @property
    def whatsapp_notify_down(self):
        assert self.kind == "whatsapp"
        return self.json["down"]

    @property
    def signal_notify_up(self):
        assert self.kind == "signal"
        return self.json["up"]

    @property
    def signal_notify_down(self):
        assert self.kind == "signal"
        return self.json["down"]

    @property
    def sms_notify_up(self):
        assert self.kind == "sms"
        return self.json.get("up", False)

    @property
    def sms_notify_down(self):
        assert self.kind == "sms"
        return self.json.get("down", True)

    @property
    def opsgenie_key(self):
        assert self.kind == "opsgenie"
        if not self.value.startswith("{"):
            return self.value
        return self.json["key"]

    @property
    def opsgenie_region(self):
        assert self.kind == "opsgenie"
        if not self.value.startswith("{"):
            return "us"
        return self.json["region"]

    @property
    def zulip_bot_email(self):
        assert self.kind == "zulip"
        return self.json["bot_email"]

    @property
    def zulip_site(self):
        assert self.kind == "zulip"
        doc = json.loads(self.value)
        if "site" in doc:
            return doc["site"]
        # Fallback if we don't have the site value:
        # derive it from bot's email
        _, domain = doc["bot_email"].split("@")
        return "https://" + domain

    @property
    def zulip_api_key(self):
        assert self.kind == "zulip"
        return self.json["api_key"]

    @property
    def zulip_type(self):
        assert self.kind == "zulip"
        return self.json["mtype"]

    @property
    def zulip_to(self):
        assert self.kind == "zulip"
        return self.json["to"]

    @property
    def linenotify_token(self):
        assert self.kind == "linenotify"
        return self.value
class Notification(models.Model):
    """Log entry for a single delivery attempt over a Channel."""

    code = models.UUIDField(default=uuid.uuid4, null=True, editable=False)
    owner = models.ForeignKey(Check, models.CASCADE, null=True)
    check_status = models.CharField(max_length=6)
    channel = models.ForeignKey(Channel, models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    error = models.CharField(max_length=200, blank=True)

    class Meta:
        get_latest_by = "created"

    def status_url(self):
        """Absolute URL for querying this notification's delivery status."""
        rel_url = reverse("hc-api-notification-status", args=[self.code])
        return settings.SITE_ROOT + rel_url
class Flip(models.Model):
    """A single status transition (e.g. up -> down) of a Check."""

    owner = models.ForeignKey(Check, models.CASCADE)
    created = models.DateTimeField()
    # Set when sendalerts has handled this flip; NULL rows are pending.
    processed = models.DateTimeField(null=True, blank=True)
    old_status = models.CharField(max_length=8, choices=STATUSES)
    new_status = models.CharField(max_length=8, choices=STATUSES)

    class Meta:
        indexes = [
            # For quickly looking up unprocessed flips.
            # Used in the sendalerts management command.
            models.Index(
                fields=["processed"],
                name="api_flip_not_processed",
                condition=models.Q(processed=None),
            )
        ]

    def to_dict(self):
        """Return a JSON-friendly representation for status APIs."""
        return {
            "timestamp": isostring(self.created),
            "up": 1 if self.new_status == "up" else 0,
        }

    def send_alerts(self):
        """Loop over the enabled channels, call notify() on each.

        For each channel, yield a (channel, error, send_time) triple:
        * channel is a Channel instance
        * error is an empty string ("") on success, error message otherwise
        * send_time is the send time in seconds (float)
        """
        # Don't send alerts on new->up and paused->up transitions
        if self.new_status == "up" and self.old_status in ("new", "paused"):
            return
        if self.new_status not in ("up", "down"):
            # Bug fix: Flip has no `status` field (only old_status and
            # new_status), so the previous `self.status` here raised
            # AttributeError instead of the intended NotImplementedError.
            raise NotImplementedError("Unexpected status: %s" % self.new_status)
        for channel in self.owner.channel_set.exclude(disabled=True):
            start = time.time()
            error = channel.notify(self.owner)
            if error == "no-op":
                continue
            yield (channel, error, time.time() - start)
class TokenBucket(models.Model):
    """Generic persistent rate limiter, one row per throttled key.

    ``tokens`` is the current fill level in [0.0, 1.0]. Each authorize()
    call costs 1/capacity tokens, and the bucket refills linearly back to
    full over ``refill_time_secs`` seconds.
    """

    # The throttled key, e.g. "tg-<chat id>" or a salted hash of an email.
    value = models.CharField(max_length=80, unique=True)
    # Current fill level; 1.0 means a full bucket.
    tokens = models.FloatField(default=1.0)
    # When `tokens` was last recalculated.
    updated = models.DateTimeField(default=now)

    @staticmethod
    def authorize(value, capacity, refill_time_secs):
        """Spend one token for `value`.

        Returns True if the action is allowed, False if the bucket
        (capacity actions per refill_time_secs) is exhausted.
        """
        frozen_now = now()
        obj, created = TokenBucket.objects.get_or_create(value=value)
        if not created:
            # Top up the bucket:
            delta_secs = (frozen_now - obj.updated).total_seconds()
            obj.tokens = min(1.0, obj.tokens + delta_secs / refill_time_secs)
        obj.tokens -= 1.0 / capacity
        if obj.tokens < 0:
            # Not enough tokens
            return False
        # Race condition: two concurrent authorize calls can overwrite each
        # other's changes. It's OK to be a little inexact here for the sake
        # of simplicity.
        obj.updated = frozen_now
        obj.save()
        return True

    @staticmethod
    def authorize_login_email(email):
        """Rate-limit login-link emails per normalized address."""
        # remove dots and alias:
        mailbox, domain = email.split("@")
        mailbox = mailbox.replace(".", "")
        mailbox = mailbox.split("+")[0]
        email = mailbox + "@" + domain
        # Hash with SECRET_KEY so raw emails are not stored in the table.
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "em-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 20 login attempts for a single email per hour:
        return TokenBucket.authorize(value, 20, 3600)

    @staticmethod
    def authorize_invite(user):
        value = "invite-%d" % user.id
        # 20 invites per day
        return TokenBucket.authorize(value, 20, 3600 * 24)

    @staticmethod
    def authorize_login_password(email):
        salted_encoded = (email + settings.SECRET_KEY).encode()
        value = "pw-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 20 password attempts per day
        return TokenBucket.authorize(value, 20, 3600 * 24)

    @staticmethod
    def authorize_telegram(telegram_id):
        value = "tg-%s" % telegram_id
        # 6 messages for a single chat per minute:
        return TokenBucket.authorize(value, 6, 60)

    @staticmethod
    def authorize_signal(phone):
        salted_encoded = (phone + settings.SECRET_KEY).encode()
        value = "signal-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 6 messages for a single recipient per minute:
        return TokenBucket.authorize(value, 6, 60)

    @staticmethod
    def authorize_pushover(user_key):
        salted_encoded = (user_key + settings.SECRET_KEY).encode()
        value = "po-%s" % hashlib.sha1(salted_encoded).hexdigest()
        # 6 messages for a single user key per minute:
        return TokenBucket.authorize(value, 6, 60)

    @staticmethod
    def authorize_sudo_code(user):
        value = "sudo-%d" % user.id
        # 10 sudo attempts per day
        return TokenBucket.authorize(value, 10, 3600 * 24)

    @staticmethod
    def authorize_totp_attempt(user):
        value = "totp-%d" % user.id
        # 96 attempts per user per 24 hours
        # (or, on average, one attempt per 15 minutes)
        return TokenBucket.authorize(value, 96, 3600 * 24)

    @staticmethod
    def authorize_totp_code(user, code):
        value = "totpc-%d-%s" % (user.id, code)
        # A code has a validity period of 3 * 30 = 90 seconds.
        # During that period, allow the code to only be used once,
        # so an eavesdropping attacker cannot reuse a code.
        return TokenBucket.authorize(value, 1, 90)
|
# -*- coding: utf-8 -*-
"""
surveybot main
"""
from slackbot import db, app
def vote(user, action_text):
    """
    Vote for a survey.

    Parses "<cmd> <survey_id> <option>" out of action_text, replaces any
    previous vote by this user for the survey, and returns a user-facing
    status message (never raises).
    """
    survey_id = ""
    option = ""
    try:
        parameters = action_text.split(' ', 1)[1]
        survey_id = parameters.split(' ')[0]
        option = parameters.split(' ')[1]
        app.logger.debug("%s %s %s " % (user, survey_id, option))
    except Exception as e:
        return('Parameters ERROR - Example to vote option 3 for survey #2: `/survey reply 2 3`')
    # Fix: if db.get_db() itself raises, `database` was unbound and the
    # finally block raised UnboundLocalError, masking the real error.
    database = None
    try:
        database = db.get_db()
        # Replace any previous vote by this user for this survey.
        database.execute('delete from vote where survey_id = ? and user = ?', [survey_id, user])
        database.execute('insert into vote (survey_id, user, option) values (?, ?, ?)', [survey_id, user, option])
        database.commit()
    except Exception as e:
        app.logger.debug(e)
        return('DB ERROR')
    finally:
        if database is not None:
            database.close()
    return "Hi %s, you voted the survey %s with option %s." % (user, survey_id, option)
def myvote(user, action_text):
    """
    Show my vote for a survey.

    Parses "<cmd> <survey_id>" out of action_text and returns a message
    with the option this user voted, or a notice that they did not vote.
    """
    survey_id = ""
    option = ""
    try:
        parameters = action_text.split(' ', 1)[1]
        survey_id = parameters.split(' ')[0]
        app.logger.debug("%s %s" % (user, survey_id))
    except Exception as e:
        return('Parameters ERROR - Example to show your vote survey #2: `/survey myreply 2`')
    # Guard so the finally block cannot hit an unbound `database`.
    database = None
    try:
        database = db.get_db()
        cur = database.execute('select option from vote where survey_id = ? and user = ?', [survey_id, user])
        option = cur.fetchone()
    except Exception as e:
        app.logger.debug(e)
        return('DB ERROR')
    finally:
        if database is not None:
            database.close()
    if option is None:
        return "Hi %s, you didn't vote the survey %s." % (user, survey_id)
    else:
        # Fix: option is a single fetched row, so the voted value is
        # option[0]; option[0][0] returned only its first character.
        return "Hi %s, you voted the survey %s with option %s." % (user, survey_id, option[0])
def cancelsurvey(author, action_text):
    """
    Cancel a survey.

    Only the survey's author may cancel it: the delete is constrained on
    both id and author, and a no-op delete is reported back to the caller.
    """
    survey_id = ""
    try:
        parameters = action_text.split(' ', 1)[1]
        survey_id = parameters.split()[0]
        app.logger.debug("%s %s " % (author, survey_id))
    except Exception as e:
        return('Parameters ERROR - Example to cancel survey #2 : `/survey cancel 2`')
    database = None
    try:
        database = db.get_db()
        # Restrict the delete to the caller's own surveys.
        cur = database.execute('delete from survey where id = ? and author = ?', [survey_id, author])
        database.commit()
        if cur.rowcount == 0:
            return "Hi %s, survey %s was not found among your own surveys." % (author, survey_id)
    except Exception as e:
        # Log instead of silently swallowing the error.
        app.logger.debug(e)
        return('DB ERROR')
    finally:
        if database is not None:
            database.close()
    return "Hi %s, the survey %s has been canceled." % (author, survey_id)
def createsurvey(author, action_text):
    """
    Create a new survey.

    Expects "<cmd> <question> options <comma separated options>" in
    action_text; stores the survey and returns a status message.
    """
    question = ""
    options = ""
    warning_msg = ""
    try:
        parameters = action_text.split(' ', 1)[1]
        # Fix: split on the first "options" only, so option values that
        # themselves contain the word "options" are not truncated.
        question, options = parameters.split('options', 1)
        if not ',' in options:
            warning_msg = ":warning: There are limited options"
        app.logger.debug("%s %s %s " % (author, question, options))
    except Exception as e:
        return('Parameters ERROR - Example to create new survey: `/survey this What colour is your favorite? options red, gree, blue`')
    database = None
    try:
        database = db.get_db()
        database.execute('insert into survey (question, author, options) values (?, ?, ?)', [question, author, options])
        database.commit()
    except Exception as e:
        app.logger.debug(e)
        return('DB ERROR')
    finally:
        if database is not None:
            database.close()
    return "Hi %s, the survey %s has been created. %s" % (author, question, warning_msg)
def listsurveys(user_name, action_text):
    """
    List the surveys.

    Returns one line per survey with its id, question, author and options.
    """
    database = db.get_db()
    try:
        cur = database.execute('SELECT id, question, author, options FROM survey')
        surveys = cur.fetchall()
    finally:
        # Fix: the connection was never closed (unlike the sibling handlers).
        database.close()
    count_surveys = len(surveys)
    list_msg = "Hi %s, there are %i surveys. \n This is the list:" % (user_name, count_surveys)
    for row in surveys:
        # Include the author (row[2]) in the listing.
        list_msg = list_msg + "\n :small_blue_diamond: *%s* %s by %s options are: [%s]" % (row[0], row[1], row[2], row[3])
    return list_msg
def showresults(user_name, action_text):
    """
    Show results of a survey.

    Parses "show <survey_id>" from action_text and returns the vote counts
    per option, ordered by popularity.
    """
    survey_id = ""
    try:
        parameters = action_text.split(' ',1)[1]
        survey_id = parameters.split()[0]
        app.logger.debug("%s %s " % (user_name, survey_id))
    except Exception, e:
        return('Parameters ERROR - Example to show results for survey #2 : `/survey show 2`')
    database = db.get_db()
    cur = database.execute('SELECT s.id, s.question, s.author, s.options, v.option, count(v.option) as count FROM survey s JOIN vote v ON s.id = v.survey_id and s.id = ? GROUP BY v.option ORDER BY count DESC' , [survey_id])
    try:
        # cur.next() raises StopIteration when the survey has no votes,
        # which routes control into the except branch below.
        first_row = cur.next()
        results = [first_row] + cur.fetchall()
        # NOTE(review): leftover debug prints; should go to the logger.
        print first_row
        print results
        list_msg = "Hi %s. \n Survey %s Question: %s \n Author [%s] \n Options are (%s) \n Results:" % (user_name, survey_id, results[0][1], results[0][2], results[0][3])
        for row in results:
            list_msg = list_msg + "\n :small_blue_diamond: Option *%s* = %s votes" % (row[4], row[5])
        return list_msg
    except Exception, e:
        app.logger.debug(e)
        return "Hi %s. There are no votes for survey %s" % (user_name, survey_id)
Added the author to the survey list. Restricted cancellation so users can cancel only their own surveys.
# -*- coding: utf-8 -*-
"""
surveybot main
"""
from slackbot import db, app
def vote (user, action_text):
    """
    Vote for a survey.

    Parses "reply <survey_id> <option>" from action_text, replaces any
    previous vote by this user for that survey, and returns a reply string.
    """
    survey_id = ""
    option = ""
    try:
        parameters = action_text.split(' ',1)[1]
        survey_id = parameters.split(' ')[0]
        option = parameters.split(' ')[1]
        app.logger.debug("%s %s %s " % (user, survey_id, option))
    except Exception, e:
        return('Parameters ERROR - Example to vote option 3 for survey #2: `/survey reply 2 3`')
    try:
        database = db.get_db()
        # Delete any earlier vote first so each user holds exactly one vote
        # per survey (re-voting overwrites the previous choice).
        database.execute('delete from vote where survey_id = ? and user = ?', [survey_id, user])
        database.execute('insert into vote (survey_id, user, option) values (?, ?, ?)', [survey_id, user, option])
        database.commit()
    except Exception, e:
        app.logger.debug(e)
        return('DB ERROR')
    finally:
        database.close()
    return "Hi %s, you voted the survey %s with option %s." % (user, survey_id, option)
def myvote (user, action_text):
"""
Show my vote for a survey
"""
survey_id = ""
option = ""
try:
parameters = action_text.split(' ',1)[1]
survey_id = parameters.split(' ')[0]
app.logger.debug("%s %s" % (user, survey_id))
except Exception, e:
return('Parameters ERROR - Example to show your vote survey #2: `/survey myreply 2`')
try:
database = db.get_db()
cur = database.execute('select option from vote where survey_id = ? and user = ?', [survey_id, user])
option = cur.fetchone()
except Exception, e:
app.logger.debug(e)
return('DB ERROR')
finally:
database.close()
if option is None:
return "Hi %s, you didn't vote the survey %s." % (user, survey_id)
else:
return "Hi %s, you voted the survey %s with option %s." % (user, survey_id, option[0][0])
def cancelsurvey(author, action_text):
    """
    Cancel a survey.

    Parses "cancel <survey_id>" from action_text. Only the survey's author
    may cancel it; otherwise a refusal message is returned.
    """
    survey_id = ""
    try:
        parameters = action_text.split(' ',1)[1]
        survey_id = parameters.split()[0]
        app.logger.debug("%s %s " % (author, survey_id))
    except Exception, e:
        return('Parameters ERROR - Example to cancel survey #2 : `/survey cancel 2`')
    try:
        database = db.get_db()
        # Ownership check: the row only exists if this author created it.
        cur = database.execute('select id, question, author from survey where id = ? and author = ?', [survey_id, author])
        survey = cur.fetchone()
        if survey is None:
            return "No no no no no %s, the survey %s is not yours :confused: " % (author, survey_id)
        database.execute('delete from survey where id = ? and author = ?', [survey_id, author])
        database.commit()
    except Exception, e:
        return('DB ERROR')
    finally:
        # Runs even on the early ownership-refusal return above.
        database.close()
    return "Hi %s, the survey %s has been canceled." % (author, survey_id)
def createsurvey(author, action_text):
"""
Create a new survey
"""
question = ""
options = ""
warning_msg = ""
try:
parameters = action_text.split(' ',1)[1]
question = parameters.split('options')[0]
options = parameters.split('options')[1]
if not ',' in options:
warning_msg = ":warning: There are limited options"
app.logger.debug("%s %s %s " % (author, question, options))
except Exception, e:
return('Parameters ERROR - Example to create new survey: `/survey this What colour is your favorite? options red, gree, blue`')
try:
database = db.get_db()
database.execute('insert into survey (question, author, options) values (?, ?, ?)', [question, author, options])
database.commit()
except Exception, e:
return('DB ERROR')
finally:
database.close()
return "Hi %s, the survey %s has been created. %s" % (author, question, warning_msg)
def listsurveys(user_name, action_text):
    """Return a Slack-formatted listing of every survey, including author."""
    database = db.get_db()
    rows = database.execute('SELECT id, question, author, options FROM survey').fetchall()
    header = "Hi %s, there are %i surveys. \n This is the list:" % (user_name, len(rows))
    entries = ["\n :small_blue_diamond: *%s* %s *%s* options are: [%s]" % (row[0], row[2], row[1], row[3])
               for row in rows]
    return header + "".join(entries)
def showresults(user_name, action_text):
"""
Show results of a survey
"""
survey_id = ""
try:
parameters = action_text.split(' ',1)[1]
survey_id = parameters.split()[0]
app.logger.debug("%s %s " % (user_name, survey_id))
except Exception, e:
return('Parameters ERROR - Example to show results for survey #2 : `/survey show 2`')
database = db.get_db()
cur = database.execute('SELECT s.id, s.question, s.author, s.options, v.option, count(v.option) as count FROM survey s JOIN vote v ON s.id = v.survey_id and s.id = ? GROUP BY v.option ORDER BY count DESC' , [survey_id])
try:
first_row = cur.next()
results = [first_row] + cur.fetchall()
print first_row
print results
list_msg = "Hi %s. \n Survey %s Question: %s \n Author [%s] \n Options are (%s) \n Results:" % (user_name, survey_id, results[0][1], results[0][2], results[0][3])
for row in results:
list_msg = list_msg + "\n :small_blue_diamond: Option *%s* = %s votes" % (row[4], row[5])
return list_msg
except Exception, e:
app.logger.debug(e)
return "Hi %s. There are no votes for survey %s" % (user_name, survey_id)
|
from os import path
from fabric import api as fab
from fabric.contrib.project import rsync_project as rsync_project
fab.env.shell = "/bin/csh -c"
def _git_base():
    """Return the absolute path of the local git checkout's top level."""
    return fab.local('git rev-parse --show-toplevel', capture=True)
def _default_vars():
    """Return the default variables of the first play in appserver.yml."""
    import ansible
    pb = fab.env.server.get_playbook('../../appserver.yml')
    play = ansible.playbook.Play(pb, pb.playbook[0], pb.play_basedirs[0])
    return play.default_vars
def _rsync_project(*args, **kwargs):
    """Wrap fabric's rsync_project, injecting ssh options derived from the
    server's pre-initialised ssh key so rsync can reach the target host."""
    additional_args = []
    ssh_info = fab.env.server.init_ssh_key()
    for key in ssh_info:
        # Upper-cased keys are ssh config options (e.g. Port, IdentityFile).
        if key[0].isupper():
            additional_args.append('-o')
            # '\"' is just '"' in Python, so the old replace was a no-op;
            # use a real backslash escape so embedded quotes survive.
            additional_args.append('%s="%s"' % (key, ssh_info[key].replace('"', '\\"')))
    kwargs['ssh_opts'] = '%s %s' % (kwargs.get('ssh_opts', ''), ' '.join(additional_args))
    rsync_project(*args, **kwargs)
def _checkout_git():
    """Export a pristine copy of the git index into the deployment workdir."""
    with fab.lcd(_git_base()):
        # clean the workdir
        fab.local('rm -rf workdir/*')
        # check out clean copy of the local git repo
        fab.local('git checkout-index -a -f --prefix=%s/deployment/freebsd/workdir/' % _git_base())
def _upload_application():
    """Rsync the exported working copy onto the app server and hand
    ownership of it to the application user."""
    git_base = _git_base()
    default_vars = _default_vars()
    with fab.lcd(git_base):
        with fab.settings(fab.hide('running')):
            # upload the whole project w/o deleting
            _rsync_project(remote_dir=default_vars['apphome'],
                local_dir="%s/deployment/freebsd/workdir/application/" % git_base,
                delete=False)
            # upload the source with deleting; reuse git_base instead of
            # spawning a second `git rev-parse` subprocess.
            _rsync_project(remote_dir='%s/briefkasten' % default_vars['apphome'],
                local_dir="%s/deployment/freebsd/workdir/application/briefkasten/" % git_base,
                delete=True)
    fab.run('chown -R %s %s' % (default_vars['appuser'], default_vars['apphome']))
def upload_application():
    """upload and/or update the application with the current git state """
    # Export a fresh checkout first so uncommitted noise never ships.
    _checkout_git()
    _upload_application()
def _upload_theme():
    """Rsync the locally configured theme into the remote themes folder."""
    with fab.lcd(_git_base()):
        with fab.settings(fab.hide('running')):
            local_theme_path = path.join(fab.env['config_base'], fab.env.server.config['local_theme_path'])
            remote_theme_path = '%s/themes/%s' % (_default_vars()['apphome'], fab.env.server.config['theme_name'])
            # delete=True keeps the remote theme an exact mirror.
            _rsync_project(remote_dir=remote_theme_path,
                local_dir=local_theme_path,
                delete=True)
def upload_theme():
    """ upload and/or update the theme with the current git state"""
    # Export a fresh checkout first so uncommitted noise never ships.
    _checkout_git()
    _upload_theme()
def upload_editor_keys():
    """ upload and/or update the PGP keys for editors, import them into PGP"""
    default_vars = _default_vars()
    appuser = default_vars['appuser']
    with fab.settings(fab.hide('running')):
        local_key_path = path.join(fab.env['config_base'], fab.env.server.config['local_pgpkey_path'])
        remote_key_path = '%s/var/pgp_pubkeys/' % _default_vars()['apphome']
        _rsync_project(remote_dir=remote_key_path, local_dir=local_key_path, delete=True)
        # Keys must belong to the app user before gpg imports them.
        fab.run('chown -R %s %s' % (appuser, remote_key_path))
        with fab.prefix("setenv GNUPGHOME %s" % remote_key_path):
            fab.sudo('''gpg --import %s/*.gpg''' % remote_key_path,
                user=appuser, shell_escape=False)
# TODO(review): ansible steps below are not yet ported to fabric:
# - name: run buildout (this *will* take quite a while... be patient)
# command: gmake deployment chdir={{apphome}}
# sudo_user: "{{appuser}}"
# notify: restart supervisord
def upload_project():
    """ upload the entire project, including theme, application, pgp keys etc. with the current git state"""
    _checkout_git()
    _upload_application()
    _upload_theme()
    upload_editor_keys()
Run buildout on the server as part of the project upload.
from os import path
from fabric import api as fab
from fabric.contrib.project import rsync_project as rsync_project
fab.env.shell = "/bin/csh -c"
def _git_base():
    """Return the absolute path of the local git checkout's top level."""
    return fab.local('git rev-parse --show-toplevel', capture=True)
def _default_vars():
    """Return the default variables of the first play in appserver.yml."""
    import ansible
    pb = fab.env.server.get_playbook('../../appserver.yml')
    play = ansible.playbook.Play(pb, pb.playbook[0], pb.play_basedirs[0])
    return play.default_vars
def _rsync_project(*args, **kwargs):
    """Wrap fabric's rsync_project, injecting ssh options derived from the
    server's pre-initialised ssh key so rsync can reach the target host."""
    additional_args = []
    ssh_info = fab.env.server.init_ssh_key()
    for key in ssh_info:
        # Upper-cased keys are ssh config options (e.g. Port, IdentityFile).
        if key[0].isupper():
            additional_args.append('-o')
            # '\"' is just '"' in Python, so the old replace was a no-op;
            # use a real backslash escape so embedded quotes survive.
            additional_args.append('%s="%s"' % (key, ssh_info[key].replace('"', '\\"')))
    kwargs['ssh_opts'] = '%s %s' % (kwargs.get('ssh_opts', ''), ' '.join(additional_args))
    rsync_project(*args, **kwargs)
def _checkout_git():
    """Export a pristine copy of the git index into the deployment workdir."""
    with fab.lcd(_git_base()):
        # clean the workdir
        fab.local('rm -rf workdir/*')
        # check out clean copy of the local git repo
        fab.local('git checkout-index -a -f --prefix=%s/deployment/freebsd/workdir/' % _git_base())
def _upload_application():
    """Rsync the exported working copy onto the app server and hand
    ownership of it to the application user."""
    git_base = _git_base()
    default_vars = _default_vars()
    with fab.lcd(git_base):
        with fab.settings(fab.hide('running')):
            # upload the whole project w/o deleting
            _rsync_project(remote_dir=default_vars['apphome'],
                local_dir="%s/deployment/freebsd/workdir/application/" % git_base,
                delete=False)
            # upload the source with deleting; reuse git_base instead of
            # spawning a second `git rev-parse` subprocess.
            _rsync_project(remote_dir='%s/briefkasten' % default_vars['apphome'],
                local_dir="%s/deployment/freebsd/workdir/application/briefkasten/" % git_base,
                delete=True)
    fab.run('chown -R %s %s' % (default_vars['appuser'], default_vars['apphome']))
def upload_application():
    """upload and/or update the application with the current git state """
    # Export a fresh checkout first so uncommitted noise never ships.
    _checkout_git()
    _upload_application()
def _upload_theme():
    """Rsync the locally configured theme into the remote themes folder."""
    with fab.lcd(_git_base()):
        with fab.settings(fab.hide('running')):
            local_theme_path = path.join(fab.env['config_base'], fab.env.server.config['local_theme_path'])
            remote_theme_path = '%s/themes/%s' % (_default_vars()['apphome'], fab.env.server.config['theme_name'])
            # delete=True keeps the remote theme an exact mirror.
            _rsync_project(remote_dir=remote_theme_path,
                local_dir=local_theme_path,
                delete=True)
def upload_theme():
    """ upload and/or update the theme with the current git state"""
    # Export a fresh checkout first so uncommitted noise never ships.
    _checkout_git()
    _upload_theme()
def upload_editor_keys():
    """ upload and/or update the PGP keys for editors, import them into PGP"""
    default_vars = _default_vars()
    appuser = default_vars['appuser']
    with fab.settings(fab.hide('running')):
        local_key_path = path.join(fab.env['config_base'], fab.env.server.config['local_pgpkey_path'])
        remote_key_path = '%s/var/pgp_pubkeys/' % _default_vars()['apphome']
        _rsync_project(remote_dir=remote_key_path, local_dir=local_key_path, delete=True)
        # Keys must belong to the app user before gpg imports them.
        fab.run('chown -R %s %s' % (appuser, remote_key_path))
        with fab.prefix("setenv GNUPGHOME %s" % remote_key_path):
            fab.sudo('''gpg --import %s/*.gpg''' % remote_key_path,
                user=appuser, shell_escape=False)
def run_buildout():
    """Run buildout remotely as the application user (slow, be patient)."""
    default_vars = _default_vars()
    with fab.cd(default_vars['apphome']):
        fab.sudo('gmake deployment', user=default_vars['appuser'])
# TODO(review): ansible handler 'restart supervisord' is not yet ported.
def upload_project():
    """ upload, bootstrap and start the entire project"""
    _checkout_git()
    _upload_application()
    _upload_theme()
    upload_editor_keys()
    run_buildout()
|
import re
import sys
import shutil
import os.path
import subprocess
import reindent
import untabify
def n_files_str(count):
    """Return 'N file(s)' with the proper plurality on 'file'."""
    suffix = "" if count == 1 else "s"
    return "{} file{}".format(count, suffix)
def status(message, modal=False, info=None):
    """Decorator factory: print *message*, run the wrapped function, then
    print an outcome marker ('done', info(result), or yes/NO for modal)."""
    def decorated_fxn(fxn):
        def call_fxn(*args, **kwargs):
            sys.stdout.write(message + ' ... ')
            sys.stdout.flush()
            result = fxn(*args, **kwargs)
            if info:
                print(info(result))
            elif modal:
                print("yes" if result else "NO")
            else:
                print("done")
            return result
        return call_fxn
    return decorated_fxn
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files():
"""Get the list of changed or added files from the VCS."""
if os.path.isdir('.hg'):
vcs = 'hg'
cmd = 'hg status --added --modified --no-status'
elif os.path.isdir('.svn'):
vcs = 'svn'
cmd = 'svn status --quiet --non-interactive --ignore-externals'
else:
sys.exit('need a checkout to get modified files')
st = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
try:
st.wait()
if vcs == 'hg':
return [x.decode().rstrip() for x in st.stdout]
else:
output = (x.decode().rstrip().rsplit(None, 1)[-1]
for x in st.stdout if x[0] in b'AM')
return set(path for path in output if os.path.isfile(path))
finally:
st.stdout.close()
def report_modified_files(file_paths):
    """Render a plural-aware count header plus one indented line per path."""
    count = len(file_paths)
    if not file_paths:
        return n_files_str(count)
    lines = ["{}:".format(n_files_str(count))]
    lines.extend("  {}".format(path) for path in file_paths)
    return "\n".join(lines)
@status("Fixing whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
"""Make sure that the whitespace for .py files have been normalized."""
reindent.makebackup = False # No need to create backups.
fixed = []
for path in (x for x in file_paths if x.endswith('.py')):
if reindent.check(path):
fixed.append(path)
return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
"""Report if any C files """
fixed = []
for path in file_paths:
with open(path, 'r') as f:
if '\t' not in f.read():
continue
untabify.process(path, 8, verbose=False)
fixed.append(path)
return fixed
# Trailing whitespace before the (possibly CRLF) line ending.
ws_re = re.compile(br'\s+(\r?\n)$')

@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
    """Strip trailing whitespace from doc files, keeping a .bak backup."""
    fixed = []
    for path in file_paths:
        try:
            with open(path, 'rb') as f:
                lines = f.readlines()
            stripped = [ws_re.sub(br'\1', line) for line in lines]
            if stripped == lines:
                continue
            shutil.copyfile(path, path + '.bak')
            with open(path, 'wb') as f:
                f.writelines(stripped)
            fixed.append(path)
        except Exception as err:
            print('Cannot fix %s: %s' % (path, err))
    return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
"""Report if any file in the Doc directory has been changed."""
return bool(file_paths)
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
return 'Misc/ACKS' in file_paths
@status("Misc/NEWS updated", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS has been changed."""
return 'Misc/NEWS' in file_paths
def main():
    """Run every pre-commit check against the files modified in the VCS."""
    file_paths = changed_files()
    python_files = [fn for fn in file_paths if fn.endswith('.py')]
    c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
    doc_files = [fn for fn in file_paths if fn.startswith('Doc')]
    special_files = {'Misc/ACKS', 'Misc/NEWS'} & set(file_paths)
    # PEP 8 whitespace rules enforcement.
    normalize_whitespace(python_files)
    # C rules enforcement.
    normalize_c_whitespace(c_files)
    # Doc whitespace enforcement.
    normalize_docs_whitespace(doc_files)
    # Docs updated.
    docs_modified(doc_files)
    # Misc/ACKS changed.
    credit_given(special_files)
    # Misc/NEWS changed.
    reported_news(special_files)
    # Test suite run and passed.
    print()
    print("Did you run the test suite?")

if __name__ == '__main__':
    main()
remove svn support
import re
import sys
import shutil
import os.path
import subprocess
import reindent
import untabify
def n_files_str(count):
    """Return 'N file(s)' with the proper plurality on 'file'."""
    suffix = "" if count == 1 else "s"
    return "{} file{}".format(count, suffix)
def status(message, modal=False, info=None):
    """Decorator factory: print *message*, run the wrapped function, then
    print an outcome marker ('done', info(result), or yes/NO for modal)."""
    def decorated_fxn(fxn):
        def call_fxn(*args, **kwargs):
            sys.stdout.write(message + ' ... ')
            sys.stdout.flush()
            result = fxn(*args, **kwargs)
            if info:
                print(info(result))
            elif modal:
                print("yes" if result else "NO")
            else:
                print("done")
            return result
        return call_fxn
    return decorated_fxn
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files():
"""Get the list of changed or added files from the VCS."""
if os.path.isdir('.hg'):
vcs = 'hg'
cmd = 'hg status --added --modified --no-status'
else:
sys.exit('need a checkout to get modified files')
st = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
try:
st.wait()
if vcs == 'hg':
return [x.decode().rstrip() for x in st.stdout]
else:
output = (x.decode().rstrip().rsplit(None, 1)[-1]
for x in st.stdout if x[0] in b'AM')
return set(path for path in output if os.path.isfile(path))
finally:
st.stdout.close()
def report_modified_files(file_paths):
    """Render a plural-aware count header plus one indented line per path."""
    count = len(file_paths)
    if not file_paths:
        return n_files_str(count)
    lines = ["{}:".format(n_files_str(count))]
    lines.extend("  {}".format(path) for path in file_paths)
    return "\n".join(lines)
@status("Fixing whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
"""Make sure that the whitespace for .py files have been normalized."""
reindent.makebackup = False # No need to create backups.
fixed = []
for path in (x for x in file_paths if x.endswith('.py')):
if reindent.check(path):
fixed.append(path)
return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
"""Report if any C files """
fixed = []
for path in file_paths:
with open(path, 'r') as f:
if '\t' not in f.read():
continue
untabify.process(path, 8, verbose=False)
fixed.append(path)
return fixed
# Trailing whitespace before the (possibly CRLF) line ending.
ws_re = re.compile(br'\s+(\r?\n)$')

@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
    """Strip trailing whitespace from doc files, keeping a .bak backup."""
    fixed = []
    for path in file_paths:
        try:
            with open(path, 'rb') as f:
                lines = f.readlines()
            stripped = [ws_re.sub(br'\1', line) for line in lines]
            if stripped == lines:
                continue
            shutil.copyfile(path, path + '.bak')
            with open(path, 'wb') as f:
                f.writelines(stripped)
            fixed.append(path)
        except Exception as err:
            print('Cannot fix %s: %s' % (path, err))
    return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
"""Report if any file in the Doc directory has been changed."""
return bool(file_paths)
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
return 'Misc/ACKS' in file_paths
@status("Misc/NEWS updated", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS has been changed."""
return 'Misc/NEWS' in file_paths
def main():
    """Run every pre-commit check against the files modified in the VCS."""
    file_paths = changed_files()
    python_files = [fn for fn in file_paths if fn.endswith('.py')]
    c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
    doc_files = [fn for fn in file_paths if fn.startswith('Doc')]
    special_files = {'Misc/ACKS', 'Misc/NEWS'} & set(file_paths)
    # PEP 8 whitespace rules enforcement.
    normalize_whitespace(python_files)
    # C rules enforcement.
    normalize_c_whitespace(c_files)
    # Doc whitespace enforcement.
    normalize_docs_whitespace(doc_files)
    # Docs updated.
    docs_modified(doc_files)
    # Misc/ACKS changed.
    credit_given(special_files)
    # Misc/NEWS changed.
    reported_news(special_files)
    # Test suite run and passed.
    print()
    print("Did you run the test suite?")

if __name__ == '__main__':
    main()
|
Test person data building for Django
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Logging infrastructure."""
from copy import deepcopy
from enum import Flag, auto
from itertools import count
from functools import reduce
from hoomd.util import dict_map, _SafeNamespaceDict
from hoomd.error import DataAccessError
from collections.abc import Sequence
class LoggerCategories(Flag):
    """Enum that marks all accepted logger types.
    This class does not need to be used by users directly. We directly convert
    from strings to the enum wherever necessary in the API. This class is
    documented to show users what types of quantities can be logged, and what
    categories to use for limiting what data is logged, user specified logged
    quantities, and custom actions (`hoomd.custom.Action`).
    Flags:
        scalar: `float` or `int` objects (i.e. numbers)
        sequence: sequence (e.g. `list`, `tuple`, `numpy.ndarray`) of numbers of
            the same type.
        string: a single Python `str` object
        strings: a sequence of Python `str` objects
        object: any Python object outside a sequence, string, or scalar.
        angle: per-angle quantity
        bond: per-bond quantity
        constraint: per-constraint quantity
        dihedral: per-dihedral quantity
        improper: per-improper quantity
        pair: per-pair quantity
        particle: per-particle quantity
        state: internal category for specifying object's internal state
        ALL: a combination of all other categories
        NONE: represents no category
    """
    # NONE must be 0 so it is the identity for the | reduction in any().
    NONE = 0
    scalar = auto()
    sequence = auto()
    string = auto()
    strings = auto()
    object = auto()
    angle = auto()
    bond = auto()
    constraint = auto()
    dihedral = auto()
    improper = auto()
    pair = auto()
    particle = auto()
    state = auto()

    @classmethod
    def any(cls, categories=None):
        """Return a LoggerCategories enum representing any of the categories.

        Args:
            categories (list[str] or list[`LoggerCategories`]):
                A list of `str` or `LoggerCategories` objects that should be
                represented by the returned `LoggerCategories` object.

        Returns:
            `LoggerCategories`: the `LoggerCategories` object that represents
            any of the given categories.
        """
        # With no argument, OR together every defined member.
        categories = cls.__members__.values(
        ) if categories is None else categories

        return reduce(cls._combine_flags, categories, LoggerCategories.NONE)

    @classmethod
    def _combine_flags(cls, flag1, flag2):
        # OR two flags, converting strings to members first.
        return cls._from_str(flag1) | cls._from_str(flag2)

    @classmethod
    def _from_str(cls, category):
        # Accept either a member name or an existing member.
        if isinstance(category, str):
            return cls[category]
        else:
            return category

    @classmethod
    def _get_string_list(cls, category):
        # Names of all members contained in the given composite flag.
        return [mem.name for mem in cls.__members__.values() if mem in category]
# Composite flag containing every category; defined after the class body
# because Flag members cannot reference each other during creation.
LoggerCategories.ALL = LoggerCategories.any()


# function defined here to ensure that each class of type Loggable will have a
# loggables property
def _loggables(self):
    """dict[str, str]: Name, category mapping of loggable quantities."""
    return {
        name: quantity.category.name
        for name, quantity in self._export_dict.items()
    }
class _LoggableEntry:
"""Stores entries for _Loggable's store of a class's loggable quantities."""
def __init__(self, category, default):
self.category = category
self.default = default
class _NamespaceFilter:
    """Filter for creating the proper namespace for logging object properties.

    Attributes:
        remove_names (set[str]): A set of names which to remove for the logging
            namespace whenever encountered.
        base_names (set[str]): A set of names which indicate that the next
            encountered name in the string should be skipped. For example, if a
            module hierarchy went like ``project.foo.bar.Bar`` and ``foo``
            directly imports ``Bar``, ``bar`` may not be desirable to have in
            the logging namespace since users interact with it via ``foo.Bar``.
            Currently, this only handles a single level of nesting like this.
        skip_duplicates (bool, optional): Whether or not to remove consecutive
            duplicates from a logging namespace (e.g. ``foo.foo.bar`` ->
            ``foo.bar``), default ``True``. By default we assume that this
            pattern means that the inner module is imported into its parent.
    """

    def __init__(self,
                 remove_names=None,
                 base_names=None,
                 skip_duplicates=True):
        self.remove_names = set() if remove_names is None else remove_names
        self.base_names = set() if base_names is None else base_names
        # When True, the next yielded candidate is dropped (set by base_names).
        self._skip_next = False
        self.skip_duplicates = skip_duplicates
        if skip_duplicates:
            # NOTE(review): _last_name persists across calls of __call__,
            # so state leaks between namespaces -- confirm this is intended.
            self._last_name = None

    def __call__(self, namespace):
        # Generator yielding the filtered namespace components in order.
        for name in namespace:
            # check for duplicates in the namespace and remove them (e.g.
            # `md.pair.pair.LJ` -> `md.pair.LJ`).
            if self.skip_duplicates:
                last_name = self._last_name
                self._last_name = name
                if last_name == name:
                    continue
            if name in self.remove_names:
                continue
            elif self._skip_next:
                self._skip_next = False
                continue
            elif name in self.base_names:
                # Yield this name but drop the one that follows it.
                self._skip_next = True
            yield name
        # Reset for next call of filter
        self._skip_next = False
class _LoggerQuantity:
    """The information to automatically log to a `hoomd.logging.Logger`.

    Args:
        name (str): The name of the quantity.
        cls (``class object``): The class that the quantity comes from.
        category (str or LoggerCategories, optional): The type of quantity.
            Valid values are given in the `hoomd.logging.LoggerCategories`
            documentation.

    Note:
        For users, this class is meant to be used in conjunction with
        `hoomd.custom.Action` for exposing loggable quantities for custom user
        actions.
    """

    # Shared filter used to shorten hoomd-internal module paths.
    namespace_filter = _NamespaceFilter(
        # Names that are imported directly into the hoomd namespace
        remove_names={'simulation', 'state', 'operations', 'snapshot'},
        # Names that have their submodules' classes directly imported into them
        # (e.g. `hoomd.update.box_resize.BoxResize` gets used as
        # `hoomd.update.BoxResize`)
        base_names={'update', 'tune', 'write'},
        skip_duplicates=True)

    def __init__(self, name, cls, category='scalar', default=True):
        self.name = name
        self.update_cls(cls)
        if isinstance(category, str):
            self.category = LoggerCategories[category]
        elif isinstance(category, LoggerCategories):
            self.category = category
        else:
            raise ValueError("Flag must be a string convertable into "
                             "LoggerCategories or a LoggerCategories object.")
        self.default = bool(default)

    def yield_names(self, user_name=None):
        """Infinitely yield potential namespaces.

        Used to ensure that all namespaces are unique for a
        `hoomd.logging.Logger` object. We simply increment a number at the end
        until the caller stops asking for another namespace.

        Yields:
            tuple[str]: A potential namespace for the object.
        """
        if user_name is None:
            namespace = self.namespace
        else:
            # Replace the final component with the user-chosen name.
            namespace = self.namespace[:-1] + (user_name,)
        yield namespace + (self.name,)
        # On collision, append _1, _2, ... to the last namespace component.
        for i in count(start=1, step=1):
            yield namespace[:-1] + (namespace[-1] + '_' + str(i), self.name)

    def update_cls(self, cls):
        """Allow updating the class/namespace of the object.

        Since the namespace is determined by the passed class's module and class
        name, if inheriting `hoomd.logging._LoggerQuantity`, the class needs
        to be updated to the subclass.

        Args:
            cls (``class object``): The class to update the namespace with.
        """
        self.namespace = self._generate_namespace(cls)
        return self

    @classmethod
    def _generate_namespace(cls, loggable_cls):
        """Generate the namespace of a class given its module hierarchy."""
        ns = tuple(loggable_cls.__module__.split('.'))
        cls_name = loggable_cls.__name__
        # Only filter namespaces of objects in the hoomd package
        if ns[0] == 'hoomd':
            return tuple(cls.namespace_filter(ns[1:])) + (cls_name,)
        else:
            return ns + (cls_name,)
class Loggable(type):
    """Metaclass that collects loggable quantities on classes that use it."""

    # Scratch space filled by the `log` decorator while a class body is being
    # executed; consumed and reset in __init__.
    _meta_export_dict = dict()

    def __init__(cls, name, bases, dct):
        """Adds marked quantities for logging in new class.

        Also adds a loggables property that returns a mapping of loggable
        quantity names with the string category. We overwrite __init__ instead
        of __new__ since this plays much more nicely with inheritance. This
        allows, for instance, `Loggable` to be subclassed with metaclasses that
        use __new__ without having to hack the subclass's behavior.
        """
        # grab loggable quantities through class inheritance.
        log_dict = Loggable._get_inherited_loggables(cls)

        # Add property to get all available loggable quantities. We ensure that
        # we haven't already added a loggables property first. The empty dict
        # check is for improved speed while the not any checking of subclasses
        # allows for certainty that a previous class of type Loggable (or one
        # of its subclasses) did not already add that property. This is not
        # necessary, but allows us to check that an user or developer didn't
        # accidentally create a loggables method, attribute, or property
        # already. We can speed this up by just removing the check and
        # overwriting the property every time, but lose the ability to error on
        # improper class definitions.
        if log_dict == {} and not any(
                issubclass(type(c), Loggable) for c in cls.__mro__[1:]):
            Loggable._add_property_for_displaying_loggables(cls)

        # grab the current class's loggable quantities
        log_dict.update(Loggable._get_current_cls_loggables(cls))
        cls._export_dict = log_dict
        Loggable._meta_export_dict = dict()

    @staticmethod
    def _add_property_for_displaying_loggables(new_cls):
        # Refuse to clobber a user-defined `loggables` attribute.
        if hasattr(new_cls, 'loggables'):
            raise ValueError("classes of type Loggable cannot implement a "
                             "loggables method, property, or attribute.")
        else:
            new_cls.loggables = property(_loggables)

    @classmethod
    def _get_inherited_loggables(cls, new_cls):
        """Get loggable quantities from new class's __mro__."""
        # We reverse the mro list to ensure that if a conflict in names exist we
        # take the one with the most priority in the mro. Also track if any
        # parent classes also have Loggable as a metaclass. This allows us to
        # know if we should error if a loggables method is defined. We also
        # skip the first entry since that is the new_cls itself.
        inherited_loggables = dict()
        for base_cls in reversed(new_cls.__mro__[1:]):
            # The conditional checks if the type of one of the parent classes of
            # new_cls has a metaclass (or type) which is a subclass of Loggable
            # or one of its subclasses.
            if issubclass(type(base_cls), Loggable):
                inherited_loggables.update({
                    name: deepcopy(quantity).update_cls(new_cls)
                    for name, quantity in base_cls._export_dict.items()
                })
        return inherited_loggables

    @classmethod
    def _get_current_cls_loggables(cls, new_cls):
        """Gets the current class's new loggables (not inherited)."""
        current_loggables = {}
        for name, entry in cls._meta_export_dict.items():
            current_loggables[name] = _LoggerQuantity(name, new_cls,
                                                      entry.category,
                                                      entry.default)
            cls._add_loggable_docstring_info(new_cls, name, entry.category,
                                             entry.default)
        return current_loggables

    @classmethod
    def _add_loggable_docstring_info(cls, new_cls, attr, category, default):
        """Append a '(Loggable: ...)' note to the attribute's docstring."""
        doc = getattr(new_cls, attr).__doc__
        # Don't add documentation to empty docstrings. This means that the
        # quantity is not documented and needs to be fixed, but this prevents
        # the rendering of invalid docs since we need a non-empty docstring.
        # Fixed: previously compared the *module* __doc__ instead of doc.
        if doc == "":
            return
        str_msg = '\n\n{}(`Loggable <hoomd.logging.Logger>`: '
        str_msg += f'category="{str(category)[17:]}"'
        if default:
            str_msg += ')'
        else:
            str_msg += ', default=False)'
        if doc is None:
            getattr(new_cls, attr).__doc__ = str_msg.format('')
        else:
            # Match the existing docstring's indentation so Sphinx renders
            # the appended note correctly.
            indent = 0
            lines = doc.split('\n')
            if len(lines) >= 3:
                cnt = 2
                while lines[cnt] == '':
                    cnt += 1
                indent = len(lines[cnt]) - len(lines[cnt].lstrip())
            getattr(new_cls, attr).__doc__ += str_msg.format(' ' * indent)
def log(func=None,
        *,
        is_property=True,
        category='scalar',
        default=True,
        requires_attach=False):
    """Creates loggable quantities for classes of type Loggable.

    For users this should be used with `hoomd.custom.Action` for exposing
    loggable quantities from a custom action.

    Args:
        func (`method`): class method to make loggable. If using non-default
            arguments, func should not be set.
        is_property (`bool`, optional): Whether to make the method a
            property, defaults to True. Argument keyword only
        category (`str`, optional): The string represention of the type of
            loggable quantity, defaults to 'scalar'. See
            `hoomd.logging.LoggerCategories` for available types. Argument
            keyword only
        default (`bool`, optional): Whether the quantity should be logged
            by default, defaults to True. This is orthogonal to the loggable
            quantity's type. An example would be performance orientated
            loggable quantities. Many users may not want to log such
            quantities even when logging other quantities of that type. The
            default category allows for these to be pass over by
            `hoomd.logging.Logger` objects by default. Argument keyword only.
        requires_attach (`bool`, optional): Whether this property is
            accessible before attaching.

    Note:
        The namespace (where the loggable object is stored in the
        `hoomd.logging.Logger` object's nested dictionary, is determined by
        the module/script and class name the loggable class comes from. In
        creating subclasses of `hoomd.custom.Action`, for instance, if the
        module the subclass is defined in is ``user.custom.action`` and the
        class name is ``Foo`` then the namespace used will be ``('user',
        'custom', 'action', 'Foo')``. This helps to prevent naming conflicts,
        and automate the logging specification for developers and users.
    """

    def helper(func):
        name = func.__name__
        if name in Loggable._meta_export_dict:
            raise KeyError(
                "Multiple loggable quantities named {}.".format(name))
        # Record the quantity; the Loggable metaclass consumes this dict
        # when the enclosing class body finishes executing.
        Loggable._meta_export_dict[name] = _LoggableEntry(
            LoggerCategories[category], default)
        if requires_attach:

            def wrapped_with_exception(self, *args, **kwargs):
                if not self._attached:
                    raise DataAccessError(name)
                # Fixed: forward `self` to the wrapped method; the previous
                # call dropped it, shifting every argument by one.
                return func(self, *args, **kwargs)

            func = wrapped_with_exception
        if is_property:
            return property(func)
        else:
            return func

    # Support both bare `@log` and parameterized `@log(...)` usage.
    if func is None:
        return helper
    else:
        return helper(func)
class _LoggerEntry:
"""Stores the information for an entry in a `hoomd.logging.Logger`.
The class deals with the logic of converting `tuple` and
`hoomd.logging._LoggerQuantity` objects into an object that can obtain the
actually log value when called.
Note:
This class could perform verification of the logged quantities. It
currently doesn't for performance reasons; this can be changed to give
greater security with regards to user specified quantities.
"""
def __init__(self, obj, attr, category):
self.obj = obj
self.attr = attr
self.category = category
@classmethod
def from_logger_quantity(cls, obj, logger_quantity):
return cls(obj, logger_quantity.name, logger_quantity.category)
@classmethod
def from_tuple(cls, entry):
err_msg = "Expected either (callable, category) or \
(obj, method/property, category)."
if (not isinstance(entry, Sequence) or len(entry) <= 1
or len(entry) > 3):
raise ValueError(err_msg)
# Get the method and category from the passed entry. Also perform some
# basic validation.
if len(entry) == 2:
if not callable(entry[0]):
raise ValueError(err_msg)
category = entry[1]
method = '__call__'
elif len(entry) == 3:
if not isinstance(entry[1], str):
raise ValueError(err_msg)
method = entry[1]
if not hasattr(entry[0], method):
raise ValueError(
"Provided method/property must exist in given object.")
category = entry[2]
# Ensure category is valid and converted to LoggerCategories enum.
if isinstance(category, str):
category = LoggerCategories[category]
elif not isinstance(category, LoggerCategories):
raise ValueError(
"category must be a string or hoomd.logging.LoggerCategories "
"object.")
return cls(entry[0], method, category)
def __call__(self):
try:
attr = getattr(self.obj, self.attr)
except DataAccessError:
attr = None
if self.category is LoggerCategories.state:
return attr
if callable(attr):
return (attr(), self.category.name)
else:
return (attr, self.category.name)
def __eq__(self, other):
return (self.obj == other.obj and self.attr == other.attr
and self.category == other.category)
return all(
getattr(self, attr) == getattr(other, attr)
for attr in ['obj', 'attr', 'category'])
class Logger(_SafeNamespaceDict):
    """Logs HOOMD-blue operation data and custom quantities.

    The `Logger` class provides an intermediary between a back end such as the
    `hoomd.write.Table` and many of HOOMD-blue's objects (as most objects are
    loggable). The `Logger` class makes use of *namespaces* which denote where
    a logged quantity fits in. For example internally all loggable quantities
    are ordered by the module and class they come from. For instance, the
    `hoomd.md.pair.LJ` class has a namespace ``('md', 'pair', 'LJ')``. This
    applies to all loggable internal objects in HOOMD-blue. This ensures that
    logged quantities remain unambiguous. To add a loggable object's
    quantities two methods exist `Logger.add` and the ``+=`` operator. Here we
    show an example using the ``+=`` operator.

    Example:
        .. code-block:: python

            logger = hoomd.logging.Logger()
            lj = md.pair.lj(nlist)
            # Log all default quantities of the lj object
            logger += lj
            logger = hoomd.logging.Logger(categories=['scalar'])
            # Log all default scalar quantities of the lj object
            logger += lj

    The `Logger` class also supports user specified quantities using
    namespaces as well.

    Example:
        .. code-block:: python

            logger = hoomd.logging.Logger()
            # Add quantity to ('custom', 'name') namespace
            logger[('custom', 'name')] = (lambda: 42, 'scalar')
            # Add quantity to ('custom_name',) namespace
            logger[('custom_name',)] = (lambda: 43, 'scalar')

    `Logger` objects support two ways of discriminating what loggable
    quantities they will accept: ``categories`` and ``only_default`` (the
    constructor arguments). Both of these are static meaning that once
    instantiated a `Logger` object will not change the values of these two
    properties. ``categories`` determines what if any types of loggable
    quantities (see `hoomd.logging.LoggerCategories`) are appropriate for a
    given `Logger` object. This helps logging back ends determine if a
    `Logger` object is compatible. The ``only_default`` flag is mainly a
    convenience by allowing quantities not commonly logged (but available) to
    be passed over unless explicitly asked for. You can override the
    ``only_default`` flag by explicitly listing the quantities you want in
    `Logger.add`, but the same is not true with regards to ``categories``.

    Note:
        The logger provides a way for users to create their own logger back
        ends if they wish. In making a custom logger back end, understanding
        the intermediate representation is key. To get an introduction see
        `hoomd.logging.Logger.log`. To understand the various categories
        available to specify logged quantities, see
        `hoomd.logging.LoggerCategories`. To integrate with `hoomd.Operations`
        the back end should be a subclass of `hoomd.custom.Action` and used
        with `hoomd.writer.CustomWriter`.

    Note:
        When logging multiple instances of the same class `Logger.add`
        provides a means of specifying the class level of the namespace (e.g.
        ``'LJ'`` in ``('md', 'pair', 'LJ')``). The default behavior (without
        specifying a user name) is to just append ``_{num}`` where ``num`` is
        the smallest positive integer which makes the full namespace unique.
        This appending will also occur for user specified names that are
        reused.

    Args:
        categories (`list` of `str`, optional): A list of string categories
            (list of categories can be found in
            `hoomd.logging.LoggerCategories`).
            These are the only types of loggable quantities that can be logged
            by this logger. Defaults to allowing every type.
        only_default (`bool`, optional): Whether to log only quantities that
            are logged by "default", defaults to ``True``. This mostly means
            that performance centric loggable quantities will be passed over
            when logging when false.
    """

    def __init__(self, categories=None, only_default=True):
        self._categories = LoggerCategories.ALL if categories is None else \
            LoggerCategories.any(categories)
        self._only_default = only_default
        super().__init__()

    @property
    def categories(self):
        """`hoomd.logging.LoggerCategories`: The enum representing the \
        acceptable categories for the `Logger` object."""
        return self._categories

    @property
    def string_categories(self):
        """`list` of `str`: A list of the string names of the allowed \
        categories for logging."""
        return LoggerCategories._get_string_list(self._categories)

    @property
    def only_default(self):
        """`bool`: Whether the logger object should only grab default \
        loggable quantities."""
        return self._only_default

    def _filter_quantities(self, quantities):
        """Yield only the quantities this logger's settings accept."""
        for quantity in quantities:
            if self._only_default and not quantity.default:
                continue
            elif quantity.category in self._categories:
                yield quantity

    def _get_loggables_by_name(self, obj, quantities):
        """Yield obj's accepted loggables, optionally restricted by name.

        Raises:
            ValueError: If any requested name is not a loggable of ``obj``.
        """
        if quantities is None:
            yield from self._filter_quantities(obj._export_dict.values())
        else:
            quantities = self._wrap_quantity(quantities)
            bad_keys = [q for q in quantities if q not in obj._export_dict]
            # ensure all keys are valid
            if bad_keys != []:
                raise ValueError(
                    "object {} has not loggable quantities {}.".format(
                        obj, bad_keys))
            yield from self._filter_quantities(
                map(lambda q: obj._export_dict[q], quantities))

    def add(self, obj, quantities=None, user_name=None):
        """Add loggables from obj to logger.

        Args:
            obj (object of class of type ``Loggable``): class of type loggable
                to add loggable quantities from.
            quantities (Sequence[str]): list of str names of quantities to
                log.
            user_name (`str`, optional): A string to replace the class name in
                the loggable quantities namespace. This allows for easier
                differentiation in the output of the `Logger` and any `Writer`
                which outputs its data.

        Returns:
            list[tuple[str]]: A list of namespaces that were added to the
            logger.
        """
        # The original returned None despite documenting a list of
        # namespaces; collect and return them as documented.
        used_namespaces = []
        for quantity in self._get_loggables_by_name(obj, quantities):
            namespace = self._add_single_quantity(obj, quantity, user_name)
            if namespace is not None:
                used_namespaces.append(namespace)
        return used_namespaces

    def remove(self, obj=None, quantities=None, user_name=None):
        """Remove specified quantities from the logger.

        Args:
            obj (object of class of type ``Loggable``, optional): Object to
                remove quantities from. If ``quantities`` is None, ``obj``
                must be set. If ``obj`` is set and ``quantities`` is None, all
                logged quantities from ``obj`` will be removed from the
                logger.
            quantities (Sequence[tuple]): a sequence of namespaces to remove
                from the logger. If specified with ``obj`` only remove
                quantities listed that are exposed from ``obj``. If ``obj`` is
                None, then ``quantities`` must be given.
            user_name (str): A user name to specify the final entry in the
                namespace of the object. This must be used if ``user_name``
                was specified in `Logger.add`.
        """
        if obj is None and quantities is None:
            raise ValueError(
                "Either obj, quantities, or both must be specified.")
        if obj is None:
            for quantity in self._wrap_quantity(quantities):
                if quantity in self:
                    del self[quantity]
        else:
            for quantity in self._get_loggables_by_name(obj, quantities):
                # Check all currently used namespaces for object's quantities.
                for namespace in quantity.yield_names(user_name):
                    if namespace in self:
                        if self._contains_obj(namespace, obj):
                            del self[namespace]
                    # We deterministically go through namespaces, so once a
                    # namespace is not in the logger, than we can be sure no
                    # further ones will be as well and break.
                    else:
                        break

    def _add_single_quantity(self, obj, quantity, user_name):
        """Add to first available namespace if obj is not logged.

        Returns:
            tuple[str] or None: The namespace the quantity now lives under,
            or the existing namespace when ``obj`` already logs it.
        """
        for namespace in quantity.yield_names(user_name):
            if namespace in self:
                # Check if the quantity is already logged by the same object.
                if self._contains_obj(namespace, obj):
                    return namespace
            else:
                self[namespace] = _LoggerEntry.from_logger_quantity(
                    obj, quantity)
                return namespace

    def __setitem__(self, namespace, value):
        """Allows user specified loggable quantities.

        Args:
            namespace (tuple[str,] or str): key or nested key to determine
                where to store logged quantity.
            value (tuple[Callable, str] or tuple[object, str, str]): Either a
                tuple with a callable and the `hoomd.logging.LoggerCategories`
                object or associated string or a object with a
                method/property name and category. If using a method it
                should not take arguments or have defaults for all arguments.
        """
        if isinstance(value, _LoggerEntry):
            super().__setitem__(namespace, value)
        else:
            super().__setitem__(namespace, _LoggerEntry.from_tuple(value))

    def __iadd__(self, obj):
        """Add quantities from object or list of objects to logger.

        Adds all quantities compatible with given categories and default
        value.

        Examples:
            .. code-block:: python

                logger += lj
                logger += [lj, harmonic_bonds]
        """
        if hasattr(obj, '__iter__'):
            for o in obj:
                self.add(o)
        else:
            self.add(obj)
        return self

    def __isub__(self, value):
        """Remove log entries for a list of quantities or objects.

        Examples:
            .. code-block:: python

                logger -= ('md', 'pair', 'lj')
                logger -= [('md', 'pair', 'lj', 'energy'),
                           ('md', 'pair', 'lj', 'forces')]
                logger -= lj
                logger -= [lj, harmonic_bonds]
        """
        if isinstance(value, str) or isinstance(value, tuple):
            self.remove(quantities=value)
        elif hasattr(value, '__iter__'):
            for v in value:
                self.__isub__(v)
        else:
            self.remove(obj=value)
        return self

    def log(self):
        """Get a nested dictionary of the current values for logged
        quantities.

        The nested dictionary consist of one level for each element of a
        namespace. The logged value and category for the namespace
        ``('example', 'namespace')`` would be accessible in the returned
        dictionary via ``logger.log()['example']['namespace']``.

        Returns:
            dict: A nested dictionary of the current logged quantities. The
            end values are (value, category) pairs which hold the value along
            with its associated `hoomd.logging.LoggerCategories` category
            represented as a string (to get the
            `hoomd.logging.LoggerCategories` enum value use
            ``LoggerCategories[category]``.
        """
        return dict_map(self._dict, lambda x: x())

    def _contains_obj(self, namespace, obj):
        """Evaluates based on identity."""
        return self._unsafe_getitem(namespace).obj is obj

    @staticmethod
    def _wrap_quantity(quantity):
        """Handle wrapping strings and tuples for iterating namespaces."""
        if isinstance(quantity, (str, tuple)):
            return [quantity]
        else:
            return quantity

    def __eq__(self, other):
        """Check for equality."""
        if not isinstance(other, type(self)):
            return NotImplemented
        return (self.categories == other.categories
                and self.only_default == other.only_default
                and self._dict == other._dict)
# change requires_attach description
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Logging infrastructure."""
from copy import deepcopy
from enum import Flag, auto
from itertools import count
from functools import reduce
from hoomd.util import dict_map, _SafeNamespaceDict
from hoomd.error import DataAccessError
from collections.abc import Sequence
class LoggerCategories(Flag):
    """Enum that marks all accepted logger types.

    Users never need to construct these members directly; the API converts
    from strings wherever needed. The enum is documented to show which kinds
    of quantities can be logged and which categories may be used to restrict
    logging, declare user specified quantities, or write custom actions
    (`hoomd.custom.Action`).

    Flags:
        scalar: `float` or `int` objects (i.e. numbers)

        sequence: sequence (e.g. `list`, `tuple`, `numpy.ndarray`) of numbers
            of the same type.

        string: a single Python `str` object

        strings: a sequence of Python `str` objects

        object: any Python object outside a sequence, string, or scalar.

        angle: per-angle quantity

        bond: per-bond quantity

        constraint: per-constraint quantity

        dihedral: per-dihedral quantity

        improper: per-improper quantity

        pair: per-pair quantity

        particle: per-particle quantity

        state: internal category for specifying object's internal state

        ALL: a combination of all other categories

        NONE: represents no category
    """
    NONE = 0
    scalar = auto()
    sequence = auto()
    string = auto()
    strings = auto()
    object = auto()
    angle = auto()
    bond = auto()
    constraint = auto()
    dihedral = auto()
    improper = auto()
    pair = auto()
    particle = auto()
    state = auto()

    @classmethod
    def any(cls, categories=None):
        """Return a LoggerCategories enum representing any of the categories.

        Args:
            categories (list[str] or list[`LoggerCategories`]):
                A list of `str` or `LoggerCategories` objects that should be
                represented by the returned `LoggerCategories` object.

        Returns:
            `LoggerCategories`: the `LoggerCategories` object that represents
            any of the given categories.
        """
        if categories is None:
            categories = cls.__members__.values()
        combined = LoggerCategories.NONE
        for category in categories:
            combined = combined | cls._from_str(category)
        return combined

    @classmethod
    def _combine_flags(cls, flag1, flag2):
        # OR two members together, converting strings to members first.
        return cls._from_str(flag1) | cls._from_str(flag2)

    @classmethod
    def _from_str(cls, category):
        # Accept either a member name or an existing member.
        return cls[category] if isinstance(category, str) else category

    @classmethod
    def _get_string_list(cls, category):
        # Names of every member contained in the given combined flag.
        names = []
        for member in cls.__members__.values():
            if member in category:
                names.append(member.name)
        return names

LoggerCategories.ALL = LoggerCategories.any()
# function defined here to ensure that each class of type Loggable will have a
# loggables property
def _loggables(self):
"""dict[str, str]: Name, category mapping of loggable quantities."""
return {
name: quantity.category.name
for name, quantity in self._export_dict.items()
}
class _LoggableEntry:
"""Stores entries for _Loggable's store of a class's loggable quantities."""
def __init__(self, category, default):
self.category = category
self.default = default
class _NamespaceFilter:
"""Filter for creating the proper namespace for logging object properties.
Attributes:
remove_names (set[str]): A set of names which to remove for the logging
namespace whenever encountered.
base_names (set[str]): A set of names which indicate that the next
encountered name in the string should be skipped. For example, if a
module hierarchy went like ``project.foo.bar.Bar`` and ``foo``
directly imports ``Bar``, ``bar`` may not be desirable to have in
the logging namespace since users interact with it via ``foo.Bar``.
Currently, this only handles a single level of nesting like this.
skip_duplicates (bool, optional): Whether or not to remove consecutive
duplicates from a logging namespace (e.g. ``foo.foo.bar`` ->
``foo.bar``), default ``True``. By default we assume that this
pattern means that the inner module is imported into its parent.
"""
def __init__(self,
remove_names=None,
base_names=None,
skip_duplicates=True):
self.remove_names = set() if remove_names is None else remove_names
self.base_names = set() if base_names is None else base_names
self._skip_next = False
self.skip_duplicates = skip_duplicates
if skip_duplicates:
self._last_name = None
def __call__(self, namespace):
for name in namespace:
# check for duplicates in the namespace and remove them (e.g.
# `md.pair.pair.LJ` -> `md.pair.LJ`).
if self.skip_duplicates:
last_name = self._last_name
self._last_name = name
if last_name == name:
continue
if name in self.remove_names:
continue
elif self._skip_next:
self._skip_next = False
continue
elif name in self.base_names:
self._skip_next = True
yield name
# Reset for next call of filter
self._skip_next = False
class _LoggerQuantity:
    """The information to automatically log to a `hoomd.logging.Logger`.

    Args:
        name (str): The name of the quantity.
        cls (``class object``): The class that the quantity comes from.
        category (str or LoggerCategories, optional): The type of quantity.
            Valid values are given in the `hoomd.logging.LoggerCategories`
            documentation.

    Note:
        For users, this class is meant to be used in conjunction with
        `hoomd.custom.Action` for exposing loggable quantities for custom
        user actions.
    """

    namespace_filter = _NamespaceFilter(
        # Names that are imported directly into the hoomd namespace
        remove_names={'simulation', 'state', 'operations', 'snapshot'},
        # Names that have their submodules' classes directly imported into
        # them (e.g. `hoomd.update.box_resize.BoxResize` gets used as
        # `hoomd.update.BoxResize`)
        base_names={'update', 'tune', 'write'},
        skip_duplicates=True)

    def __init__(self, name, cls, category='scalar', default=True):
        self.name = name
        self.update_cls(cls)
        if isinstance(category, LoggerCategories):
            self.category = category
        elif isinstance(category, str):
            self.category = LoggerCategories[category]
        else:
            raise ValueError("Flag must be a string convertable into "
                             "LoggerCategories or a LoggerCategories object.")
        self.default = bool(default)

    def yield_names(self, user_name=None):
        """Infinitely yield potential namespaces.

        Used to ensure that all namespaces are unique for a
        `hoomd.logging.Logger` object. A counter is appended to the class
        level of the namespace until the caller stops asking for candidates.

        Yields:
            tuple[str]: A potential namespace for the object.
        """
        if user_name is None:
            base = self.namespace
        else:
            base = self.namespace[:-1] + (user_name,)
        yield base + (self.name,)
        suffix = 1
        while True:
            yield base[:-1] + ('{}_{}'.format(base[-1], suffix), self.name)
            suffix += 1

    def update_cls(self, cls):
        """Allow updating the class/namespace of the object.

        Since the namespace is determined by the passed class's module and
        class name, if inheriting `hoomd.logging._LoggerQuantity`, the class
        needs to be updated to the subclass.

        Args:
            cls (``class object``): The class to update the namespace with.
        """
        self.namespace = self._generate_namespace(cls)
        return self

    @classmethod
    def _generate_namespace(cls, loggable_cls):
        """Generate the namespace of a class given its module hierarchy."""
        module_parts = tuple(loggable_cls.__module__.split('.'))
        class_name = loggable_cls.__name__
        # Only filter namespaces of objects in the hoomd package
        if module_parts[0] != 'hoomd':
            return module_parts + (class_name,)
        return tuple(cls.namespace_filter(module_parts[1:])) + (class_name,)
class Loggable(type):
    """Metaclass that collects loggable quantities for a class.

    Classes of this metaclass merge the quantities registered by the `log`
    decorator (held temporarily in ``_meta_export_dict``) with any loggables
    inherited from parent classes into ``_export_dict``, and gain a
    ``loggables`` property listing them.
    """

    _meta_export_dict = dict()

    def __init__(cls, name, bases, dct):
        """Adds marked quantities for logging in new class.

        Also adds a loggables property that returns a mapping of loggable
        quantity names with the string category. We overwrite __init__ instead
        of __new__ since this plays much more nicely with inheritance. This
        allows, for instance, `Loggable` to be subclassed with metaclasses
        that use __new__ without having to hack the subclass's behavior.
        """
        # grab loggable quantities through class inheritance.
        log_dict = Loggable._get_inherited_loggables(cls)

        # Add property to get all available loggable quantities. We ensure
        # that we haven't already added a loggables property first. The empty
        # dict check is for improved speed while the not any checking of
        # subclasses allows for certainty that a previous class of type
        # Loggable (or one of its subclasses) did not already add that
        # property. This is not necessary, but allows us to check that an user
        # or developer didn't accidentally create a loggables method,
        # attribute, or property already. We can speed this up by just
        # removing the check and overwriting the property every time, but
        # lose the ability to error on improper class definitions.
        if log_dict == {} and not any(
                issubclass(type(c), Loggable) for c in cls.__mro__[1:]):
            Loggable._add_property_for_displaying_loggables(cls)

        # grab the current class's loggable quantities
        log_dict.update(Loggable._get_current_cls_loggables(cls))
        cls._export_dict = log_dict
        Loggable._meta_export_dict = dict()

    @staticmethod
    def _add_property_for_displaying_loggables(new_cls):
        """Attach the ``loggables`` property, erroring if one exists."""
        if hasattr(new_cls, 'loggables'):
            raise ValueError("classes of type Loggable cannot implement a "
                             "loggables method, property, or attribute.")
        else:
            new_cls.loggables = property(_loggables)

    @classmethod
    def _get_inherited_loggables(cls, new_cls):
        """Get loggable quantities from new class's __mro__."""
        # We reverse the mro list to ensure that if a conflict in names exist
        # we take the one with the most priority in the mro. Also track if any
        # parent classes also have Loggable as a metaclass. This allows us to
        # know if we should error if a loggables method is defined. We also
        # skip the first entry since that is the new_cls itself.
        inherited_loggables = dict()
        for base_cls in reversed(new_cls.__mro__[1:]):
            # The conditional checks if the type of one of the parent classes
            # of new_cls has a metaclass (or type) which is a subclass of
            # Loggable or one of its subclasses.
            if issubclass(type(base_cls), Loggable):
                inherited_loggables.update({
                    name: deepcopy(quantity).update_cls(new_cls)
                    for name, quantity in base_cls._export_dict.items()
                })
        return inherited_loggables

    @classmethod
    def _get_current_cls_loggables(cls, new_cls):
        """Gets the current class's new loggables (not inherited)."""
        current_loggables = {}
        for name, entry in cls._meta_export_dict.items():
            current_loggables[name] = _LoggerQuantity(name, new_cls,
                                                      entry.category,
                                                      entry.default)
            cls._add_loggable_docstring_info(new_cls, name, entry.category,
                                             entry.default)
        return current_loggables

    @classmethod
    def _add_loggable_docstring_info(cls, new_cls, attr, category, default):
        """Append loggable metadata to the docstring of ``new_cls.attr``."""
        doc = getattr(new_cls, attr).__doc__
        # Don't add documentation to empty docstrings. This means the
        # quantity is undocumented (which should be fixed), but this prevents
        # the rendering of invalid docs since we need a non-empty docstring.
        # NOTE: the original compared the module-level ``__doc__`` here by
        # mistake; the attribute's docstring ``doc`` is what must be checked.
        if doc == "":
            return
        str_msg = '\n\n{}(`Loggable <hoomd.logging.Logger>`: '
        # str(category) looks like "LoggerCategories.scalar"; slicing off the
        # 17-character "LoggerCategories." prefix leaves the bare name.
        str_msg += f'category="{str(category)[17:]}"'
        if default:
            str_msg += ')'
        else:
            str_msg += ', default=False)'
        if doc is None:
            getattr(new_cls, attr).__doc__ = str_msg.format('')
        else:
            # Match the indentation of the existing docstring body so the
            # appended note renders at the same level.
            indent = 0
            lines = doc.split('\n')
            if len(lines) >= 3:
                cnt = 2
                # Guard against running off the end when every remaining
                # line is blank (the original could raise IndexError here).
                while cnt < len(lines) and lines[cnt] == '':
                    cnt += 1
                if cnt < len(lines):
                    indent = len(lines[cnt]) - len(lines[cnt].lstrip())
            getattr(new_cls, attr).__doc__ += str_msg.format(' ' * indent)
def log(func=None,
        *,
        is_property=True,
        category='scalar',
        default=True,
        requires_attach=False):
    """Creates loggable quantities for classes of type Loggable.

    For users this should be used with `hoomd.custom.Action` for exposing
    loggable quantities from a custom action.

    Args:
        func (`method`): class method to make loggable. If using non-default
            arguments, func should not be set.
        is_property (`bool`, optional): Whether to make the method a
            property, defaults to True. Argument keyword only
        category (`str`, optional): The string representation of the type of
            loggable quantity, defaults to 'scalar'. See
            `hoomd.logging.LoggerCategories` for available types. Argument
            keyword only
        default (`bool`, optional): Whether the quantity should be logged
            by default, defaults to True. This is orthogonal to the loggable
            quantity's type. An example would be performance orientated
            loggable quantities. Many users may not want to log such
            quantities even when logging other quantities of that type. The
            default category allows for these to be passed over by
            `hoomd.logging.Logger` objects by default. Argument keyword only.
        requires_attach (`bool`, optional): Whether this property requires
            attachment before being accessible, defaults to False. When True,
            accessing the quantity while unattached raises `DataAccessError`.

    Note:
        The namespace (where the loggable object is stored in the
        `hoomd.logging.Logger` object's nested dictionary, is determined by
        the module/script and class name the loggable class comes from. In
        creating subclasses of `hoomd.custom.Action`, for instance, if the
        module the subclass is defined in is ``user.custom.action`` and the
        class name is ``Foo`` then the namespace used will be ``('user',
        'custom', 'action', 'Foo')``. This helps to prevent naming conflicts,
        and automate the logging specification for developers and users.
    """

    def helper(func):
        name = func.__name__
        if name in Loggable._meta_export_dict:
            raise KeyError(
                "Multiple loggable quantities named {}.".format(name))
        Loggable._meta_export_dict[name] = _LoggableEntry(
            LoggerCategories[category], default)
        if requires_attach:

            def wrapped_with_exception(self, *args, **kwargs):
                if not self._attached:
                    raise DataAccessError(name)
                # ``func`` is the raw unbound function, so ``self`` must be
                # forwarded explicitly. (The original dropped ``self`` here,
                # breaking every requires_attach=True quantity.)
                return func(self, *args, **kwargs)

            # Preserve metadata consumed later by the docstring annotation.
            wrapped_with_exception.__name__ = name
            wrapped_with_exception.__doc__ = func.__doc__
            func = wrapped_with_exception
        if is_property:
            return property(func)
        else:
            return func

    if func is None:
        return helper
    else:
        return helper(func)
class _LoggerEntry:
"""Stores the information for an entry in a `hoomd.logging.Logger`.
The class deals with the logic of converting `tuple` and
`hoomd.logging._LoggerQuantity` objects into an object that can obtain the
actually log value when called.
Note:
This class could perform verification of the logged quantities. It
currently doesn't for performance reasons; this can be changed to give
greater security with regards to user specified quantities.
"""
def __init__(self, obj, attr, category):
self.obj = obj
self.attr = attr
self.category = category
@classmethod
def from_logger_quantity(cls, obj, logger_quantity):
return cls(obj, logger_quantity.name, logger_quantity.category)
@classmethod
def from_tuple(cls, entry):
err_msg = "Expected either (callable, category) or \
(obj, method/property, category)."
if (not isinstance(entry, Sequence) or len(entry) <= 1
or len(entry) > 3):
raise ValueError(err_msg)
# Get the method and category from the passed entry. Also perform some
# basic validation.
if len(entry) == 2:
if not callable(entry[0]):
raise ValueError(err_msg)
category = entry[1]
method = '__call__'
elif len(entry) == 3:
if not isinstance(entry[1], str):
raise ValueError(err_msg)
method = entry[1]
if not hasattr(entry[0], method):
raise ValueError(
"Provided method/property must exist in given object.")
category = entry[2]
# Ensure category is valid and converted to LoggerCategories enum.
if isinstance(category, str):
category = LoggerCategories[category]
elif not isinstance(category, LoggerCategories):
raise ValueError(
"category must be a string or hoomd.logging.LoggerCategories "
"object.")
return cls(entry[0], method, category)
def __call__(self):
try:
attr = getattr(self.obj, self.attr)
except DataAccessError:
attr = None
if self.category is LoggerCategories.state:
return attr
if callable(attr):
return (attr(), self.category.name)
else:
return (attr, self.category.name)
def __eq__(self, other):
return (self.obj == other.obj and self.attr == other.attr
and self.category == other.category)
return all(
getattr(self, attr) == getattr(other, attr)
for attr in ['obj', 'attr', 'category'])
class Logger(_SafeNamespaceDict):
"""Logs HOOMD-blue operation data and custom quantities.
The `Logger` class provides an intermediary between a back end such as the
`hoomd.write.Table` and many of HOOMD-blue's object (as most objects are
loggable). The `Logger` class makes use of *namespaces* which denote where a
logged quantity fits in. For example internally all loggable quantities are
ordered by the module and class them come from. For instance, the
`hoomd.md.pair.LJ` class has a namespace ``('md', 'pair', 'LJ')``. This
applies to all loggable internal objects in HOOMD-blue. This ensures that
logged quantities remain unambigious. To add a loggable object's quantities
two methods exist `Logger.add` and the ``+=`` operator. Here we show an
example using the ``+=`` operator.
Example:
.. code-block:: python
logger = hoomd.logging.Logger()
lj = md.pair.lj(nlist)
# Log all default quantities of the lj object
logger += lj
logger = hoomd.logging.Logger(categories=['scalar'])
# Log all default scalar quantities of the lj object
logger += lj
The `Logger` class also supports user specified quantities using namespaces
as well.
Example:
.. code-block:: python
logger = hoomd.logging.Logger()
# Add quantity to ('custom', 'name') namespace
logger[('custom', 'name')] = (lambda: 42, 'scalar')
# Add quantity to ('custom_name',) namespace
logger[('custom_name',)] = (lambda: 43, 'scalar')
`Logger` objects support two ways of discriminating what loggable quantities
they will accept: ``categories`` and ``only_default`` (the constructor
arguments). Both of these are static meaning that once instantiated a
`Logger` object will not change the values of these two properties.
``categories`` determines what if any types of loggable quantities (see
`hoomd.logging.LoggerCategories`) are appropriate for a given `Logger`
object. This helps logging back ends determine if a `Logger` object is
compatible. The ``only_default`` flag is mainly a convenience by allowing
quantities not commonly logged (but available) to be passed over unless
explicitly asked for. You can override the ``only_default`` flag by
explicitly listing the quantities you want in `Logger.add`, but the same is
not true with regards to ``categories``.
Note:
The logger provides a way for users to create their own logger back ends
if they wish. In making a custom logger back end, understanding the
intermediate representation is key. To get an introduction see
`hoomd.logging.Logger.log`. To understand the various categories
available to specify logged quantities, see
`hoomd.logging.LoggerCategories`. To integrate with `hoomd.Operations`
the back end should be a subclass of `hoomd.custom.Action` and used with
`hoomd.writer.CustomWriter`.
Note:
When logging multiple instances of the same class `Logger.add` provides
a means of specifying the class level of the namespace (e.g. ``'LJ`` in
``('md', 'pair', 'LJ')``). The default behavior (without specifying a
user name) is to just append ``_{num}`` where ``num`` is the smallest
positive integer which makes the full namespace unique. This appending
will also occur for user specified names that are reused.
Args:
categories (`list` of `str`, optional): A list of string categories
(list of categories can be found in
`hoomd.logging.LoggerCategories`).
These are the only types of loggable quantities that can be logged
by this logger. Defaults to allowing every type.
only_default (`bool`, optional): Whether to log only quantities that are
logged by "default", defaults to ``True``. This mostly means that
performance centric loggable quantities will be passed over when
logging when false.
"""
def __init__(self, categories=None, only_default=True):
self._categories = LoggerCategories.ALL if categories is None else \
LoggerCategories.any(categories)
self._only_default = only_default
super().__init__()
@property
def categories(self):
"""`hoomd.logging.LoggerCategories`: The enum representing the \
acceptable categories for the `Logger` object."""
return self._categories
@property
def string_categories(self):
"""`list` of `str`: A list of the string names of the allowed \
categories for logging."""
return LoggerCategories._get_string_list(self._categories)
@property
def only_default(self):
"""`bool`: Whether the logger object should only grab default loggable \
quantities."""
return self._only_default
def _filter_quantities(self, quantities):
for quantity in quantities:
if self._only_default and not quantity.default:
continue
elif quantity.category in self._categories:
yield quantity
def _get_loggables_by_name(self, obj, quantities):
if quantities is None:
yield from self._filter_quantities(obj._export_dict.values())
else:
quantities = self._wrap_quantity(quantities)
bad_keys = [q for q in quantities if q not in obj._export_dict]
# ensure all keys are valid
if bad_keys != []:
raise ValueError(
"object {} has not loggable quantities {}.".format(
obj, bad_keys))
yield from self._filter_quantities(
map(lambda q: obj._export_dict[q], quantities))
    def add(self, obj, quantities=None, user_name=None):
        """Add loggables from obj to logger.

        Args:
            obj (object of class of type ``Loggable``): class of type loggable
                to add loggable quantities from.
            quantities (Sequence[str]): list of str names of quantities to log.
            user_name (`str`, optional): A string to replace the class name in
                the loggable quantities namespace. This allows for easier
                differentiation in the output of the `Logger` and any `Writer`
                which outputs its data.

        Note:
            Returns ``None``: the body has no ``return`` statement, even
            though earlier documentation described a list of added
            namespaces.
        """
        # Invalid quantity names raise ValueError inside the generator.
        for quantity in self._get_loggables_by_name(obj, quantities):
            self._add_single_quantity(obj, quantity, user_name)
    def remove(self, obj=None, quantities=None, user_name=None):
        """Remove specified quantities from the logger.

        Args:
            obj (object of class of type ``Loggable``, optional): Object to
                remove quantities from. If ``quantities`` is None, ``obj`` must
                be set. If ``obj`` is set and ``quantities`` is None, all logged
                quantities from ``obj`` will be removed from the logger.
            quantities (Sequence[tuple]): a sequence of namespaces to remove
                from the logger. If specified with ``obj`` only remove
                quantities listed that are exposed from ``obj``. If ``obj`` is
                None, then ``quantities`` must be given.
            user_name (str): A user name to specify the final entry in the
                namespace of the object. This must be used if ``user_name`` was
                specified in `Logger.add`.

        Raises:
            ValueError: If both ``obj`` and ``quantities`` are None.
        """
        if obj is None and quantities is None:
            raise ValueError(
                "Either obj, quantities, or both must be specified.")
        if obj is None:
            # Namespace-only removal: delete each listed namespace if present.
            for quantity in self._wrap_quantity(quantities):
                if quantity in self:
                    del self[quantity]
        else:
            for quantity in self._get_loggables_by_name(obj, quantities):
                # Check all currently used namespaces for object's quantities.
                for namespace in quantity.yield_names(user_name):
                    if namespace in self:
                        # Only delete entries actually backed by this object.
                        if self._contains_obj(namespace, obj):
                            del self[namespace]
                    # We deterministically go through namespaces, so once a
                    # namespace is not in the logger, than we can be sure no
                    # further ones will be as well and break.
                    else:
                        break
    def _add_single_quantity(self, obj, quantity, user_name):
        """Add to first available namespace if obj is not logged."""
        # yield_names produces candidate namespaces in deterministic order
        # (base name first, then suffixed variants).
        for namespace in quantity.yield_names(user_name):
            if namespace in self:
                # Check if the quantity is already logged by the same object
                if self._contains_obj(namespace, obj):
                    return None
            else:
                # First free namespace wins; stop searching.
                self[namespace] = _LoggerEntry.from_logger_quantity(
                    obj, quantity)
                return None
def __setitem__(self, namespace, value):
"""Allows user specified loggable quantities.
Args:
namespace (tuple[str,] or str): key or nested key to determine where
to store logged quantity.
value (tuple[Callable, str] or tuple[object, str, str]): Either a
tuple with a callable and the `hoomd.logging.LoggerCategories`
object or associated string or a object with a method/property
name and category. If using a method it should not take
arguments or have defaults for all arguments.
"""
if isinstance(value, _LoggerEntry):
super().__setitem__(namespace, value)
else:
super().__setitem__(namespace, _LoggerEntry.from_tuple(value))
def __iadd__(self, obj):
"""Add quantities from object or list of objects to logger.
Adds all quantities compatible with given categories and default value.
Examples:
.. code-block:: python
logger += lj
logger += [lj, harmonic_bonds]
"""
if hasattr(obj, '__iter__'):
for o in obj:
self.add(o)
else:
self.add(obj)
return self
def __isub__(self, value):
"""Remove log entries for a list of quantities or objects.
Examples:
.. code-block:: python
logger -= ('md', 'pair', 'lj')
logger -= [('md', 'pair', 'lj', 'energy'),
('md', 'pair', 'lj', 'forces')]
logger -= lj
logger -= [lj, harmonic_bonds]
"""
if isinstance(value, str) or isinstance(value, tuple):
self.remove(quantities=value)
elif hasattr(value, '__iter__'):
for v in value:
self.__isub__(v)
else:
self.remove(obj=value)
return self
def log(self):
"""Get a nested dictionary of the current values for logged quantities.
The nested dictionary consist of one level for each element of a
namespace. The logged value and category for the namespace ``('example',
'namespace')`` would be accessible in the returned dictionary via
``logger.log()['example']['namespace']``.
Returns:
dict: A nested dictionary of the current logged quantities. The end
values are (value, category) pairs which hold the value along
with its associated `hoomd.logging.LoggerCategories` category
represented as a string (to get the
`hoomd.logging.LoggerCategories` enum value use
``LoggerCategories[category]``.
"""
return dict_map(self._dict, lambda x: x())
def _contains_obj(self, namespace, obj):
"""Evaluates based on identity."""
return self._unsafe_getitem(namespace).obj is obj
@staticmethod
def _wrap_quantity(quantity):
"""Handles wrapping strings and tuples for iterating over namespaces."""
if isinstance(quantity, (str, tuple)):
return [quantity]
else:
return quantity
def __eq__(self, other):
"""Check for equality."""
if not isinstance(other, type(self)):
return NotImplemented
return (self.categories == other.categories
and self.only_default == other.only_default
and self._dict == other._dict)
|
from osgeo import gdal
import numpy as np
import subprocess
import datetime
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
# Calculates a range of tile statistics
def create_tile_statistics(tile):
    """Compute summary statistics for one raster tile and append them as one
    CSV-style line to a dated tile-stats text file.

    Also writes a ``*_value_per_pixel.tif`` raster converting per-hectare
    values to per-pixel values via gdal_calc.py.

    NOTE: Python 2 syntax (print statements); will not run under Python 3
    without conversion.

    Args:
        tile: file name of the raster tile to analyze.
    """
    tile_stats = '{}_{}'.format(uu.date_today, cn.tile_stats_pattern)
    # Extracts the tile id from the full tile name
    tile_id = uu.get_tile_id(tile)
    print "Calculating tile statistics for {0}, tile id {1}...".format(tile, tile_id)
    # start time
    start = datetime.datetime.now()
    # Source: http://gis.stackexchange.com/questions/90726
    # Opens raster we're getting statistics on
    focus_tile = gdal.Open(tile)
    nodata = uu.get_raster_nodata_value(tile)
    print "NoData value =", nodata
    # Turns the raster into a numpy array
    tile_array = np.array(focus_tile.GetRasterBand(1).ReadAsArray())
    # Flattens the numpy array to a single dimension
    tile_array_flat = tile_array.flatten()
    # Removes NoData values from the array. NoData are generally either 0 or -9999.
    tile_array_flat_mask = tile_array_flat[tile_array_flat != nodata]
    ### For converting value/hectare to value/pixel
    # Tile with the area of each pixel in m2
    area_tile = '{0}_{1}.tif'.format(cn.pattern_pixel_area, tile_id)
    # Output file name
    tile_short = tile[:-4]
    outname = '{0}_value_per_pixel.tif'.format(tile_short)
    # Equation argument for converting emissions from per hectare to per pixel.
    # First, multiplies the per hectare emissions by the area of the pixel in m2, then divides by the number of m2 in a hectare.
    calc = '--calc=A*B/{}'.format(cn.m2_per_ha)
    # Argument for outputting file
    out = '--outfile={}'.format(outname)
    print "Converting {} from /ha to /pixel...".format(tile)
    cmd = ['gdal_calc.py', '-A', tile, '-B', area_tile, calc, out, '--NoDataValue=0', '--co', 'COMPRESS=LZW',
           '--overwrite']
    subprocess.check_call(cmd)
    print "{} converted to /pixel".format(tile)
    print "Converting value/pixel tile {} to numpy array...".format(tile)
    # Opens raster with value per pixel
    value_per_pixel = gdal.Open(outname)
    # Turns the pixel area raster into a numpy array
    value_per_pixel_array = np.array(value_per_pixel.GetRasterBand(1).ReadAsArray())
    # Flattens the pixel area numpy array to a single dimension
    value_per_pixel_array_flat = value_per_pixel_array.flatten()
    print "Converted {} to numpy array".format(tile)
    # Empty statistics list
    stats = [None] * 13
    # Calculates basic tile info
    stats[0] = tile_id
    stats[1] = tile[9:-4]  # NOTE(review): assumes a 9-char prefix and ".tif" suffix in the tile name -- confirm naming scheme
    stats[2] = tile
    stats[3] = tile_array_flat_mask.size
    # If there are no pixels with values in the tile (as determined by the length of the array when NoData values are removed),
    # the statistics are all N/A.
    if stats[3] == 0:
        stats[4] = "N/A"
        stats[5] = "N/A"
        stats[6] = "N/A"
        stats[7] = "N/A"
        stats[8] = "N/A"
        stats[9] = "N/A"
        stats[10] = "N/A"
        stats[11] = "N/A"
        stats[12] = "N/A"
    # If there are pixels with values in the tile, the following statistics are calculated
    else:
        stats[4] = np.mean(tile_array_flat_mask, dtype=np.float64)
        stats[5] = np.median(tile_array_flat_mask)
        stats[6] = np.percentile(tile_array_flat_mask, 10)
        stats[7] = np.percentile(tile_array_flat_mask, 25)
        stats[8] = np.percentile(tile_array_flat_mask, 75)
        stats[9] = np.percentile(tile_array_flat_mask, 90)
        stats[10] = np.amin(tile_array_flat_mask)
        stats[11] = np.amax(tile_array_flat_mask)
        stats[12] = np.sum(value_per_pixel_array_flat)
    stats_no_brackets = ', '.join(map(str, stats))
    print stats_no_brackets
    # Adds the tile's statistics to the txt file
    with open(tile_stats, 'a+') as f:
        f.write(stats_no_brackets + '\r\n')
    f.close()  # NOTE(review): redundant -- the with block already closed f
    # Prints information about the tile that was just processed
    uu.end_of_fx_summary(start, tile_id, 'value_per_pixel.tif')
Generate net flux tile statistics (standard model, v1.1.2) for tiles 80N_170W and 80N_170E, which had not been generated before.
from osgeo import gdal
import numpy as np
import subprocess
import datetime
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
# Calculates a range of tile statistics
def create_tile_statistics(tile):
    """Compute summary statistics for one raster tile and append them as one
    CSV-style line to a dated tile-stats text file.

    Duplicate snapshot of the earlier copy in this file, with extra debug
    printing of the tile name and id.

    NOTE: Python 2 syntax (print statements); will not run under Python 3
    without conversion.

    Args:
        tile: file name of the raster tile to analyze.
    """
    tile_stats = '{}_{}'.format(uu.date_today, cn.tile_stats_pattern)
    # Extracts the tile id from the full tile name
    tile_id = uu.get_tile_id(tile)
    print tile  # debug output
    print tile_id  # debug output
    print "Calculating tile statistics for {0}, tile id {1}...".format(tile, tile_id)
    # start time
    start = datetime.datetime.now()
    # Source: http://gis.stackexchange.com/questions/90726
    # Opens raster we're getting statistics on
    focus_tile = gdal.Open(tile)
    nodata = uu.get_raster_nodata_value(tile)
    print "NoData value =", nodata
    # Turns the raster into a numpy array
    tile_array = np.array(focus_tile.GetRasterBand(1).ReadAsArray())
    # Flattens the numpy array to a single dimension
    tile_array_flat = tile_array.flatten()
    # Removes NoData values from the array. NoData are generally either 0 or -9999.
    tile_array_flat_mask = tile_array_flat[tile_array_flat != nodata]
    ### For converting value/hectare to value/pixel
    # Tile with the area of each pixel in m2
    area_tile = '{0}_{1}.tif'.format(cn.pattern_pixel_area, tile_id)
    # Output file name
    tile_short = tile[:-4]
    outname = '{0}_value_per_pixel.tif'.format(tile_short)
    # Equation argument for converting emissions from per hectare to per pixel.
    # First, multiplies the per hectare emissions by the area of the pixel in m2, then divides by the number of m2 in a hectare.
    calc = '--calc=A*B/{}'.format(cn.m2_per_ha)
    # Argument for outputting file
    out = '--outfile={}'.format(outname)
    print "Converting {} from /ha to /pixel...".format(tile)
    cmd = ['gdal_calc.py', '-A', tile, '-B', area_tile, calc, out, '--NoDataValue=0', '--co', 'COMPRESS=LZW',
           '--overwrite']
    subprocess.check_call(cmd)
    print "{} converted to /pixel".format(tile)
    print "Converting value/pixel tile {} to numpy array...".format(tile)
    # Opens raster with value per pixel
    value_per_pixel = gdal.Open(outname)
    # Turns the pixel area raster into a numpy array
    value_per_pixel_array = np.array(value_per_pixel.GetRasterBand(1).ReadAsArray())
    # Flattens the pixel area numpy array to a single dimension
    value_per_pixel_array_flat = value_per_pixel_array.flatten()
    print "Converted {} to numpy array".format(tile)
    # Empty statistics list
    stats = [None] * 13
    # Calculates basic tile info
    stats[0] = tile_id
    stats[1] = tile[9:-4]  # NOTE(review): assumes a 9-char prefix and ".tif" suffix in the tile name -- confirm naming scheme
    stats[2] = tile
    stats[3] = tile_array_flat_mask.size
    # If there are no pixels with values in the tile (as determined by the length of the array when NoData values are removed),
    # the statistics are all N/A.
    if stats[3] == 0:
        stats[4] = "N/A"
        stats[5] = "N/A"
        stats[6] = "N/A"
        stats[7] = "N/A"
        stats[8] = "N/A"
        stats[9] = "N/A"
        stats[10] = "N/A"
        stats[11] = "N/A"
        stats[12] = "N/A"
    # If there are pixels with values in the tile, the following statistics are calculated
    else:
        stats[4] = np.mean(tile_array_flat_mask, dtype=np.float64)
        stats[5] = np.median(tile_array_flat_mask)
        stats[6] = np.percentile(tile_array_flat_mask, 10)
        stats[7] = np.percentile(tile_array_flat_mask, 25)
        stats[8] = np.percentile(tile_array_flat_mask, 75)
        stats[9] = np.percentile(tile_array_flat_mask, 90)
        stats[10] = np.amin(tile_array_flat_mask)
        stats[11] = np.amax(tile_array_flat_mask)
        stats[12] = np.sum(value_per_pixel_array_flat)
    stats_no_brackets = ', '.join(map(str, stats))
    print stats_no_brackets
    # Adds the tile's statistics to the txt file
    with open(tile_stats, 'a+') as f:
        f.write(stats_no_brackets + '\r\n')
    f.close()  # NOTE(review): redundant -- the with block already closed f
    # Prints information about the tile that was just processed
    uu.end_of_fx_summary(start, tile_id, 'value_per_pixel.tif')
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
import mock
from socorro.external.hb import connection_context
from socorro.lib.util import SilentFakeLogger, DotDict
from socorro.database.transaction_executor import TransactionExecutor
from configman import Namespace
from hbase import ttypes
from thrift import Thrift
from socket import timeout, error
class FakeHB_Connection(object):
    """Stand-in for an HBase connection that counts lifecycle calls."""

    def __init__(self, config):
        self.hbaseThriftExceptions = (error,)
        # Counters recording how often each lifecycle method was invoked.
        for counter_name in ('close_counter', 'commit_counter',
                             'rollback_counter'):
            setattr(self, counter_name, 0)

    def close(self):
        """Record one close() call."""
        self.close_counter += 1

    def commit(self):
        """Record one commit() call."""
        self.commit_counter += 1

    def rollback(self):
        """Record one rollback() call."""
        self.rollback_counter += 1
class TestConnectionContext(unittest.TestCase):
    """Tests for the unpooled HBaseConnectionContext.

    Each checkout opens and then closes a fresh connection, so the fake
    connection's close counter advances once per use.
    """

    def test_basic_hbase_usage(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        # Patch the connection factory so the context always gets the fake.
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBaseConnectionContext(
                local_config
            )
            # open a connection
            with hb_context() as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                1
            )
            # open another connection connection again
            with hb_context() as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )
            # get a named connection
            with hb_context('fred') as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                3
            )
            # close all connections
            hb_context.close()
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                3
            )

    def test_hbase_usage_with_transaction(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBaseConnectionContext(
                local_config
            )
            def all_ok(connection, dummy):
                self.assertEqual(dummy, 'hello')
                return True
            transaction = TransactionExecutor(local_config, hb_context)
            result = transaction(all_ok, 'hello')
            self.assertTrue(result)
            # A successful transaction commits once and closes its connection.
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                1
            )
            self.assertEqual(
                a_fake_hbase_connection.rollback_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            def bad_deal(connection, dummy):
                raise KeyError('fred')
            # A failing transaction must not commit; the connection is still closed.
            self.assertRaises(KeyError, transaction, bad_deal, 'hello')
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            hb_context.close()
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )
class TestHBasePooledConnectionContext(unittest.TestCase):
    """Tests for the pooled HBaseConnectionContext.

    Checkouts are returned to the pool instead of closed, so the fake
    connection's close counter stays at 0 during use and only advances
    when the pool itself is closed.
    """

    def test_basic_hbase_usage(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBasePooledConnectionContext(
                local_config
            )
            # open a connection
            with hb_context() as conn:
                pass
            # pooled: not closed on release
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            # open another connection connection again
            with hb_context() as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            # get a named connection
            with hb_context('fred') as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            # close all connections
            hb_context.close()
            # NOTE(review): the expected value 2 presumably reflects one
            # pooled connection per name (default and 'fred') -- confirm
            # against the pool implementation.
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )

    def test_hbase_usage_with_transaction(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBasePooledConnectionContext(
                local_config
            )
            def all_ok(connection, dummy):
                self.assertEqual(dummy, 'hello')
                return True
            transaction = TransactionExecutor(local_config, hb_context)
            result = transaction(all_ok, 'hello')
            self.assertTrue(result)
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.rollback_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            def bad_deal(connection, dummy):
                raise KeyError('fred')
            self.assertRaises(KeyError, transaction, bad_deal, 'hello')
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            hb_context.close()
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
Removed a duplicated word ("connection connection") from the comments.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
import mock
from socorro.external.hb import connection_context
from socorro.lib.util import SilentFakeLogger, DotDict
from socorro.database.transaction_executor import TransactionExecutor
from configman import Namespace
from hbase import ttypes
from thrift import Thrift
from socket import timeout, error
class FakeHB_Connection(object):
    """Stand-in for an HBase connection that counts lifecycle calls."""

    def __init__(self, config):
        self.hbaseThriftExceptions = (error,)
        # Counters recording how often each lifecycle method was invoked.
        for counter_name in ('close_counter', 'commit_counter',
                             'rollback_counter'):
            setattr(self, counter_name, 0)

    def close(self):
        """Record one close() call."""
        self.close_counter += 1

    def commit(self):
        """Record one commit() call."""
        self.commit_counter += 1

    def rollback(self):
        """Record one rollback() call."""
        self.rollback_counter += 1
class TestConnectionContext(unittest.TestCase):
    """Tests for the unpooled HBaseConnectionContext (post-edit snapshot;
    duplicate of the earlier copy in this file with a comment typo fixed).

    Each checkout opens and then closes a fresh connection, so the fake
    connection's close counter advances once per use.
    """

    def test_basic_hbase_usage(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        # Patch the connection factory so the context always gets the fake.
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBaseConnectionContext(
                local_config
            )
            # open a connection
            with hb_context() as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                1
            )
            # open another connection again
            with hb_context() as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )
            # get a named connection
            with hb_context('fred') as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                3
            )
            # close all connections
            hb_context.close()
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                3
            )

    def test_hbase_usage_with_transaction(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBaseConnectionContext(
                local_config
            )
            def all_ok(connection, dummy):
                self.assertEqual(dummy, 'hello')
                return True
            transaction = TransactionExecutor(local_config, hb_context)
            result = transaction(all_ok, 'hello')
            self.assertTrue(result)
            # A successful transaction commits once and closes its connection.
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                1
            )
            self.assertEqual(
                a_fake_hbase_connection.rollback_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            def bad_deal(connection, dummy):
                raise KeyError('fred')
            # A failing transaction must not commit; the connection is still closed.
            self.assertRaises(KeyError, transaction, bad_deal, 'hello')
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            hb_context.close()
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )
class TestHBasePooledConnectionContext(unittest.TestCase):
    """Tests for the pooled HBaseConnectionContext (post-edit snapshot;
    duplicate of the earlier copy in this file with a comment typo fixed).

    Checkouts are returned to the pool instead of closed, so the fake
    connection's close counter stays at 0 during use and only advances
    when the pool itself is closed.
    """

    def test_basic_hbase_usage(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBasePooledConnectionContext(
                local_config
            )
            # open a connection
            with hb_context() as conn:
                pass
            # pooled: not closed on release
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            # open another connection again
            with hb_context() as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            # get a named connection
            with hb_context('fred') as conn:
                pass
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            # close all connections
            hb_context.close()
            # NOTE(review): the expected value 2 presumably reflects one
            # pooled connection per name (default and 'fred') -- confirm
            # against the pool implementation.
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                2
            )

    def test_hbase_usage_with_transaction(self):
        local_config = DotDict({
            'hbase_host': 'host',
            'database_name': 'name',
            'hbase_port': 9090,
            'hbase_timeout': 9000,
            'number_of_retries': 2,
            'logger': SilentFakeLogger(),
        })
        a_fake_hbase_connection = FakeHB_Connection(local_config)
        with mock.patch.object(connection_context, 'HBaseConnection',
                               mock.Mock(return_value=a_fake_hbase_connection)):
            hb_context = connection_context.HBasePooledConnectionContext(
                local_config
            )
            def all_ok(connection, dummy):
                self.assertEqual(dummy, 'hello')
                return True
            transaction = TransactionExecutor(local_config, hb_context)
            result = transaction(all_ok, 'hello')
            self.assertTrue(result)
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.rollback_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            def bad_deal(connection, dummy):
                raise KeyError('fred')
            self.assertRaises(KeyError, transaction, bad_deal, 'hello')
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
            self.assertEqual(
                a_fake_hbase_connection.commit_counter,
                1
            )
            hb_context.close()
            self.assertEqual(
                a_fake_hbase_connection.close_counter,
                0
            )
|
"""UNIX event loop and related classes.
The event loop can be broken up into a selector (the part responsible
for telling us when file descriptors are ready) and the event loop
proper, which wraps a selector with functionality for scheduling
callbacks, immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import errno
import heapq
import logging
import select
import socket
import ssl
import sys
import threading
import time
try:
import signal
except ImportError:
signal = None
from . import events
from . import futures
from . import protocols
from . import selectors
from . import tasks
from . import transports
try:
from socket import socketpair
except ImportError:
assert sys.platform == 'win32'
from .winsocketpair import socketpair
# Errno values indicating the connection was disconnected.
_DISCONNECTED = frozenset((errno.ECONNRESET,
errno.ENOTCONN,
errno.ESHUTDOWN,
errno.ECONNABORTED,
errno.EPIPE,
errno.EBADF,
))
# Errno values indicating the socket isn't ready for I/O just yet.
_TRYAGAIN = frozenset((errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS))
if sys.platform == 'win32':
_TRYAGAIN = frozenset(list(_TRYAGAIN) + [errno.WSAEWOULDBLOCK])
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
class _StopError(BaseException):
"""Raised to stop the event loop."""
def _raise_stop_error():
    """Callback scheduled on the loop to make it stop."""
    raise _StopError
class UnixEventLoop(events.EventLoop):
"""Unix event loop.
See events.EventLoop for API specification.
"""
    def __init__(self, selector=None):
        """Initialize the loop, choosing a platform selector if none given.

        Args:
            selector: an I/O selector; defaults to the best available
                ``selectors.Selector()`` for this platform.
        """
        super().__init__()
        if selector is None:
            # pick the best selector class for the platform
            selector = selectors.Selector()
            logging.info('Using selector: %s', selector.__class__.__name__)
        self._selector = selector
        self._ready = collections.deque()  # callbacks runnable now (FIFO)
        self._scheduled = []  # heap of time-scheduled handlers
        self._everytime = []  # handlers run on every loop iteration
        self._default_executor = None
        self._signal_handlers = {}
        self._make_self_pipe()
def close(self):
if self._selector is not None:
self._selector.close()
self._selector = None
    def _make_self_pipe(self):
        """Create the socketpair used to wake a selector blocked in poll."""
        # A self-socket, really. :-)
        self._ssock, self._csock = socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        # Reading end is watched by the loop; writing to _csock wakes it.
        self.add_reader(self._ssock.fileno(), self._read_from_self)
def _read_from_self(self):
try:
self._ssock.recv(1)
except socket.error as exc:
if exc in _TRYAGAIN:
return
raise # Halp!
def _write_to_self(self):
try:
self._csock.send(b'x')
except socket.error as exc:
if exc in _TRYAGAIN:
return
raise # Halp!
def run(self):
"""Run the event loop until nothing left to do or stop() called.
This keeps going as long as there are either readable and
writable file descriptors, or scheduled callbacks (of either
variety).
TODO: Give this a timeout too?
"""
while (self._ready or
self._scheduled or
self._selector.registered_count() > 1):
try:
self._run_once()
except _StopError:
break
def run_forever(self):
"""Run until stop() is called.
This only makes sense over run() if you have another thread
scheduling callbacks using call_soon_threadsafe().
"""
handler = self.call_repeatedly(24*3600, lambda: None)
try:
self.run()
finally:
handler.cancel()
def run_once(self, timeout=None):
"""Run through all callbacks and all I/O polls once.
Calling stop() will break out of this too.
"""
try:
self._run_once(timeout)
except _StopError:
pass
    def run_until_complete(self, future, timeout=None):
        """Run until the Future is done, or until a timeout.

        Return the Future's result, or raise its exception. If the
        timeout is reached or stop() is called, raise TimeoutError.
        """
        if timeout is None:
            timeout = 0x7fffffff/1000.0  # 24 days
        # Completion of the future stops the loop ...
        future.add_done_callback(lambda _: self.stop())
        # ... and so does the timeout expiring.
        handler = self.call_later(timeout, _raise_stop_error)
        self.run()
        handler.cancel()
        if future.done():
            return future.result()  # May raise future.exception().
        else:
            raise futures.TimeoutError
def stop(self):
"""Stop running the event loop.
Every callback scheduled before stop() is called will run.
Callback scheduled after stop() is called won't. However,
those callbacks will run if run() is called again later.
"""
self.call_soon(_raise_stop_error)
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
Return an object with a cancel() method that can be used to
cancel the call.
The delay can be an int or float, expressed in seconds. It is
always a relative time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it undefined which
will be called first.
Callbacks scheduled in the past are passed on to call_soon(),
so these will be called in the order in which they were
registered rather than by time due. This is so you can't
cheat and insert yourself at the front of the ready queue by
using a negative time.
Any positional arguments after the callback will be passed to
the callback when it is called.
# TODO: Should delay is None be interpreted as Infinity?
"""
if delay <= 0:
return self.call_soon(callback, *args)
handler = events.make_handler(time.monotonic() + delay, callback, args)
heapq.heappush(self._scheduled, handler)
return handler
    def call_repeatedly(self, interval, callback, *args):
        """Call a callback every 'interval' seconds.

        Returns a handler; cancelling it stops future repetitions.
        """
        def wrapper():
            callback(*args)  # If this fails, the chain is broken.
            # Reschedule by mutating the same handler's deadline and pushing
            # it back onto the heap; cancellation state is thereby preserved.
            handler._when = time.monotonic() + interval
            heapq.heappush(self._scheduled, handler)
        handler = events.make_handler(time.monotonic() + interval, wrapper, ())
        heapq.heappush(self._scheduled, handler)
        return handler
def call_soon(self, callback, *args):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue, callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
handler = events.make_handler(None, callback, args)
self._ready.append(handler)
return handler
def call_soon_threadsafe(self, callback, *args):
"""XXX"""
handler = self.call_soon(callback, *args)
self._write_to_self()
return handler
def call_every_iteration(self, callback, *args):
"""Call a callback just before the loop blocks.
The callback is called for every iteration of the loop.
"""
handler = events.make_handler(None, callback, args)
self._everytime.append(handler)
return handler
    def wrap_future(self, future):
        """Wrap a concurrent.futures.Future in this package's Future.

        Completion is forwarded via call_soon_threadsafe() because
        executor callbacks may fire in a worker thread.
        """
        if isinstance(future, futures.Future):
            return future  # Don't wrap our own type of Future.
        new_future = futures.Future()
        future.add_done_callback(
            lambda future:
            self.call_soon_threadsafe(new_future._copy_state, future))
        return new_future
    def run_in_executor(self, executor, callback, *args):
        """Run ``callback(*args)`` in an executor; return a wrapped Future.

        ``callback`` may be a Handler (with no extra args), in which case
        its stored callback/args are unpacked; a cancelled Handler yields
        an already-completed Future with result None. A ``None`` executor
        selects (lazily creating) the default thread pool.
        """
        if isinstance(callback, events.Handler):
            assert not args
            assert callback.when is None
            if callback.cancelled:
                f = futures.Future()
                f.set_result(None)
                return f
            callback, args = callback.callback, callback.args
        if executor is None:
            executor = self._default_executor
            if executor is None:
                # Lazily create the shared default thread pool.
                executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
                self._default_executor = executor
        return self.wrap_future(executor.submit(callback, *args))
def set_default_executor(self, executor):
self._default_executor = executor
def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
return self.run_in_executor(None, socket.getaddrinfo,
host, port, family, type, proto, flags)
def getnameinfo(self, sockaddr, flags=0):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
# TODO: Or create_connection()? Or create_client()?
@tasks.task
def create_transport(self, protocol_factory, host, port, *, ssl=False,
family=0, type=socket.SOCK_STREAM, proto=0, flags=0):
"""Connect to (host, port) and return (transport, protocol).
Resolves the address, then tries each returned address until a
connect succeeds; if every attempt fails, raises one socket.error
(or a combined one when the messages differ).  With ssl true (or
an SSLContext), an SSL transport is created and this task waits
for the handshake to finish before returning.
"""
infos = yield from self.getaddrinfo(host, port,
family=family, type=type,
proto=proto, flags=flags)
if not infos:
raise socket.error('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
sock = None
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
# NOTE(review): bare ``yield`` (not ``yield from``) of the
# connect future; presumably the tasks scheduler accepts a
# yielded future -- confirm against the tasks module.
yield self.sock_connect(sock, address)
except socket.error as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
else:
break
else:
# Loop exhausted without a successful connect.
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise socket.error('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
protocol = protocol_factory()
if ssl:
# ssl may be a bool (use a default context) or an SSLContext.
sslcontext = None if isinstance(ssl, bool) else ssl
waiter = futures.Future()
transport = _UnixSslTransport(self, sock, protocol, sslcontext,
waiter)
# The SSL transport completes `waiter` once the handshake is done.
yield from waiter
else:
transport = _UnixSocketTransport(self, sock, protocol)
return transport, protocol
# TODO: Or create_server()?
@tasks.task
def start_serving(self, protocol_factory, host, port, *,
family=0, type=socket.SOCK_STREAM, proto=0, flags=0,
backlog=100, fastopen=5):
"""Start listening on (host, port); return the listening socket.
Binds to the first address getaddrinfo() yields that bind()s
successfully, optionally enables TCP_FASTOPEN, listens, and
registers _accept_connection() as the read callback so each
incoming connection gets a fresh protocol/transport pair.
"""
infos = yield from self.getaddrinfo(host, port,
family=family, type=type,
proto=proto, flags=flags)
if not infos:
raise socket.error('getaddrinfo() returned empty list')
# TODO: Maybe we want to bind every address in the list
# instead of the first one that works?
exceptions = []
for family, type, proto, cname, address in infos:
sock = socket.socket(family=family, type=type, proto=proto)
try:
sock.bind(address)
except socket.error as exc:
sock.close()
exceptions.append(exc)
else:
break
else:
# No address bound; report the first failure.
raise exceptions[0]
if fastopen and hasattr(socket, 'TCP_FASTOPEN'):
try:
sock.setsockopt(socket.SOL_TCP, socket.TCP_FASTOPEN, fastopen)
except socket.error:
# Even if TCP_FASTOPEN is defined by glibc, it may
# still not be supported by the kernel.
logging.info('TCP_FASTOPEN(%r) failed', fastopen)
sock.listen(backlog)
sock.setblocking(False)
self.add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock)
return sock
def _accept_connection(self, protocol_factory, sock):
    """Read-ready callback for a listening socket: accept one connection.

    Registered by start_serving() via add_reader().  On a transient
    error (EAGAIN and friends) this is a no-op; on a hard error the
    listening socket is closed and serving stops.  On success a new
    protocol instance is created and handed a _UnixSocketTransport,
    which drives the connection from then on.
    """
    try:
        conn, addr = sock.accept()
    except socket.error as exc:
        # BUG FIX: the original tested ``exc in _TRYAGAIN``, but
        # _TRYAGAIN is a set of errno *numbers*; the membership test
        # must use ``exc.errno`` (as every other _TRYAGAIN check in
        # this file does), otherwise transient accept errors were
        # treated as fatal and tore down the listening socket.
        if exc.errno in _TRYAGAIN:
            return  # False alarm.
        # Bad error. Stop serving.
        self.remove_reader(sock.fileno())
        sock.close()
        # There's nowhere to send the error, so just log it.
        # TODO: Someone will want an error handler for this.
        logging.exception('Accept failed')
        return
    protocol = protocol_factory()
    transport = _UnixSocketTransport(self, conn, protocol)
    # It's now up to the protocol to handle the connection.
def add_reader(self, fd, callback, *args):
"""Add a reader callback. Return a Handler instance."""
# The selector stores per-fd data as a (reader, writer, connector)
# handler triple; the six methods below maintain that triple and
# the corresponding event mask.
handler = events.make_handler(None, callback, args)
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
# fd not yet registered: register with only the IN bit.
self._selector.register(fd, selectors.SELECT_IN,
(handler, None, None))
else:
# Already registered: add the IN bit, replace the reader slot.
self._selector.modify(fd, mask | selectors.SELECT_IN,
(handler, writer, connector))
return handler
def remove_reader(self, fd):
"""Remove a reader callback."""
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
# Removing an unknown fd is a no-op.
pass
else:
mask &= ~selectors.SELECT_IN
if not mask:
# Last interest bit gone: unregister the fd entirely.
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (None, writer, connector))
def add_writer(self, fd, callback, *args):
"""Add a writer callback. Return a Handler instance."""
handler = events.make_handler(None, callback, args)
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
self._selector.register(fd, selectors.SELECT_OUT,
(None, handler, None))
else:
self._selector.modify(fd, mask | selectors.SELECT_OUT,
(reader, handler, connector))
return handler
def remove_writer(self, fd):
"""Remove a writer callback."""
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
pass
else:
mask &= ~selectors.SELECT_OUT
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (reader, None, connector))
def add_connector(self, fd, callback, *args):
"""Add a connector callback. Return a Handler instance."""
# XXX As long as SELECT_CONNECT == SELECT_OUT, set the handler
# as both writer and connector.
handler = events.make_handler(None, callback, args)
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
self._selector.register(fd, selectors.SELECT_CONNECT,
(None, handler, handler))
else:
self._selector.modify(fd, mask | selectors.SELECT_CONNECT,
(reader, handler, handler))
return handler
def remove_connector(self, fd):
"""Remove a connector callback."""
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
pass
else:
mask &= ~selectors.SELECT_CONNECT
if not mask:
self._selector.unregister(fd)
else:
# NOTE(review): this clears the writer slot too, mirroring
# add_connector() setting both slots -- confirm intended.
self._selector.modify(fd, mask, (reader, None, None))
def sock_recv(self, sock, n):
"""Receive up to n bytes from sock.
Returns a Future whose result is the bytes received (b'' on EOF).
"""
fut = futures.Future()
self._sock_recv(fut, False, sock, n)
return fut
def _sock_recv(self, fut, registered, sock, n):
# Internal: first call has registered=False; on EAGAIN we register
# ourselves as the fd's read callback and retry when ready.
fd = sock.fileno()
if registered:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_reader(fd)
if fut.cancelled():
return
try:
data = sock.recv(n)
fut.set_result(data)
except socket.error as exc:
if exc.errno not in _TRYAGAIN:
fut.set_exception(exc)
else:
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
def sock_sendall(self, sock, data):
"""Send all of data to sock.
Returns a Future whose result is None once everything was sent.
"""
fut = futures.Future()
self._sock_sendall(fut, False, sock, data)
return fut
def _sock_sendall(self, fut, registered, sock, data):
# Internal: sends what it can, re-registers with the remaining
# tail of `data` until the send is complete.
fd = sock.fileno()
if registered:
self.remove_writer(fd)
if fut.cancelled():
return
n = 0
try:
if data:
n = sock.send(data)
except socket.error as exc:
if exc.errno not in _TRYAGAIN:
fut.set_exception(exc)
return
if n == len(data):
fut.set_result(None)
else:
if n:
data = data[n:]
self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
def sock_connect(self, sock, address):
"""Connect sock to address.  Returns a Future (result None)."""
# That address better not require a lookup! We're not calling
# self.getaddrinfo() for you here. But verifying this is
# complicated; the socket module doesn't have a pattern for
# IPv6 addresses (there are too many forms, apparently).
fut = futures.Future()
self._sock_connect(fut, False, sock, address)
return fut
def _sock_connect(self, fut, registered, sock, address):
fd = sock.fileno()
if registered:
self.remove_connector(fd)
if fut.cancelled():
return
try:
if not registered:
# First time around.
sock.connect(address)
else:
# Connect finished (or failed): read the outcome from
# SO_ERROR, per the usual non-blocking connect dance.
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# Jump to the except clause below.
raise socket.error(err, 'Connect call failed')
fut.set_result(None)
except socket.error as exc:
if exc.errno not in _TRYAGAIN:
fut.set_exception(exc)
else:
self.add_connector(fd, self._sock_connect,
fut, True, sock, address)
def sock_accept(self, sock):
"""Accept a connection on sock.
Returns a Future whose result is a (conn, address) pair; the
accepted socket is set non-blocking.
"""
fut = futures.Future()
self._sock_accept(fut, False, sock)
return fut
def _sock_accept(self, fut, registered, sock):
fd = sock.fileno()
if registered:
self.remove_reader(fd)
if fut.cancelled():
return
try:
conn, address = sock.accept()
conn.setblocking(False)
fut.set_result((conn, address))
except socket.error as exc:
if exc.errno not in _TRYAGAIN:
fut.set_exception(exc)
else:
self.add_reader(fd, self._sock_accept, fut, True, sock)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
self._check_signal(sig)
try:
# set_wakeup_fd() raises ValueError if this is not the
# main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except ValueError as exc:
raise RuntimeError(str(exc))
handler = events.make_handler(None, callback, args)
self._signal_handlers[sig] = handler
try:
signal.signal(sig, self._handle_signal)
except OSError as exc:
# Roll back: drop the table entry, and if no handlers remain,
# also detach the wakeup fd.
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except ValueError as nexc:
logging.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
return handler
def _handle_signal(self, sig, arg):
"""Internal helper that is the actual signal handler."""
handler = self._signal_handlers.get(sig)
if handler is None:
return # Assume it's some race condition.
if handler.cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
# Defer the real callback to the loop thread.
self.call_soon_threadsafe(handler.callback, *handler.args)
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not."""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
# Restore the default disposition (KeyboardInterrupt for SIGINT).
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except ValueError as exc:
logging.info('set_wakeup_fd(-1) failed: %s', exc)
return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
# `signal` is None when the module failed to import (see top of file).
if signal is None:
raise RuntimeError('Signals are not supported')
if not (1 <= sig < signal.NSIG):
raise ValueError('sig {} out of range(1, {})'.format(sig,
signal.NSIG))
def _add_callback(self, handler):
"""Add a Handler to ready or scheduled."""
if handler.cancelled:
return
# when=None means "run ASAP"; otherwise it's a timestamp for the heap.
if handler.when is None:
self._ready.append(handler)
else:
heapq.heappush(self._scheduled, handler)
def _run_once(self, timeout=None):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
# TODO: Break each of these into smaller pieces.
# TODO: Refactor to separate the callbacks from the readers/writers.
# TODO: An alternative API would be to do the *minimal* amount
# of work, e.g. one callback or one I/O poll.
# Add everytime handlers, skipping cancelled ones.
any_cancelled = False
for handler in self._everytime:
if handler.cancelled:
any_cancelled = True
else:
self._ready.append(handler)
# Remove cancelled everytime handlers if there are any.
if any_cancelled:
self._everytime = [handler for handler in self._everytime
if not handler.cancelled]
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0].cancelled:
heapq.heappop(self._scheduled)
# Inspect the poll queue. If there's exactly one selectable
# file descriptor, it's the self-pipe, and if there's nothing
# scheduled, we should ignore it.
if self._selector.registered_count() > 1 or self._scheduled:
# Ready work means poll without blocking; otherwise block
# until the earliest scheduled handler is due.
if self._ready:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0].when
deadline = max(0, when - time.monotonic())
if timeout is None:
timeout = deadline
else:
timeout = min(timeout, deadline)
t0 = time.monotonic()
event_list = self._selector.select(timeout)
t1 = time.monotonic()
argstr = '' if timeout is None else ' %.3f' % timeout
# Log slow polls (>= 1s) at INFO, the rest at DEBUG.
if t1-t0 >= 1:
level = logging.INFO
else:
level = logging.DEBUG
logging.log(level, 'poll%s took %.3f seconds', argstr, t1-t0)
for fileobj, mask, (reader, writer, connector) in event_list:
if mask & selectors.SELECT_IN and reader is not None:
self._add_callback(reader)
# NOTE(review): since SELECT_CONNECT may equal SELECT_OUT
# (see add_connector), this elif makes the writer win when
# both slots are set -- confirm that is intended.
if mask & selectors.SELECT_OUT and writer is not None:
self._add_callback(writer)
elif mask & selectors.SELECT_CONNECT and connector is not None:
self._add_callback(connector)
# Handle 'later' callbacks that are ready.
now = time.monotonic()
while self._scheduled:
handler = self._scheduled[0]
if handler.when > now:
break
handler = heapq.heappop(self._scheduled)
self._ready.append(handler)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# TODO: Ensure this loop always finishes, even if some
# callbacks keeps registering more callbacks.
while self._ready:
handler = self._ready.popleft()
if not handler.cancelled:
try:
handler.callback(*handler.args)
except Exception:
# Never let a callback's exception kill the loop.
logging.exception('Exception in callback %s %r',
handler.callback, handler.args)
class _UnixSocketTransport(transports.Transport):
    """Transport over a plain (non-SSL) non-blocking socket.

    Reads are delivered to the protocol via data_received()/eof_received();
    writes are sent immediately when possible and otherwise buffered and
    flushed from the event loop's writer callback.
    """

    def __init__(self, event_loop, sock, protocol):
        self._event_loop = event_loop
        self._sock = sock
        self._protocol = protocol
        self._buffer = []  # Pending outgoing chunks (bytes objects).
        self._closing = False  # Set when close() called.
        self._event_loop.add_reader(self._sock.fileno(), self._read_ready)
        self._event_loop.call_soon(self._protocol.connection_made, self)

    def _read_ready(self):
        # Read up to 16 KiB; an empty read means EOF.
        try:
            data = self._sock.recv(16*1024)
        except socket.error as exc:
            if exc.errno not in _TRYAGAIN:
                self._fatal_error(exc)
        else:
            if data:
                self._event_loop.call_soon(self._protocol.data_received, data)
            else:
                self._event_loop.remove_reader(self._sock.fileno())
                self._event_loop.call_soon(self._protocol.eof_received)

    def write(self, data):
        assert isinstance(data, bytes)
        assert not self._closing
        if not data:
            return
        if not self._buffer:
            # Attempt to send it right away first.
            try:
                n = self._sock.send(data)
            except socket.error as exc:
                if exc.errno in _TRYAGAIN:
                    n = 0
                else:
                    self._fatal_error(exc)
                    return
            if n == len(data):
                return
            if n:
                data = data[n:]
            # BUG FIX: the original called ``self.add_writer(...)``, but
            # add_writer() is an event-loop method, not a transport
            # method -- the first short write raised AttributeError.
            self._event_loop.add_writer(self._sock.fileno(),
                                        self._write_ready)
        self._buffer.append(data)

    def _write_ready(self):
        # Flush as much of the coalesced buffer as the socket accepts.
        data = b''.join(self._buffer)
        self._buffer = []
        try:
            if data:
                n = self._sock.send(data)
            else:
                n = 0
        except socket.error as exc:
            if exc.errno in _TRYAGAIN:
                n = 0
            else:
                self._fatal_error(exc)
                return
        if n == len(data):
            # Fully drained: stop watching for writability; finish a
            # pending close() if one was requested.
            self._event_loop.remove_writer(self._sock.fileno())
            if self._closing:
                self._event_loop.call_soon(self._call_connection_lost, None)
            return
        if n:
            data = data[n:]
        self._buffer.append(data)  # Try again later.

    # TODO: write_eof(), can_write_eof().

    def abort(self):
        # Hard teardown, discarding any buffered data.
        self._fatal_error(None)

    def close(self):
        # Graceful close: stop reading now; connection_lost fires once
        # the write buffer drains (see _write_ready).
        self._closing = True
        self._event_loop.remove_reader(self._sock.fileno())
        if not self._buffer:
            self._event_loop.call_soon(self._call_connection_lost, None)

    def _fatal_error(self, exc):
        logging.exception('Fatal error for %s', self)
        self._event_loop.remove_writer(self._sock.fileno())
        self._event_loop.remove_reader(self._sock.fileno())
        self._buffer = []
        self._event_loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        # Always close the socket, even if the protocol callback raises.
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._sock.close()
class _UnixSslTransport(transports.Transport):
    """Transport over an SSL-wrapped non-blocking socket.

    The handshake is driven by _on_handshake(); once it completes, the
    waiter future passed to __init__ gets its result and the protocol's
    connection_made() is scheduled.  Writes are buffered and flushed
    from _on_ready(), which serves both readability and writability.
    """

    def __init__(self, event_loop, rawsock, protocol, sslcontext, waiter):
        self._event_loop = event_loop
        self._rawsock = rawsock
        self._protocol = protocol
        # Default to a generic client context when none was supplied.
        sslcontext = sslcontext or ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        self._sslcontext = sslcontext
        self._waiter = waiter
        sslsock = sslcontext.wrap_socket(rawsock,
                                         do_handshake_on_connect=False)
        self._sslsock = sslsock
        self._buffer = []  # Pending outgoing chunks (bytes objects).
        self._closing = False  # Set when close() called.
        self._on_handshake()

    def _on_handshake(self):
        fd = self._sslsock.fileno()
        try:
            self._sslsock.do_handshake()
        except ssl.SSLWantReadError:
            self._event_loop.add_reader(fd, self._on_handshake)
            return
        except ssl.SSLWantWriteError:
            # BUG FIX: the original called add_writable(), which does not
            # exist on the event loop; the registration method is
            # add_writer().
            self._event_loop.add_writer(fd, self._on_handshake)
            return
        except Exception as exc:
            self._sslsock.close()
            self._waiter.set_exception(exc)
            return
        except BaseException as exc:
            self._sslsock.close()
            self._waiter.set_exception(exc)
            raise
        # Handshake done: switch both callbacks to the data path.
        self._event_loop.remove_reader(fd)
        self._event_loop.remove_writer(fd)
        self._event_loop.add_reader(fd, self._on_ready)
        self._event_loop.add_writer(fd, self._on_ready)
        self._event_loop.call_soon(self._protocol.connection_made, self)
        self._waiter.set_result(None)

    def _on_ready(self):
        # Because of renegotiations (?), there's no difference between
        # readable and writable.  We just try both.  XXX This may be
        # incorrect; we probably need to keep state about what we
        # should do next.
        # Maybe we're already closed...
        fd = self._sslsock.fileno()
        if fd < 0:
            return
        # First try reading.
        try:
            data = self._sslsock.recv(8192)
        except ssl.SSLWantReadError:
            pass
        except ssl.SSLWantWriteError:
            pass
        except socket.error as exc:
            if exc.errno not in _TRYAGAIN:
                self._fatal_error(exc)
                return
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # EOF from the peer: tear down.
                # TODO: Don't close when self._buffer is non-empty.
                assert not self._buffer
                self._event_loop.remove_reader(fd)
                self._event_loop.remove_writer(fd)
                self._sslsock.close()
                self._protocol.connection_lost(None)
                return
        # Now try writing, if there's anything to write.
        if not self._buffer:
            return
        data = b''.join(self._buffer)
        self._buffer = []
        try:
            n = self._sslsock.send(data)
        except ssl.SSLWantReadError:
            # BUG FIX: the original silently dropped `data` here; put it
            # back so it is retried on the next ready event.
            self._buffer.append(data)
        except ssl.SSLWantWriteError:
            self._buffer.append(data)  # BUG FIX: ditto.
        except socket.error as exc:
            if exc.errno not in _TRYAGAIN:
                self._fatal_error(exc)
                return
            self._buffer.append(data)  # BUG FIX: ditto.
        else:
            if n < len(data):
                self._buffer.append(data[n:])

    def write(self, data):
        assert isinstance(data, bytes)
        assert not self._closing
        if not data:
            return
        self._buffer.append(data)
        # We could optimize, but the callback can do this for now.

    # TODO: write_eof(), can_write_eof().

    def abort(self):
        # Hard teardown, discarding any buffered data.
        self._fatal_error(None)

    def close(self):
        # Graceful close: stop reading; connection_lost fires right away
        # only if nothing is left to flush.
        self._closing = True
        self._event_loop.remove_reader(self._sslsock.fileno())
        if not self._buffer:
            self._event_loop.call_soon(self._protocol.connection_lost, None)

    def _fatal_error(self, exc):
        logging.exception('Fatal error for %s', self)
        self._event_loop.remove_writer(self._sslsock.fileno())
        self._event_loop.remove_reader(self._sslsock.fileno())
        self._buffer = []
        self._event_loop.call_soon(self._protocol.connection_lost, exc)
Fix typos in _TRYAGAIN tests.
"""UNIX event loop and related classes.
The event loop can be broken up into a selector (the part responsible
for telling us when file descriptors are ready) and the event loop
proper, which wraps a selector with functionality for scheduling
callbacks, immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import errno
import heapq
import logging
import select
import socket
import ssl
import sys
import threading
import time
try:
import signal
except ImportError:
signal = None
from . import events
from . import futures
from . import protocols
from . import selectors
from . import tasks
from . import transports
try:
from socket import socketpair
except ImportError:
assert sys.platform == 'win32'
from .winsocketpair import socketpair
# Errno values indicating the connection was disconnected.
_DISCONNECTED = frozenset((errno.ECONNRESET,
errno.ENOTCONN,
errno.ESHUTDOWN,
errno.ECONNABORTED,
errno.EPIPE,
errno.EBADF,
))
# Errno values indicating the socket isn't ready for I/O just yet.
# Membership tests throughout this file are ``exc.errno in _TRYAGAIN``.
_TRYAGAIN = frozenset((errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS))
if sys.platform == 'win32':
# Windows reports would-block via its own WSA errno value.
_TRYAGAIN = frozenset(list(_TRYAGAIN) + [errno.WSAEWOULDBLOCK])
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
class _StopError(BaseException):
"""Raised to stop the event loop."""
# Derives from BaseException, presumably so that a broad
# ``except Exception`` in user callbacks cannot swallow it -- confirm.
def _raise_stop_error():
# Scheduled as a callback by stop() and run_until_complete().
raise _StopError
class UnixEventLoop(events.EventLoop):
"""Unix event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector=None):
super().__init__()
if selector is None:
# pick the best selector class for the platform
selector = selectors.Selector()
logging.info('Using selector: %s', selector.__class__.__name__)
self._selector = selector
# FIFO of handlers runnable right now.
self._ready = collections.deque()
# Heap of time-scheduled handlers, ordered by their .when timestamp.
self._scheduled = []
# Handlers run once per loop iteration (call_every_iteration()).
self._everytime = []
# Created lazily by run_in_executor().
self._default_executor = None
# sig -> Handler, maintained by add/remove_signal_handler().
self._signal_handlers = {}
self._make_self_pipe()
def close(self):
# Idempotent: safe to call more than once.
if self._selector is not None:
self._selector.close()
self._selector = None
def _make_self_pipe(self):
# A self-socket, really. :-)
# Used by call_soon_threadsafe() and signal.set_wakeup_fd() to wake
# up a blocking select() from outside the loop's thread.
self._ssock, self._csock = socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self.add_reader(self._ssock.fileno(), self._read_from_self)
def _read_from_self(self):
# Drain one wake-up byte; EAGAIN just means a spurious wake-up.
try:
self._ssock.recv(1)
except socket.error as exc:
if exc.errno in _TRYAGAIN:
return
raise # Halp!
def _write_to_self(self):
# Send one wake-up byte; EAGAIN means the pipe is already full,
# which is fine -- the loop will wake anyway.
try:
self._csock.send(b'x')
except socket.error as exc:
if exc.errno in _TRYAGAIN:
return
raise # Halp!
def run(self):
"""Run the event loop until nothing left to do or stop() called.
This keeps going as long as there are either readable and
writable file descriptors, or scheduled callbacks (of either
variety).
TODO: Give this a timeout too?
"""
# registered_count() > 1 because the self-pipe alone doesn't count
# as real work (see the matching check in _run_once()).
while (self._ready or
self._scheduled or
self._selector.registered_count() > 1):
try:
self._run_once()
except _StopError:
break
def run_forever(self):
"""Run until stop() is called.
This only makes sense over run() if you have another thread
scheduling callbacks using call_soon_threadsafe().
"""
# A once-a-day no-op keeps self._scheduled non-empty so run()
# never decides it has run out of work.
handler = self.call_repeatedly(24*3600, lambda: None)
try:
self.run()
finally:
handler.cancel()
def run_once(self, timeout=None):
"""Run through all callbacks and all I/O polls once.
Calling stop() will break out of this too.
"""
try:
self._run_once(timeout)
except _StopError:
pass
def run_until_complete(self, future, timeout=None):
"""Run until the Future is done, or until a timeout.
Return the Future's result, or raise its exception. If the
timeout is reached or stop() is called, raise TimeoutError.
"""
if timeout is None:
timeout = 0x7fffffff/1000.0 # 24 days
# The future stopping the loop and the timeout handler race; the
# future.done() check below decides which one won.
future.add_done_callback(lambda _: self.stop())
handler = self.call_later(timeout, _raise_stop_error)
self.run()
handler.cancel()
if future.done():
return future.result() # May raise future.exception().
else:
raise futures.TimeoutError
def stop(self):
"""Stop running the event loop.
Every callback scheduled before stop() is called will run.
Callback scheduled after stop() is called won't. However,
those callbacks will run if run() is called again later.
"""
self.call_soon(_raise_stop_error)
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
Return an object with a cancel() method that can be used to
cancel the call.
The delay can be an int or float, expressed in seconds. It is
always a relative time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it undefined which
will be called first.
Callbacks scheduled in the past are passed on to call_soon(),
so these will be called in the order in which they were
registered rather than by time due. This is so you can't
cheat and insert yourself at the front of the ready queue by
using a negative time.
Any positional arguments after the callback will be passed to
the callback when it is called.
# TODO: Should delay is None be interpreted as Infinity?
"""
if delay <= 0:
return self.call_soon(callback, *args)
handler = events.make_handler(time.monotonic() + delay, callback, args)
heapq.heappush(self._scheduled, handler)
return handler
def call_repeatedly(self, interval, callback, *args):
"""Call a callback every 'interval' seconds."""
# The wrapper reschedules itself by mutating handler._when and
# re-pushing the same handler; if the callback raises, the chain
# is deliberately broken (no reschedule happens).
def wrapper():
callback(*args) # If this fails, the chain is broken.
handler._when = time.monotonic() + interval
heapq.heappush(self._scheduled, handler)
handler = events.make_handler(time.monotonic() + interval, wrapper, ())
heapq.heappush(self._scheduled, handler)
return handler
def call_soon(self, callback, *args):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue, callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
# when=None marks an "immediate" handler (see _add_callback).
handler = events.make_handler(None, callback, args)
self._ready.append(handler)
return handler
def call_soon_threadsafe(self, callback, *args):
"""Like call_soon(), but safe to call from another thread.
Writes a byte to the self-pipe so a blocking select() in the
loop's thread wakes up and notices the new ready callback.
"""
handler = self.call_soon(callback, *args)
self._write_to_self()
return handler
def call_every_iteration(self, callback, *args):
"""Call a callback just before the loop blocks.
The callback is called for every iteration of the loop.
"""
handler = events.make_handler(None, callback, args)
self._everytime.append(handler)
return handler
def wrap_future(self, future):
"""Wrap a concurrent.futures.Future for use with this loop.
Our own futures.Future is returned unchanged; anything else is
mirrored into a new futures.Future whose state is copied
(thread-safely) when the original completes.
"""
if isinstance(future, futures.Future):
return future # Don't wrap our own type of Future.
new_future = futures.Future()
future.add_done_callback(
lambda future:
self.call_soon_threadsafe(new_future._copy_state, future))
return new_future
def run_in_executor(self, executor, callback, *args):
# A Handler may be passed instead of a bare callable; unwrap it.
if isinstance(callback, events.Handler):
assert not args
assert callback.when is None
if callback.cancelled:
f = futures.Future()
f.set_result(None)
return f
callback, args = callback.callback, callback.args
if executor is None:
executor = self._default_executor
if executor is None:
# Lazily create a shared default thread pool on first use.
executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
self._default_executor = executor
return self.wrap_future(executor.submit(callback, *args))
def set_default_executor(self, executor):
# Replaces the executor used by run_in_executor(None, ...).
self._default_executor = executor
def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
# Blocking resolution runs in the executor; returns a Future.
return self.run_in_executor(None, socket.getaddrinfo,
host, port, family, type, proto, flags)
def getnameinfo(self, sockaddr, flags=0):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
# TODO: Or create_connection()? Or create_client()?
@tasks.task
def create_transport(self, protocol_factory, host, port, *, ssl=False,
family=0, type=socket.SOCK_STREAM, proto=0, flags=0):
"""Connect to (host, port) and return (transport, protocol).
Tries each resolved address until a connect succeeds; raises one
(or a combined) socket.error if all fail.  With ssl true (or an
SSLContext), waits for the SSL handshake before returning.
"""
infos = yield from self.getaddrinfo(host, port,
family=family, type=type,
proto=proto, flags=flags)
if not infos:
raise socket.error('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
sock = None
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
# NOTE(review): bare ``yield`` of the connect future;
# presumably the tasks scheduler accepts this -- confirm.
yield self.sock_connect(sock, address)
except socket.error as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
else:
break
else:
# Loop exhausted without a successful connect.
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise socket.error('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
protocol = protocol_factory()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
waiter = futures.Future()
transport = _UnixSslTransport(self, sock, protocol, sslcontext,
waiter)
# The SSL transport resolves `waiter` once the handshake is done.
yield from waiter
else:
transport = _UnixSocketTransport(self, sock, protocol)
return transport, protocol
# TODO: Or create_server()?
@tasks.task
def start_serving(self, protocol_factory, host, port, *,
family=0, type=socket.SOCK_STREAM, proto=0, flags=0,
backlog=100, fastopen=5):
"""Start listening on (host, port); return the listening socket.
Binds the first workable resolved address, optionally enables
TCP_FASTOPEN, listens, and registers _accept_connection() as the
read callback for incoming connections.
"""
infos = yield from self.getaddrinfo(host, port,
family=family, type=type,
proto=proto, flags=flags)
if not infos:
raise socket.error('getaddrinfo() returned empty list')
# TODO: Maybe we want to bind every address in the list
# instead of the first one that works?
exceptions = []
for family, type, proto, cname, address in infos:
sock = socket.socket(family=family, type=type, proto=proto)
try:
sock.bind(address)
except socket.error as exc:
sock.close()
exceptions.append(exc)
else:
break
else:
# No address bound; report the first failure.
raise exceptions[0]
if fastopen and hasattr(socket, 'TCP_FASTOPEN'):
try:
sock.setsockopt(socket.SOL_TCP, socket.TCP_FASTOPEN, fastopen)
except socket.error:
# Even if TCP_FASTOPEN is defined by glibc, it may
# still not be supported by the kernel.
logging.info('TCP_FASTOPEN(%r) failed', fastopen)
sock.listen(backlog)
sock.setblocking(False)
self.add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock)
return sock
def _accept_connection(self, protocol_factory, sock):
# Read-ready callback for the listening socket (see start_serving()).
# Note: this revision correctly tests ``exc.errno`` against the
# _TRYAGAIN errno set.
try:
conn, addr = sock.accept()
except socket.error as exc:
if exc.errno in _TRYAGAIN:
return # False alarm.
# Bad error. Stop serving.
self.remove_reader(sock.fileno())
sock.close()
# There's nowhere to send the error, so just log it.
# TODO: Someone will want an error handler for this.
logging.exception('Accept failed')
return
protocol = protocol_factory()
transport = _UnixSocketTransport(self, conn, protocol)
# It's now up to the protocol to handle the connection.
def add_reader(self, fd, callback, *args):
"""Add a reader callback. Return a Handler instance."""
# Per-fd selector data is a (reader, writer, connector) handler
# triple; these six methods maintain it and the event mask.
handler = events.make_handler(None, callback, args)
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
self._selector.register(fd, selectors.SELECT_IN,
(handler, None, None))
else:
self._selector.modify(fd, mask | selectors.SELECT_IN,
(handler, writer, connector))
return handler
def remove_reader(self, fd):
"""Remove a reader callback."""
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
# Removing an unknown fd is a no-op.
pass
else:
mask &= ~selectors.SELECT_IN
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (None, writer, connector))
def add_writer(self, fd, callback, *args):
"""Add a writer callback. Return a Handler instance."""
handler = events.make_handler(None, callback, args)
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
self._selector.register(fd, selectors.SELECT_OUT,
(None, handler, None))
else:
self._selector.modify(fd, mask | selectors.SELECT_OUT,
(reader, handler, connector))
return handler
def remove_writer(self, fd):
"""Remove a writer callback."""
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
pass
else:
mask &= ~selectors.SELECT_OUT
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (reader, None, connector))
def add_connector(self, fd, callback, *args):
"""Add a connector callback. Return a Handler instance."""
# XXX As long as SELECT_CONNECT == SELECT_OUT, set the handler
# as both writer and connector.
handler = events.make_handler(None, callback, args)
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
self._selector.register(fd, selectors.SELECT_CONNECT,
(None, handler, handler))
else:
self._selector.modify(fd, mask | selectors.SELECT_CONNECT,
(reader, handler, handler))
return handler
def remove_connector(self, fd):
"""Remove a connector callback."""
try:
mask, (reader, writer, connector) = self._selector.get_info(fd)
except KeyError:
pass
else:
mask &= ~selectors.SELECT_CONNECT
if not mask:
self._selector.unregister(fd)
else:
# NOTE(review): also clears the writer slot, mirroring
# add_connector() setting both -- confirm intended.
self._selector.modify(fd, mask, (reader, None, None))
def sock_recv(self, sock, n):
"""XXX"""
fut = futures.Future()
self._sock_recv(fut, False, sock, n)
return fut
def _sock_recv(self, fut, registered, sock, n):
fd = sock.fileno()
if registered:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_reader(fd)
if fut.cancelled():
return
try:
data = sock.recv(n)
fut.set_result(data)
except socket.error as exc:
if exc.errno not in _TRYAGAIN:
fut.set_exception(exc)
else:
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
def sock_sendall(self, sock, data):
"""XXX"""
fut = futures.Future()
self._sock_sendall(fut, False, sock, data)
return fut
def _sock_sendall(self, fut, registered, sock, data):
    # Internal: completes the Future returned by sock_sendall().
    # Repeatedly sends the remainder of data until it is all written.
    fd = sock.fileno()
    if registered:
        self.remove_writer(fd)
    if fut.cancelled():
        return
    n = 0
    try:
        if data:
            n = sock.send(data)
    except socket.error as exc:
        if exc.errno not in _TRYAGAIN:
            fut.set_exception(exc)
            return
    if n == len(data):
        # Everything sent (also covers the empty-data case, n == 0).
        fut.set_result(None)
    else:
        if n:
            data = data[n:]
        # Partial send: retry with the remainder when writable.
        self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
def sock_connect(self, sock, address):
    """Connect sock to address asynchronously; return a Future.

    The address must already be resolved; no name lookup is done here.
    """
    # That address better not require a lookup! We're not calling
    # self.getaddrinfo() for you here. But verifying this is
    # complicated; the socket module doesn't have a pattern for
    # IPv6 addresses (there are too many forms, apparently).
    fut = futures.Future()
    self._sock_connect(fut, False, sock, address)
    return fut
def _sock_connect(self, fut, registered, sock, address):
    # Internal: completes the Future returned by sock_connect().
    # registered is True when re-entered as a connector callback.
    fd = sock.fileno()
    if registered:
        self.remove_connector(fd)
    if fut.cancelled():
        return
    try:
        if not registered:
            # First time around.
            sock.connect(address)
        else:
            # Connect finished (or failed): read the outcome from
            # the socket's error state.
            err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                # Jump to the except clause below.
                raise socket.error(err, 'Connect call failed')
        fut.set_result(None)
    except socket.error as exc:
        if exc.errno not in _TRYAGAIN:
            fut.set_exception(exc)
        else:
            # Connect in progress: wait for the fd to become connectable.
            self.add_connector(fd, self._sock_connect,
                               fut, True, sock, address)
def sock_accept(self, sock):
    """Accept one connection on sock; return a Future of (conn, address)."""
    future = futures.Future()
    self._sock_accept(future, False, sock)
    return future
def _sock_accept(self, fut, registered, sock):
    # Internal: completes the Future returned by sock_accept().
    fd = sock.fileno()
    if registered:
        self.remove_reader(fd)
    if fut.cancelled():
        return
    try:
        conn, address = sock.accept()
        # The accepted socket must be non-blocking for this event loop.
        conn.setblocking(False)
        fut.set_result((conn, address))
    except socket.error as exc:
        if exc.errno not in _TRYAGAIN:
            fut.set_exception(exc)
        else:
            # No connection pending yet: retry when readable.
            self.add_reader(fd, self._sock_accept, fut, True, sock)
def add_signal_handler(self, sig, callback, *args):
    """Add a handler for a signal. UNIX only.

    Raise ValueError if the signal number is invalid or uncatchable.
    Raise RuntimeError if there is a problem setting up the handler.
    """
    self._check_signal(sig)
    try:
        # set_wakeup_fd() raises ValueError if this is not the
        # main thread. By calling it early we ensure that an
        # event loop running in another thread cannot add a signal
        # handler.
        signal.set_wakeup_fd(self._csock.fileno())
    except ValueError as exc:
        raise RuntimeError(str(exc))
    handler = events.make_handler(None, callback, args)
    self._signal_handlers[sig] = handler
    try:
        signal.signal(sig, self._handle_signal)
    except OSError as exc:
        # Registration failed: roll back our bookkeeping.
        del self._signal_handlers[sig]
        if not self._signal_handlers:
            # No handlers left: also undo the wakeup fd.
            try:
                signal.set_wakeup_fd(-1)
            except ValueError as nexc:
                logging.info('set_wakeup_fd(-1) failed: %s', nexc)
        if exc.errno == errno.EINVAL:
            raise RuntimeError('sig {} cannot be caught'.format(sig))
        else:
            raise
    return handler
def _handle_signal(self, sig, arg):
"""Internal helper that is the actual signal handler."""
handler = self._signal_handlers.get(sig)
if handler is None:
return # Assume it's some race condition.
if handler.cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self.call_soon_threadsafe(handler.callback, *handler.args)
def remove_signal_handler(self, sig):
    """Remove a handler for a signal. UNIX only.

    Return True if a signal handler was removed, False if not.
    """
    self._check_signal(sig)
    try:
        del self._signal_handlers[sig]
    except KeyError:
        return False
    # Restore the disposition the interpreter expects by default.
    if sig == signal.SIGINT:
        handler = signal.default_int_handler
    else:
        handler = signal.SIG_DFL
    try:
        signal.signal(sig, handler)
    except OSError as exc:
        if exc.errno == errno.EINVAL:
            raise RuntimeError('sig {} cannot be caught'.format(sig))
        else:
            raise
    if not self._signal_handlers:
        # Last handler removed: detach the self-pipe wakeup fd.
        try:
            signal.set_wakeup_fd(-1)
        except ValueError as exc:
            logging.info('set_wakeup_fd(-1) failed: %s', exc)
    return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
if signal is None:
raise RuntimeError('Signals are not supported')
if not (1 <= sig < signal.NSIG):
raise ValueError('sig {} out of range(1, {})'.format(sig,
signal.NSIG))
def _add_callback(self, handler):
"""Add a Handler to ready or scheduled."""
if handler.cancelled:
return
if handler.when is None:
self._ready.append(handler)
else:
heapq.heappush(self._scheduled, handler)
def _run_once(self, timeout=None):
    """Run one full iteration of the event loop.

    This calls all currently ready callbacks, polls for I/O,
    schedules the resulting callbacks, and finally schedules
    'call_later' callbacks.
    """
    # TODO: Break each of these into smaller pieces.
    # TODO: Refactor to separate the callbacks from the readers/writers.
    # TODO: An alternative API would be to do the *minimal* amount
    # of work, e.g. one callback or one I/O poll.

    # Add everytime handlers, skipping cancelled ones.
    any_cancelled = False
    for handler in self._everytime:
        if handler.cancelled:
            any_cancelled = True
        else:
            self._ready.append(handler)

    # Remove cancelled everytime handlers if there are any.
    if any_cancelled:
        self._everytime = [handler for handler in self._everytime
                           if not handler.cancelled]

    # Remove delayed calls that were cancelled from head of queue.
    while self._scheduled and self._scheduled[0].cancelled:
        heapq.heappop(self._scheduled)

    # Inspect the poll queue. If there's exactly one selectable
    # file descriptor, it's the self-pipe, and if there's nothing
    # scheduled, we should ignore it.
    if self._selector.registered_count() > 1 or self._scheduled:
        if self._ready:
            # Ready callbacks pending: poll without blocking.
            timeout = 0
        elif self._scheduled:
            # Compute the desired timeout.
            when = self._scheduled[0].when
            deadline = max(0, when - time.monotonic())
            if timeout is None:
                timeout = deadline
            else:
                timeout = min(timeout, deadline)
        t0 = time.monotonic()
        event_list = self._selector.select(timeout)
        t1 = time.monotonic()
        argstr = '' if timeout is None else ' %.3f' % timeout
        # Slow polls are logged at INFO so stalls are visible.
        if t1-t0 >= 1:
            level = logging.INFO
        else:
            level = logging.DEBUG
        logging.log(level, 'poll%s took %.3f seconds', argstr, t1-t0)
        for fileobj, mask, (reader, writer, connector) in event_list:
            if mask & selectors.SELECT_IN and reader is not None:
                self._add_callback(reader)
            if mask & selectors.SELECT_OUT and writer is not None:
                self._add_callback(writer)
            elif mask & selectors.SELECT_CONNECT and connector is not None:
                self._add_callback(connector)

    # Handle 'later' callbacks that are ready.
    now = time.monotonic()
    while self._scheduled:
        handler = self._scheduled[0]
        if handler.when > now:
            break
        handler = heapq.heappop(self._scheduled)
        self._ready.append(handler)

    # This is the only place where callbacks are actually *called*.
    # All other places just add them to ready.
    # TODO: Ensure this loop always finishes, even if some
    # callbacks keeps registering more callbacks.
    while self._ready:
        handler = self._ready.popleft()
        if not handler.cancelled:
            try:
                handler.callback(*handler.args)
            except Exception:
                logging.exception('Exception in callback %s %r',
                                  handler.callback, handler.args)
class _UnixSocketTransport(transports.Transport):
    """Transport for a plain (non-SSL) non-blocking socket.

    Incoming data is dispatched to protocol.data_received(); outgoing
    data is sent immediately when possible and otherwise buffered and
    flushed when the event loop reports the socket writable.
    """

    def __init__(self, event_loop, sock, protocol):
        self._event_loop = event_loop
        self._sock = sock
        self._protocol = protocol
        self._buffer = []
        self._closing = False  # Set when close() called.
        self._event_loop.add_reader(self._sock.fileno(), self._read_ready)
        self._event_loop.call_soon(self._protocol.connection_made, self)

    def _read_ready(self):
        # Event-loop callback: the socket is readable.
        try:
            data = self._sock.recv(16*1024)
        except socket.error as exc:
            if exc.errno not in _TRYAGAIN:
                self._fatal_error(exc)
        else:
            if data:
                self._event_loop.call_soon(self._protocol.data_received, data)
            else:
                # recv() returned b'': the peer closed its end.
                self._event_loop.remove_reader(self._sock.fileno())
                self._event_loop.call_soon(self._protocol.eof_received)

    def write(self, data):
        """Write data to the socket, buffering whatever cannot be sent now."""
        assert isinstance(data, bytes)
        assert not self._closing
        if not data:
            return
        if not self._buffer:
            # Attempt to send it right away first.
            try:
                n = self._sock.send(data)
            except socket.error as exc:
                if exc.errno in _TRYAGAIN:
                    n = 0
                else:
                    self._fatal_error(exc)
                    return
            if n == len(data):
                return
            if n:
                data = data[n:]
            # Bug fix: register the flush callback on the event loop;
            # the transport itself has no add_writer() method.
            self._event_loop.add_writer(self._sock.fileno(), self._write_ready)
        self._buffer.append(data)

    def _write_ready(self):
        # Event-loop callback: the socket is writable; flush the buffer.
        data = b''.join(self._buffer)
        self._buffer = []
        try:
            if data:
                n = self._sock.send(data)
            else:
                n = 0
        except socket.error as exc:
            if exc.errno in _TRYAGAIN:
                n = 0
            else:
                self._fatal_error(exc)
                return
        if n == len(data):
            # Buffer fully drained: stop write polling; finish a pending
            # close() now that everything has been sent.
            self._event_loop.remove_writer(self._sock.fileno())
            if self._closing:
                self._event_loop.call_soon(self._call_connection_lost, None)
            return
        if n:
            data = data[n:]
        self._buffer.append(data)  # Try again later.

    # TODO: write_eof(), can_write_eof().

    def abort(self):
        """Forcibly close the transport, discarding buffered data."""
        self._fatal_error(None)

    def close(self):
        """Close after flushing buffered data; reads stop immediately."""
        self._closing = True
        self._event_loop.remove_reader(self._sock.fileno())
        if not self._buffer:
            self._event_loop.call_soon(self._call_connection_lost, None)

    def _fatal_error(self, exc):
        # Tear everything down and report the failure to the protocol.
        logging.exception('Fatal error for %s', self)
        self._event_loop.remove_writer(self._sock.fileno())
        self._event_loop.remove_reader(self._sock.fileno())
        self._buffer = []
        self._event_loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        # Ensure the socket is closed even if the protocol callback raises.
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._sock.close()
class _UnixSslTransport(transports.Transport):
    """Transport wrapping a socket in SSL.

    Performs the handshake asynchronously, then serves reads and
    buffered writes through a single _on_ready() callback registered
    for both readability and writability.
    """

    def __init__(self, event_loop, rawsock, protocol, sslcontext, waiter):
        self._event_loop = event_loop
        self._rawsock = rawsock
        self._protocol = protocol
        sslcontext = sslcontext or ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        self._sslcontext = sslcontext
        self._waiter = waiter
        sslsock = sslcontext.wrap_socket(rawsock,
                                         do_handshake_on_connect=False)
        self._sslsock = sslsock
        self._buffer = []
        self._closing = False  # Set when close() called.
        self._on_handshake()

    def _on_handshake(self):
        # Drive the SSL handshake, re-registering for whichever event
        # direction the SSL layer says it is waiting on.
        fd = self._sslsock.fileno()
        try:
            self._sslsock.do_handshake()
        except ssl.SSLWantReadError:
            self._event_loop.add_reader(fd, self._on_handshake)
            return
        except ssl.SSLWantWriteError:
            # Bug fix: the event loop API is add_writer(), not add_writable().
            self._event_loop.add_writer(fd, self._on_handshake)
            return
        except Exception as exc:
            self._sslsock.close()
            self._waiter.set_exception(exc)
            return
        except BaseException as exc:
            # KeyboardInterrupt etc.: report and re-raise.
            self._sslsock.close()
            self._waiter.set_exception(exc)
            raise
        # Handshake complete: switch both directions to _on_ready().
        self._event_loop.remove_reader(fd)
        self._event_loop.remove_writer(fd)
        self._event_loop.add_reader(fd, self._on_ready)
        self._event_loop.add_writer(fd, self._on_ready)
        self._event_loop.call_soon(self._protocol.connection_made, self)
        self._waiter.set_result(None)

    def _on_ready(self):
        # Because of renegotiations (?), there's no difference between
        # readable and writable. We just try both. XXX This may be
        # incorrect; we probably need to keep state about what we
        # should do next.
        # Maybe we're already closed...
        fd = self._sslsock.fileno()
        if fd < 0:
            return
        # First try reading.
        try:
            data = self._sslsock.recv(8192)
        except ssl.SSLWantReadError:
            pass
        except ssl.SSLWantWriteError:
            pass
        except socket.error as exc:
            if exc.errno not in _TRYAGAIN:
                self._fatal_error(exc)
                return
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # EOF from the peer.
                # TODO: Don't close when self._buffer is non-empty.
                assert not self._buffer
                self._event_loop.remove_reader(fd)
                self._event_loop.remove_writer(fd)
                self._sslsock.close()
                self._protocol.connection_lost(None)
                return
        # Now try writing, if there's anything to write.
        if not self._buffer:
            return
        data = b''.join(self._buffer)
        self._buffer = []
        try:
            n = self._sslsock.send(data)
        except ssl.SSLWantReadError:
            pass
        except ssl.SSLWantWriteError:
            pass
        except socket.error as exc:
            if exc.errno not in _TRYAGAIN:
                self._fatal_error(exc)
                return
        else:
            if n < len(data):
                # Partial send: keep the remainder for the next wakeup.
                self._buffer.append(data[n:])

    def write(self, data):
        """Queue data for sending; _on_ready() performs the actual send."""
        assert isinstance(data, bytes)
        assert not self._closing
        if not data:
            return
        self._buffer.append(data)
        # We could optimize, but the callback can do this for now.

    # TODO: write_eof(), can_write_eof().

    def abort(self):
        """Forcibly close the transport, discarding buffered data."""
        self._fatal_error(None)

    def close(self):
        """Close after flushing buffered data; reads stop immediately."""
        self._closing = True
        self._event_loop.remove_reader(self._sslsock.fileno())
        if not self._buffer:
            self._event_loop.call_soon(self._protocol.connection_lost, None)

    def _fatal_error(self, exc):
        # Tear everything down and report the failure to the protocol.
        logging.exception('Fatal error for %s', self)
        self._event_loop.remove_writer(self._sslsock.fileno())
        self._event_loop.remove_reader(self._sslsock.fileno())
        self._buffer = []
        self._event_loop.call_soon(self._protocol.connection_lost, exc)
|
Use test_utils.build_module instead of building with AstroidBuilder in some tests.
|
#!/usr/bin/python
import json
import sys
import os
def main():
    '''
    This script works by coming up with a list of possible completions and printing
    those suggestions, separated by spaces to stdout

    NOTE: Python 2 script (print statement); completions are space-separated
    on a single line for consumption by the shell completion hook.
    '''
    completer = Completer()
    completions = completer.get_completions()
    if completions is not None:
        for c in completions:
            # trailing comma keeps all suggestions on one line
            print str(c) + ' ',
    else:
        sys.exit(0)
class Completer(object):
    '''
    Produces shell-completion suggestions for the kubos CLI.

    Reads the cached options.json description of the CLI's subcommands
    and arguments and matches it against the arguments currently typed
    on the command line (sys.argv).
    '''

    JSON_FILE = os.path.join(os.path.expanduser('~'), '.kubos', 'completion', 'options.json')

    def __init__(self):
        # arg_data is None when the options cache is missing;
        # get_current_subcommands() exits in that case.
        if os.path.isfile(self.JSON_FILE):
            with open(self.JSON_FILE, 'r') as _fil:
                self.arg_data = json.loads(_fil.read())
        else:
            self.arg_data = None
        self.args = sys.argv[2:]  # chop off the initial 'python kubos' arguments
        self.subcommands = self.get_current_subcommands()
        self.load_targets()

    def load_targets(self):
        '''
        Load the available targets, based on the current project's platform type
        '''
        platform = self.get_platform()
        targets = self.load_target_list(platform)
        self.arg_data['subcommands']['target']['set_target']['choices'] = targets

    def get_completions(self):
        # Only completing the subcommands and their args is supported right now.
        # Completing the global options (--config, --target, etc.) should be supported in the future.
        return self.eval_subcommands()

    def eval_subcommands(self):
        '''
        Returns list of possible subcommand, and subcommand specific arguments based
        on the currently provided arguments

        This works by starting with the first argument, removing arguments from
        the front of the self.args list, as subcommand and arguments are processed.
        '''
        num_args = len(self.args)
        if num_args == 0:
            # nothing has been entered - return every subcommand
            return self.subcommands
        else:
            possible_arguments = []
            # get all the possible subcommand completions for the entered text
            possible_subcommands = self.get_current_subcommand_completion()
            subcommand = self.get_current_subcommand()
            if subcommand is not None:
                # gets all possible argument values for the subcommand
                possible_arguments = self.get_valid_subcommand_argument_list(subcommand)
            # try to get an argument following the subcommand
            arg = self.get_next_arg()
            if arg is not None:
                # drop other subcommand completions - they're already typing an argument for the subcommand
                possible_subcommands = []
                possible_arguments = self.get_completions_from_list(arg, possible_arguments)
                if self.is_valid_subcommand_arg(subcommand, arg):
                    return []  # if we've already completed a complete and valid argument, stop suggesting it.
            return possible_arguments + possible_subcommands

    def get_completions_from_list(self, val, option_list):
        '''
        Generic function for returning all values from option_list that start with
        the value of val
        '''
        ret_list = []
        for option in option_list:
            if option.startswith(val):
                ret_list.append(option)
        return ret_list

    def get_current_subcommand_completion(self):
        '''
        Returns all possible subcommand name completions for the next argument
        '''
        arg_val = self.args[0]  # we should get the subcommand name first
        return self.get_completions_from_list(arg_val, self.subcommands)

    def get_next_arg(self):
        '''
        pop the next arg off the front of the provided arguments and return it
        '''
        if len(self.args) > 0:
            return self.args.pop(0)
        return None

    def get_current_subcommand(self):
        '''
        Returns the subcommand name if next argument is a valid subcommand or None if it isn't
        '''
        val = self.args.pop(0)
        if val in self.subcommands:
            return val
        else:
            return None

    def is_valid_subcommand_arg(self, subcommand, arg):
        '''
        Returns True if arg is a valid argument for subcommand, otherwise it returns False
        '''
        valid_args = self.get_valid_subcommand_argument_list(subcommand)
        if arg in valid_args:
            return True
        return False

    def get_valid_subcommand_argument_list(self, subcommand):
        '''
        Returns a list of the valid argument completions for subcommand.

        The list is constructed of argument names that start with '--' and the
        choices for positional arguments (like target names)
        '''
        args = self.arg_data['subcommands'][subcommand]
        choices = []
        for arg in args:
            if arg.startswith('--'):
                choices.append(arg)
            else:
                if 'choices' in args[arg] and args[arg]['choices'] is not None:
                    choices += args[arg]['choices']
        return choices

    def get_current_subcommands(self):
        '''
        This function contains the try/except because it's the first function
        that would encounter a type error in the case the options.json file
        does not exist.
        '''
        try:
            subcommands = self.arg_data['subcommands']
            return subcommands.keys()
        except TypeError:
            sys.exit(1)

    ################################################################
    # CLI DUPLICATED FUNCTIONS
    ################################################################
    '''
    Importing the following functions from the CLI slows down the execution of this
    script by a factor of about 20.
    '''

    def get_platform(self):
        '''
        Returns 'rt' or 'linux' based on the current project's module.json,
        or None when no platform can be determined.
        '''
        module_json = os.path.join(os.getcwd(), 'module.json')
        if os.path.isfile(module_json):
            with open(module_json, 'r') as module_file:
                data = json.loads(module_file.read())
            if 'dependencies' in data:
                deps = data['dependencies']
                if 'kubos-rt' in deps:
                    return 'rt'
                else:
                    return 'linux'
            else:
                # This project doesn't have a dependencies field. This is most likely running in a unit testing context
                return None
        else:
            # There is no module.json
            return None

    def load_target_list(self, platform):
        '''
        Returns the cached target names matching platform, or None when the
        cache file is missing or the platform is unrecognised.
        '''
        KUBOS_TARGET_CACHE_FILE = os.path.join(os.path.expanduser('~'), '.kubos', 'targets.json')
        if not os.path.isfile(KUBOS_TARGET_CACHE_FILE):
            return None
        with open(KUBOS_TARGET_CACHE_FILE, 'r') as json_file:
            data = json.loads(json_file.read())
        linux_targets = data['linux-targets']
        rt_targets = data['rt-targets']
        # fixed: use identity comparison with None (was '== None')
        if platform is None:  # if no platform is listed in the module.json, dont restrict the target type
            return linux_targets + rt_targets
        elif platform == 'linux':
            return linux_targets
        elif platform == 'rt':
            return rt_targets
        # fixed: explicit fallthrough for unrecognised platform values
        return None
# Entry point when invoked directly by the shell-completion hook.
if __name__ == '__main__':
    main()
Stop suggesting a subcommand once it has been fully entered
#!/usr/bin/python
import json
import sys
import os
def main():
    '''
    This script works by coming up with a list of possible completions and printing
    those suggestions, separated by spaces to stdout

    NOTE: Python 2 script (print statement); completions are space-separated
    on a single line for consumption by the shell completion hook.
    '''
    completer = Completer()
    completions = completer.get_completions()
    if completions is not None:
        for c in completions:
            # trailing comma keeps all suggestions on one line
            print str(c) + ' ',
    else:
        sys.exit(0)
class Completer(object):
    '''
    Produces shell-completion suggestions for the kubos CLI.

    Reads the cached options.json description of the CLI's subcommands
    and arguments and matches it against the arguments currently typed
    on the command line (sys.argv).
    '''

    JSON_FILE = os.path.join(os.path.expanduser('~'), '.kubos', 'completion', 'options.json')

    def __init__(self):
        # arg_data is None when the options cache is missing;
        # get_current_subcommands() exits in that case.
        if os.path.isfile(self.JSON_FILE):
            with open(self.JSON_FILE, 'r') as _fil:
                self.arg_data = json.loads(_fil.read())
        else:
            self.arg_data = None
        self.args = sys.argv[2:]  # chop off the initial 'python kubos' arguments
        self.subcommands = self.get_current_subcommands()
        self.load_targets()

    def load_targets(self):
        '''
        Load the available targets, based on the current project's platform type
        '''
        platform = self.get_platform()
        targets = self.load_target_list(platform)
        self.arg_data['subcommands']['target']['set_target']['choices'] = targets

    def get_completions(self):
        # Only completing the subcommands and their args is supported right now.
        # Completing the global options (--config, --target, etc.) should be supported in the future.
        return self.eval_subcommands()

    def eval_subcommands(self):
        '''
        Returns list of possible subcommand, and subcommand specific arguments based
        on the currently provided arguments

        This works by starting with the first argument, removing arguments from
        the front of the self.args list, as subcommand and arguments are processed.
        '''
        num_args = len(self.args)
        if num_args == 0:
            # nothing has been entered - return every subcommand
            return self.subcommands
        else:
            possible_arguments = []
            # get all the possible subcommand completions for the entered text
            possible_subcommands = self.get_current_subcommand_completion()
            subcommand = self.get_current_subcommand()
            if subcommand is not None:
                # gets all possible argument values for the subcommand
                possible_arguments = self.get_valid_subcommand_argument_list(subcommand)
                possible_subcommands = []  # they've already entered a complete subcommand don't suggest it
            # try to get an argument following the subcommand
            arg = self.get_next_arg()
            if arg is not None:
                # drop other subcommand completions - they're already typing an argument for the subcommand
                possible_subcommands = []
                possible_arguments = self.get_completions_from_list(arg, possible_arguments)
                if self.is_valid_subcommand_arg(subcommand, arg):
                    return []  # if we've already completed a complete and valid argument, stop suggesting it.
            return possible_arguments + possible_subcommands

    def get_completions_from_list(self, val, option_list):
        '''
        Generic function for returning all values from option_list that start with
        the value of val
        '''
        ret_list = []
        for option in option_list:
            if option.startswith(val):
                ret_list.append(option)
        return ret_list

    def get_current_subcommand_completion(self):
        '''
        Returns all possible subcommand name completions for the next argument
        '''
        arg_val = self.args[0]  # we should get the subcommand name first
        return self.get_completions_from_list(arg_val, self.subcommands)

    def get_next_arg(self):
        '''
        pop the next arg off the front of the provided arguments and return it
        '''
        if len(self.args) > 0:
            return self.args.pop(0)
        return None

    def get_current_subcommand(self):
        '''
        Returns the subcommand name if next argument is a valid subcommand or None if it isn't
        '''
        val = self.args.pop(0)
        if val in self.subcommands:
            return val
        else:
            return None

    def is_valid_subcommand_arg(self, subcommand, arg):
        '''
        Returns True if arg is a valid argument for subcommand, otherwise it returns False
        '''
        valid_args = self.get_valid_subcommand_argument_list(subcommand)
        if arg in valid_args:
            return True
        return False

    def get_valid_subcommand_argument_list(self, subcommand):
        '''
        Returns a list of the valid argument completions for subcommand.

        The list is constructed of argument names that start with '--' and the
        choices for positional arguments (like target names)
        '''
        args = self.arg_data['subcommands'][subcommand]
        choices = []
        for arg in args:
            if arg.startswith('--'):
                choices.append(arg)
            else:
                if 'choices' in args[arg] and args[arg]['choices'] is not None:
                    choices += args[arg]['choices']
        return choices

    def get_current_subcommands(self):
        '''
        This function contains the try/except because it's the first function
        that would encounter a type error in the case the options.json file
        does not exist.
        '''
        try:
            subcommands = self.arg_data['subcommands']
            return subcommands.keys()
        except TypeError:
            sys.exit(1)

    ################################################################
    # CLI DUPLICATED FUNCTIONS
    ################################################################
    '''
    Importing the following functions from the CLI slows down the execution of this
    script by a factor of about 20.
    '''

    def get_platform(self):
        '''
        Returns 'rt' or 'linux' based on the current project's module.json,
        or None when no platform can be determined.
        '''
        module_json = os.path.join(os.getcwd(), 'module.json')
        if os.path.isfile(module_json):
            with open(module_json, 'r') as module_file:
                data = json.loads(module_file.read())
            if 'dependencies' in data:
                deps = data['dependencies']
                if 'kubos-rt' in deps:
                    return 'rt'
                else:
                    return 'linux'
            else:
                # This project doesn't have a dependencies field. This is most likely running in a unit testing context
                return None
        else:
            # There is no module.json
            return None

    def load_target_list(self, platform):
        '''
        Returns the cached target names matching platform, or None when the
        cache file is missing or the platform is unrecognised.
        '''
        KUBOS_TARGET_CACHE_FILE = os.path.join(os.path.expanduser('~'), '.kubos', 'targets.json')
        if not os.path.isfile(KUBOS_TARGET_CACHE_FILE):
            return None
        with open(KUBOS_TARGET_CACHE_FILE, 'r') as json_file:
            data = json.loads(json_file.read())
        linux_targets = data['linux-targets']
        rt_targets = data['rt-targets']
        # fixed: use identity comparison with None (was '== None')
        if platform is None:  # if no platform is listed in the module.json, dont restrict the target type
            return linux_targets + rt_targets
        elif platform == 'linux':
            return linux_targets
        elif platform == 'rt':
            return rt_targets
        # fixed: explicit fallthrough for unrecognised platform values
        return None
# Entry point when invoked directly by the shell-completion hook.
if __name__ == '__main__':
    main()
|
"""
Created on 27 Sep 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
QueueReport.STATUS_NONE: "",
QueueReport.STATUS_INHIBITED: "PUBLISHING INHIBITED",
QueueReport.STATUS_DISCONNECTED: "CONNECTING",
QueueReport.STATUS_PUBLISHING: "PUBLISHING DATA",
QueueReport.STATUS_QUEUING: "QUEUING DATA",
QueueReport.STATUS_CLEARING: "CLEARING DATA BACKLOG "
"""
import json
import time
from collections import OrderedDict
from multiprocessing import Manager
from AWSIoTPythonSDK.exception.operationError import operationError
from AWSIoTPythonSDK.exception.operationTimeoutException import operationTimeoutException
from scs_core.aws.client.client_auth import ClientAuth
from scs_core.aws.client.mqtt_client import MQTTClient
from scs_core.comms.mqtt_conf import MQTTConf
from scs_core.data.message_queue import MessageQueue
from scs_core.data.publication import Publication
from scs_core.data.queue_report import QueueReport, ClientStatus
from scs_core.sync.synchronised_process import SynchronisedProcess
from scs_dev.handler.mqtt_reporter import MQTTReporter
# --------------------------------------------------------------------------------------------------------------------
class AWSMQTTPublisher(SynchronisedProcess):
    """
    A SynchronisedProcess that drains a MessageQueue and publishes each
    message to AWS IoT over MQTT, maintaining a QueueReport (saved to
    file and mirrored on the LED) and an AWSMQTTState connection state.
    """

    __QUEUE_INSPECTION_INTERVAL = 2.0   # seconds between queue polls

    __CONNECT_TIME = 3.0  # seconds
    __CONNECT_RETRY_TIME = 2.0  # seconds
    __POST_PUBLISH_TIME = 0.1  # seconds - was 0.5

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, conf: MQTTConf, auth: ClientAuth, queue: MessageQueue, client: MQTTClient,
                 reporter: MQTTReporter):
        """
        Constructor
        """
        manager = Manager()
        SynchronisedProcess.__init__(self, AWSMQTTReport(manager.dict()))

        # publishing may be inhibited by configuration
        initial_state = ClientStatus.INHIBITED if conf.inhibit_publishing else ClientStatus.WAITING

        self.__state = AWSMQTTState(initial_state, reporter)

        self.__conf = conf
        self.__auth = auth
        self.__queue = queue
        self.__client = client
        self.__reporter = reporter

        self.__report = QueueReport(0, initial_state, False)

        self.__report.save(self.__conf.report_file)
        self.__reporter.set_led(self.__report)

    # ----------------------------------------------------------------------------------------------------------------
    # SynchronisedProcess implementation...

    def run(self):
        # Poll the queue forever; interrupts terminate the process quietly.
        try:
            while True:
                self.__process_messages()
                time.sleep(self.__QUEUE_INSPECTION_INTERVAL)

        except (BrokenPipeError, KeyboardInterrupt, SystemExit):
            pass

    def stop(self):
        # Disconnect, report the final state, remove the report file.
        try:
            self.__disconnect()
            self.__state.set_disconnected()

            self.__reporter.set_led(self.__report)
            self.__report.delete(self.__conf.report_file)

            super().stop()

        except (BrokenPipeError, KeyboardInterrupt, SystemExit):
            pass

    # ----------------------------------------------------------------------------------------------------------------
    # state management...

    def __process_messages(self):
        # Drain the queue, re-checking its length before each message.
        while True:
            self.__report.length = self.__queue.length()

            if self.__report.length is None or self.__report.length < 1:
                return

            self.__reporter.set_led(self.__report)

            if self.__conf.report_file:
                self.__report.save(self.__conf.report_file)

            self.__reporter.print("queue: %s" % self.__report.length)

            try:
                self.__process_message(self.__next_message())
            except Exception as ex:
                # best-effort: log the failure class and keep draining
                self.__reporter.print("pms: %s" % ex.__class__.__name__)

    def __process_message(self, publication):
        # None means the queued message could not be parsed: discard it.
        if publication is None:
            self.__queue.dequeue()
            return

        self.__report.client_state = self.__state.state

        if self.__report.client_state == ClientStatus.WAITING:
            self.__report.client_state = ClientStatus.CONNECTING

        if self.__report.client_state == ClientStatus.INHIBITED:
            # discard...
            self.__queue.dequeue()
            return

        if self.__report.client_state == ClientStatus.CONNECTING:
            # connect...
            if self.__connect():
                self.__state.set_connected()
                time.sleep(self.__CONNECT_TIME)
            else:
                time.sleep(self.__CONNECT_RETRY_TIME)
            # message is retried on the next pass once connected
            return

        if self.__report.client_state == ClientStatus.CONNECTED:
            # publish...
            self.__publish_message(publication)
            self.__queue.dequeue()
            time.sleep(self.__POST_PUBLISH_TIME)
            return

        else:
            raise ValueError("unknown AWSMQTTState: %s" % self.__report.client_state)

    # ----------------------------------------------------------------------------------------------------------------
    # connection management...

    def __connect(self):
        # Returns True on successful connection, False otherwise.
        try:
            success = self.__client.connect(self.__auth)

            if success:
                self.__reporter.print("connect: done")
                return True
            else:
                self.__reporter.print("connect: failed")
                return False

        except OSError as ex:
            self.__reporter.print("connect: %s" % ex)
            return False

    def __disconnect(self):
        self.__client.disconnect()

    # ----------------------------------------------------------------------------------------------------------------
    # message management...

    def __next_message(self):
        # Peek (without dequeuing) the next message and parse it into a
        # Publication; returns None if the message is unparseable.
        message = self.__queue.next()

        try:
            datum = json.loads(message, object_pairs_hook=OrderedDict)
            return Publication.construct_from_jdict(datum)

        except (TypeError, ValueError) as ex:
            self.__reporter.print("next_message: %s" % ex)
            return None

    def __publish_message(self, publication):
        self.__report.publish_success = False

        try:
            start_time = time.time()
            reached_paho = self.__client.publish(publication)
            elapsed_time = time.time() - start_time

            self.__reporter.print("paho: %s: %0.3f" % ("1" if reached_paho else "0", elapsed_time))
            self.__report.publish_success = reached_paho

        except operationTimeoutException:
            # timeout: message stays unacknowledged but is still dequeued by caller
            pass

        except (OSError, operationError) as ex:
            self.__reporter.print("pm: %s" % ex.__class__.__name__)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "AWSMQTTPublisher:{state:%s, conf:%s, auth:%s, queue:%s, client:%s, reporter:%s, report:%s}" % \
               (self.__state, self.__conf, self.__auth, self.__queue, self.__client, self.__reporter, self.__report)
# --------------------------------------------------------------------------------------------------------------------
class AWSMQTTState(object):
    """
    Tracks the MQTT client's connection state and reports transitions
    via the MQTTReporter.
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, state, reporter):
        """
        Constructor
        """
        self.__state = state                        # ClientStatus
        self.__reporter = reporter                  # MQTTReporter
        self.__latest_success = None                # float timestamp or None (was mislabelled bool)

    # ----------------------------------------------------------------------------------------------------------------

    def set_connected(self):
        self.__latest_success = time.time()

        if self.__state == ClientStatus.CONNECTED:
            return                                  # already connected - don't re-report

        self.__state = ClientStatus.CONNECTED
        self.__reporter.print("-> CONNECTED")

    def set_disconnected(self):
        # a disconnected client is immediately regarded as (re)connecting
        self.__latest_success = None

        self.__state = ClientStatus.CONNECTING
        self.__reporter.print("-> CONNECTING")

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def state(self):
        return self.__state

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        # fixed: removed stray trailing '}' from the format string
        return "AWSMQTTState:{state:%s, latest_success:%s}" % (self.__state, self.__latest_success)
# --------------------------------------------------------------------------------------------------------------------
class AWSMQTTReport(object):
    """
    Publication statistics held in a shared mapping (typically a
    multiprocessing Manager dict), so they are visible across processes.
    """

    __PUB_TIME = 'pub_time'
    __QUEUE_LENGTH = 'queue_length'

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, value):
        """
        Constructor

        value: the shared mapping used as the backing store.
        """
        self.__value = value

        self.pub_time = 0
        self.queue_length = 0

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def pub_time(self):
        return self.__value[self.__PUB_TIME]

    @pub_time.setter
    def pub_time(self, pub_time):
        self.__value[self.__PUB_TIME] = pub_time

    @property
    def queue_length(self):
        return self.__value[self.__QUEUE_LENGTH]

    @queue_length.setter
    def queue_length(self, queue_length):
        self.__value[self.__QUEUE_LENGTH] = queue_length

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        # fixed: removed stray trailing '}' from the format string
        return "AWSMQTTReport:{pub_time:%s, queue_length:%s}" % (self.pub_time, self.queue_length)
Fixed ClientStatus.WAITING
"""
Created on 27 Sep 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
QueueReport.STATUS_NONE: "",
QueueReport.STATUS_INHIBITED: "PUBLISHING INHIBITED",
QueueReport.STATUS_DISCONNECTED: "CONNECTING",
QueueReport.STATUS_PUBLISHING: "PUBLISHING DATA",
QueueReport.STATUS_QUEUING: "QUEUING DATA",
QueueReport.STATUS_CLEARING: "CLEARING DATA BACKLOG "
"""
import json
import time
from collections import OrderedDict
from multiprocessing import Manager
from AWSIoTPythonSDK.exception.operationError import operationError
from AWSIoTPythonSDK.exception.operationTimeoutException import operationTimeoutException
from scs_core.aws.client.client_auth import ClientAuth
from scs_core.aws.client.mqtt_client import MQTTClient
from scs_core.comms.mqtt_conf import MQTTConf
from scs_core.data.message_queue import MessageQueue
from scs_core.data.publication import Publication
from scs_core.data.queue_report import QueueReport, ClientStatus
from scs_core.sync.synchronised_process import SynchronisedProcess
from scs_dev.handler.mqtt_reporter import MQTTReporter
# --------------------------------------------------------------------------------------------------------------------
class AWSMQTTPublisher(SynchronisedProcess):
    """
    Publishes messages from a MessageQueue to AWS IoT over MQTT in a separate
    process, tracking connection state and maintaining an on-disc QueueReport
    for external monitoring.
    """

    __QUEUE_INSPECTION_INTERVAL = 2.0       # seconds - delay between queue polls

    __CONNECT_TIME = 3.0                    # seconds - settle time after a successful connect
    __CONNECT_RETRY_TIME = 2.0              # seconds - wait before retrying a failed connect

    __POST_PUBLISH_TIME = 0.1               # seconds - was 0.5

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, conf: MQTTConf, auth: ClientAuth, queue: MessageQueue, client: MQTTClient,
                 reporter: MQTTReporter):
        """
        Constructor

        :param conf: MQTT configuration (publishing inhibit flag, report file path)
        :param auth: AWS client credentials
        :param queue: source of messages to be published
        :param client: the MQTT client wrapper
        :param reporter: console / LED status reporter
        """
        manager = Manager()
        SynchronisedProcess.__init__(self, AWSMQTTReport(manager.dict()))

        initial_state = ClientStatus.INHIBITED if conf.inhibit_publishing else ClientStatus.WAITING
        self.__state = AWSMQTTState(initial_state, reporter)

        self.__conf = conf
        self.__auth = auth
        self.__queue = queue
        self.__client = client
        self.__reporter = reporter

        self.__report = QueueReport(0, initial_state, False)

        # bug fix: run() and __process_messages() only save when a report file
        # is configured - the constructor now applies the same guard
        if self.__conf.report_file:
            self.__report.save(self.__conf.report_file)

        self.__reporter.set_led(self.__report)

    # ----------------------------------------------------------------------------------------------------------------
    # SynchronisedProcess implementation...

    def run(self):
        """Poll the queue forever, publishing messages as they appear."""
        try:
            if self.__conf.report_file:
                self.__report.save(self.__conf.report_file)

            while True:
                self.__process_messages()
                time.sleep(self.__QUEUE_INSPECTION_INTERVAL)

        except (BrokenPipeError, KeyboardInterrupt, SystemExit):
            pass

    def stop(self):
        """Disconnect the client and remove the report file, then stop."""
        try:
            self.__disconnect()
            self.__state.set_disconnected()

            self.__reporter.set_led(self.__report)

            # bug fix: guard the delete, consistent with the save guards above
            if self.__conf.report_file:
                self.__report.delete(self.__conf.report_file)

            super().stop()

        except (BrokenPipeError, KeyboardInterrupt, SystemExit):
            pass

    # ----------------------------------------------------------------------------------------------------------------
    # state management...

    def __process_messages(self):
        """Drain the queue: report its length, then handle the next message."""
        while True:
            self.__report.length = self.__queue.length()

            if self.__report.length is None or self.__report.length < 1:
                return

            self.__reporter.set_led(self.__report)

            if self.__conf.report_file:
                self.__report.save(self.__conf.report_file)

            self.__reporter.print("queue: %s" % self.__report.length)

            try:
                self.__process_message(self.__next_message())
            except Exception as ex:
                # best-effort: log the failure and keep the loop alive
                self.__reporter.print("pms: %s" % ex.__class__.__name__)

    def __process_message(self, publication):
        """Dispatch one message according to the current client state."""
        if publication is None:
            self.__queue.dequeue()          # unparseable message - discard it
            return

        self.__report.client_state = self.__state.state

        if self.__report.client_state == ClientStatus.WAITING:
            self.__report.client_state = ClientStatus.CONNECTING

        if self.__report.client_state == ClientStatus.INHIBITED:
            # discard...
            self.__queue.dequeue()
            return

        if self.__report.client_state == ClientStatus.CONNECTING:
            # connect... (message stays queued and is retried on the next pass)
            if self.__connect():
                self.__state.set_connected()
                time.sleep(self.__CONNECT_TIME)
            else:
                time.sleep(self.__CONNECT_RETRY_TIME)
            return

        if self.__report.client_state == ClientStatus.CONNECTED:
            # publish...
            self.__publish_message(publication)
            self.__queue.dequeue()
            time.sleep(self.__POST_PUBLISH_TIME)
            return

        else:
            # bug fix: the offending value is a ClientStatus, not an AWSMQTTState
            raise ValueError("unknown ClientStatus: %s" % self.__report.client_state)

    # ----------------------------------------------------------------------------------------------------------------
    # connection management...

    def __connect(self):
        """Attempt one MQTT connect; return True on success."""
        try:
            success = self.__client.connect(self.__auth)

            if success:
                self.__reporter.print("connect: done")
                return True
            else:
                self.__reporter.print("connect: failed")
                return False

        except OSError as ex:
            self.__reporter.print("connect: %s" % ex)
            return False

    def __disconnect(self):
        self.__client.disconnect()

    # ----------------------------------------------------------------------------------------------------------------
    # message management...

    def __next_message(self):
        """Return the next queued message as a Publication, or None if unparseable."""
        message = self.__queue.next()

        try:
            datum = json.loads(message, object_pairs_hook=OrderedDict)
            return Publication.construct_from_jdict(datum)

        except (TypeError, ValueError) as ex:
            self.__reporter.print("next_message: %s" % ex)
            return None

    def __publish_message(self, publication):
        """Publish one Publication, recording success and elapsed time."""
        self.__report.publish_success = False

        try:
            start_time = time.time()
            reached_paho = self.__client.publish(publication)
            elapsed_time = time.time() - start_time

            self.__reporter.print("paho: %s: %0.3f" % ("1" if reached_paho else "0", elapsed_time))
            self.__report.publish_success = reached_paho

        except operationTimeoutException:
            pass                            # treated as a failed publish - success stays False

        except (OSError, operationError) as ex:
            self.__reporter.print("pm: %s" % ex.__class__.__name__)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "AWSMQTTPublisher:{state:%s, conf:%s, auth:%s, queue:%s, client:%s, reporter:%s, report:%s}" % \
               (self.__state, self.__conf, self.__auth, self.__queue, self.__client, self.__reporter, self.__report)
# --------------------------------------------------------------------------------------------------------------------
class AWSMQTTState(object):
    """
    Tracks the MQTT client's connection state and the time of its most recent
    successful interaction, logging state transitions through the reporter.
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, state, reporter):
        """
        Constructor

        :param state: initial ClientStatus value
        :param reporter: MQTTReporter used to log state transitions
        """
        self.__state = state                        # ClientStatus
        self.__reporter = reporter                  # MQTTReporter
        self.__latest_success = None                # epoch seconds, or None when never / no longer connected

    # ----------------------------------------------------------------------------------------------------------------

    def set_connected(self):
        """Refresh the success timestamp; log only on an actual transition."""
        self.__latest_success = time.time()

        if self.__state != ClientStatus.CONNECTED:
            self.__state = ClientStatus.CONNECTED
            self.__reporter.print("-> CONNECTED")

    def set_disconnected(self):
        """Clear the success timestamp and drop back to CONNECTING."""
        self.__latest_success = None
        self.__state = ClientStatus.CONNECTING

        self.__reporter.print("-> CONNECTING")

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def state(self):
        """The current ClientStatus value."""
        return self.__state

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "AWSMQTTState:{state:%s, latest_success:%s}}" % (self.__state, self.__latest_success)
# --------------------------------------------------------------------------------------------------------------------
class AWSMQTTReport(object):
    """
    Attribute-style proxy over a shared mapping (a multiprocessing
    Manager().dict() in practice) carrying the publisher's telemetry:
    the last publish time and the current queue length.
    """

    __PUB_TIME = 'pub_time'                 # mapping key: last publish duration
    __QUEUE_LENGTH = 'queue_length'         # mapping key: messages still queued

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, value):
        """
        Constructor

        :param value: the backing mapping shared between processes
        """
        self.__value = value

        # initialise both fields through the property setters...
        self.pub_time = 0
        self.queue_length = 0

    # ----------------------------------------------------------------------------------------------------------------

    def __get_field(self, key):
        # single point of read access to the backing mapping
        return self.__value[key]

    def __set_field(self, key, datum):
        # single point of write access to the backing mapping
        self.__value[key] = datum

    @property
    def pub_time(self):
        return self.__get_field(self.__PUB_TIME)

    @pub_time.setter
    def pub_time(self, pub_time):
        self.__set_field(self.__PUB_TIME, pub_time)

    @property
    def queue_length(self):
        return self.__get_field(self.__QUEUE_LENGTH)

    @queue_length.setter
    def queue_length(self, queue_length):
        self.__set_field(self.__QUEUE_LENGTH, queue_length)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "AWSMQTTReport:{pub_time:%s, queue_length:%s}}" % (self.pub_time, self.queue_length)
|
from ncolony import ctllib
BOREDBOT_MAIN_OK = True
def main(dummyArgs):
place = os.path.abspath('boredbot-config')
shutil.rmtree(place)
os.mkdir(place)
ctllib.
froop
import os
import shutil
import sys
from ncolony import ctllib
def calcCommandline():
    """Reconstruct the command line that (re)starts this program.

    If the program was not launched as a package ``__main__``, the original
    argv[0] is reused directly. Otherwise the installation prefix is located
    by walking up from argv[0] until a directory on sys.path is found, and a
    ``python -m package`` command line is rebuilt from the remainder.

    :return: list of command-line tokens
    :raises RuntimeError: when no ancestor of argv[0] is on sys.path
    """
    argv0 = sys.argv[0]
    if not argv0.endswith('__main__.py'):
        return [argv0]
    prefix = os.path.dirname(argv0)
    # Bug fix: on Python 3, map() returns a one-shot iterator, so the repeated
    # ``prefix not in path`` membership tests inside the loop consumed it once
    # and then always failed - materialize the list instead.
    path = [os.path.abspath(entry) for entry in sys.path]
    while prefix not in path:
        up = os.path.dirname(prefix)
        if up == prefix:
            raise RuntimeError('Could not find prefix', argv0)
        prefix = up
    # NOTE(review): assumes '/' path separators; use os.sep for Windows support.
    module = '.'.join(argv0[len(prefix):].split('/')[1:-1])
    return [sys.executable, '-m', module]
# Module-level flag; presumably read by an importer or test to confirm this
# module loaded cleanly - TODO confirm the consumer.
BOREDBOT_MAIN_OK = True
def main(args):
    """Rebuild the boredbot ncolony configuration directory from scratch.

    ``args`` is accepted for the entry-point signature but unused. Requires
    the PARSE_REST_API_KEY environment variable (KeyError when unset).
    """
    place = os.path.abspath('boredbot-config')
    if os.path.exists(place):
        shutil.rmtree(place)
    os.mkdir(place)

    places = ctllib.Places(config=os.path.join(place, 'config'),
                           messages=os.path.join(place, 'messages'))
    for directory in places:
        os.mkdir(directory)

    command = calcCommandline()
    environment = ['PARSE_REST_API_KEY=' + os.environ['PARSE_REST_API_KEY']]
    ctllib.add(places, 'boredbot',
               cmd=command[0], args=command[1:] + ['loop'],
               env=environment)
|
from django.views.generic import View
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from trix.trix_core import models
import json
class HowsolvedView(View):
    """Record how a user solved an assignment (create or update; POST only)."""

    http_method_names = ['post']

    def post(self, request, *args, **kwargs):
        """Create or update the user's solution record and return JSON.

        Expects POST params 'howsolved' and 'assignment_id'; raises 404 when
        the assignment does not exist.
        """
        # Bug fix: the original assigned the local name ``howsolved`` but then
        # read ``self.howsolved``, raising AttributeError on every request.
        self.howsolved = request.POST.get('howsolved', None)
        self.assignment_id = request.POST.get('assignment_id', None)
        self.assignment = get_object_or_404(models.Assignment, id=self.assignment_id)

        # Bug fix: a QuerySet has no ``howsolved`` attribute or ``save()``
        # method - fetch the single matching row (or None) with first().
        # NOTE(review): filtering ``assignment_set`` by the assignment's own id
        # looks suspect; confirm the intended related manager.
        self.assignment_solution = self.assignment.assignment_set\
            .filter(id=self.assignment.id)\
            .filter(user=request.user)\
            .first()

        if self.assignment_solution:
            self.assignment_solution.howsolved = self.howsolved
            self.assignment_solution.save()
        else:
            # NOTE(review): stock Django models create rows via
            # ``objects.create()``; confirm ``AssignmentSolution.create`` is a
            # custom constructor before relying on it.
            models.AssignmentSolution.create(howsolved=self.howsolved,
                                             assignment=self.assignment,
                                             user=request.user)

        response_data = {}
        response_data['success'] = 'True'
        return HttpResponse(json.dumps(response_data), content_type='application/json')
trix_student: Return howsolved
from django.views.generic import View
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from trix.trix_core import models
import json
class HowsolvedView(View):
    """Record how a user solved an assignment (create or update; POST only)."""

    http_method_names = ['post']

    def post(self, request, *args, **kwargs):
        """Create or update the user's solution record and return JSON.

        Expects POST params 'howsolved' and 'assignment_id'; raises 404 when
        the assignment does not exist. The response echoes 'howsolved'.
        """
        # Bug fix: the original assigned the local name ``howsolved`` but then
        # read ``self.howsolved``, raising AttributeError on every request.
        self.howsolved = request.POST.get('howsolved', None)
        self.assignment_id = request.POST.get('assignment_id', None)
        self.assignment = get_object_or_404(models.Assignment, id=self.assignment_id)

        # Bug fix: a QuerySet has no ``howsolved`` attribute or ``save()``
        # method - fetch the single matching row (or None) with first().
        # NOTE(review): filtering ``assignment_set`` by the assignment's own id
        # looks suspect; confirm the intended related manager.
        self.assignment_solution = self.assignment.assignment_set\
            .filter(id=self.assignment.id)\
            .filter(user=request.user)\
            .first()

        if self.assignment_solution:
            self.assignment_solution.howsolved = self.howsolved
            self.assignment_solution.save()
        else:
            # NOTE(review): stock Django models create rows via
            # ``objects.create()``; confirm ``AssignmentSolution.create`` is a
            # custom constructor before relying on it.
            models.AssignmentSolution.create(howsolved=self.howsolved,
                                             assignment=self.assignment,
                                             user=request.user)

        response_data = {}
        response_data['success'] = 'True'
        response_data['howsolved'] = self.howsolved
        return HttpResponse(json.dumps(response_data), content_type='application/json')
#!/usr/bin/env python3
import glob
import numpy as np
import codecs
import json
import argparse
from collections import defaultdict
from collections import OrderedDict
import Bio.PDB
from utilities import *
from pdb_utilities import *
def nested_dict():
    """Return an autovivifying dict: missing keys create nested dicts.

    Idiom fix: a named ``def`` (PEP 8 E731) replaces the lambda assignment and
    gains a docstring; behaviour is unchanged.
    """
    return defaultdict(nested_dict)
def parse_args(args):
    """Parse the xdb-generator command-line arguments.

    :param args: list of argument tokens (usually sys.argv[1:])
    :return: argparse.Namespace with the resolved option values
    """
    parser = argparse.ArgumentParser(
        description='Generates the xdb database from preprocessed single and double modules.')
    for flag, default in (
        ('--relaxed_pdbs_dir', './resources/pdb_relaxed/'),
        ('--metadata_dir', './resources/metadata/'),
        ('--output', './resources/xdb.json'),
        ('--aligned_pdb_dir', './resources/pdb_aligned/'),
    ):
        parser.add_argument(flag, default=default)
    return parser.parse_args(args)
def main(test_args=None):
    """Entry point: parse arguments (or ``test_args``) and run the generator."""
    argv = sys.argv[1:] if test_args is None else test_args
    options = parse_args(argv)
    generator = XDBGenerator(
        options.relaxed_pdbs_dir,
        options.metadata_dir,
        options.aligned_pdb_dir,
        options.output
    )
    generator.run()
class XDBGenerator:
def __init__(
self,
relaxed_pdbs_dir,
metadata_dir,
aligned_pdb_dir,
out_file
):
self.relaxed_pdbs_dir = relaxed_pdbs_dir
module_types = ['doubles', 'singles', 'hubs']
make_dir(aligned_pdb_dir)
for mt in module_types:
make_dir(aligned_pdb_dir + '/{}/'.format(mt))
self.hub_info = read_json(metadata_dir + '/hub_info.json')
self.aligned_pdb_dir = aligned_pdb_dir
self.out_file = out_file
self.si = Bio.PDB.Superimposer()
self.modules = nested_dict()
self.n_to_c_tx = []
self.hub_tx = []
# Cache in memory because disk I/O is really heavy here
self.single_pdbs = defaultdict(dict)
self.double_pdbs = defaultdict(dict)
def find_tip(self, term, struct, chain_id):
term = term.lower()
assert(term in {'c', 'n'})
chain = get_chain(struct, chain_id=chain_id)
residues = chain.child_list
n = len(residues)
divider = 6 # The smaller the divider, the closer to terminus.
assert(n > 0)
if term == 'n':
start_idx, end_idx = 0, n//divider
else:
start_idx, end_idx = (divider-1)*n//divider, n
sum_coord = np.asarray([0., 0., 0.])
for r in residues[start_idx:end_idx]:
sum_coord += r['CA'].get_coord().astype('float64')
tip_vector = sum_coord/(end_idx - start_idx - 1)
return tip_vector.tolist()
def create_tx(self, mod_a, a_chain, mod_b, b_chain, rot, tran):
tx_entry = \
OrderedDict([
('mod_a', mod_a),
('mod_a_chain', a_chain),
('mod_b', mod_b),
('mod_b_chain', b_chain),
('rot', rot.tolist()),
('tran', np.asarray(tran).tolist())
])
return tx_entry
    def process_hub(self, file_name):
        """Aligns a hub module to its A component (chain A), then computes the
        transform for aligning itself to its other components.

        Side effects: populates self.modules['hubs'][hub_name], appends
        transforms to self.hub_tx, cross-links the singles' 'n'/'c' entries,
        and saves the centred hub PDB to the aligned output folder.
        """
        # Load structures
        hub = read_pdb(file_name)

        # Centre the hub
        self.move_to_origin(hub)

        # 1/hub_fusion_factor of a component's residues are used for alignment.
        hub_fusion_factor = 4

        hub_name = os.path.basename(file_name).replace('.pdb', '')
        hub_meta = self.hub_info.get(hub_name, None)
        # NOTE(review): this assert is redundant with the raise below (and is
        # stripped under ``python -O``); '!= None' should be 'is not None'.
        assert(hub_meta != None)
        if hub_meta is None:
            raise ValueError('Could not get hub metadata for hub {}\n'.format(hub_name))

        # Create module entry first
        comp_data = hub_meta['component_data']
        del hub_meta['component_data']
        hub_meta['chains'] = {
            c.id: {
                'single_name': comp_data[c.id]['single_name'],
                'n': nested_dict(),
                'n_tip': nested_dict(),
                'c': nested_dict(),
                'c_tip': nested_dict()
            } for c in hub.get_chains()
        }
        hub_meta['radii'] = self.get_radii(hub)
        self.modules['hubs'][hub_name] = hub_meta

        # The current process does not allow hub to hub connections. Maybe this
        # need to be changed?
        for hub_chain_id in comp_data:
            chain_data = comp_data[hub_chain_id]
            comp_name = chain_data['single_name']

            if chain_data['c_free']:
                # All singles that may follow this component at its C-terminus.
                b_name_gen = (tx['mod_b'] for tx in self.n_to_c_tx if tx['mod_a'] == comp_name)
                for single_b_name in b_name_gen:
                    # Compute the transformation required to move a single
                    # module B from its aligned position to the current hub's
                    # "finger tip".
                    #
                    # Here we do not use the second quadrant method, because during
                    # stitching none of the hubs' residues get changed. The stitching
                    # will take place at the end of the hub's component's terminal.
                    rc_hub_a = get_chain_residue_count(hub, hub_chain_id)
                    rc_dbl_a = get_pdb_residue_count(self.single_pdbs[comp_name])
                    fusion_count = int_ceil(float(rc_dbl_a) / hub_fusion_factor)
                    double = self.double_pdbs[comp_name][single_b_name]

                    # Compute transformation matrix.
                    # Find transform between component single and single b.
                    hub_single_chain_id = \
                        list(self.single_pdbs[comp_name].get_chains())[0].id
                    single_b_chain_id = \
                        list(self.single_pdbs[single_b_name].get_chains())[0].id
                    dbl_tx_id = self.modules['singles'][comp_name]['chains'] \
                        [hub_single_chain_id]['c'] \
                        [single_b_name][single_b_chain_id]
                    assert(dbl_tx_id is not None)
                    dbl_n_to_c = self.n_to_c_tx[dbl_tx_id]
                    # Promote the stored 3x3 rotation + translation to 4x4.
                    dbl_tx = np.vstack(
                        (np.hstack((dbl_n_to_c['rot'], np.transpose([dbl_n_to_c['tran']]))),
                         [0,0,0,1])
                    )

                    # Find transform from hub to single A.
                    rot, tran = self.get_rot_trans(
                        fixed=hub,
                        fixed_chain_id=hub_chain_id,
                        moving=double,
                        fixed_resi_offset=rc_hub_a - fusion_count,
                        moving_resi_offset=rc_dbl_a - fusion_count,
                        match_count=fusion_count
                    )

                    # Rotation in BioPython is inversed.
                    rot = np.transpose(rot)
                    comp_to_single_tx = np.vstack(
                        (np.hstack((rot, np.transpose([tran]))),
                         [0,0,0,1])
                    )

                    # 1. Shift to hub's component frame.
                    # 2. Shift to double B frame.
                    dbl_raised_tx = np.matmul(comp_to_single_tx, dbl_tx);

                    # Decompose transform.
                    rot = dbl_raised_tx[:3, :3]
                    tran = dbl_raised_tx[:3, 3]
                    tx = self.create_tx(
                        hub_name,
                        hub_chain_id,
                        single_b_name,
                        single_b_chain_id,
                        rot,
                        tran)
                    # Hub transforms are appended to n_to_c_tx after run(), so
                    # their ids continue the n_to_c_tx numbering.
                    tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['c'] \
                        [single_b_name][single_b_chain_id] = tx_id
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['c_tip'] = \
                        self.find_tip('c', hub, hub_chain_id)
                    self.modules['singles'][single_b_name]['chains'] \
                        [single_b_chain_id]['n'] \
                        [hub_name][hub_chain_id] = tx_id
                    self.hub_tx.append(tx)

            if chain_data['n_free']:
                # All singles that may precede this component at its N-terminus.
                a_name_gen = (tx['mod_a'] for tx in self.n_to_c_tx if tx['mod_b'] == comp_name)
                for single_a_name in a_name_gen:
                    # Same as c_free except comp acts as single b
                    rc_a = get_pdb_residue_count(self.single_pdbs[single_a_name])
                    rc_b = get_pdb_residue_count(self.single_pdbs[comp_name])
                    fusion_count = int_ceil(float(rc_b) / hub_fusion_factor)
                    double = self.double_pdbs[single_a_name][comp_name]

                    # Compute transformation matrix.
                    # Find transform from double component B to hub component.
                    rot, tran = self.get_rot_trans(
                        fixed=hub,
                        fixed_chain_id=hub_chain_id,
                        moving=double,
                        fixed_resi_offset=0,  # start matching from the n-term of hub component, which is index 0
                        moving_resi_offset=rc_a,  # start matching at the beginning of single b in the double
                        match_count=fusion_count
                    )

                    # Rotation in BioPython is inversed.
                    rot = np.transpose(rot)
                    dbl_to_hub_tx = np.vstack(
                        (np.hstack((rot, np.transpose([tran]))),
                         [0,0,0,1])
                    )

                    # 1. Shift to hub frame - do nothing; just dbl_to_hub_tx.
                    # Decompose transform.
                    rot = dbl_to_hub_tx[:3, :3]
                    tran = dbl_to_hub_tx[:3, 3]
                    single_a_chain_id = \
                        list(self.single_pdbs[single_a_name].get_chains())[0].id
                    tx = self.create_tx(
                        single_a_name,
                        single_a_chain_id,
                        hub_name,
                        hub_chain_id,
                        rot,
                        tran)
                    tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
                    self.modules['singles'][single_a_name]['chains'] \
                        [single_a_chain_id]['c'] \
                        [hub_name][hub_chain_id] = tx_id
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['n'] \
                        [single_a_name][single_a_chain_id] = tx_id
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['n_tip'] = \
                        self.find_tip('n', hub, hub_chain_id)
                    self.hub_tx.append(tx)

        save_pdb(
            struct=hub,
            path=self.aligned_pdb_dir + '/hubs/' + hub_name + '.pdb'
        )
    def process_double(self, file_name):
        """Aligns a double module to its A component and then computes the transform
        for aligning to its B component. Saves aligned structure to output folder.

        Side effects: saves the aligned double PDB, appends the A->B transform
        to self.n_to_c_tx, cross-links the singles' 'n'/'c' entries, and caches
        the double structure in self.double_pdbs.
        """
        # Step 1: Load structures
        double = read_pdb(file_name)

        # Preprocessed pdbs have only 1 chain
        assert(len(list(double.get_chains())) == 1)

        double_name = file_name.split('/')[-1].replace('.pdb', '')
        single_a_name, single_b_name = double_name.split('-')

        single_a = self.single_pdbs[single_a_name]
        single_b = self.single_pdbs[single_b_name]

        rc_a = get_pdb_residue_count(single_a)
        rc_b = get_pdb_residue_count(single_b)
        # NOTE(review): rc_double is computed but never used below.
        rc_double = get_pdb_residue_count(double)

        rc_a_half = int_floor(float(rc_a)/2)
        rc_b_half = int_ceil(float(rc_b)/2)

        # -- About the "fusion count" variable --
        #
        # The fusion_count is the number of residues used to align double to
        # single_a. The higher it is, the more global the alignment, which
        # causes loop jumps (disconnections) in the stitched chain; too low
        # and the local alignment can make subsequent modules overlap.
        #
        # When doubles are fused, each is cut at 25% and 75% of its sequence
        # to stay as far as possible from the interfaces (0%, 50%, 100%).
        # Experimentally, 1/8 of the alignment target's length balances
        # discontinuities against atom overlaps.
        dbl_fusion_factor = 8
        fusion_count_a = int_ceil(float(rc_a) / dbl_fusion_factor)
        fusion_count_b = int_ceil(float(rc_b) / dbl_fusion_factor)

        # Step 2: Move double to align with the first single.
        #
        # This aligns double by superimposing double[0] with single_a. Only align
        # double to the SECOND quadrant of single_a's atoms.
        self.align(
            moving=double,
            fixed=single_a,
            moving_resi_offset=rc_a_half - fusion_count_a,
            fixed_resi_offset=rc_a_half - fusion_count_a,
            match_count=fusion_count_a
        )

        # Step 3: Get COM of the single_b as seen in the double.
        #
        # Only align double to the SECOND quadrant of single_b.
        # NOTE(review): com_b is computed but not used afterwards.
        com_b = self.get_centre_of_mass(
            single_b,
            mother=double,
            child_resi_offset=rc_b_half - fusion_count_b,
            mother_resi_offset=rc_a + rc_b_half - fusion_count_b,
            match_count=fusion_count_b
        )

        # Step 4: Get transformation of double to the second single.
        #
        # Double is already aligned to the first single, so no first transform
        # is needed (self.get_rot_trans(double, single_a) would be identity).
        # Only the second quadrant of single_b is aligned, consistent with the
        # Stitch script where the first and last quadrants of a double are
        # chopped during fusion.
        rot, tran = self.get_rot_trans(
            moving=double,
            fixed=single_b,
            moving_resi_offset=rc_a + rc_b_half - fusion_count_b,
            fixed_resi_offset=rc_b_half - fusion_count_b,
            match_count=fusion_count_b
        )

        # Rotation in BioPython is inversed.
        rot = np.transpose(rot)

        # Inverse result transform.
        tmp_tx = np.vstack(
            (np.hstack((rot, np.transpose([tran]))),
             [0,0,0,1])
        )
        inv_tx = np.linalg.inv(tmp_tx);

        # Decompose transform.
        rot = inv_tx[:3, :3]
        tran = inv_tx[:3, 3]

        # Step 5: Save the aligned molecules.
        #
        # The PDB format adds slight floating point error here; consider mmCIF
        # for all modules in future.
        save_pdb(
            struct=double,
            path=self.aligned_pdb_dir + '/doubles/' + double_name + '.pdb'
        )

        single_a_chain_id = list(single_a.get_chains())[0].id
        single_b_chain_id = list(single_b.get_chains())[0].id
        tx = self.create_tx(
            single_a_name,
            single_a_chain_id,
            single_b_name,
            single_b_chain_id,
            rot,
            tran)
        tx_id = len(self.n_to_c_tx)
        self.modules['singles'][single_a_name]['chains'] \
            [single_a_chain_id]['c'][single_b_name][single_b_chain_id] = tx_id
        self.modules['singles'][single_b_name]['chains'] \
            [single_b_chain_id]['n'][single_a_name][single_a_chain_id] = tx_id
        self.n_to_c_tx.append(tx)

        # Cache structure in memory
        self.double_pdbs[single_a_name][single_b_name] = double
def process_single(self, file_name):
"""Centres a single module and saves to output folder."""
single_name = file_name.split('/')[-1].replace('.pdb', '')
single = read_pdb(file_name)
# Preprocessed pdbs have only 1 chain
assert(len(list(single.get_chains())) == 1)
# Check that there is only one chain
chain_list = list(single.get_chains())
if len(chain_list) != 1:
raise ValueError('Single PDB contains {} chains!\n'.format(len(chain_list)))
self.move_to_origin(single)
save_pdb(
struct=single,
path=self.aligned_pdb_dir + '/singles/' + single_name + '.pdb'
)
self.modules['singles'][single_name] = {
'chains': {
chain_list[0].id: {
'n': nested_dict(),
'c': nested_dict()
}
},
'radii': self.get_radii(single)
}
# Cache structure in memory
self.single_pdbs[single_name] = single
def dump_xdb(self):
"""Writes alignment data to a json file."""
to_dump = \
OrderedDict([
('modules', self.modules),
('n_to_c_tx', self.n_to_c_tx)
])
json.dump(to_dump,
open(self.out_file, 'w'),
separators=(',', ':'),
ensure_ascii=False,
indent=4)
def get_centre_of_mass(
self,
child,
mother=None,
child_resi_offset=0,
mother_resi_offset=0,
match_count=-1
):
"""Computes centre-of-mass coordinate of a Bio.PDB.Structure.Structure.
Args:
- child - Bio.PDB.Structure.Structure for which the centre-of-mass should
be calculated.
- mother - Bio.PDB.Structure.Structure onto which child is to be first
aligned.
- moving_resi_offset - the residue offset of the moving
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- fixed_resi_offset - the residue offset of the fixed
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- match_count - number of residues from which carbon alpha coordinates are
extracted.
Returns:
- com - 3x1 numpy array of the centre-of-mass.
"""
CAs = [r['CA'].get_coord().astype('float64') for r in child.get_residues()]
com = np.mean(CAs, axis=0)
if mother is not None:
# This is for finding COM of a single inside a double
_, tran = self.get_rot_trans(
moving=child,
fixed=mother,
moving_resi_offset=child_resi_offset,
fixed_resi_offset=mother_resi_offset,
match_count=match_count
)
com += tran
return com
def get_radii(self, pose):
"""Computes three different measures of the radius.
Args:
- pose - Bio.PDB.Structure.Structure
Returns:
- _ - an dict containing: average of all atoms distances, max
carbon alpha distance, and max heavy atom distance, each calculated
against the centre-of-mass.
"""
if not pose.at_origin:
raise ValueError('get_radii() must be called with centered modules.')
natoms = 0;
rg_sum = 0;
max_ca_dist = 0;
nHeavy = 0;
max_heavy_dist = 0;
for a in pose.get_atoms():
dist = np.linalg.norm(
a.get_coord().astype('float64'));
rg_sum += dist;
if(a.name =='CA'):
max_ca_dist = max(max_ca_dist, dist);
if(a.element != 'H'):
max_heavy_dist = max(max_heavy_dist, dist);
nHeavy = nHeavy + 1;
natoms = natoms + 1;
average_all = rg_sum / natoms;
return {
'average_all': average_all,
'max_ca_dist': max_ca_dist,
'max_heavy_dist': max_heavy_dist
}
def move_to_origin(self, pdb):
"""Centres a Bio.PDB.Structure.Structure to the global origin."""
com = self.get_centre_of_mass(pdb)
# No rotation - just move to centre
pdb.transform([[1,0,0],[0,1,0],[0,0,1]], -com)
# Tag the pdb
pdb.at_origin = True
def align(
self,
**kwargs
):
"""Moves the moving Bio.PDB.Structure.Structure to the fixed
Bio.PDB.Structure.Structure.
"""
moving = kwargs.pop('moving')
fixed = kwargs.pop('fixed')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
rot, tran = self.get_rot_trans(
moving=moving,
fixed=fixed,
moving_resi_offset=moving_resi_offset,
fixed_resi_offset=fixed_resi_offset,
match_count=match_count
)
# BioPython's own transform() deals with the inversed rotation
# correctly.
moving.transform(rot, tran)
def get_rot_trans(
self,
**kwargs
):
"""Computes the rotation and transformation matrices using BioPython's
superimposer.
Args:
- moving - the Bio.PDB.Structure.Structure that is to move towards the
other (fixed).
- fixed - the Bio.PDB.Structure.Structure that the other (moving) is to
align to.
- moving_resi_offset - the residue offset of the moving
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- fixed_resi_offset - the residue offset of the fixed
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- match_count - number of residues from which carbon alpha coordinates are
extracted.
----IMPORT NOTE----
The rotation from BioPython is the second dot operand instead of the
conventional first dot operand.
This means instead of the standard R*v + T, the actual transform is done
with v'*R + T.
Hence, the resultant rotation matrix might need transposing if not
passed back into BioPython.
----IMPORT NOTE----
Returns:
- (rot, tran) - a tuple containing the rotation and transformation
matrices.
"""
moving = kwargs.pop('moving')
moving_chain_id = kwargs.pop('moving_chain_id', 'A')
fixed = kwargs.pop('fixed')
fixed_chain_id = kwargs.pop('fixed_chain_id', 'A')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
moving_chain = get_chain(moving, chain_id=moving_chain_id)
moving_residues = moving_chain.child_list \
[moving_resi_offset:(moving_resi_offset+match_count)]
ma = [r['CA'] for r in moving_residues]
fixed_chain = get_chain(fixed, chain_id=fixed_chain_id)
fixed_residues = fixed_chain.child_list \
[fixed_resi_offset:(fixed_resi_offset+match_count)]
fa = [r['CA'] for r in fixed_residues]
self.si.set_atoms(fa, ma)
return self.si.rotran
def run(self):
"""Calls the processing functions for singles, doubles, and hubs in that
order. Dumps alignment data into json database.
"""
# Single modules
single_files = glob.glob(self.relaxed_pdbs_dir + '/singles/*.pdb')
n_singles = len(single_files)
for i in range(0, n_singles):
print('Centering single [{}/{}] {}' \
.format(i+1, n_singles, single_files[i]))
self.process_single(single_files[i])
# Double modules
double_files = glob.glob(self.relaxed_pdbs_dir + '/doubles/*.pdb')
nDoubles = len(double_files)
for i in range(0, nDoubles):
print('Aligning double [{}/{}] {}' \
.format(i+1, nDoubles, double_files[i]))
self.process_double(double_files[i])
# Hub modules
hub_files = glob.glob(self.relaxed_pdbs_dir + '/hubs/*.pdb')
nHubs = len(hub_files)
for i in range(0, nHubs):
print('Aligning hub [{}/{}] {}' \
.format(i+1, nHubs, hub_files[i]))
self.process_hub(hub_files[i])
self.n_to_c_tx += self.hub_tx
print('Total: {} singles, {} doubles, {} hubs'.format(n_singles, nDoubles, nHubs))
self.dump_xdb()
# Script entry point: safe_exec (presumably from the star-imported utilities
# module - confirm) runs main and handles error reporting.
if __name__ =='__main__':
    safe_exec(main)
Added n_residues field per chain.
#!/usr/bin/env python3
import glob
import numpy as np
import codecs
import json
import argparse
from collections import defaultdict
from collections import OrderedDict
import Bio.PDB
from utilities import *
from pdb_utilities import *
def nested_dict():
    """Return an autovivifying dict: missing keys create nested dicts.

    Idiom fix: a named ``def`` (PEP 8 E731) replaces the lambda assignment and
    gains a docstring; behaviour is unchanged.
    """
    return defaultdict(nested_dict)
def parse_args(args):
    """Parse the xdb-generator command-line arguments.

    :param args: list of argument tokens (usually sys.argv[1:])
    :return: argparse.Namespace with the resolved option values
    """
    parser = argparse.ArgumentParser(
        description='Generates the xdb database from preprocessed single and double modules.')
    for flag, default in (
        ('--relaxed_pdbs_dir', './resources/pdb_relaxed/'),
        ('--metadata_dir', './resources/metadata/'),
        ('--output', './resources/xdb.json'),
        ('--aligned_pdb_dir', './resources/pdb_aligned/'),
    ):
        parser.add_argument(flag, default=default)
    return parser.parse_args(args)
def main(test_args=None):
    """Entry point: parse arguments (or ``test_args``) and run the generator."""
    argv = sys.argv[1:] if test_args is None else test_args
    options = parse_args(argv)
    generator = XDBGenerator(
        options.relaxed_pdbs_dir,
        options.metadata_dir,
        options.aligned_pdb_dir,
        options.output
    )
    generator.run()
class XDBGenerator:
def __init__(
self,
relaxed_pdbs_dir,
metadata_dir,
aligned_pdb_dir,
out_file
):
self.relaxed_pdbs_dir = relaxed_pdbs_dir
module_types = ['doubles', 'singles', 'hubs']
make_dir(aligned_pdb_dir)
for mt in module_types:
make_dir(aligned_pdb_dir + '/{}/'.format(mt))
self.hub_info = read_json(metadata_dir + '/hub_info.json')
self.aligned_pdb_dir = aligned_pdb_dir
self.out_file = out_file
self.si = Bio.PDB.Superimposer()
self.modules = nested_dict()
self.n_to_c_tx = []
self.hub_tx = []
# Cache in memory because disk I/O is really heavy here
self.single_pdbs = defaultdict(dict)
self.double_pdbs = defaultdict(dict)
def find_tip(self, term, struct, chain_id):
term = term.lower()
assert(term in {'c', 'n'})
chain = get_chain(struct, chain_id=chain_id)
residues = chain.child_list
n = len(residues)
divider = 6 # The smaller the divider, the closer to terminus.
assert(n > 0)
if term == 'n':
start_idx, end_idx = 0, n//divider
else:
start_idx, end_idx = (divider-1)*n//divider, n
sum_coord = np.asarray([0., 0., 0.])
for r in residues[start_idx:end_idx]:
sum_coord += r['CA'].get_coord().astype('float64')
tip_vector = sum_coord/(end_idx - start_idx - 1)
return tip_vector.tolist()
def create_tx(self, mod_a, a_chain, mod_b, b_chain, rot, tran):
tx_entry = \
OrderedDict([
('mod_a', mod_a),
('mod_a_chain', a_chain),
('mod_b', mod_b),
('mod_b_chain', b_chain),
('rot', rot.tolist()),
('tran', np.asarray(tran).tolist())
])
return tx_entry
    def process_hub(self, file_name):
        """Aligns a hub module to its A component (chain A), then computes the
        transform for aligning itself to its other components.

        For each component chain with a free C (resp. N) terminus, records a
        transform from the hub to every single module B (resp. from every
        single module A to the hub) known to connect to that component, and
        writes the reciprocal entry into the single's metadata. New
        transforms go into self.hub_tx; their ids are assigned as
        len(self.n_to_c_tx) + len(self.hub_tx) because run() appends hub_tx
        to n_to_c_tx at the end.
        """
        # Load structures
        hub = read_pdb(file_name)
        # Centre the hub
        self.move_to_origin(hub)
        # Hubs fuse over 1/4 of the component's residues (doubles use 1/8).
        hub_fusion_factor = 4
        hub_name = os.path.basename(file_name).replace('.pdb', '')
        hub_meta = self.hub_info.get(hub_name, None)
        # NOTE(review): this assert duplicates the explicit check below and is
        # stripped under `python -O`; the raise is the effective guard.
        assert(hub_meta != None)
        if hub_meta is None:
            raise ValueError('Could not get hub metadata for hub {}\n'.format(hub_name))
        # Create module entry first
        comp_data = hub_meta['component_data']
        del hub_meta['component_data']
        # One metadata record per hub chain, including its residue count.
        hub_meta['chains'] = {
            c.id: {
                'single_name': comp_data[c.id]['single_name'],
                'n': nested_dict(),
                'n_tip': nested_dict(),
                'c': nested_dict(),
                'c_tip': nested_dict(),
                'n_residues': len(c.child_list)
            } for c in hub.get_chains()
        }
        hub_meta['radii'] = self.get_radii(hub)
        self.modules['hubs'][hub_name] = hub_meta
        # The current process does not allow hub to hub connections. Maybe this
        # need to be changed?
        for hub_chain_id in comp_data:
            chain_data = comp_data[hub_chain_id]
            comp_name = chain_data['single_name']
            if chain_data['c_free']:
                # All singles B known (from doubles) to follow this component.
                b_name_gen = (tx['mod_b'] for tx in self.n_to_c_tx if tx['mod_a'] == comp_name)
                for single_b_name in b_name_gen:
                    # Compute the transformation required to move a single
                    # module B from its aligned position to the current hub's
                    # "finger tip".
                    #
                    # Here we do not use the second quadrant method, because during
                    # stitching none of the hubs' residues get changed. The stitching
                    # will take place at the end of the hub's component's terminal.
                    rc_hub_a = get_chain_residue_count(hub, hub_chain_id)
                    rc_dbl_a = get_pdb_residue_count(self.single_pdbs[comp_name])
                    fusion_count = int_ceil(float(rc_dbl_a) / hub_fusion_factor)
                    double = self.double_pdbs[comp_name][single_b_name]
                    # Compute transformation matrix.
                    # Find transform between component single and single b.
                    hub_single_chain_id = \
                        list(self.single_pdbs[comp_name].get_chains())[0].id
                    single_b_chain_id = \
                        list(self.single_pdbs[single_b_name].get_chains())[0].id
                    # Id of the previously recorded single->single transform.
                    dbl_tx_id = self.modules['singles'][comp_name]['chains'] \
                        [hub_single_chain_id]['c'] \
                        [single_b_name][single_b_chain_id]
                    assert(dbl_tx_id is not None)
                    dbl_n_to_c = self.n_to_c_tx[dbl_tx_id]
                    # 4x4 homogeneous form of the single->single transform.
                    dbl_tx = np.vstack(
                        (np.hstack((dbl_n_to_c['rot'], np.transpose([dbl_n_to_c['tran']]))),
                         [0,0,0,1])
                    )
                    # Find transform from hub to single A.
                    rot, tran = self.get_rot_trans(
                        fixed=hub,
                        fixed_chain_id=hub_chain_id,
                        moving=double,
                        fixed_resi_offset=rc_hub_a - fusion_count,
                        moving_resi_offset=rc_dbl_a - fusion_count,
                        match_count=fusion_count
                    )
                    # Rotation in BioPython is inversed.
                    rot = np.transpose(rot)
                    comp_to_single_tx = np.vstack(
                        (np.hstack((rot, np.transpose([tran]))),
                         [0,0,0,1])
                    )
                    # 1. Shift to hub's component frame.
                    # 2. Shift to double B frame.
                    dbl_raised_tx = np.matmul(comp_to_single_tx, dbl_tx);
                    # Decompose transform.
                    rot = dbl_raised_tx[:3, :3]
                    tran = dbl_raised_tx[:3, 3]
                    tx = self.create_tx(
                        hub_name,
                        hub_chain_id,
                        single_b_name,
                        single_b_chain_id,
                        rot,
                        tran)
                    # Offset past existing entries; hub_tx is merged in run().
                    tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['c'] \
                        [single_b_name][single_b_chain_id] = tx_id
                    # NOTE(review): c_tip is recomputed on every iteration even
                    # though it only depends on the hub chain.
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['c_tip'] = \
                        self.find_tip('c', hub, hub_chain_id)
                    self.modules['singles'][single_b_name]['chains'] \
                        [single_b_chain_id]['n'] \
                        [hub_name][hub_chain_id] = tx_id
                    self.hub_tx.append(tx)
            if chain_data['n_free']:
                # All singles A known (from doubles) to precede this component.
                a_name_gen = (tx['mod_a'] for tx in self.n_to_c_tx if tx['mod_b'] == comp_name)
                for single_a_name in a_name_gen:
                    # Same as c_free except comp acts as single b
                    rc_a = get_pdb_residue_count(self.single_pdbs[single_a_name])
                    rc_b = get_pdb_residue_count(self.single_pdbs[comp_name])
                    fusion_count = int_ceil(float(rc_b) / hub_fusion_factor)
                    double = self.double_pdbs[single_a_name][comp_name]
                    # Compute transformation matrix.
                    # Find transform from double component B to hub component.
                    rot, tran = self.get_rot_trans(
                        fixed=hub,
                        fixed_chain_id=hub_chain_id,
                        moving=double,
                        fixed_resi_offset=0, # start matching from the n-term of hub component, which is index 0
                        moving_resi_offset=rc_a, # start matching at the beginning of single b in the double
                        match_count=fusion_count
                    )
                    # Rotation in BioPython is inversed.
                    rot = np.transpose(rot)
                    dbl_to_hub_tx = np.vstack(
                        (np.hstack((rot, np.transpose([tran]))),
                         [0,0,0,1])
                    )
                    # 1. Shift to hub frame - do nothing; just dbl_to_hub_tx.
                    # Decompose transform.
                    rot = dbl_to_hub_tx[:3, :3]
                    tran = dbl_to_hub_tx[:3, 3]
                    single_a_chain_id = \
                        list(self.single_pdbs[single_a_name].get_chains())[0].id
                    tx = self.create_tx(
                        single_a_name,
                        single_a_chain_id,
                        hub_name,
                        hub_chain_id,
                        rot,
                        tran)
                    tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
                    self.modules['singles'][single_a_name]['chains'] \
                        [single_a_chain_id]['c'] \
                        [hub_name][hub_chain_id] = tx_id
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['n'] \
                        [single_a_name][single_a_chain_id] = tx_id
                    # NOTE(review): n_tip is recomputed on every iteration even
                    # though it only depends on the hub chain.
                    self.modules['hubs'][hub_name]['chains'] \
                        [hub_chain_id]['n_tip'] = \
                        self.find_tip('n', hub, hub_chain_id)
                    self.hub_tx.append(tx)
        # Save the centred hub structure.
        save_pdb(
            struct=hub,
            path=self.aligned_pdb_dir + '/hubs/' + hub_name + '.pdb'
        )
    def process_double(self, file_name):
        """Aligns a double module to its A component and then computes the transform
        for aligning to its B component. Saves aligned structure to output folder.

        Also records the A->B transform id in both singles' metadata and
        caches the aligned double for later hub processing.
        """
        # Step 1: Load structures
        double = read_pdb(file_name)
        # Preprocessed pdbs have only 1 chain
        assert(len(list(double.get_chains())) == 1)
        double_name = file_name.split('/')[-1].replace('.pdb', '')
        # Doubles are named "<singleA>-<singleB>".
        single_a_name, single_b_name = double_name.split('-')
        single_a = self.single_pdbs[single_a_name]
        single_b = self.single_pdbs[single_b_name]
        rc_a = get_pdb_residue_count(single_a)
        rc_b = get_pdb_residue_count(single_b)
        # NOTE(review): rc_double is never used below — confirm before removing.
        rc_double = get_pdb_residue_count(double)
        rc_a_half = int_floor(float(rc_a)/2)
        rc_b_half = int_ceil(float(rc_b)/2)
        # -- About the "fusion count" variable --
        #
        # The fusion_count is the number of residues we use to align double to
        # single_a. The higher this number is, the more global the alignment will
        # be, which causes loop jumps (disconnections) in the chain. This is
        # because in Stitch we're stitching atoms from different doubles into the
        # same chain. Different doubles have their single components stuck
        # together using an interface, the participation of which causes atom
        # positions in a double's single component to differ from that of the
        # original single module.
        #
        # When we fuse different doubles together, each double is cut at 25%
        # and 75% of their sequence in order to be as far way to interfaces
        # (0%, 50%, 100%) as possible.
        #
        # The fusion alignment here is about aligning subsequent doubles using a
        # few residues before the 25% mark. The lower the fusion_count is, the
        # fewer residues we use to align, the more local the alignment will be.
        # However, if this number is too low the alignment could cause subsequent
        # modules to overlap (shortsighted).
        #
        # Through some experients I found that using 1/8 of the length of the
        # alignment target (single a or b) is a good balance between not causing
        # discontinuities and also not creating atom overlaps.
        dbl_fusion_factor = 8
        fusion_count_a = int_ceil(float(rc_a) / dbl_fusion_factor)
        fusion_count_b = int_ceil(float(rc_b) / dbl_fusion_factor)
        # Step 2: Move double to align with the first single.
        #
        # This aligns double by superimposing double[0] with single_a. Only align
        # double to the SECOND quardrant of single_a's atoms.
        self.align(
            moving=double,
            fixed=single_a,
            moving_resi_offset=rc_a_half - fusion_count_a,
            fixed_resi_offset=rc_a_half - fusion_count_a,
            match_count=fusion_count_a
        )
        # Step 3: Get COM of the single_b as seen in the double.
        #
        # Only align double to the SECOND quardrant of single_b.
        # NOTE(review): com_b is never read after this point — it looks like a
        # leftover from an earlier database format; confirm before removing.
        com_b = self.get_centre_of_mass(
            single_b,
            mother=double,
            child_resi_offset=rc_b_half - fusion_count_b,
            mother_resi_offset=rc_a + rc_b_half - fusion_count_b,
            match_count=fusion_count_b
        )
        # Step 4: Get transformation of double to the second single.
        #
        # Double is already aligned to first single so there is no need for
        # the first transformation.
        #
        # This can be varifyed by checking that self.get_rot_trans(double,
        # single_a) has identity rotation and zero translation.
        #
        # Only align the second quardrant of single_b in order to be
        # consistent with the Stitch script, where doubles are fused together
        # by chopping the first and last quardrant of a double. This means the
        # second half of single_b is chopped off during fusion, while the first
        # quardrant of single_b participates in interfacing. Therefore we align
        # by uperimposing just the second quardrant.
        rot, tran = self.get_rot_trans(
            moving=double,
            fixed=single_b,
            moving_resi_offset=rc_a + rc_b_half - fusion_count_b,
            fixed_resi_offset=rc_b_half - fusion_count_b,
            match_count=fusion_count_b
        )
        # Rotation in BioPython is inversed.
        rot = np.transpose(rot)
        # Inverse result transform.
        tmp_tx = np.vstack(
            (np.hstack((rot, np.transpose([tran]))),
             [0,0,0,1])
        )
        inv_tx = np.linalg.inv(tmp_tx);
        # Decompose transform.
        rot = inv_tx[:3, :3]
        tran = inv_tx[:3, 3]
        # Step 5: Save the aligned molecules.
        #
        # Here the PDB format adds some slight floating point error. PDB is
        # already phased out so and we should really consider using mmCIF for
        # all modules.
        save_pdb(
            struct=double,
            path=self.aligned_pdb_dir + '/doubles/' + double_name + '.pdb'
        )
        single_a_chain_id = list(single_a.get_chains())[0].id
        single_b_chain_id = list(single_b.get_chains())[0].id
        tx = self.create_tx(
            single_a_name,
            single_a_chain_id,
            single_b_name,
            single_b_chain_id,
            rot,
            tran)
        # Record the transform id reciprocally on both singles.
        tx_id = len(self.n_to_c_tx)
        self.modules['singles'][single_a_name]['chains'] \
            [single_a_chain_id]['c'][single_b_name][single_b_chain_id] = tx_id
        self.modules['singles'][single_b_name]['chains'] \
            [single_b_chain_id]['n'][single_a_name][single_a_chain_id] = tx_id
        self.n_to_c_tx.append(tx)
        # Cache structure in memory
        self.double_pdbs[single_a_name][single_b_name] = double
def process_single(self, file_name):
"""Centres a single module and saves to output folder."""
single_name = file_name.split('/')[-1].replace('.pdb', '')
single = read_pdb(file_name)
# Preprocessed pdbs have only 1 chain
assert(len(list(single.get_chains())) == 1)
# Check that there is only one chain
chain_list = list(single.get_chains())
if len(chain_list) != 1:
raise ValueError('Single PDB contains {} chains!\n'.format(len(chain_list)))
self.move_to_origin(single)
save_pdb(
struct=single,
path=self.aligned_pdb_dir + '/singles/' + single_name + '.pdb'
)
self.modules['singles'][single_name] = {
'chains': {
chain_list[0].id: {
'n': nested_dict(),
'c': nested_dict(),
'n_residues': len(chain_list[0].child_list)
}
},
'radii': self.get_radii(single)
}
# Cache structure in memory
self.single_pdbs[single_name] = single
def dump_xdb(self):
"""Writes alignment data to a json file."""
to_dump = \
OrderedDict([
('modules', self.modules),
('n_to_c_tx', self.n_to_c_tx)
])
json.dump(to_dump,
open(self.out_file, 'w'),
separators=(',', ':'),
ensure_ascii=False,
indent=4)
    def get_centre_of_mass(
        self,
        child,
        mother=None,
        child_resi_offset=0,
        mother_resi_offset=0,
        match_count=-1
    ):
        """Computes centre-of-mass coordinate of a Bio.PDB.Structure.Structure.

        Args:
            - child - Bio.PDB.Structure.Structure for which the centre-of-mass
              should be calculated.
            - mother - Bio.PDB.Structure.Structure onto which child is to be
              first aligned; when given, the returned CoM is shifted by the
              alignment translation into the mother's frame.
            - child_resi_offset - the residue offset of the child structure
              when extracting carbon alpha coordinates for the alignment.
            - mother_resi_offset - the residue offset of the mother structure
              when extracting carbon alpha coordinates for the alignment.
            - match_count - number of residues from which carbon alpha
              coordinates are extracted.

        Returns:
            - com - 3x1 numpy array of the centre-of-mass.
        """
        # CoM is taken over CA atoms only.
        CAs = [r['CA'].get_coord().astype('float64') for r in child.get_residues()]
        com = np.mean(CAs, axis=0)
        if mother is not None:
            # This is for finding COM of a single inside a double
            _, tran = self.get_rot_trans(
                moving=child,
                fixed=mother,
                moving_resi_offset=child_resi_offset,
                fixed_resi_offset=mother_resi_offset,
                match_count=match_count
            )
            # NOTE(review): only the translation component is applied here —
            # the rotation part of the alignment is ignored; confirm that is
            # intended.
            com += tran
        return com
def get_radii(self, pose):
"""Computes three different measures of the radius.
Args:
- pose - Bio.PDB.Structure.Structure
Returns:
- _ - an dict containing: average of all atoms distances, max
carbon alpha distance, and max heavy atom distance, each calculated
against the centre-of-mass.
"""
if not pose.at_origin:
raise ValueError('get_radii() must be called with centered modules.')
natoms = 0;
rg_sum = 0;
max_ca_dist = 0;
nHeavy = 0;
max_heavy_dist = 0;
for a in pose.get_atoms():
dist = np.linalg.norm(
a.get_coord().astype('float64'));
rg_sum += dist;
if(a.name =='CA'):
max_ca_dist = max(max_ca_dist, dist);
if(a.element != 'H'):
max_heavy_dist = max(max_heavy_dist, dist);
nHeavy = nHeavy + 1;
natoms = natoms + 1;
average_all = rg_sum / natoms;
return {
'average_all': average_all,
'max_ca_dist': max_ca_dist,
'max_heavy_dist': max_heavy_dist
}
def move_to_origin(self, pdb):
"""Centres a Bio.PDB.Structure.Structure to the global origin."""
com = self.get_centre_of_mass(pdb)
# No rotation - just move to centre
pdb.transform([[1,0,0],[0,1,0],[0,0,1]], -com)
# Tag the pdb
pdb.at_origin = True
def align(
self,
**kwargs
):
"""Moves the moving Bio.PDB.Structure.Structure to the fixed
Bio.PDB.Structure.Structure.
"""
moving = kwargs.pop('moving')
fixed = kwargs.pop('fixed')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
rot, tran = self.get_rot_trans(
moving=moving,
fixed=fixed,
moving_resi_offset=moving_resi_offset,
fixed_resi_offset=fixed_resi_offset,
match_count=match_count
)
# BioPython's own transform() deals with the inversed rotation
# correctly.
moving.transform(rot, tran)
    def get_rot_trans(
        self,
        **kwargs
    ):
        """Computes the rotation and transformation matrices using BioPython's
        superimposer.

        Args:
            - moving - the Bio.PDB.Structure.Structure that is to move towards the
              other (fixed).
            - moving_chain_id - chain id within the moving structure (default 'A').
            - fixed - the Bio.PDB.Structure.Structure that the other (moving) is to
              align to.
            - fixed_chain_id - chain id within the fixed structure (default 'A').
            - moving_resi_offset - the residue offset of the moving
              Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
            - fixed_resi_offset - the residue offset of the fixed
              Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
            - match_count - number of residues from which carbon alpha coordinates are
              extracted.

        ----IMPORTANT NOTE----
        The rotation from BioPython is the second dot operand instead of the
        conventional first dot operand.
        This means instead of the standard R*v + T, the actual transform is done
        with v'*R + T.
        Hence, the resultant rotation matrix might need transposing if not
        passed back into BioPython.
        ----IMPORTANT NOTE----

        Returns:
            - (rot, tran) - a tuple containing the rotation and transformation
              matrices.
        """
        moving = kwargs.pop('moving')
        moving_chain_id = kwargs.pop('moving_chain_id', 'A')
        fixed = kwargs.pop('fixed')
        fixed_chain_id = kwargs.pop('fixed_chain_id', 'A')
        moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
        fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
        match_count = kwargs.pop('match_count', -1)
        # Matched CA atom windows from both chains drive the superimposition.
        moving_chain = get_chain(moving, chain_id=moving_chain_id)
        moving_residues = moving_chain.child_list \
            [moving_resi_offset:(moving_resi_offset+match_count)]
        ma = [r['CA'] for r in moving_residues]
        fixed_chain = get_chain(fixed, chain_id=fixed_chain_id)
        fixed_residues = fixed_chain.child_list \
            [fixed_resi_offset:(fixed_resi_offset+match_count)]
        fa = [r['CA'] for r in fixed_residues]
        self.si.set_atoms(fa, ma)
        return self.si.rotran
def run(self):
"""Calls the processing functions for singles, doubles, and hubs in that
order. Dumps alignment data into json database.
"""
# Single modules
single_files = glob.glob(self.relaxed_pdbs_dir + '/singles/*.pdb')
n_singles = len(single_files)
for i in range(0, n_singles):
print('Centering single [{}/{}] {}' \
.format(i+1, n_singles, single_files[i]))
self.process_single(single_files[i])
# Double modules
double_files = glob.glob(self.relaxed_pdbs_dir + '/doubles/*.pdb')
nDoubles = len(double_files)
for i in range(0, nDoubles):
print('Aligning double [{}/{}] {}' \
.format(i+1, nDoubles, double_files[i]))
self.process_double(double_files[i])
# Hub modules
hub_files = glob.glob(self.relaxed_pdbs_dir + '/hubs/*.pdb')
nHubs = len(hub_files)
for i in range(0, nHubs):
print('Aligning hub [{}/{}] {}' \
.format(i+1, nHubs, hub_files[i]))
self.process_hub(hub_files[i])
self.n_to_c_tx += self.hub_tx
print('Total: {} singles, {} doubles, {} hubs'.format(n_singles, nDoubles, nHubs))
self.dump_xdb()
# Script entry point: run main() through the project's error-wrapping
# executor (safe_exec comes from the `utilities` star import).
if __name__ =='__main__':
    safe_exec(main)
# coding=utf-8
"""
Copyright 2014 Xabier Crespo Álvarez
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:Author:
Xabier Crespo Álvarez (xabicrespog@gmail.com)
"""
__author__ = 'xabicrespog@gmail.com'
from twisted.protocols import amp
from errors import *
"""
Commandes implemented by the N-server which will be invoked by a
G- or M- clients.
"""
class StartRemote(amp.Command):
    """
    Invoked when a client wants to connect to an N-server. This shall be called
    right after invoking login method.

    :param iSlotId:
        ID number of the slot which should have been previously reserved through
        the web interface.
    :type iSlotId:
        L{int}

    :returns iResult:
        Raises an error if the slot is not available yet or if it isn't assigned
        to the calling client. Otherwise, it may return one of the following codes:
        (0) REMOTE_READY: the remote client is already connected to the server
        (-1) CLIENTS_COINCIDE: the remote client is the same as the calling client
        (-2) REMOTE_NOT_CONNECTED: indicates that the remote client is not connected
        In case that any of the previous cases are detected, the slotId is returned.
    :rtype:
        int or L{SlotNotAvailable}
    """
    # Fix: the description string used to sit AFTER the class attributes,
    # where it is a no-op expression; moved to the top so it becomes the
    # actual class docstring (__doc__).

    arguments = [('iSlotId', amp.Integer())]
    response = [('iResult', amp.Integer())]
    errors = {
        SlotErrorNotification: 'SLOT_ERROR_NOTIFICATION'}

    # Remote client ready
    REMOTE_READY = 0
    # Both MCC and GSS belong to the same client
    CLIENTS_COINCIDE = -1
    # Remote user not connected yet
    REMOTE_NOT_CONNECTED = -2
class EndRemote(amp.Command):
    """
    Invoked by a client whenever this one wants to finalize the remote operation.
    """
    # Fix: docstring moved above the attributes so it populates __doc__
    # (it was previously a no-op trailing string expression).

    arguments = []
    requiresAnswer = False
class SendMsg(amp.Command):
    """
    Invoked when a client wants to send a message to a remote entity. To use it, the
    command StartRemote shall be invoked first.

    :param sMsg:
        String containing the message
    :type sMsg:
        L{String}

    :param iTimestamp:
        Integer indicating the UTC timestamp at reception.
        If the command is called before StartRemote raises SlotNotAvailable.
    :type iTimestamp:
        L{Integer} or L{SlotNotAvailable}

    :returns bResult:
        True if the command is successfully run
    :rtype:
        Boolean
    """
    # Fix: docstring moved above the attributes so it populates __doc__.
    # NOTE(review): the original docs also described an `iDopplerShift`
    # parameter that does not exist in `arguments`; dropped here — confirm
    # whether the argument list or the documentation was out of date.

    arguments = [('sMsg', amp.String()),
                 ('iTimestamp', amp.Integer())]
    response = [('bResult', amp.Boolean())]
    errors = {
        SlotErrorNotification: 'SLOT_ERROR_NOTIFICATION'}
"""
Commandes implemented by G- or M- clients which will be invoked
by a N-server.
"""
class NotifyEvent(amp.Command):
    """
    Used to inform a client about an event in the network.

    :param iEvent:
        Code indicating the event. There are four cases:
        (-1) REMOTE_DISCONNECTED: notifies when the remote client has been disconnected
        and it is not receiving the messages.
        (-2) SLOT_END: notifies both clients about the slot end
        (-3) END_REMOTE: notifies a client that the remote has finished the connection
        (-4) REMOTE_CONNECTED: notifies a client when the remote has just connected
    :type iEvent:
        int

    :param sDetails:
        Details of the event. If it is REMOTE_CONNECTED this parameter is equal to
        the username of the remote client. Otherwise the parameter is None
    :type sDetails:
        L{String} or None
    """
    # Fix: docstring moved above the attributes so it populates __doc__;
    # the count of cases ("three") corrected to four, and the copy-pasted
    # constant comments below corrected to match the documented meanings.

    arguments = [('iEvent', amp.Integer()),
                 ('sDetails', amp.String(optional=True))]
    requiresAnswer = False

    # Remote user not connected
    REMOTE_DISCONNECTED = -1
    # The reserved slot has ended
    SLOT_END = -2
    # Remote client finished connection
    END_REMOTE = -3
    # Remote client just connected
    REMOTE_CONNECTED = -4
class NotifyMsg(amp.Command):
    """
    Used to send a message to a remote client.

    :param sMsg:
        String containing the message.
    :type sMsg:
        L{String}
    """
    # Fix: docstring moved above the attributes so it populates __doc__.
    # NOTE(review): the original described sMsg as "Remote client
    # identification number", which contradicts SendMsg; reworded — confirm.

    arguments = [('sMsg', amp.String())]
    requiresAnswer = False
Remove unnecessary files._commands.py
|
from __future__ import unicode_literals
__version__ = "0.1b2"
Changed version to 0.2rc2
from __future__ import unicode_literals
__version__ = "0.2rc2"
|
from app import app
import os
from flask import render_template, \
request, \
send_from_directory
from helpers import is_authenticated, \
parse_address
from database import db, \
Address, \
Domain
from mailgun import mailgun_explicit_whitelist
import json
from datetime import datetime
@app.route('/.well-known/acme-challenge/<filename>')
def letsencrypt(filename):
return send_from_directory(os.path.join(app.root_path, '../.well-known/acme-challenge/'), filename)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.png', mimetype='image/png')
@app.route('/', methods=['GET'])
def hello():
return render_template('hello.html')
@app.route('/whitelist/', methods=['POST'])
def whitelist_collection():
if not is_authenticated():
return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
if not mailgun_explicit_whitelist(request.form['email'], request.form['destination']):
return app.response_class(response='{"error": "Could not whitelist email"}', mimetype='application/json', status=500)
return app.response_class(response='{"status": "ok"}', mimetype='application/json')
@app.route('/domain/', methods=['GET'])
def domain_collection():
if not is_authenticated():
return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
domains = db.session.query(Domain).\
all()
response = []
for domain in domains:
response.append(domain.toObject())
return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/domain/<int:domain_id>', methods=['GET', 'DELETE'])
def domain_item(domain_id):
if not is_authenticated():
return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
row = db.session.query(Domain).\
filter(Domain.id == domain_id).\
first()
if not row:
return app.response_class(response='{"error": "Not found"}', mimetype='application/json', status=404)
response = row.toObject()
if request.method == 'DELETE':
db.session.delete(row)
db.session.commit()
return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/address/', methods=['GET'])
def address_collection():
if not is_authenticated():
return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
addresses = db.session.query(Address).\
all()
response = []
for address in addresses:
response.append(address.toObject())
return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/address/<int:address_id>', methods=['GET', 'DELETE'])
def address_item(address_id):
if not is_authenticated():
return app.response_class(response='{"error": "Invalid API key"}' % request.headers['Authorization'], mimetype='application/json', status=403)
row = db.session.query(Address).\
filter(Address.id == address_id).\
first()
if not row:
return app.response_class(response='{"error": "Not found"}', mimetype='application/json', status=404)
response = row.toObject()
if request.method == 'DELETE':
db.session.delete(row)
db.session.commit()
return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/email/', methods=['POST'])
def message_collection():
# Extract the local_part@domain_name
local_part, domain_name = parse_address(request.form.get('recipient'))
# Get the domain name
try:
domain = db.session.query(Domain).\
filter(Domain.name == domain_name).\
first()
except ValueError, e:
return app.response_class(response='{"error": "%s"}' % e, mimetype='application/json', status=400)
# Create the domain name if it doesn't exist
if domain is None:
domain = Domain(name=domain_name)
db.session.add(domain)
db.session.commit()
# Get the address
try:
address = db.session.query(Address).\
filter(Address.local == local_part).\
filter(Address.domain_id == domain.id).\
first()
except ValueError, e:
return app.response_class(response='{"error": "%s"}' % e, mimetype='application/json', status=400)
# Add the address if it doesn't exist
if address is None:
address = Address(local=local_part, domain_id=domain.id)
db.session.add(address)
db.session.commit()
# Set the last received date
address.date_last_received = datetime.utcnow()
# Add one to the total number of received emails
address.total_received += 1
# Add the current spam score to the total
address.total_spam_score += float(request.form.get('X-Mailgun-Sscore'))
db.session.add(address)
db.session.commit()
return app.response_class(response='{"status": "ok"}', mimetype='application/json')
@app.route('/list/<api_key>', methods=['GET'])
def spam_list(api_key):
if app.config['API_KEY'] != api_key:
abort(403)
addresses = db.session.query(Address).\
order_by(db.desc(Address.date_last_received)).\
all()
return render_template('list.html', addresses=addresses, api_key=api_key)
Added missing abort import, removed LetsEncrypt
from app import app
import os
from flask import abort, \
render_template, \
request, \
send_from_directory
from helpers import is_authenticated, \
parse_address
from database import db, \
Address, \
Domain
from mailgun import mailgun_explicit_whitelist
import json
from datetime import datetime
@app.route('/favicon.ico')
def favicon():
    # Serve the favicon from the app's static folder (stored as a PNG).
    return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.png', mimetype='image/png')
@app.route('/', methods=['GET'])
def hello():
    # Landing page.
    return render_template('hello.html')
@app.route('/whitelist/', methods=['POST'])
def whitelist_collection():
    """Whitelist an email -> destination pair via Mailgun (requires API key)."""
    if not is_authenticated():
        return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
    # Delegate to the Mailgun helper; a falsy result is reported as a 500.
    if not mailgun_explicit_whitelist(request.form['email'], request.form['destination']):
        return app.response_class(response='{"error": "Could not whitelist email"}', mimetype='application/json', status=500)
    return app.response_class(response='{"status": "ok"}', mimetype='application/json')
@app.route('/domain/', methods=['GET'])
def domain_collection():
    """Return all known domains as a JSON array (requires API key)."""
    if not is_authenticated():
        return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
    domains = db.session.query(Domain).\
        all()
    # List comprehension replaces the manual append loop.
    response = [domain.toObject() for domain in domains]
    return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/domain/<int:domain_id>', methods=['GET', 'DELETE'])
def domain_item(domain_id):
    """Fetch (GET) or remove (DELETE) a single domain by id (requires API key)."""
    if not is_authenticated():
        return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
    row = db.session.query(Domain).\
        filter(Domain.id == domain_id).\
        first()
    if not row:
        return app.response_class(response='{"error": "Not found"}', mimetype='application/json', status=404)
    # Serialize before a possible delete so DELETE returns the removed record.
    response = row.toObject()
    if request.method == 'DELETE':
        db.session.delete(row)
        db.session.commit()
    return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/address/', methods=['GET'])
def address_collection():
    """Return all known addresses as a JSON array (requires API key)."""
    if not is_authenticated():
        return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
    addresses = db.session.query(Address).\
        all()
    # List comprehension replaces the manual append loop.
    response = [address.toObject() for address in addresses]
    return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/address/<int:address_id>', methods=['GET', 'DELETE'])
def address_item(address_id):
    """Fetch (GET) or remove (DELETE) a single address by id (requires API key)."""
    if not is_authenticated():
        # Fix: the error body was built with `'literal' % request.headers[...]`.
        # The literal has no conversion specifier, so the %-formatting raised
        # TypeError (a 500) on every unauthenticated request, and would also
        # KeyError when the Authorization header was absent. Plain literal now,
        # consistent with every other handler.
        return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
    row = db.session.query(Address).\
        filter(Address.id == address_id).\
        first()
    if not row:
        return app.response_class(response='{"error": "Not found"}', mimetype='application/json', status=404)
    # Serialize before a possible delete so DELETE returns the removed record.
    response = row.toObject()
    if request.method == 'DELETE':
        db.session.delete(row)
        db.session.commit()
    return app.response_class(response=json.dumps(response), mimetype='application/json')
@app.route('/email/', methods=['POST'])
def message_collection():
    """Record an inbound Mailgun email: upsert domain and address, update stats.

    Expects Mailgun's POST fields `recipient` and `X-Mailgun-Sscore`.
    """
    # Extract the local_part@domain_name
    local_part, domain_name = parse_address(request.form.get('recipient'))
    # Get the domain name
    try:
        domain = db.session.query(Domain).\
            filter(Domain.name == domain_name).\
            first()
    except ValueError as e:
        # `except X as e` replaces the Py2-only `except X, e` form (valid on
        # Python 2.6+ and 3). json.dumps() escapes the message rather than
        # interpolating user-derived text raw into the JSON body.
        return app.response_class(response=json.dumps({'error': str(e)}), mimetype='application/json', status=400)
    # Create the domain name if it doesn't exist
    if domain is None:
        domain = Domain(name=domain_name)
        db.session.add(domain)
        db.session.commit()
    # Get the address
    try:
        address = db.session.query(Address).\
            filter(Address.local == local_part).\
            filter(Address.domain_id == domain.id).\
            first()
    except ValueError as e:
        return app.response_class(response=json.dumps({'error': str(e)}), mimetype='application/json', status=400)
    # Add the address if it doesn't exist
    if address is None:
        address = Address(local=local_part, domain_id=domain.id)
        db.session.add(address)
        db.session.commit()
    # Set the last received date
    address.date_last_received = datetime.utcnow()
    # Add one to the total number of received emails
    address.total_received += 1
    # Add the current spam score to the total
    # NOTE(review): float(None) raises if X-Mailgun-Sscore is missing —
    # confirm Mailgun always sends it, or default it explicitly.
    address.total_spam_score += float(request.form.get('X-Mailgun-Sscore'))
    db.session.add(address)
    db.session.commit()
    return app.response_class(response='{"status": "ok"}', mimetype='application/json')
@app.route('/list/<api_key>', methods=['GET'])
def spam_list(api_key):
    """Render the HTML list of addresses, most recently active first."""
    # Auth via URL-embedded key (unlike the header check used by API routes).
    if app.config['API_KEY'] != api_key:
        abort(403)
    addresses = db.session.query(Address).\
        order_by(db.desc(Address.date_last_received)).\
        all()
    return render_template('list.html', addresses=addresses, api_key=api_key)
|
Rename option
|
# Copyright 2021 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=function-redefined (removes singledispatchmethod pylint errors)
import io
import logging
import sys
from contextlib import redirect_stdout
from typing import Any, Dict, Optional, Sequence, Union
import tempfile
from kubric.safeimport.bpy import bpy
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from singledispatchmethod import singledispatchmethod
import kubric as kb
from kubric import core
from kubric.core.assets import UndefinedAsset
from kubric.redirect_io import RedirectStream
from kubric.renderer import blender_utils
from kubric import file_io
from kubric.file_io import PathLike
logger = logging.getLogger(__name__)
# noinspection PyUnresolvedReferences
class Blender(core.View):
  """ An implementation of a rendering backend in Blender/Cycles."""

  def __init__(self,
               scene: core.Scene,
               scratch_dir=None,
               adaptive_sampling=True,
               use_denoising=True,
               samples_per_pixel=128,
               background_transparency=False,
               verbose: bool = False,
               custom_scene: Optional[str] = None,
               ):
    """
    Args:
      scene: the kubric scene this class will observe
      scratch_dir: Blender always writes the rendered images to disk. The scratch_dir is the
        (temporary) directory used for that. The results are read into memory by kubric,
        immediately after the rendering is done, so the contents of this directory can
        be discarded afterwards.
      adaptive_sampling: Adjust the number of rays cast based on the complexity of the patch
        (see https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html)
      use_denoising: Use the blender denoiser to improve the image quality.
        (see https://docs.blender.org/manual/en/latest/render/layers/denoising.html#denoising)
      samples_per_pixel: Number of rays cast per pixel
        (see https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html)
      background_transparency: Render the background transparent.
        (see https://docs.blender.org/manual/en/latest/render/cycles/render_settings/film.html)
      verbose: when False, blender stdout is redirected to stdnull
      custom_scene: By default (None) Blender is initialized with an empty scene.
        If this argument is set to the path for a `.blend` file, then that scene is loaded instead.
        Note that this scene only affects the rendering output. It is not accessible from Kubric and
        not taken into account by the simulator.
    """
    self.scratch_dir = tempfile.mkdtemp() if scratch_dir is None else scratch_dir
    # World shading nodes; populated by _setup_scene_shading below.
    self.ambient_node = None
    self.ambient_hdri_node = None
    self.illum_mapping_node = None
    self.bg_node = None
    self.bg_hdri_node = None
    self.bg_mapping_node = None
    self.verbose = verbose
    # blender has a default scene on load, so we clear everything first
    self.clear_and_reset_blender_scene(self.verbose, custom_scene=custom_scene)
    self.blender_scene = bpy.context.scene
    # the ray-tracing engine is set here because it affects the availability of some features
    bpy.context.scene.render.engine = "CYCLES"
    blender_utils.activate_render_passes(normal=True, optical_flow=True, segmentation=True, uv=True)
    self._setup_scene_shading()
    self.adaptive_sampling = adaptive_sampling  # speeds up rendering
    self.use_denoising = use_denoising  # improves the output quality
    self.samples_per_pixel = samples_per_pixel
    self.background_transparency = background_transparency
    self.exr_output_node = blender_utils.set_up_exr_output_node()
    # Keep the Blender scene in sync with the observed kubric scene.
    super().__init__(scene, scene_observers={
        "frame_start": [AttributeSetter(self.blender_scene, "frame_start")],
        "frame_end": [AttributeSetter(self.blender_scene, "frame_end")],
        "frame_rate": [AttributeSetter(self.blender_scene.render, "fps")],
        "resolution": [AttributeSetter(self.blender_scene.render, "resolution_x",
                                       converter=lambda x: x[0]),
                       AttributeSetter(self.blender_scene.render, "resolution_y",
                                       converter=lambda x: x[1])],
        "camera": [AttributeSetter(self.blender_scene, "camera",
                                   converter=self._convert_to_blender_object)],
        "ambient_illumination": [lambda change: self._set_ambient_light_color(change.new)],
        "background": [lambda change: self._set_background_color(change.new)],
    })

  @property
  def scratch_dir(self) -> Union[PathLike, None]:
    """Directory used for Blender's intermediate on-disk output (or None)."""
    return self._scratch_dir

  @scratch_dir.setter
  def scratch_dir(self, value: Union[PathLike, None]):
    if value is None:
      self._scratch_dir = None
    else:
      self._scratch_dir = kb.as_path(value)
      self._scratch_dir.mkdir(parents=True, exist_ok=True)

  @property
  def adaptive_sampling(self) -> bool:
    """Whether Cycles adapts the number of rays to patch complexity."""
    return self.blender_scene.cycles.use_adaptive_sampling

  @adaptive_sampling.setter
  def adaptive_sampling(self, value: bool):
    self.blender_scene.cycles.use_adaptive_sampling = value

  @property
  def use_denoising(self) -> bool:
    """Whether the Cycles denoiser is applied to the rendered output."""
    return self.blender_scene.cycles.use_denoising

  @use_denoising.setter
  def use_denoising(self, value: bool):
    self.blender_scene.cycles.use_denoising = value
    self.blender_scene.cycles.denoiser = "NLM"

  @property
  def samples_per_pixel(self) -> int:
    """Number of rays cast per pixel."""
    return self.blender_scene.cycles.samples

  @samples_per_pixel.setter
  def samples_per_pixel(self, nr: int):
    self.blender_scene.cycles.samples = nr

  @property
  def background_transparency(self) -> bool:
    """Whether the background is rendered transparent (film transparency)."""
    return self.blender_scene.render.film_transparent

  @background_transparency.setter
  def background_transparency(self, value: bool):
    self.blender_scene.render.film_transparent = value

  def set_exr_output_path(self, path_prefix: Optional[PathLike]):
    """Set the target path prefix for EXR output.

    The final filename for a frame will be "{path_prefix}{frame_nr:04d}.exr".
    If path_prefix is None then EXR output is disabled.
    """
    if path_prefix is None:
      self.exr_output_node.mute = True
    else:
      self.exr_output_node.mute = False
      self.exr_output_node.base_path = str(path_prefix)

  def save_state(self, path: PathLike, pack_textures: bool = True):
    """Saves the '.blend' blender file to disk.

    If a file with the same path exists, it is overwritten.
    """
    # first write to a temporary file, and later copy
    # (because blender cannot write to gcs buckets etc.)
    tmp_path = self.scratch_dir / "scene.blend"
    # ensure file does NOT exist (as otherwise "scene.blend1" is created instead of "scene.blend")
    kb.as_path(tmp_path).unlink(missing_ok=True)
    # --- ensure directory exists
    parent = kb.as_path(tmp_path).parent
    if not parent.exists():
      parent.mkdir(parents=True)
    # --- save the file; see https://github.com/google-research/kubric/issues/96
    with RedirectStream(stream=sys.stdout, disabled=self.verbose):
      with io.StringIO() as fstdout:  # < scratch stdout buffer
        with redirect_stdout(fstdout):  # < also suppresses python stdout
          if pack_textures:
            bpy.ops.file.pack_all()
          bpy.ops.wm.save_mainfile(filepath=str(tmp_path))
        if self.verbose:
          print(fstdout.getvalue())
    # copy to target path
    path = kb.as_path(path)
    path.parent.mkdir(parents=True, exist_ok=True)  # ensure directory exists
    logger.info("Saving '%s'", path)
    tf.io.gfile.copy(tmp_path, path, overwrite=True)

  def render(self,
             frames: Optional[Sequence[int]] = None,
             ignore_missing_textures: bool = False,
             ) -> Dict[str, np.ndarray]:
    """Renders all frames (or a subset) of the animation and returns images as a dict of arrays.

    Args:
      frames: list of frames to render (defaults to range(scene.frame_start, scene.frame_end+1)).
      ignore_missing_textures: if False then raise a RuntimeError when missing textures are
        detected. Otherwise, proceed to render (with purple color instead of missing texture).

    Returns:
      A dictionary with the following entries:
        - "rgba": shape = (nr_frames, height, width, 4)
        - "segmentation": shape = (nr_frames, height, width, 1) (int)
        - "backward_flow": shape = (nr_frames, height, width, 2)
        - "forward_flow": shape = (nr_frames, height, width, 2)
        - "depth": shape = (nr_frames, height, width, 1)
        - "uv": shape = (nr_frames, height, width, 3)
        - "normal": shape = (nr_frames, height, width, 3)
    """
    logger.info("Using scratch rendering folder: '%s'", self.scratch_dir)
    # NOTE: ``img.has_data`` is True only for images whose data has already been
    # loaded into memory, so it can report false negatives for textures that are
    # in fact available. Accessing ``img.size`` forces Blender to load the image,
    # after which a size of (0, 0) unambiguously indicates missing data.
    missing_textures = sorted({img.filepath for img in bpy.data.images
                               if tuple(img.size) == (0, 0)})
    if missing_textures and not ignore_missing_textures:
      raise RuntimeError(f"Missing textures: {missing_textures}")
    self.set_exr_output_path(self.scratch_dir / "exr" / "frame_")
    # --- starts rendering
    if frames is None:
      frames = range(self.scene.frame_start, self.scene.frame_end + 1)
    with RedirectStream(stream=sys.stdout, disabled=self.verbose):
      for frame_nr in frames:
        bpy.context.scene.frame_set(frame_nr)
        # When writing still images Blender doesn't append the frame number to the png path.
        # (but for exr it does, so we only adjust the png path)
        bpy.context.scene.render.filepath = str(
            self.scratch_dir / "images" / f"frame_{frame_nr:04d}.png")
        bpy.ops.render.render(animation=False, write_still=True)
        logger.info("Rendered frame '%s'", bpy.context.scene.render.filepath)
    # --- post process the rendered frames
    return self.postprocess(self.scratch_dir)

  def render_still(self, frame: Optional[int] = None):
    """Render a single frame (first frame by default).

    Args:
      frame: Which frame to render (defaults to scene.frame_start).

    Returns:
      A dictionary with the following entries:
        - "rgba": shape = (height, width, 4)
        - "segmentation": shape = (height, width, 1) (int)
        - "backward_flow": shape = (height, width, 2)
        - "forward_flow": shape = (height, width, 2)
        - "depth": shape = (height, width, 1)
        - "uv": shape = (height, width, 3)
        - "normal": shape = (height, width, 3)
    """
    frame = self.scene.frame_start if frame is None else frame
    result = self.render(frames=[frame])
    # Strip the leading (single-frame) batch dimension.
    return {k: v[0] for k, v in result.items()}

  def postprocess(self, from_dir: PathLike):
    """Read the rendered EXR/PNG files and stack them into per-layer arrays."""
    from_dir = tfds.core.as_path(from_dir)
    # --- collect all layers for all frames
    data_stack = {}
    list_of_exr_frames = sorted((from_dir / "exr").glob("*.exr"))
    for exr_filename in list_of_exr_frames:
      png_filename = from_dir / "images" / (exr_filename.stem + ".png")
      layers = blender_utils.get_render_layers_from_exr(exr_filename)
      data = {k: layers[k] for k in
              ["backward_flow", "forward_flow", "depth", "uv", "normal", "object_coordinates"]}
      # Use the contrast-normalized PNG instead of the EXR for RGBA.
      data["rgba"] = file_io.read_png(png_filename)
      data["segmentation"] = layers["segmentation_indices"][:, :, :1]
      for key in data:
        if key in data_stack:
          data_stack[key].append(data[key])
        else:
          data_stack[key] = [data[key]]
    for key in data_stack:
      data_stack[key] = np.stack(data_stack[key], axis=0)
    # map the Blender cryptomatte hashes to asset indices
    data_stack["segmentation"] = blender_utils.replace_cryptomatte_hashes_by_asset_index(
        data_stack["segmentation"], self.scene.assets)
    # convert z values (distance to camera plane) into depth (distance to camera center)
    data_stack["depth"] = self.scene.camera.z_to_depth(data_stack["depth"])
    return data_stack

  @staticmethod
  def clear_and_reset_blender_scene(verbose: bool = False, custom_scene: str = None):
    """ Resets Blender to an entirely empty scene (or a custom one)."""
    with RedirectStream(stream=sys.stdout, disabled=verbose):
      bpy.ops.wm.read_factory_settings(use_empty=True)
    if custom_scene is None:
      bpy.context.scene.world = bpy.data.worlds.new("World")
    else:
      logger.info("Loading scene from '%s'", custom_scene)
      bpy.ops.wm.open_mainfile(filepath=custom_scene)

  @singledispatchmethod
  def add_asset(self, asset: core.Asset) -> Any:
    """Create the Blender counterpart for a kubric asset (dispatch on type)."""
    raise NotImplementedError(f"Cannot add {asset!r}")

  def remove_asset(self, asset: core.Asset) -> None:
    """Remove the Blender object/material that was linked to this asset."""
    if self in asset.linked_objects:
      blender_obj = asset.linked_objects[self]
      try:
        if isinstance(blender_obj, bpy.types.Object):
          bpy.data.objects.remove(blender_obj, do_unlink=True)
        elif isinstance(blender_obj, bpy.types.Material):
          bpy.data.materials.remove(blender_obj, do_unlink=True)
        else:
          raise NotImplementedError(f"Cannot remove {asset!r}")
      except ReferenceError:
        pass  # In this case the object is already gone

  @add_asset.register(core.Cube)
  @blender_utils.prepare_blender_object
  def _add_asset(self, asset: core.Cube):
    """Add a cube primitive and wire up its observers."""
    bpy.ops.mesh.primitive_cube_add()
    cube = bpy.context.active_object
    register_object3d_setters(asset, cube)
    asset.observe(AttributeSetter(cube, "active_material",
                                  converter=self._convert_to_blender_object), "material")
    asset.observe(AttributeSetter(cube, "scale"), "scale")
    asset.observe(KeyframeSetter(cube, "scale"), "scale", type="keyframe")
    return cube

  @add_asset.register(core.Sphere)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.Sphere):
    """Add a smooth-shaded icosphere and wire up its observers."""
    bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=5)
    bpy.ops.object.shade_smooth()
    sphere = bpy.context.active_object
    register_object3d_setters(obj, sphere)
    obj.observe(AttributeSetter(sphere, "active_material",
                                converter=self._convert_to_blender_object), "material")
    obj.observe(AttributeSetter(sphere, "scale"), "scale")
    obj.observe(KeyframeSetter(sphere, "scale"), "scale", type="keyframe")
    return sphere

  @add_asset.register(core.FileBasedObject)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.FileBasedObject):
    """Import a mesh from file (obj/gltf/fbx/x3d/blend) and wire up observers."""
    if obj.render_filename is None:
      return None  # if there is no render file, then ignore this object
    _, _, extension = obj.render_filename.rpartition(".")
    with RedirectStream(stream=sys.stdout, disabled=self.verbose):  # reduce the logging noise
      with io.StringIO() as fstdout:  # < scratch stdout buffer
        with redirect_stdout(fstdout):  # < also suppresses python stdout
          if extension == "obj":
            bpy.ops.import_scene.obj(filepath=obj.render_filename,
                                     use_split_objects=False,
                                     **obj.render_import_kwargs)
          elif extension in ["glb", "gltf"]:
            bpy.ops.import_scene.gltf(filepath=obj.render_filename,
                                      **obj.render_import_kwargs)
            # gltf files often contain "Empty" objects as placeholders for camera / lights etc.
            # here we are interested only in the meshes, so delete everything else
            non_mesh_objects = [obj for obj in bpy.context.selected_objects if obj.type != "MESH"]
            bpy.ops.object.delete({"selected_objects": non_mesh_objects})
            bpy.ops.object.join()
            # By default gltf objects are loaded with a different rotation than obj files
            # here we compensate for that to ensure alignment between pybullet and blender
            assert len(bpy.context.selected_objects) == 1
            blender_obj = bpy.context.selected_objects[0]
            blender_obj.rotation_quaternion = (0.707107, -0.707107, 0, 0)
            bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
          elif extension == "fbx":
            bpy.ops.import_scene.fbx(filepath=obj.render_filename,
                                     **obj.render_import_kwargs)
          elif extension in ["x3d", "wrl"]:
            bpy.ops.import_scene.x3d(filepath=obj.render_filename,
                                     **obj.render_import_kwargs)
          elif extension == "blend":
            # for now we require the paths to be encoded in the render_import_kwargs. That is:
            # - filepath = dir / "Object" / object_name
            # - directory = dir / "Object"
            # - filename = object_name
            bpy.ops.wm.append(**obj.render_import_kwargs)
          else:
            raise ValueError(f"Unknown file-type: '{extension}' for {obj}")
    assert len(bpy.context.selected_objects) == 1
    blender_obj = bpy.context.selected_objects[0]
    # deactivate auto_smooth because for some reason it lead to no smoothing at all
    # TODO: make smoothing configurable
    blender_obj.data.use_auto_smooth = False
    register_object3d_setters(obj, blender_obj)
    obj.observe(AttributeSetter(blender_obj, "active_material",
                                converter=self._convert_to_blender_object), "material")
    obj.observe(AttributeSetter(blender_obj, "scale"), "scale")
    obj.observe(KeyframeSetter(blender_obj, "scale"), "scale", type="keyframe")
    return blender_obj

  @add_asset.register(core.DirectionalLight)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.DirectionalLight):  # pylint: disable=function-redefined
    """Add a sun lamp and wire up its observers."""
    sun = bpy.data.lights.new(obj.uid, "SUN")
    sun_obj = bpy.data.objects.new(obj.uid, sun)
    register_object3d_setters(obj, sun_obj)
    obj.observe(AttributeSetter(sun, "color"), "color")
    obj.observe(KeyframeSetter(sun, "color"), "color", type="keyframe")
    obj.observe(AttributeSetter(sun, "energy"), "intensity")
    obj.observe(KeyframeSetter(sun, "energy"), "intensity", type="keyframe")
    return sun_obj

  @add_asset.register(core.RectAreaLight)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.RectAreaLight):
    """Add a rectangular area lamp and wire up its observers."""
    area = bpy.data.lights.new(obj.uid, "AREA")
    area_obj = bpy.data.objects.new(obj.uid, area)
    register_object3d_setters(obj, area_obj)
    obj.observe(AttributeSetter(area, "color"), "color")
    obj.observe(KeyframeSetter(area, "color"), "color", type="keyframe")
    obj.observe(AttributeSetter(area, "energy"), "intensity")
    obj.observe(KeyframeSetter(area, "energy"), "intensity", type="keyframe")
    obj.observe(AttributeSetter(area, "size"), "width")
    obj.observe(KeyframeSetter(area, "size"), "width", type="keyframe")
    obj.observe(AttributeSetter(area, "size_y"), "height")
    obj.observe(KeyframeSetter(area, "size_y"), "height", type="keyframe")
    return area_obj

  @add_asset.register(core.PointLight)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.PointLight):
    """Add a point lamp and wire up its observers."""
    point_light = bpy.data.lights.new(obj.uid, "POINT")
    point_light_obj = bpy.data.objects.new(obj.uid, point_light)
    register_object3d_setters(obj, point_light_obj)
    obj.observe(AttributeSetter(point_light, "color"), "color")
    obj.observe(KeyframeSetter(point_light, "color"), "color", type="keyframe")
    obj.observe(AttributeSetter(point_light, "energy"), "intensity")
    obj.observe(KeyframeSetter(point_light, "energy"), "intensity", type="keyframe")
    return point_light_obj

  @add_asset.register(core.PerspectiveCamera)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.PerspectiveCamera):
    """Add a perspective camera and wire up its observers."""
    camera = bpy.data.cameras.new(obj.uid)
    camera.type = "PERSP"
    # fix sensor width and determine sensor height by the aspect ratio of the image:
    camera.sensor_fit = "HORIZONTAL"
    camera_obj = bpy.data.objects.new(obj.uid, camera)
    register_object3d_setters(obj, camera_obj)
    obj.observe(AttributeSetter(camera, "lens"), "focal_length")
    obj.observe(KeyframeSetter(camera, "lens"), "focal_length", type="keyframe")
    obj.observe(AttributeSetter(camera, "sensor_width"), "sensor_width")
    obj.observe(KeyframeSetter(camera, "sensor_width"), "sensor_width", type="keyframe")
    return camera_obj

  @add_asset.register(core.OrthographicCamera)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.OrthographicCamera):
    """Add an orthographic camera and wire up its observers."""
    camera = bpy.data.cameras.new(obj.uid)
    camera.type = "ORTHO"
    camera_obj = bpy.data.objects.new(obj.uid, camera)
    register_object3d_setters(obj, camera_obj)
    obj.observe(AttributeSetter(camera, "ortho_scale"), "orthographic_scale")
    obj.observe(KeyframeSetter(camera, "ortho_scale"), "orthographic_scale", type="keyframe")
    return camera_obj

  @add_asset.register(core.PrincipledBSDFMaterial)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.PrincipledBSDFMaterial):
    """Create a Principled BSDF material and observe all its inputs."""
    mat = bpy.data.materials.new(obj.uid)
    mat.use_nodes = True
    bsdf_node = mat.node_tree.nodes["Principled BSDF"]
    obj.observe(AttributeSetter(bsdf_node.inputs["Base Color"], "default_value"), "color")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Base Color"], "default_value"), "color",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Roughness"], "default_value"), "roughness")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Roughness"], "default_value"), "roughness",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Metallic"], "default_value"), "metallic")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Metallic"], "default_value"), "metallic",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Specular"], "default_value"), "specular")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Specular"], "default_value"), "specular",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Specular Tint"],
                                "default_value"), "specular_tint")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Specular Tint"], "default_value"), "specular_tint",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["IOR"], "default_value"), "ior")
    obj.observe(KeyframeSetter(bsdf_node.inputs["IOR"], "default_value"), "ior",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Transmission"], "default_value"), "transmission")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Transmission"], "default_value"), "transmission",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Transmission Roughness"], "default_value"),
                "transmission_roughness")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Transmission Roughness"], "default_value"),
                "transmission_roughness", type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Emission"], "default_value"), "emission")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Emission"], "default_value"), "emission",
                type="keyframe")
    return mat

  @add_asset.register(core.FlatMaterial)
  @blender_utils.prepare_blender_object
  def _add_asset(self, obj: core.FlatMaterial):
    """Create a flat (unlit) material with holdout and visibility controls."""
    # --- Create node-based material
    mat = bpy.data.materials.new("Holdout")
    mat.use_nodes = True
    tree = mat.node_tree
    tree.nodes.remove(tree.nodes["Principled BSDF"])  # remove the default shader
    output_node = tree.nodes["Material Output"]
    # This material is constructed from three different shaders:
    #  1. if holdout=False then emission_node is responsible for giving the object a uniform color
    #  2. if holdout=True, then the holdout_node is responsible for making the object transparent
    #  3. if indirect_visibility=False then transparent_node makes the node invisible for indirect
    #     effects such as shadows or reflections
    light_path_node = tree.nodes.new(type="ShaderNodeLightPath")
    holdout_node = tree.nodes.new(type="ShaderNodeHoldout")
    transparent_node = tree.nodes.new(type="ShaderNodeBsdfTransparent")
    holdout_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    indirect_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    overall_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    emission_node = tree.nodes.new(type="ShaderNodeEmission")
    tree.links.new(transparent_node.outputs["BSDF"], indirect_mix_node.inputs[1])
    tree.links.new(emission_node.outputs["Emission"], indirect_mix_node.inputs[2])
    tree.links.new(emission_node.outputs["Emission"], holdout_mix_node.inputs[1])
    tree.links.new(holdout_node.outputs["Holdout"], holdout_mix_node.inputs[2])
    tree.links.new(light_path_node.outputs["Is Camera Ray"], overall_mix_node.inputs["Fac"])
    tree.links.new(indirect_mix_node.outputs["Shader"], overall_mix_node.inputs[1])
    tree.links.new(holdout_mix_node.outputs["Shader"], overall_mix_node.inputs[2])
    tree.links.new(overall_mix_node.outputs["Shader"], output_node.inputs["Surface"])
    obj.observe(AttributeSetter(emission_node.inputs["Color"], "default_value"), "color")
    obj.observe(KeyframeSetter(emission_node.inputs["Color"], "default_value"), "color",
                type="keyframe")
    obj.observe(AttributeSetter(holdout_mix_node.inputs["Fac"], "default_value"), "holdout")
    obj.observe(KeyframeSetter(holdout_mix_node.inputs["Fac"], "default_value"), "holdout",
                type="keyframe")
    obj.observe(AttributeSetter(indirect_mix_node.inputs["Fac"], "default_value"),
                "indirect_visibility")
    obj.observe(KeyframeSetter(indirect_mix_node.inputs["Fac"], "default_value"),
                "indirect_visibility", type="keyframe")
    return mat

  def _setup_scene_shading(self):
    """Build the world shading node graph (ambient + background, plain or HDRI)."""
    self.blender_scene.world.use_nodes = True
    tree = self.blender_scene.world.node_tree
    links = tree.links
    # clear the tree
    for node in tree.nodes.values():
      tree.nodes.remove(node)
    # create nodes
    out_node = tree.nodes.new(type="ShaderNodeOutputWorld")
    out_node.location = 1100, 0
    mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    mix_node.location = 900, 0
    lightpath_node = tree.nodes.new(type="ShaderNodeLightPath")
    lightpath_node.location = 700, 350
    self.ambient_node = tree.nodes.new(type="ShaderNodeBackground")
    self.ambient_node.inputs["Color"].default_value = (0., 0., 0., 1.)
    self.ambient_node.location = 700, 0
    self.bg_node = tree.nodes.new(type="ShaderNodeBackground")
    self.bg_node.inputs["Color"].default_value = (0., 0., 0., 1.)
    self.bg_node.location = 700, -120
    links.new(lightpath_node.outputs.get("Is Camera Ray"), mix_node.inputs.get("Fac"))
    links.new(self.ambient_node.outputs.get("Background"), mix_node.inputs[1])
    links.new(self.bg_node.outputs.get("Background"), mix_node.inputs[2])
    links.new(mix_node.outputs.get("Shader"), out_node.inputs.get("Surface"))
    # create nodes for HDRI images, but leave them disconnected until
    # set_ambient_illumination or set_background
    coord_node = tree.nodes.new(type="ShaderNodeTexCoord")
    self.bg_mapping_node = tree.nodes.new(type="ShaderNodeMapping")
    self.bg_mapping_node.location = 200, 200
    self.bg_hdri_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
    self.bg_hdri_node.location = 400, 200
    links.new(coord_node.outputs.get("Generated"), self.bg_mapping_node.inputs.get("Vector"))
    links.new(self.bg_mapping_node.outputs.get("Vector"), self.bg_hdri_node.inputs.get("Vector"))
    self.illum_mapping_node = tree.nodes.new(type="ShaderNodeMapping")
    self.illum_mapping_node.location = 200, -200
    self.ambient_hdri_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
    self.ambient_hdri_node.location = 400, -200
    links.new(coord_node.outputs.get("Generated"), self.illum_mapping_node.inputs.get("Vector"))
    links.new(self.illum_mapping_node.outputs.get("Vector"),
              self.ambient_hdri_node.inputs.get("Vector"))

  def _set_ambient_light_color(self, color=(0., 0., 0., 1.0)):
    """Switch ambient illumination to a plain color (detaching any HDRI)."""
    # disconnect incoming links from hdri node (if any)
    for link in self.ambient_node.inputs["Color"].links:
      self.blender_scene.world.node_tree.links.remove(link)
    self.ambient_node.inputs["Color"].default_value = color

  def _set_ambient_light_hdri(self, hdri_filepath=None, hdri_rotation=(0., 0., 0.), strength=1.0):
    """Switch ambient illumination to an HDRI environment map."""
    # ensure hdri_node is connected
    self.blender_scene.world.node_tree.links.new(self.ambient_hdri_node.outputs.get("Color"),
                                                 self.ambient_node.inputs.get("Color"))
    self.ambient_hdri_node.image = bpy.data.images.load(hdri_filepath, check_existing=True)
    self.ambient_node.inputs["Strength"].default_value = strength
    self.illum_mapping_node.inputs.get("Rotation").default_value = hdri_rotation

  def _set_background_color(self, color=core.get_color("black")):
    """Switch the background to a plain color (detaching any HDRI)."""
    # disconnect incoming links from hdri node (if any)
    for link in self.bg_node.inputs["Color"].links:
      self.blender_scene.world.node_tree.links.remove(link)
    # set color
    self.bg_node.inputs["Color"].default_value = color

  def _set_background_hdri(self, hdri_filepath=None, hdri_rotation=(0., 0., 0.)):
    """Switch the background to an HDRI environment map."""
    # ensure hdri_node is connected
    self.blender_scene.world.node_tree.links.new(self.bg_hdri_node.outputs.get("Color"),
                                                 self.bg_node.inputs.get("Color"))
    self.bg_hdri_node.image = bpy.data.images.load(hdri_filepath, check_existing=True)
    self.bg_mapping_node.inputs.get("Rotation").default_value = hdri_rotation

  def _convert_to_blender_object(self, asset: core.Asset):
    """Return the Blender object that this view linked to the given asset."""
    return asset.linked_objects[self]
class AttributeSetter:
  """Observer that mirrors a kubric attribute change onto a Blender object.

  When invoked with a change event, it writes ``change.new`` (optionally run
  through ``converter`` first) to ``attribute`` on ``blender_obj``.
  """

  def __init__(self, blender_obj, attribute: str, converter=None):
    self.blender_obj = blender_obj
    self.attribute = attribute
    self.converter = converter

  def __call__(self, change):
    # The change event carries the new value, e.g.
    # change = {"type": "change", "new": (1., 1., 1.), "owner": obj}
    value = change.new
    if isinstance(value, UndefinedAsset):
      # Undefined placeholder values are deliberately not propagated.
      return
    # Apply the optional conversion before writing through to Blender.
    setattr(self.blender_obj, self.attribute,
            self.converter(value) if self.converter else value)
class KeyframeSetter:
  """Observer that inserts a Blender keyframe when a keyframe event fires.

  The change event must carry the target frame number in ``change.frame``;
  the keyframe is inserted for ``attribute_path`` on ``blender_obj``.
  """

  def __init__(self, blender_obj, attribute_path: str):
    self.blender_obj = blender_obj
    self.attribute_path = attribute_path

  def __call__(self, change):
    self.blender_obj.keyframe_insert(self.attribute_path, frame=change.frame)
def register_object3d_setters(obj, blender_obj):
  """Wire up position/quaternion observers from a kubric Object3D to Blender.

  Registers both a direct attribute setter and a keyframe setter for the
  object's position (-> ``location``) and quaternion (-> ``rotation_quaternion``).
  """
  assert isinstance(obj, core.Object3D), f"{obj!r} is not an Object3D"
  for blender_attr, kubric_attr in (("location", "position"),
                                    ("rotation_quaternion", "quaternion")):
    obj.observe(AttributeSetter(blender_obj, blender_attr), kubric_attr)
    obj.observe(KeyframeSetter(blender_obj, blender_attr), kubric_attr, type="keyframe")
Fixed the check for missing textures.
`img.has_data` is True only if the image data has already been loaded into memory, which may or may not be the case even for textures that are available on disk.
Instead we query `img.size`, which forces Blender to load the image.
(Afterwards `img.has_data` would be reliable, but `img.size == (0, 0)` is an equally clear sign of missing data, so we use that directly.)
# Copyright 2021 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=function-redefined (removes singledispatchmethod pylint errors)
import io
import logging
import sys
from contextlib import redirect_stdout
from typing import Any, Dict, Optional, Sequence, Union
import tempfile
from kubric.safeimport.bpy import bpy
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from singledispatchmethod import singledispatchmethod
import kubric as kb
from kubric import core
from kubric.core.assets import UndefinedAsset
from kubric.redirect_io import RedirectStream
from kubric.renderer import blender_utils
from kubric import file_io
from kubric.file_io import PathLike
logger = logging.getLogger(__name__)
# noinspection PyUnresolvedReferences
class Blender(core.View):
""" An implementation of a rendering backend in Blender/Cycles."""
def __init__(self,
scene: core.Scene,
scratch_dir=None,
adaptive_sampling=True,
use_denoising=True,
samples_per_pixel=128,
background_transparency=False,
verbose: bool = False,
custom_scene: Optional[str] = None,
):
"""
Args:
scene: the kubric scene this class will observe
scratch_dir: Blender always writes the rendered images to disk. The scratch_dir is the
(temporary) directory used for that. The results are read into memory by kubric,
immediately after the rendering is done, so the contents of this directory can
be discarded afterwards.
adaptive_sampling: Adjust the number of rays cast based on the complexity of the patch
(see https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html)
use_denoising: Use the blender denoiser to improve the image quality.
(see https://docs.blender.org/manual/en/latest/render/layers/denoising.html#denoising)
samples_per_pixel: Number of rays cast per pixel
(see https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html)
background_transparency: Render the background transparent.
(see https://docs.blender.org/manual/en/latest/render/cycles/render_settings/film.html)
verbose: when False, blender stdout is redirected to stdnull
custom_scene: By default (None) Blender is initialized with an empty scene.
If this argument is set to the path for a `.blend` file, then that scene is loaded instead.
Note that this scene only affects the rendering output. It is not accessible from Kubric and
not taken into account by the simulator.
"""
self.scratch_dir = tempfile.mkdtemp() if scratch_dir is None else scratch_dir
self.ambient_node = None
self.ambient_hdri_node = None
self.illum_mapping_node = None
self.bg_node = None
self.bg_hdri_node = None
self.bg_mapping_node = None
self.verbose = verbose
# blender has a default scene on load, so we clear everything first
self.clear_and_reset_blender_scene(self.verbose, custom_scene=custom_scene)
self.blender_scene = bpy.context.scene
# the ray-tracing engine is set here because it affects the availability of some features
bpy.context.scene.render.engine = "CYCLES"
blender_utils.activate_render_passes(normal=True, optical_flow=True, segmentation=True, uv=True)
self._setup_scene_shading()
self.adaptive_sampling = adaptive_sampling # speeds up rendering
self.use_denoising = use_denoising # improves the output quality
self.samples_per_pixel = samples_per_pixel
self.background_transparency = background_transparency
self.exr_output_node = blender_utils.set_up_exr_output_node()
super().__init__(scene, scene_observers={
"frame_start": [AttributeSetter(self.blender_scene, "frame_start")],
"frame_end": [AttributeSetter(self.blender_scene, "frame_end")],
"frame_rate": [AttributeSetter(self.blender_scene.render, "fps")],
"resolution": [AttributeSetter(self.blender_scene.render, "resolution_x",
converter=lambda x: x[0]),
AttributeSetter(self.blender_scene.render, "resolution_y",
converter=lambda x: x[1])],
"camera": [AttributeSetter(self.blender_scene, "camera",
converter=self._convert_to_blender_object)],
"ambient_illumination": [lambda change: self._set_ambient_light_color(change.new)],
"background": [lambda change: self._set_background_color(change.new)],
})
@property
def scratch_dir(self) -> Union[PathLike, None]:
    """Directory used for intermediate rendering output (may be None)."""
    return self._scratch_dir

@scratch_dir.setter
def scratch_dir(self, value: Union[PathLike, None]):
    if value is None:
        self._scratch_dir = None
        return
    # Normalize to a path object and make sure the directory exists.
    self._scratch_dir = kb.as_path(value)
    self._scratch_dir.mkdir(parents=True, exist_ok=True)
@property
def adaptive_sampling(self) -> bool:
    """Whether Cycles adaptive sampling is enabled for this scene."""
    return self.blender_scene.cycles.use_adaptive_sampling

@adaptive_sampling.setter
def adaptive_sampling(self, value: bool):
    cycles = self.blender_scene.cycles
    cycles.use_adaptive_sampling = value
@property
def use_denoising(self) -> bool:
    """Whether Cycles denoising is enabled for this scene."""
    return self.blender_scene.cycles.use_denoising

@use_denoising.setter
def use_denoising(self, value: bool):
    cycles = self.blender_scene.cycles
    cycles.use_denoising = value
    # the NLM denoiser is (re)selected every time the flag is set
    cycles.denoiser = "NLM"
@property
def samples_per_pixel(self) -> int:
    """Number of Cycles render samples per pixel."""
    return self.blender_scene.cycles.samples

@samples_per_pixel.setter
def samples_per_pixel(self, nr: int):
    # forwarded directly to the Cycles sample count
    self.blender_scene.cycles.samples = nr
@property
def background_transparency(self) -> bool:
    """Whether the rendered background is transparent (film_transparent)."""
    return self.blender_scene.render.film_transparent

@background_transparency.setter
def background_transparency(self, value: bool):
    render_settings = self.blender_scene.render
    render_settings.film_transparent = value
def set_exr_output_path(self, path_prefix: Optional[PathLike]):
    """Set the target path prefix for EXR output.

    The final filename for a frame will be "{path_prefix}{frame_nr:04d}.exr".
    If path_prefix is None then EXR output is disabled (node is muted).
    """
    node = self.exr_output_node
    node.mute = path_prefix is None
    if path_prefix is not None:
        node.base_path = str(path_prefix)
def save_state(self, path: PathLike, pack_textures: bool = True):
    """Saves the '.blend' blender file to disk.

    If a file with the same path exists, it is overwritten.

    Args:
        path: Target path. Copied via tf.io.gfile, so remote paths
            (e.g. GCS buckets) are supported.
        pack_textures: If True, external texture files are packed into the
            .blend file before saving.
    """
    # first write to a temporary file, and later copy
    # (because blender cannot write to gcs buckets etc.)
    tmp_path = self.scratch_dir / "scene.blend"
    # ensure file does NOT exist (as otherwise "scene.blend1" is created instead of "scene.blend")
    kb.as_path(tmp_path).unlink(missing_ok=True)
    # --- ensure directory exists
    parent = kb.as_path(tmp_path).parent
    if not parent.exists():
        parent.mkdir(parents=True)
    # --- save the file; see https://github.com/google-research/kubric/issues/96
    with RedirectStream(stream=sys.stdout, disabled=self.verbose):
        with io.StringIO() as fstdout:  # < scratch stdout buffer
            with redirect_stdout(fstdout):  # < also suppresses python stdout
                if pack_textures:
                    bpy.ops.file.pack_all()
                bpy.ops.wm.save_mainfile(filepath=str(tmp_path))
            # replay the captured output only when running verbosely
            if self.verbose:
                print(fstdout.getvalue())
    # copy to target path
    path = kb.as_path(path)
    path.parent.mkdir(parents=True, exist_ok=True)  # ensure directory exists
    logger.info("Saving '%s'", path)
    tf.io.gfile.copy(tmp_path, path, overwrite=True)
def render(self,
           frames: Optional[Sequence[int]] = None,
           ignore_missing_textures: bool = False,
           ) -> Dict[str, np.ndarray]:
    """Renders all frames (or a subset) of the animation and returns images as a dict of arrays.

    Args:
      frames: list of frames to render (defaults to range(scene.frame_start, scene.frame_end+1)).
      ignore_missing_textures: if False then raise a RuntimeError when missing textures are
        detected. Otherwise, proceed to render (with purple color instead of missing texture).

    Returns:
      A dictionary with the following entries:
      - "rgba": shape = (nr_frames, height, width, 4)
      - "segmentation": shape = (nr_frames, height, width, 1) (int)
      - "backward_flow": shape = (nr_frames, height, width, 2)
      - "forward_flow": shape = (nr_frames, height, width, 2)
      - "depth": shape = (nr_frames, height, width, 1)
      - "uv": shape = (nr_frames, height, width, 3)
      - "normal": shape = (nr_frames, height, width, 3)
    """
    logger.info("Using scratch rendering folder: '%s'", self.scratch_dir)
    # an image that failed to load reports a size of (0, 0)
    missing_textures = sorted({img.filepath for img in bpy.data.images
                               if tuple(img.size) == (0, 0)})
    if missing_textures and not ignore_missing_textures:
        raise RuntimeError(f"Missing textures: {missing_textures}")
    self.set_exr_output_path(self.scratch_dir / "exr" / "frame_")
    # --- starts rendering
    if frames is None:
        frames = range(self.scene.frame_start, self.scene.frame_end + 1)
    with RedirectStream(stream=sys.stdout, disabled=self.verbose):
        for frame_nr in frames:
            bpy.context.scene.frame_set(frame_nr)
            # When writing still images Blender doesn't append the frame number to the png path.
            # (but for exr it does, so we only adjust the png path)
            bpy.context.scene.render.filepath = str(
                self.scratch_dir / "images" / f"frame_{frame_nr:04d}.png")
            bpy.ops.render.render(animation=False, write_still=True)
            logger.info("Rendered frame '%s'", bpy.context.scene.render.filepath)
    # --- post process the rendered frames
    return self.postprocess(self.scratch_dir)
def render_still(self, frame: Optional[int] = None):
    """Render a single frame (first frame by default).

    Args:
      frame: Which frame to render (defaults to scene.frame_start).

    Returns:
      A dictionary with the following entries (no leading frame axis):
      - "rgba": shape = (height, width, 4)
      - "segmentation": shape = (height, width, 1) (int)
      - "backward_flow": shape = (height, width, 2)
      - "forward_flow": shape = (height, width, 2)
      - "depth": shape = (height, width, 1)
      - "uv": shape = (height, width, 3)
      - "normal": shape = (height, width, 3)
    """
    if frame is None:
        frame = self.scene.frame_start
    rendered = self.render(frames=[frame])
    # drop the leading (length-1) frame axis from every layer
    return {layer: frames[0] for layer, frames in rendered.items()}
def postprocess(self, from_dir: PathLike):
    """Collect the EXR + PNG files rendered into `from_dir` and stack them into arrays.

    Returns:
        A dict mapping layer name (rgba, segmentation, flows, depth, uv, normal,
        object_coordinates) to an array with a leading frame axis.
    """
    from_dir = tfds.core.as_path(from_dir)
    # --- collect all layers for all frames
    data_stack = {}
    list_of_exr_frames = sorted((from_dir / "exr").glob("*.exr"))
    for exr_filename in list_of_exr_frames:
        # the PNG written by render() for the same frame shares the stem
        png_filename = from_dir / "images" / (exr_filename.stem + ".png")
        layers = blender_utils.get_render_layers_from_exr(exr_filename)
        data = {k: layers[k] for k in
                ["backward_flow", "forward_flow", "depth", "uv", "normal", "object_coordinates"]}
        # Use the contrast-normalized PNG instead of the EXR for RGBA.
        data["rgba"] = file_io.read_png(png_filename)
        # only the first channel of the cryptomatte indices is kept
        data["segmentation"] = layers["segmentation_indices"][:, :, :1]
        for key in data:
            if key in data_stack:
                data_stack[key].append(data[key])
            else:
                data_stack[key] = [data[key]]
    for key in data_stack:
        data_stack[key] = np.stack(data_stack[key], axis=0)
    # map the Blender cryptomatte hashes to asset indices
    data_stack["segmentation"] = blender_utils.replace_cryptomatte_hashes_by_asset_index(
        data_stack["segmentation"], self.scene.assets)
    # convert z values (distance to camera plane) into depth (distance to camera center)
    data_stack["depth"] = self.scene.camera.z_to_depth(data_stack["depth"])
    return data_stack
@staticmethod
def clear_and_reset_blender_scene(verbose: bool = False, custom_scene: str = None):
    """ Resets Blender to an entirely empty scene (or a custom one).

    Args:
        verbose: when False, Blender's stdout chatter is suppressed.
        custom_scene: optional path to a `.blend` file to load instead of
            starting from an empty scene.
    """
    with RedirectStream(stream=sys.stdout, disabled=verbose):
        # use_empty drops Blender's default objects (cube, light, camera)
        bpy.ops.wm.read_factory_settings(use_empty=True)
        if custom_scene is None:
            # the empty factory scene has no World datablock; create one
            bpy.context.scene.world = bpy.data.worlds.new("World")
        else:
            logger.info("Loading scene from '%s'", custom_scene)
            bpy.ops.wm.open_mainfile(filepath=custom_scene)
@singledispatchmethod
def add_asset(self, asset: core.Asset) -> Any:
    """Dispatch hub for adding assets; concrete handlers are registered per type below.

    Raises:
        NotImplementedError: for asset types without a registered handler.
    """
    raise NotImplementedError(f"Cannot add {asset!r}")
def remove_asset(self, asset: core.Asset) -> None:
    """Remove the Blender counterpart of `asset`, if one is linked to this renderer."""
    if self not in asset.linked_objects:
        return
    blender_obj = asset.linked_objects[self]
    try:
        if isinstance(blender_obj, bpy.types.Object):
            bpy.data.objects.remove(blender_obj, do_unlink=True)
        elif isinstance(blender_obj, bpy.types.Material):
            bpy.data.materials.remove(blender_obj, do_unlink=True)
        else:
            raise NotImplementedError(f"Cannot remove {asset!r}")
    except ReferenceError:
        pass  # the underlying Blender datablock is already gone
@add_asset.register(core.Cube)
@blender_utils.prepare_blender_object
def _add_asset(self, asset: core.Cube):
    """Create a Blender cube primitive and wire observers for the kubric asset."""
    bpy.ops.mesh.primitive_cube_add()
    cube = bpy.context.active_object
    register_object3d_setters(asset, cube)
    # mirror material/scale changes (and scale keyframes) onto the Blender object
    asset.observe(AttributeSetter(cube, "active_material",
                                  converter=self._convert_to_blender_object), "material")
    asset.observe(AttributeSetter(cube, "scale"), "scale")
    asset.observe(KeyframeSetter(cube, "scale"), "scale", type="keyframe")
    return cube
@add_asset.register(core.Sphere)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.Sphere):
    """Create a smooth-shaded icosphere and wire observers for the kubric asset."""
    bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=5)
    bpy.ops.object.shade_smooth()
    sphere = bpy.context.active_object
    register_object3d_setters(obj, sphere)
    # mirror material/scale changes (and scale keyframes) onto the Blender object
    obj.observe(AttributeSetter(sphere, "active_material",
                                converter=self._convert_to_blender_object), "material")
    obj.observe(AttributeSetter(sphere, "scale"), "scale")
    obj.observe(KeyframeSetter(sphere, "scale"), "scale", type="keyframe")
    return sphere
@add_asset.register(core.FileBasedObject)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.FileBasedObject):
    """Import a mesh asset from file (obj/gltf/fbx/x3d/blend) into the scene.

    Returns the imported Blender object, or None when the asset has no
    render file.
    """
    if obj.render_filename is None:
        return None  # if there is no render file, then ignore this object
    _, _, extension = obj.render_filename.rpartition(".")
    with RedirectStream(stream=sys.stdout, disabled=self.verbose):  # reduce the logging noise
        with io.StringIO() as fstdout:  # < scratch stdout buffer
            with redirect_stdout(fstdout):  # < also suppresses python stdout
                if extension == "obj":
                    bpy.ops.import_scene.obj(filepath=obj.render_filename,
                                             use_split_objects=False,
                                             **obj.render_import_kwargs)
                elif extension in ["glb", "gltf"]:
                    bpy.ops.import_scene.gltf(filepath=obj.render_filename,
                                              **obj.render_import_kwargs)
                    # gltf files often contain "Empty" objects as placeholders for camera / lights etc.
                    # here we are interested only in the meshes, so delete everything else
                    non_mesh_objects = [obj for obj in bpy.context.selected_objects if obj.type != "MESH"]
                    bpy.ops.object.delete({"selected_objects": non_mesh_objects})
                    # merge the remaining meshes into a single object
                    bpy.ops.object.join()
                    # By default gltf objects are loaded with a different rotation than obj files
                    # here we compensate for that to ensure alignment between pybullet and blender
                    assert len(bpy.context.selected_objects) == 1
                    blender_obj = bpy.context.selected_objects[0]
                    blender_obj.rotation_quaternion = (0.707107, -0.707107, 0, 0)
                    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
                elif extension == "fbx":
                    bpy.ops.import_scene.fbx(filepath=obj.render_filename,
                                             **obj.render_import_kwargs)
                elif extension in ["x3d", "wrl"]:
                    bpy.ops.import_scene.x3d(filepath=obj.render_filename,
                                             **obj.render_import_kwargs)
                elif extension == "blend":
                    # for now we require the paths to be encoded in the render_import_kwargs. That is:
                    # - filepath = dir / "Object" / object_name
                    # - directory = dir / "Object"
                    # - filename = object_name
                    bpy.ops.wm.append(**obj.render_import_kwargs)
                else:
                    raise ValueError(f"Unknown file-type: '{extension}' for {obj}")
    # every import path is expected to leave exactly one selected object
    assert len(bpy.context.selected_objects) == 1
    blender_obj = bpy.context.selected_objects[0]
    # deactivate auto_smooth because for some reason it lead to no smoothing at all
    # TODO: make smoothing configurable
    blender_obj.data.use_auto_smooth = False
    register_object3d_setters(obj, blender_obj)
    # mirror material/scale changes (and scale keyframes) onto the Blender object
    obj.observe(AttributeSetter(blender_obj, "active_material",
                                converter=self._convert_to_blender_object), "material")
    obj.observe(AttributeSetter(blender_obj, "scale"), "scale")
    obj.observe(KeyframeSetter(blender_obj, "scale"), "scale", type="keyframe")
    return blender_obj
@add_asset.register(core.DirectionalLight)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.DirectionalLight):  # pylint: disable=function-redefined
    """Create a Blender SUN light mirroring the kubric directional light."""
    sun = bpy.data.lights.new(obj.uid, "SUN")
    sun_obj = bpy.data.objects.new(obj.uid, sun)
    register_object3d_setters(obj, sun_obj)
    # color/intensity changes (and keyframes) propagate to the light datablock
    obj.observe(AttributeSetter(sun, "color"), "color")
    obj.observe(KeyframeSetter(sun, "color"), "color", type="keyframe")
    obj.observe(AttributeSetter(sun, "energy"), "intensity")
    obj.observe(KeyframeSetter(sun, "energy"), "intensity", type="keyframe")
    return sun_obj
@add_asset.register(core.RectAreaLight)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.RectAreaLight):
    """Create a Blender AREA light mirroring the kubric rectangular area light."""
    area = bpy.data.lights.new(obj.uid, "AREA")
    area_obj = bpy.data.objects.new(obj.uid, area)
    register_object3d_setters(obj, area_obj)
    obj.observe(AttributeSetter(area, "color"), "color")
    obj.observe(KeyframeSetter(area, "color"), "color", type="keyframe")
    obj.observe(AttributeSetter(area, "energy"), "intensity")
    obj.observe(KeyframeSetter(area, "energy"), "intensity", type="keyframe")
    # kubric width/height map onto Blender's size / size_y
    obj.observe(AttributeSetter(area, "size"), "width")
    obj.observe(KeyframeSetter(area, "size"), "width", type="keyframe")
    obj.observe(AttributeSetter(area, "size_y"), "height")
    obj.observe(KeyframeSetter(area, "size_y"), "height", type="keyframe")
    return area_obj
@add_asset.register(core.PointLight)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.PointLight):
    """Create a Blender POINT light mirroring the kubric point light."""
    point_light = bpy.data.lights.new(obj.uid, "POINT")
    point_light_obj = bpy.data.objects.new(obj.uid, point_light)
    register_object3d_setters(obj, point_light_obj)
    obj.observe(AttributeSetter(point_light, "color"), "color")
    obj.observe(KeyframeSetter(point_light, "color"), "color", type="keyframe")
    obj.observe(AttributeSetter(point_light, "energy"), "intensity")
    obj.observe(KeyframeSetter(point_light, "energy"), "intensity", type="keyframe")
    return point_light_obj
@add_asset.register(core.PerspectiveCamera)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.PerspectiveCamera):
    """Create a Blender perspective camera mirroring the kubric camera."""
    camera = bpy.data.cameras.new(obj.uid)
    camera.type = "PERSP"
    # fix sensor width and determine sensor height by the aspect ratio of the image:
    camera.sensor_fit = "HORIZONTAL"
    camera_obj = bpy.data.objects.new(obj.uid, camera)
    register_object3d_setters(obj, camera_obj)
    # kubric focal_length/sensor_width map onto Blender's lens/sensor_width
    obj.observe(AttributeSetter(camera, "lens"), "focal_length")
    obj.observe(KeyframeSetter(camera, "lens"), "focal_length", type="keyframe")
    obj.observe(AttributeSetter(camera, "sensor_width"), "sensor_width")
    obj.observe(KeyframeSetter(camera, "sensor_width"), "sensor_width", type="keyframe")
    return camera_obj
@add_asset.register(core.OrthographicCamera)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.OrthographicCamera):
    """Create a Blender orthographic camera mirroring the kubric camera."""
    camera = bpy.data.cameras.new(obj.uid)
    camera.type = "ORTHO"
    camera_obj = bpy.data.objects.new(obj.uid, camera)
    register_object3d_setters(obj, camera_obj)
    obj.observe(AttributeSetter(camera, "ortho_scale"), "orthographic_scale")
    obj.observe(KeyframeSetter(camera, "ortho_scale"), "orthographic_scale", type="keyframe")
    return camera_obj
@add_asset.register(core.PrincipledBSDFMaterial)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.PrincipledBSDFMaterial):
    """Create a node-based Principled BSDF material and bind every kubric
    material property (plus its keyframes) to the corresponding shader input."""
    mat = bpy.data.materials.new(obj.uid)
    mat.use_nodes = True
    # node_tree created by use_nodes=True always contains a "Principled BSDF" node
    bsdf_node = mat.node_tree.nodes["Principled BSDF"]
    obj.observe(AttributeSetter(bsdf_node.inputs["Base Color"], "default_value"), "color")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Base Color"], "default_value"), "color",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Roughness"], "default_value"), "roughness")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Roughness"], "default_value"), "roughness",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Metallic"], "default_value"), "metallic")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Metallic"], "default_value"), "metallic",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Specular"], "default_value"), "specular")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Specular"], "default_value"), "specular",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Specular Tint"],
                                "default_value"), "specular_tint")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Specular Tint"], "default_value"), "specular_tint",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["IOR"], "default_value"), "ior")
    obj.observe(KeyframeSetter(bsdf_node.inputs["IOR"], "default_value"), "ior",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Transmission"], "default_value"), "transmission")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Transmission"], "default_value"), "transmission",
                type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Transmission Roughness"], "default_value"),
                "transmission_roughness")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Transmission Roughness"], "default_value"),
                "transmission_roughness", type="keyframe")
    obj.observe(AttributeSetter(bsdf_node.inputs["Emission"], "default_value"), "emission")
    obj.observe(KeyframeSetter(bsdf_node.inputs["Emission"], "default_value"), "emission",
                type="keyframe")
    return mat
@add_asset.register(core.FlatMaterial)
@blender_utils.prepare_blender_object
def _add_asset(self, obj: core.FlatMaterial):
    """Create an unshaded (emission-based) material with optional holdout and
    indirect-visibility control, bound to the kubric FlatMaterial properties."""
    # --- Create node-based material
    mat = bpy.data.materials.new("Holdout")
    mat.use_nodes = True
    tree = mat.node_tree
    tree.nodes.remove(tree.nodes["Principled BSDF"])  # remove the default shader
    output_node = tree.nodes["Material Output"]
    # This material is constructed from three different shaders:
    #  1. if holdout=False then emission_node is responsible for giving the object a uniform color
    #  2. if holdout=True, then the holdout_node is responsible for making the object transparent
    #  3. if indirect_visibility=False then transparent_node makes the node invisible for indirect
    #     effects such as shadows or reflections
    light_path_node = tree.nodes.new(type="ShaderNodeLightPath")
    holdout_node = tree.nodes.new(type="ShaderNodeHoldout")
    transparent_node = tree.nodes.new(type="ShaderNodeBsdfTransparent")
    holdout_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    indirect_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    overall_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    emission_node = tree.nodes.new(type="ShaderNodeEmission")
    tree.links.new(transparent_node.outputs["BSDF"], indirect_mix_node.inputs[1])
    tree.links.new(emission_node.outputs["Emission"], indirect_mix_node.inputs[2])
    tree.links.new(emission_node.outputs["Emission"], holdout_mix_node.inputs[1])
    tree.links.new(holdout_node.outputs["Holdout"], holdout_mix_node.inputs[2])
    # camera rays take the holdout branch; indirect rays take the indirect branch
    tree.links.new(light_path_node.outputs["Is Camera Ray"], overall_mix_node.inputs["Fac"])
    tree.links.new(indirect_mix_node.outputs["Shader"], overall_mix_node.inputs[1])
    tree.links.new(holdout_mix_node.outputs["Shader"], overall_mix_node.inputs[2])
    tree.links.new(overall_mix_node.outputs["Shader"], output_node.inputs["Surface"])
    # bind kubric properties (and their keyframes) to the mix factors / color
    obj.observe(AttributeSetter(emission_node.inputs["Color"], "default_value"), "color")
    obj.observe(KeyframeSetter(emission_node.inputs["Color"], "default_value"), "color",
                type="keyframe")
    obj.observe(AttributeSetter(holdout_mix_node.inputs["Fac"], "default_value"), "holdout")
    obj.observe(KeyframeSetter(holdout_mix_node.inputs["Fac"], "default_value"), "holdout",
                type="keyframe")
    obj.observe(AttributeSetter(indirect_mix_node.inputs["Fac"], "default_value"),
                "indirect_visibility")
    obj.observe(KeyframeSetter(indirect_mix_node.inputs["Fac"], "default_value"),
                "indirect_visibility", type="keyframe")
    return mat
def _setup_scene_shading(self):
    """Build the world shader graph: a mix of ambient illumination and background
    color (selected by camera-ray), plus disconnected HDRI texture nodes that
    set_ambient_illumination / set_background can wire in later."""
    self.blender_scene.world.use_nodes = True
    tree = self.blender_scene.world.node_tree
    links = tree.links
    # clear the tree
    for node in tree.nodes.values():
        tree.nodes.remove(node)
    # create nodes (x, y locations are only for readability in the node editor)
    out_node = tree.nodes.new(type="ShaderNodeOutputWorld")
    out_node.location = 1100, 0
    mix_node = tree.nodes.new(type="ShaderNodeMixShader")
    mix_node.location = 900, 0
    lightpath_node = tree.nodes.new(type="ShaderNodeLightPath")
    lightpath_node.location = 700, 350
    self.ambient_node = tree.nodes.new(type="ShaderNodeBackground")
    self.ambient_node.inputs["Color"].default_value = (0., 0., 0., 1.)
    self.ambient_node.location = 700, 0
    self.bg_node = tree.nodes.new(type="ShaderNodeBackground")
    self.bg_node.inputs["Color"].default_value = (0., 0., 0., 1.)
    self.bg_node.location = 700, -120
    # camera rays see bg_node; all other rays see ambient_node
    links.new(lightpath_node.outputs.get("Is Camera Ray"), mix_node.inputs.get("Fac"))
    links.new(self.ambient_node.outputs.get("Background"), mix_node.inputs[1])
    links.new(self.bg_node.outputs.get("Background"), mix_node.inputs[2])
    links.new(mix_node.outputs.get("Shader"), out_node.inputs.get("Surface"))
    # create nodes for HDRI images, but leave them disconnected until
    # set_ambient_illumination or set_background
    coord_node = tree.nodes.new(type="ShaderNodeTexCoord")
    self.bg_mapping_node = tree.nodes.new(type="ShaderNodeMapping")
    self.bg_mapping_node.location = 200, 200
    self.bg_hdri_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
    self.bg_hdri_node.location = 400, 200
    links.new(coord_node.outputs.get("Generated"), self.bg_mapping_node.inputs.get("Vector"))
    links.new(self.bg_mapping_node.outputs.get("Vector"), self.bg_hdri_node.inputs.get("Vector"))
    self.illum_mapping_node = tree.nodes.new(type="ShaderNodeMapping")
    self.illum_mapping_node.location = 200, -200
    self.ambient_hdri_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
    self.ambient_hdri_node.location = 400, -200
    links.new(coord_node.outputs.get("Generated"), self.illum_mapping_node.inputs.get("Vector"))
    links.new(self.illum_mapping_node.outputs.get("Vector"),
              self.ambient_hdri_node.inputs.get("Vector"))
def _set_ambient_light_color(self, color=(0., 0., 0., 1.0)):
    """Use a uniform color for ambient illumination, detaching any HDRI link."""
    color_input = self.ambient_node.inputs["Color"]
    world_links = self.blender_scene.world.node_tree.links
    # disconnect incoming links from hdri node (if any)
    for link in color_input.links:
        world_links.remove(link)
    color_input.default_value = color
def _set_ambient_light_hdri(self, hdri_filepath=None, hdri_rotation=(0., 0., 0.), strength=1.0):
    """Illuminate the scene with an HDRI environment texture."""
    tree = self.blender_scene.world.node_tree
    # (re)connect the HDRI texture output into the ambient background node
    tree.links.new(self.ambient_hdri_node.outputs.get("Color"),
                   self.ambient_node.inputs.get("Color"))
    self.ambient_hdri_node.image = bpy.data.images.load(hdri_filepath, check_existing=True)
    self.ambient_node.inputs["Strength"].default_value = strength
    self.illum_mapping_node.inputs.get("Rotation").default_value = hdri_rotation
def _set_background_color(self, color=core.get_color("black")):
    """Use a uniform background color, detaching any HDRI link first."""
    color_input = self.bg_node.inputs["Color"]
    world_links = self.blender_scene.world.node_tree.links
    # disconnect incoming links from hdri node (if any)
    for link in color_input.links:
        world_links.remove(link)
    # set color
    color_input.default_value = color
def _set_background_hdri(self, hdri_filepath=None, hdri_rotation=(0., 0., 0.)):
    """Use an HDRI environment texture as the visible background."""
    tree = self.blender_scene.world.node_tree
    # (re)connect the HDRI texture output into the background node
    tree.links.new(self.bg_hdri_node.outputs.get("Color"),
                   self.bg_node.inputs.get("Color"))
    self.bg_hdri_node.image = bpy.data.images.load(hdri_filepath, check_existing=True)
    self.bg_mapping_node.inputs.get("Rotation").default_value = hdri_rotation
def _convert_to_blender_object(self, asset: core.Asset):
return asset.linked_objects[self]
class AttributeSetter:
    """Observer callback that copies a changed attribute value onto a Blender object.

    When invoked with a change event, writes `change.new` (optionally passed
    through `converter`) to `blender_obj.<attribute>`. Undefined placeholder
    values are ignored.
    """

    def __init__(self, blender_obj, attribute: str, converter=None):
        self.blender_obj = blender_obj
        self.attribute = attribute
        self.converter = converter

    def __call__(self, change):
        # change = {"type": "change", "new": (1., 1., 1.), "owner": obj}
        new_value = change.new
        if isinstance(new_value, UndefinedAsset):
            # Undefined placeholders carry no information; skip them.
            return
        if self.converter:
            # use converter if given
            new_value = self.converter(new_value)
        setattr(self.blender_obj, self.attribute, new_value)
class KeyframeSetter:
    """Observer callback that inserts a keyframe for `attribute_path` at the
    frame carried by the change event."""

    def __init__(self, blender_obj, attribute_path: str):
        self.blender_obj = blender_obj
        self.attribute_path = attribute_path

    def __call__(self, change):
        self.blender_obj.keyframe_insert(self.attribute_path, frame=change.frame)
def register_object3d_setters(obj, blender_obj):
    """Mirror position/quaternion changes (and their keyframes) from a kubric
    Object3D onto the corresponding Blender object."""
    assert isinstance(obj, core.Object3D), f"{obj!r} is not an Object3D"
    # (blender attribute, kubric attribute) pairs to keep in sync
    for blender_attr, kubric_attr in (("location", "position"),
                                      ("rotation_quaternion", "quaternion")):
        obj.observe(AttributeSetter(blender_obj, blender_attr), kubric_attr)
        obj.observe(KeyframeSetter(blender_obj, blender_attr), kubric_attr, type="keyframe")
|
import os
import webbrowser
def pytest_unconfigure(config):
    """Open the HTML coverage report in a browser after the test session ends.

    Only acts when pytest-cov is installed, an HTML report was requested,
    and the report file actually exists.
    """
    htmlcov_path = os.path.join("htmlcov", "index.html")
    # BUG FIX: `cov_report` only exists on config.option when pytest-cov is
    # installed; guard with hasattr so a bare pytest run does not crash
    # with AttributeError during unconfigure.
    if (hasattr(config.option, "cov_report") and
            'html' in config.option.cov_report and
            os.path.isfile(htmlcov_path)):
        try:
            webbrowser.open_new_tab(htmlcov_path)
        except webbrowser.Error:
            pass  # opening a browser is best-effort (e.g. headless CI)
conftest: Check whether pytest-cov is installed before reading its option
Previously, running pytest without pytest-cov installed raised an error
after the test session finished. This guard ensures that no longer happens.
import os
import webbrowser
def pytest_unconfigure(config):
    """After the test session, open the HTML coverage report if one was produced."""
    report_path = os.path.join("htmlcov", "index.html")
    # cov_report only exists when pytest-cov is installed
    if not hasattr(config.option, "cov_report"):
        return
    if 'html' not in config.option.cov_report:
        return
    if not os.path.isfile(report_path):
        return
    try:
        webbrowser.open_new_tab(report_path)
    except webbrowser.Error:
        pass  # best effort; ignore browser failures (e.g. headless CI)
|
import geopy
from django.views import generic
from django.conf import settings
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.contrib.auth import get_user_model, authenticate, login
from django.template.loader import render_to_string, get_template
from django.core.mail import EmailMessage
from django.utils.translation import ugettext_lazy as _
from braces.views import AnonymousRequiredMixin, LoginRequiredMixin
from .models import Profile, Place, Phone, Condition
from .forms import (UserRegistrationForm, ProfileSettingsForm,
ProfileForm, PlaceForm, PlaceCreateForm, PhoneForm, AuthorizeUserForm,
FamilyMemberForm, FamilyMemberCreateForm)
from .utils import extend_bbox
# Resolve the active User model once at import time.
User = get_user_model()
# Site-wide language code, passed to the geocoder for localized results.
lang = settings.LANGUAGE_CODE
class DeleteMixin(object):
    """Mixin that replaces hard deletion with a soft-delete flag."""

    def delete(self, request, *args, **kwargs):
        """
        Set the flag 'deleted' to True on the object
        and then redirects to the success URL
        """
        obj = self.get_object()
        self.object = obj
        obj.deleted = True
        obj.save()
        return HttpResponseRedirect(self.get_success_url())
class HomeView(generic.TemplateView):
    """Static landing page."""
    template_name = 'hosting/home.html'

home = HomeView.as_view()
class RegisterView(AnonymousRequiredMixin, generic.CreateView):
    """Sign-up view: creates a new User and logs it in immediately."""
    model = User
    template_name = 'registration/register.html'
    form_class = UserRegistrationForm
    success_url = reverse_lazy('profile_create')
    # already-authenticated visitors are redirected to their profile instead
    authenticated_redirect_url = reverse_lazy('profile_detail')

    def form_valid(self, form):
        # NOTE(review): form.save() is called here and again inside
        # super().form_valid(); the form instance is reused so this appears to
        # re-save the same object — confirm the explicit save is needed.
        self.object = form.save()
        # Keeping this on ice; it interferes with the inline login, probably by wiping the session vars.
        result = super(RegisterView, self).form_valid(form)
        # Log in user
        user = authenticate(
            username=form.cleaned_data['username'],
            password=form.cleaned_data['password1'])
        login(self.request, user)
        messages.success(self.request, "You are logged in.")
        return result

register = RegisterView.as_view()
class ProfileCreateView(LoginRequiredMixin, generic.CreateView):
    """Create the Profile linked to the currently logged-in user."""
    model = Profile
    form_class = ProfileForm
    success_url = reverse_lazy('profile_detail')

    def get_form(self, form_class):
        # the form needs the user to link the new profile to it
        user = self.request.user
        return form_class(user=user, **self.get_form_kwargs())

profile_create = ProfileCreateView.as_view()
class ProfileUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit the (non-deleted) Profile of the currently logged-in user."""
    form_class = ProfileForm
    success_url = reverse_lazy('profile_detail')

    def get_object(self):
        # soft-deleted profiles are treated as nonexistent
        return get_object_or_404(Profile, user=self.request.user, deleted=False)

    def get_form(self, form_class):
        user = self.request.user
        return form_class(user=user, **self.get_form_kwargs())

profile_update = ProfileUpdateView.as_view()
class ProfileDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Soft-delete the logged-in user's profile and its related objects."""
    form_class = ProfileForm
    success_url = reverse_lazy('logout')

    def get_object(self):
        return get_object_or_404(Profile, user=self.request.user, deleted=False)

    def delete(self, request, *args, **kwargs):
        """
        Set the flag 'deleted' to True on the profile
        and some associated objects,
        deactivate the linked user,
        and then redirects to the success URL
        """
        # BUG FIX: self.object was read before anything assigned it
        # (DeleteMixin.delete only sets it when called via super() at the end),
        # which raised AttributeError; fetch the profile explicitly first.
        self.object = self.get_object()
        for place in self.object.places.all():
            place.deleted = True
            place.save()
            # BUG FIX: the original iterated the undefined `self.family_members`.
            # NOTE(review): assuming family members are attached to each place —
            # confirm against the Place model definition.
            for member in place.family_members.all():
                member.deleted = True
                member.save()
        for phone in self.object.phones.all():
            phone.deleted = True
            phone.save()
        # deactivate the account so the user can no longer log in
        self.object.user.is_active = False
        self.object.user.save()
        return super(ProfileDeleteView, self).delete(request, *args, **kwargs)

profile_delete = ProfileDeleteView.as_view()
class ProfileDetailView(LoginRequiredMixin, generic.DetailView):
    """Show the logged-in user's profile with its places and phones."""
    model = Profile
    success_url = reverse_lazy('logout')

    def get_object(self, queryset=None):
        return get_object_or_404(Profile, user=self.request.user, deleted=False)

    def get_context_data(self, **kwargs):
        context = super(ProfileDetailView, self).get_context_data(**kwargs)
        # NOTE(review): this uses `owned_places` while ProfileDeleteView uses
        # `places` — confirm which relation name is intended on Profile.
        context['places'] = self.object.owned_places.all().filter(deleted=False)
        context['phones'] = self.object.phones.all().filter(deleted=False)
        return context

profile_detail = ProfileDetailView.as_view()
class ProfileSettingsView(LoginRequiredMixin, generic.UpdateView):
    """Edit account-level settings (stored on the User, not the Profile)."""
    model = User
    template_name = 'hosting/base_form.html'
    form_class = ProfileSettingsForm
    success_url = reverse_lazy('profile_detail')

    def get_object(self, queryset=None):
        # always operate on the logged-in user; no pk lookup
        return self.request.user

profile_settings = ProfileSettingsView.as_view()
class PlaceCreateView(LoginRequiredMixin, generic.CreateView):
    """Create a Place owned by the logged-in user's profile."""
    model = Place
    form_class = PlaceCreateForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        kwargs = super(PlaceCreateView, self).get_form_kwargs()
        # the form attaches the new place to this profile
        kwargs['profile'] = self.request.user.profile
        return kwargs

place_create = PlaceCreateView.as_view()
class PlaceUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit a Place; restricted to places owned by the logged-in profile."""
    success_url = reverse_lazy('profile_detail')
    form_class = PlaceForm

    def get_form_kwargs(self):
        kwargs = super(PlaceUpdateView, self).get_form_kwargs()
        kwargs['profile'] = self.request.user.profile
        return kwargs

    def get_object(self, queryset=None):
        pk = self.kwargs['pk']
        profile = self.request.user.profile
        # filtering by owner prevents editing other users' places
        return get_object_or_404(Place, pk=pk, owner=profile)

place_update = PlaceUpdateView.as_view()
class PlaceDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Soft-delete (via DeleteMixin) a Place owned by the logged-in profile."""
    success_url = reverse_lazy('profile_detail')

    def get_object(self, queryset=None):
        pk = self.kwargs['pk']
        profile = self.request.user.profile
        # ownership check prevents deleting other users' places
        return get_object_or_404(Place, pk=pk, owner=profile)

place_delete = PlaceDeleteView.as_view()
class PlaceDetailView(generic.DetailView):
    """Public detail page for a Place (no login required)."""
    model = Place

    def get_context_data(self, **kwargs):
        context = super(PlaceDetailView, self).get_context_data(**kwargs)
        # registration form is embedded on the page for anonymous visitors
        context['form'] = UserRegistrationForm
        return context

place_detail = PlaceDetailView.as_view()
class PhoneCreateView(LoginRequiredMixin, generic.CreateView):
    """Add a Phone to the logged-in user's profile."""
    model = Phone
    form_class = PhoneForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        kwargs = super(PhoneCreateView, self).get_form_kwargs()
        kwargs['profile'] = self.request.user.profile
        return kwargs

phone_create = PhoneCreateView.as_view()
class PhoneUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit a Phone, looked up by number within the logged-in profile."""
    form_class = PhoneForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        kwargs = super(PhoneUpdateView, self).get_form_kwargs()
        kwargs['profile'] = self.request.user.profile
        return kwargs

    def get_object(self, queryset=None):
        # URL carries the number without the leading '+'
        number = '+' + self.kwargs['num']
        profile = self.request.user.profile
        return get_object_or_404(Phone, number=number, profile=profile)

phone_update = PhoneUpdateView.as_view()
class PhoneDeleteView(LoginRequiredMixin, generic.DeleteView):
    """Delete a Phone, looked up by number within the logged-in profile.

    NOTE(review): unlike PlaceDeleteView / ProfileDeleteView this performs a
    hard delete (no DeleteMixin), even though ProfileDeleteView sets a
    `deleted` flag on phones — confirm whether soft delete was intended here.
    """
    success_url = reverse_lazy('profile_detail')

    def get_object(self, queryset=None):
        # URL carries the number without the leading '+'
        number = '+' + self.kwargs['num']
        profile = self.request.user.profile
        return get_object_or_404(Phone, number=number, profile=profile)

phone_delete = PhoneDeleteView.as_view()
class SearchView(generic.ListView):
    """Search available Places by free-text location query (geocoded via Nominatim)."""
    model = Place

    def get(self, request, *args, **kwargs):
        self.query = request.GET.get('q')
        if self.query:
            try:
                # 5s timeout keeps a slow geocoder from hanging the request
                geocoder = geopy.geocoders.Nominatim(timeout=5)
                self.location = geocoder.geocode(self.query, language=lang,
                                                 exactly_one=True, addressdetails=True)
            except geopy.exc.GeocoderTimedOut:
                self.location = None
                self.timedout = True
        return super(SearchView, self).get(request, *args, **kwargs)

    def get_queryset(self):
        """Find location by bounding box. Filters also by country,
        because some bbox for some countres are huge (e.g. France, USA).
        """
        if not self.query or not self.location:
            return Place.objects.none()
        # Nominatim raw result: boundingbox = [lat_min, lat_max, lng_min, lng_max]
        bbox = self.location.raw['boundingbox']
        country_code = self.location.raw['address'].get('country_code')
        lats, lngs = bbox[:2], bbox[2:]
        qs = Place.objects.filter(available=True)
        qs = qs.filter(latitude__range=lats, longitude__range=lngs)
        qs = qs.filter(country=country_code.upper()) if country_code else qs
        if not qs.count():
            """If there is no result, extends the bbox."""
            # retry on the enlarged box, without the country restriction
            bbox = extend_bbox(bbox)
            lats, lngs = bbox[:2], bbox[2:]
            qs = Place.objects.filter(available=True)
            qs = qs.filter(latitude__range=lats, longitude__range=lngs)
        return qs

    def get_context_data(self, **kwargs):
        context = super(SearchView, self).get_context_data(**kwargs)
        context['query'] = self.query
        if self.query:
            # raw geocoder payload for the template; '' when geocoding failed
            context['location'] = getattr(self.location, 'raw', '')
            context['timedout'] = getattr(self, 'timedout', False)
        return context

search = SearchView.as_view()
class AuthorizeUserView(LoginRequiredMixin, generic.FormView):
    """Form view to add a user to the list of authorized users
    for a place to be able to see more details."""
    template_name = 'hosting/place_authorized_users.html'
    form_class = AuthorizeUserForm

    def dispatch(self, request, *args, **kwargs):
        # BUG FIX: Place.owner is a Profile (ProfileDetailView reads
        # profile.owned_places; PlaceUpdateView filters owner=profile), so
        # filtering by the raw User object could never match correctly.
        self.place = get_object_or_404(Place,
                                       pk=self.kwargs['pk'],
                                       owner=self.request.user.profile)
        return super(AuthorizeUserView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(AuthorizeUserView, self).get_context_data(**kwargs)
        context['place'] = self.place
        return context

    def form_valid(self, form):
        # Only add (and notify) when the user is not already authorized.
        user = get_object_or_404(User, username=form.cleaned_data['user'])
        if user not in self.place.authorized_users.all():
            self.place.authorized_users.add(user)
            self.send_email(user, self.place)
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse_lazy('authorize_user', kwargs={'pk': self.kwargs['pk']})

    def send_email(self, user, place):
        """Notify the newly authorized user by email."""
        subject = _("[Pasporta Servo] You received an Authorization")
        to = [user.email]
        email_template = 'hosting/emails/new_authorization.txt'
        email_context = {
            'user_first_name': user.profile.first_name or user.username,
            'owner_name': place.owner.full_name,
            'place_id': place.pk,
            'site_domain': self.request.get_host(),
            'site_name': settings.SITE_NAME,
        }
        message = render_to_string(email_template, email_context)
        EmailMessage(subject, message, to=to).send()

authorize_user = AuthorizeUserView.as_view()
class AuthorizeUserLinkView(LoginRequiredMixin, generic.View):
    """Add (or remove if present) a user to the list of authorized users
    for a place to be able to see more details."""

    def dispatch(self, request, *args, **kwargs):
        self.user = get_object_or_404(User, username=kwargs['user'])
        # BUG FIX: Place.owner is a Profile, not a User (see PlaceUpdateView,
        # which filters owner=request.user.profile); filter accordingly.
        self.place = get_object_or_404(Place, pk=kwargs['pk'],
                                       owner=request.user.profile)
        # Toggle: remove when present, add when absent.
        if self.user in self.place.authorized_users.all():
            self.place.authorized_users.remove(self.user)
        else:
            self.place.authorized_users.add(self.user)
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse_lazy('authorized_users', kwargs={'pk': self.kwargs['pk']})

authorize_user_link = AuthorizeUserLinkView.as_view()
class FamilyMemberCreateView(LoginRequiredMixin, generic.CreateView):
    """Create a new family-member profile attached to a place."""
    model = Profile
    form_class = FamilyMemberCreateForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        # BUG FIX: super() was called with the undefined name
        # 'FamilyMemberCreate', which raised NameError at runtime.
        kwargs = super(FamilyMemberCreateView, self).get_form_kwargs()
        kwargs['place'] = get_object_or_404(Place, pk=self.kwargs['pk'])
        return kwargs

family_member_create = FamilyMemberCreateView.as_view()
class FamilyMemberAddMeView(LoginRequiredMixin, generic.FormView):
    """Attach the requester's own profile to a place as a family member."""

    success_url = reverse_lazy('profile_detail')

    def post(self, request, *args, **kwargs):
        target_place = get_object_or_404(Place, pk=kwargs['place_pk'])
        target_place.family_members.add(request.user.profile)
        return HttpResponseRedirect(self.success_url)

family_member_add_me = FamilyMemberAddMeView.as_view()
class FamilyMemberUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit a family member's profile (default pk lookup from the URL)."""
    # NOTE(review): no get_object override, so there is no check that the
    # family member belongs to one of the requester's places — confirm.
    model = Profile
    form_class = FamilyMemberForm
    success_url = reverse_lazy('profile_detail')
family_member_update = FamilyMemberUpdateView.as_view()
class FamilyMemberRemoveView(LoginRequiredMixin, generic.DeleteView):
    """Detach a family member from a place without deleting the profile."""

    model = Profile
    template_name = 'hosting/family_member_confirm_delete.html'
    success_url = reverse_lazy('profile_detail')

    def _get_place(self):
        # The place is identified by the `place_pk` URL keyword argument.
        return get_object_or_404(Place, pk=self.kwargs['place_pk'])

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        self.place = self._get_place()
        self.place.family_members.remove(self.object)
        return HttpResponseRedirect(self.get_success_url())

    def get_context_data(self, **kwargs):
        context = super(FamilyMemberRemoveView, self).get_context_data(**kwargs)
        context['place'] = self._get_place()
        return context

family_member_remove = FamilyMemberRemoveView.as_view()
class FamilyMemberDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Remove the family member for the Place and delete it."""
    model = Profile
    success_url = reverse_lazy('profile_detail')
    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        self.place = get_object_or_404(Place, pk=self.kwargs['place_pk'])
        # Detach from the place first, then let DeleteMixin soft-delete
        # the profile (sets deleted=True rather than removing the row).
        self.place.family_members.remove(self.object)
        return super(FamilyMemberDeleteView, self).delete(request, *args, **kwargs)
family_member_delete = FamilyMemberDeleteView.as_view()
Fixed FamilyMemberCreateView: get_form_kwargs() called super() with the misspelled class name 'FamilyMemberCreate', which raised a NameError; it now references FamilyMemberCreateView.
import geopy
from django.views import generic
from django.conf import settings
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.contrib.auth import get_user_model, authenticate, login
from django.template.loader import render_to_string, get_template
from django.core.mail import EmailMessage
from django.utils.translation import ugettext_lazy as _
from braces.views import AnonymousRequiredMixin, LoginRequiredMixin
from .models import Profile, Place, Phone, Condition
from .forms import (UserRegistrationForm, ProfileSettingsForm,
ProfileForm, PlaceForm, PlaceCreateForm, PhoneForm, AuthorizeUserForm,
FamilyMemberForm, FamilyMemberCreateForm)
from .utils import extend_bbox
User = get_user_model()  # project user model (swappable via settings)
lang = settings.LANGUAGE_CODE  # language passed to the Nominatim geocoder
class DeleteMixin(object):
    """Turn a DeleteView into a soft delete: flag the object instead of
    removing the row. Must precede the DeleteView in the bases so its
    delete() wins in the MRO."""
    def delete(self, request, *args, **kwargs):
        """
        Set the flag 'deleted' to True on the object
        and then redirects to the success URL
        """
        self.object = self.get_object()
        self.object.deleted = True
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())
class HomeView(generic.TemplateView):
    """Static landing page."""
    template_name = 'hosting/home.html'
home = HomeView.as_view()
class RegisterView(AnonymousRequiredMixin, generic.CreateView):
    """Sign-up view: create the User, log it in, then send it on to
    profile creation. Authenticated visitors are redirected away."""
    model = User
    template_name = 'registration/register.html'
    form_class = UserRegistrationForm
    success_url = reverse_lazy('profile_create')
    authenticated_redirect_url = reverse_lazy('profile_detail')
    def form_valid(self, form):
        # NOTE(review): form.save() runs here AND inside super().form_valid(),
        # so the form is saved twice — confirm this is intentional (the
        # comment below suggests the explicit save was kept deliberately).
        self.object = form.save()
        # Keeping this on ice; it interferes with the inline login, probably by wiping the session vars.
        result = super(RegisterView, self).form_valid(form)
        # Log in user
        user = authenticate(
            username=form.cleaned_data['username'],
            password=form.cleaned_data['password1'])
        login(self.request, user)
        messages.success(self.request, "You are logged in.")
        return result
register = RegisterView.as_view()
class ProfileCreateView(LoginRequiredMixin, generic.CreateView):
    """Create the profile attached to the logged-in user."""

    model = Profile
    form_class = ProfileForm
    success_url = reverse_lazy('profile_detail')

    def get_form(self, form_class):
        # ProfileForm takes the current user as an extra constructor kwarg.
        return form_class(user=self.request.user, **self.get_form_kwargs())

profile_create = ProfileCreateView.as_view()
class ProfileUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit the logged-in user's (non-deleted) profile."""

    form_class = ProfileForm
    success_url = reverse_lazy('profile_detail')

    def get_object(self):
        return get_object_or_404(
            Profile, user=self.request.user, deleted=False)

    def get_form(self, form_class):
        # ProfileForm takes the current user as an extra constructor kwarg.
        return form_class(user=self.request.user, **self.get_form_kwargs())

profile_update = ProfileUpdateView.as_view()
class ProfileDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Soft-delete the logged-in user's profile together with its places,
    phones and the places' family members, and deactivate the account."""
    form_class = ProfileForm
    success_url = reverse_lazy('logout')

    def get_object(self):
        return get_object_or_404(Profile, user=self.request.user, deleted=False)

    def delete(self, request, *args, **kwargs):
        """
        Set the flag 'deleted' to True on the profile
        and some associated objects,
        deactivate the linked user,
        and then redirects to the success URL
        """
        # BUG FIX: self.object was read before it was ever assigned
        # (DeleteMixin.delete() only sets it after these loops ran).
        self.object = self.get_object()
        for place in self.object.places.all():
            place.deleted = True
            place.save()
            # BUG FIX: the view has no `family_members` attribute; family
            # members belong to each place (see FamilyMemberAddMeView).
            for member in place.family_members.all():
                member.deleted = True
                member.save()
        for phone in self.object.phones.all():
            phone.deleted = True
            phone.save()
        self.object.user.is_active = False
        self.object.user.save()
        return super(ProfileDeleteView, self).delete(request, *args, **kwargs)

profile_delete = ProfileDeleteView.as_view()
class ProfileDetailView(LoginRequiredMixin, generic.DetailView):
    """Show the logged-in user's profile with its places and phones."""

    model = Profile
    success_url = reverse_lazy('logout')

    def get_object(self, queryset=None):
        return get_object_or_404(
            Profile, user=self.request.user, deleted=False)

    def get_context_data(self, **kwargs):
        context = super(ProfileDetailView, self).get_context_data(**kwargs)
        # Keep soft-deleted records off the page.
        context.update(
            places=self.object.owned_places.filter(deleted=False),
            phones=self.object.phones.filter(deleted=False),
        )
        return context

profile_detail = ProfileDetailView.as_view()
class ProfileSettingsView(LoginRequiredMixin, generic.UpdateView):
    """Edit account-level settings (the User record, not the Profile)."""
    model = User
    template_name = 'hosting/base_form.html'
    form_class = ProfileSettingsForm
    success_url = reverse_lazy('profile_detail')
    def get_object(self, queryset=None):
        # Always operate on the logged-in user; the URL carries no pk.
        return self.request.user
profile_settings = ProfileSettingsView.as_view()
class PlaceCreateView(LoginRequiredMixin, generic.CreateView):
    """Create a new place owned by the logged-in user's profile."""

    model = Place
    form_class = PlaceCreateForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        """Hand the owning profile to the form constructor."""
        form_kwargs = super(PlaceCreateView, self).get_form_kwargs()
        form_kwargs.update(profile=self.request.user.profile)
        return form_kwargs

place_create = PlaceCreateView.as_view()
class PlaceUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit a place; only its owner can reach it."""

    success_url = reverse_lazy('profile_detail')
    form_class = PlaceForm

    def get_form_kwargs(self):
        """Hand the owning profile to the form constructor."""
        form_kwargs = super(PlaceUpdateView, self).get_form_kwargs()
        form_kwargs.update(profile=self.request.user.profile)
        return form_kwargs

    def get_object(self, queryset=None):
        # 404 unless the current user's profile owns the place.
        return get_object_or_404(
            Place,
            pk=self.kwargs['pk'],
            owner=self.request.user.profile,
        )

place_update = PlaceUpdateView.as_view()
class PlaceDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Soft-delete a place (DeleteMixin sets `deleted`); owner only."""
    success_url = reverse_lazy('profile_detail')
    def get_object(self, queryset=None):
        pk = self.kwargs['pk']
        profile = self.request.user.profile
        # 404 unless the requesting user's profile owns the place.
        return get_object_or_404(Place, pk=pk, owner=profile)
place_delete = PlaceDeleteView.as_view()
class PlaceDetailView(generic.DetailView):
    """Public detail page for a place (no login required)."""
    model = Place
    def get_context_data(self, **kwargs):
        context = super(PlaceDetailView, self).get_context_data(**kwargs)
        # NOTE(review): the form *class* (not an instance) is placed in the
        # context, presumably for an inline signup form — confirm the
        # template instantiates it.
        context['form'] = UserRegistrationForm
        return context
place_detail = PlaceDetailView.as_view()
class PhoneCreateView(LoginRequiredMixin, generic.CreateView):
    """Add a phone number to the logged-in user's profile."""

    model = Phone
    form_class = PhoneForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        """Hand the owning profile to the form constructor."""
        form_kwargs = super(PhoneCreateView, self).get_form_kwargs()
        form_kwargs.update(profile=self.request.user.profile)
        return form_kwargs

phone_create = PhoneCreateView.as_view()
class PhoneUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit one of the current user's phone numbers."""
    form_class = PhoneForm
    success_url = reverse_lazy('profile_detail')
    def get_form_kwargs(self):
        # PhoneForm takes the owning profile as an extra constructor kwarg.
        kwargs = super(PhoneUpdateView, self).get_form_kwargs()
        kwargs['profile'] = self.request.user.profile
        return kwargs
    def get_object(self, queryset=None):
        # Rebuild the international number ('+' prefix) from the URL kwarg
        # and scope the lookup to the requesting user's own profile.
        number = '+' + self.kwargs['num']
        profile = self.request.user.profile
        return get_object_or_404(Phone, number=number, profile=profile)
phone_update = PhoneUpdateView.as_view()
class PhoneDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Soft-delete one of the current user's phone numbers.

    BUG FIX: DeleteMixin was missing, so phones were hard-deleted while the
    rest of the app soft-deletes them — ProfileDeleteView flags phones with
    deleted=True and ProfileDetailView filters phones on deleted=False.
    """
    success_url = reverse_lazy('profile_detail')

    def get_object(self, queryset=None):
        # Rebuild the international number ('+' prefix) from the URL kwarg
        # and scope the lookup to the requesting user's own profile.
        number = '+' + self.kwargs['num']
        profile = self.request.user.profile
        return get_object_or_404(Phone, number=number, profile=profile)

phone_delete = PhoneDeleteView.as_view()
class SearchView(generic.ListView):
    """List places matching a free-text location query (?q=...),
    geocoded with Nominatim and filtered by the resulting bounding box."""
    model = Place
    def get(self, request, *args, **kwargs):
        # Geocode once per request and stash the result on the view so
        # get_queryset() and get_context_data() can reuse it.
        self.query = request.GET.get('q')
        if self.query:
            try:
                geocoder = geopy.geocoders.Nominatim(timeout=5)
                self.location = geocoder.geocode(self.query, language=lang,
                    exactly_one=True, addressdetails=True)
            except geopy.exc.GeocoderTimedOut:
                self.location = None
                self.timedout = True
        return super(SearchView, self).get(request, *args, **kwargs)
    def get_queryset(self):
        """Find location by bounding box. Filters also by country,
        because bounding boxes for some countries are huge (e.g. France,
        USA).
        """
        if not self.query or not self.location:
            return Place.objects.none()
        # assumes boundingbox is [lat_min, lat_max, lng_min, lng_max]
        # (Nominatim order) — TODO confirm against extend_bbox().
        bbox = self.location.raw['boundingbox']
        country_code = self.location.raw['address'].get('country_code')
        lats, lngs = bbox[:2], bbox[2:]
        qs = Place.objects.filter(available=True)
        qs = qs.filter(latitude__range=lats, longitude__range=lngs)
        qs = qs.filter(country=country_code.upper()) if country_code else qs
        if not qs.count():
            """If there is no result, extends the bbox."""
            # Retry with the enlarged box; the country filter is dropped here.
            bbox = extend_bbox(bbox)
            lats, lngs = bbox[:2], bbox[2:]
            qs = Place.objects.filter(available=True)
            qs = qs.filter(latitude__range=lats, longitude__range=lngs)
        return qs
    def get_context_data(self, **kwargs):
        context = super(SearchView, self).get_context_data(**kwargs)
        context['query'] = self.query
        if self.query:
            # `location`/`timedout` may be unset; getattr supplies defaults.
            context['location'] = getattr(self.location, 'raw', '')
            context['timedout'] = getattr(self, 'timedout', False)
        return context
search = SearchView.as_view()
class AuthorizeUserView(LoginRequiredMixin, generic.FormView):
    """Form view to add a user to the list of authorized users
    for a place to be able to see more details."""
    template_name = 'hosting/place_authorized_users.html'
    form_class = AuthorizeUserForm

    def dispatch(self, request, *args, **kwargs):
        # BUG FIX: Place.owner is a Profile (ProfileDetailView reads
        # profile.owned_places; PlaceUpdateView filters owner=profile), so
        # filtering by the raw User object could never match correctly.
        self.place = get_object_or_404(Place,
                                       pk=self.kwargs['pk'],
                                       owner=self.request.user.profile)
        return super(AuthorizeUserView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(AuthorizeUserView, self).get_context_data(**kwargs)
        context['place'] = self.place
        return context

    def form_valid(self, form):
        # Only add (and notify) when the user is not already authorized.
        user = get_object_or_404(User, username=form.cleaned_data['user'])
        if user not in self.place.authorized_users.all():
            self.place.authorized_users.add(user)
            self.send_email(user, self.place)
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse_lazy('authorize_user', kwargs={'pk': self.kwargs['pk']})

    def send_email(self, user, place):
        """Notify the newly authorized user by email."""
        subject = _("[Pasporta Servo] You received an Authorization")
        to = [user.email]
        email_template = 'hosting/emails/new_authorization.txt'
        email_context = {
            'user_first_name': user.profile.first_name or user.username,
            'owner_name': place.owner.full_name,
            'place_id': place.pk,
            'site_domain': self.request.get_host(),
            'site_name': settings.SITE_NAME,
        }
        message = render_to_string(email_template, email_context)
        EmailMessage(subject, message, to=to).send()

authorize_user = AuthorizeUserView.as_view()
class AuthorizeUserLinkView(LoginRequiredMixin, generic.View):
    """Add (or remove if present) a user to the list of authorized users
    for a place to be able to see more details."""

    def dispatch(self, request, *args, **kwargs):
        self.user = get_object_or_404(User, username=kwargs['user'])
        # BUG FIX: Place.owner is a Profile, not a User (see PlaceUpdateView,
        # which filters owner=request.user.profile); filter accordingly.
        self.place = get_object_or_404(Place, pk=kwargs['pk'],
                                       owner=request.user.profile)
        # Toggle: remove when present, add when absent.
        if self.user in self.place.authorized_users.all():
            self.place.authorized_users.remove(self.user)
        else:
            self.place.authorized_users.add(self.user)
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse_lazy('authorized_users', kwargs={'pk': self.kwargs['pk']})

authorize_user_link = AuthorizeUserLinkView.as_view()
class FamilyMemberCreateView(LoginRequiredMixin, generic.CreateView):
    """Create a new family-member profile attached to a place."""

    model = Profile
    form_class = FamilyMemberCreateForm
    success_url = reverse_lazy('profile_detail')

    def get_form_kwargs(self):
        """Hand the target place to the form constructor."""
        form_kwargs = super(FamilyMemberCreateView, self).get_form_kwargs()
        form_kwargs.update(
            place=get_object_or_404(Place, pk=self.kwargs['pk']))
        return form_kwargs

family_member_create = FamilyMemberCreateView.as_view()
class FamilyMemberAddMeView(LoginRequiredMixin, generic.FormView):
    """POST-only endpoint: add the requester's own profile to a place's
    family members."""
    success_url = reverse_lazy('profile_detail')
    def post(self, request, *args, **kwargs):
        place = get_object_or_404(Place, pk=kwargs['place_pk'])
        place.family_members.add(request.user.profile)
        return HttpResponseRedirect(self.success_url)
family_member_add_me = FamilyMemberAddMeView.as_view()
class FamilyMemberUpdateView(LoginRequiredMixin, generic.UpdateView):
    """Edit a family member's profile (default pk lookup from the URL)."""
    # NOTE(review): no get_object override, so there is no check that the
    # family member belongs to one of the requester's places — confirm.
    model = Profile
    form_class = FamilyMemberForm
    success_url = reverse_lazy('profile_detail')
family_member_update = FamilyMemberUpdateView.as_view()
class FamilyMemberRemoveView(LoginRequiredMixin, generic.DeleteView):
    """Remove the family member for the Place."""
    # Only detaches the profile from place.family_members; the Profile row
    # itself is kept (FamilyMemberDeleteView also soft-deletes it).
    model = Profile
    template_name = 'hosting/family_member_confirm_delete.html'
    success_url = reverse_lazy('profile_detail')
    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        self.place = get_object_or_404(Place, pk=self.kwargs['place_pk'])
        self.place.family_members.remove(self.object)
        # Redirect directly instead of calling super(), so DeleteView never
        # deletes the Profile row.
        return HttpResponseRedirect(self.get_success_url())
    def get_context_data(self, **kwargs):
        context = super(FamilyMemberRemoveView, self).get_context_data(**kwargs)
        context['place'] = get_object_or_404(Place, pk=self.kwargs['place_pk'])
        return context
family_member_remove = FamilyMemberRemoveView.as_view()
class FamilyMemberDeleteView(LoginRequiredMixin, DeleteMixin, generic.DeleteView):
    """Detach a family member from the place, then soft-delete the profile."""

    model = Profile
    success_url = reverse_lazy('profile_detail')

    def delete(self, request, *args, **kwargs):
        member = self.get_object()
        self.object = member
        self.place = get_object_or_404(Place, pk=self.kwargs['place_pk'])
        self.place.family_members.remove(member)
        # DeleteMixin flags the profile as deleted instead of removing it.
        return super(FamilyMemberDeleteView, self).delete(request, *args, **kwargs)

family_member_delete = FamilyMemberDeleteView.as_view()
|
from __future__ import print_function
import sys
import inspect
import numpy as np
from warnings import warn
from datetime import datetime
from netCDF4 import Dataset, MFDataset, num2date
class ncwrite():
    """
    Save data in a dict to a netCDF file.

    Notes
    -----
    1. Unlimited dimension (None) can only be time and MUST be the 1st
    dimension in the variable dimensions list (or tuple).
    2. Variable dimensions HAVE to BE lists ['time']

    Parameters
    ----------
    input_dict : dict
        Dict of dicts with keys 'dimensions', 'variables' and
        'global attributes' (the docstring previously named these
        incorrectly).
    filename_out : str
        Path to output file name.
    Quiet : bool, optional
        Set to True to suppress progress output.

    Author(s)
    ---------
    Stephane Saux-Picart
    Pierre Cazenave

    Examples
    --------
    >>> lon = np.arange(-10, 10)
    >>> lat = np.arange(50, 60)
    >>> Times = ['2010-02-11 00:10:00.000000', '2010-02-21 00:10:00.000000']
    >>> p90 = np.sin(400).reshape(20, 10, 2)
    >>> data = {}
    >>> data['dimensions'] = {
    ...     'lat': np.size(lat),
    ...     'lon':np.size(lon),
    ...     'time':np.shape(timeStr)[1],
    ...     'DateStrLen':26
    ... }
    >>> data['variables'] = {
    ...     'latitude':{'data':lat,
    ...         'dimensions':['lat'],
    ...         'attributes':{'units':'degrees north'}
    ...     },
    ...     'longitude':{
    ...         'data':lon,
    ...         'dimensions':['lon'],
    ...         'attributes':{'units':'degrees east'}
    ...     },
    ...     'Times':{
    ...         'data':timeStr,
    ...         'dimensions':['time','DateStrLen'],
    ...         'attributes':{'units':'degrees east'},
    ...         'fill_value':-999.0,
    ...         'data_type':'c'
    ...     },
    ...     'p90':{'data':data,
    ...         'dimensions':['lat','lon'],
    ...         'attributes':{'units':'mgC m-3'}}}
    ... data['global attributes'] = {
    ...     'description': 'P90 chlorophyll',
    ...     'source':'netCDF3 python',
    ...     'history':'Created {}'.format(time.ctime(time.time()))
    ... }
    >>> ncwrite(data, 'test.nc')
    """

    def __init__(self, input_dict, filename_out, Quiet=False):
        self.filename_out = filename_out
        self.input_dict = input_dict
        self.Quiet = Quiet
        self.createNCDF()

    def createNCDF(self):
        """
        Function to create and write the data to the specified netCDF file.
        """
        rootgrp = Dataset(self.filename_out, 'w', format='NETCDF3_CLASSIC', clobber=True)

        # Create dimensions.
        if 'dimensions' in self.input_dict:
            # BUG FIX: dict.iteritems() is Python 2 only; items() works on
            # both 2 and 3 (same for the two further occurrences below).
            for k, v in self.input_dict['dimensions'].items():
                rootgrp.createDimension(k, v)
        else:
            if not self.Quiet:
                print('No netCDF created:')
                print(' No dimension key found (!! has to be \"dimensions\"!!!)')
            # BUG FIX: was `return()`, which returns an empty tuple.
            return

        # Create global attributes.
        if 'global attributes' in self.input_dict:
            for k, v in self.input_dict['global attributes'].items():
                rootgrp.setncattr(k, v)
        else:
            if not self.Quiet:
                print(' No global attribute key found (!! has to be \"global attributes\"!!!)')

        # Create variables.
        for k, v in self.input_dict['variables'].items():
            dims = self.input_dict['variables'][k]['dimensions']
            data = v['data']
            # Create correct data type if provided, defaulting to 32-bit float.
            if 'data_type' in self.input_dict['variables'][k]:
                data_type = self.input_dict['variables'][k]['data_type']
            else:
                data_type = 'f4'
            # Check whether we've been given a fill value.
            if 'fill_value' in self.input_dict['variables'][k]:
                fill_value = self.input_dict['variables'][k]['fill_value']
            else:
                fill_value = None
            # Create ncdf variable
            if not self.Quiet:
                print(' Creating variable: {} {} {}'.format(k, data_type, dims))
            var = rootgrp.createVariable(k, data_type, dims, fill_value=fill_value)
            if len(dims) > np.ndim(data):
                # If number of dimensions given to netCDF is greater than the
                # number of dimension of the data, then fill the netCDF
                # variable accordingly.
                if 'time' in dims:
                    # Check for presence of time dimension (which can be
                    # unlimited variable: defined by None).
                    try:
                        var[:] = data
                    except IndexError:
                        raise(IndexError(('Supplied data shape {} does not match the specified'
                        ' dimensions {}, for variable \'{}\'.'.format(data.shape, var.shape, k))))
                else:
                    if not self.Quiet:
                        print('Problem in the number of dimensions')
            else:
                try:
                    var[:] = data
                except IndexError:
                    raise(IndexError(('Supplied data shape {} does not match the specified'
                    ' dimensions {}, for variable \'{}\'.'.format(data.shape, var.shape, k))))

            # Create attributes for variables
            if 'attributes' in self.input_dict['variables'][k]:
                for ka, va in self.input_dict['variables'][k]['attributes'].items():
                    var.setncattr(ka, va)

        rootgrp.close()
def ncread(file, vars=None, dims=False, noisy=False, atts=False, datetimes=False):
    """
    Read in the FVCOM results file and spit out numpy arrays for each of the
    variables specified in the vars list.

    Optionally specify a dict with keys whose names match the dimension names
    in the netCDF file and whose values are strings specifying alternative
    ranges or lists of indices. For example, to extract the first hundred time
    steps, supply dims as:

        dims = {'time':'0:100'}

    To extract the first, 400th and 10,000th values of any array with nodes:

        dims = {'node':'[0, 3999, 9999]'}

    Any dimension not given in dims will be extracted in full.

    Specify atts=True to extract the variable attributes. Set datetimes=True
    to convert the FVCOM Modified Julian Day values to python datetime objects.

    Parameters
    ----------
    file : str, list
        If a string, the full path to an FVCOM netCDF output file. If a list,
        a series of files to be loaded. Data will be concatenated into a single
        dict.
    vars : list, optional
        List of variable names to be extracted. If omitted, all variables are
        returned.
    dims : dict, optional
        Dict whose keys are dimensions and whose values are a string of either
        a range (e.g. {'time':'0:100'}) or a list of individual indices (e.g.
        {'time':'[0, 1, 80, 100]'}). Slicing is supported (::5 for every fifth
        value).
    noisy : bool, optional
        Set to True to enable verbose output.
    atts : bool, optional
        Set to True to enable output of the attributes (defaults to False).
    datetimes : bool, optional
        Set to True to convert FVCOM Modified Julian Days to Python datetime
        objects (creates a new `datetime' key in the output dict. Only
        applies if `vars' includes either the `Times' or `time' variables.
        Note: if FVCOM has been run with single precision output, then the
        conversion of the `time' values to a datetime object suffers rounding
        errors. It's best to either run FVCOM in double precision or specify
        only the `Times' data in the `vars' list.

    Returns
    -------
    FVCOM : dict
        Dict of data extracted from the netCDF file. Keys are those given in
        vars and the data are stored as ndarrays. If `datetimes' is True,
        then this also includes a `datetime' key in which is the FVCOM
        Modified Julian Day time series converted to Python datetime objects.
    attributes : dict, optional
        If atts=True, returns the attributes as a dict for each
        variable in vars. The key `dims' contains the array dimensions (each
        variable contains the names of its dimensions) as well as the shape of
        the dimensions defined in the netCDF file. The key `global' contains
        the global attributes.

    See Also
    --------
    read_probes : read in FVCOM ASCII probes output files.

    """
    # Set to True when we've converted from Modified Julian Day so we don't
    # end up doing the conversion twice, once for `Times' and again for
    # `time' if both variables have been requested in `vars'.
    done_datetimes = False
    # Check whether we'll be able to fulfill the datetime request.
    if datetimes and vars and not list(set(vars) & set(('Times', 'time'))):
        raise ValueError("Conversion from Modified Julian Day to python "
                         "datetimes has been requested but no time variable "
                         "(`Times' or `time') has been requested in vars.")

    # If we have a list, assume it's lots of files and load them all.
    if isinstance(file, list):
        try:
            try:
                rootgrp = MFDataset(file, 'r')
            except IOError as msg:
                raise IOError('Unable to open file {} ({}). Aborting.'.format(file, msg))
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            # Try aggregating along a 'time' dimension (for POLCOMS,
            # for example).
            try:
                rootgrp = MFDataset(file, 'r', aggdim='time')
            except IOError as msg:
                raise IOError('Unable to open file {} ({}). Aborting.'.format(file, msg))
    else:
        rootgrp = Dataset(file, 'r')

    # Create a dict of the dimension names and their current sizes
    read_dims = {}
    for key, var in list(rootgrp.dimensions.items()):
        # Make the dimensions ranges so we can use them to extract all the
        # values.
        read_dims[key] = '0:{}'.format(str(len(var)))

    # Compare the dimensions in the netCDF file with those provided. If we've
    # been given a dict of dimensions which differs from those in the netCDF
    # file, then use those.
    if dims:
        commonKeys = set(read_dims).intersection(list(dims.keys()))
        for k in commonKeys:
            read_dims[k] = dims[k]

    if noisy:
        print("File format: {}".format(rootgrp.file_format))

    if not vars:
        # BUG FIX: this was wrapped in iter(); `key in vars` on an iterator
        # CONSUMES elements, which only worked because .variables preserves
        # insertion order. A plain list makes membership tests safe.
        vars = list(rootgrp.variables.keys())

    FVCOM = {}

    # Save the dimensions in the attributes dict.
    if atts:
        attributes = {}
        attributes['dims'] = read_dims
        attributes['global'] = {}
        for g in rootgrp.ncattrs():
            attributes['global'][g] = getattr(rootgrp, g)

    for key, var in list(rootgrp.variables.items()):
        if noisy:
            print('Found {}'.format(key), end=' ')
            sys.stdout.flush()

        if key in vars:
            vDims = rootgrp.variables[key].dimensions
            toExtract = [read_dims[d] for d in vDims]
            # If we have no dimensions, we must have only a single value, in
            # which case set the dimensions to empty and append the function to
            # extract the value.
            if not toExtract:
                toExtract = '.getValue()'

            # I know, I know, eval() is evil. The string is built only from
            # this file's own dimension spec strings, but do NOT pass
            # untrusted `dims` values in here.
            getData = 'rootgrp.variables[\'{}\']{}'.format(key, str(toExtract).replace('\'', ''))
            FVCOM[key] = eval(getData)

            # Get all attributes for this variable.
            if atts:
                attributes[key] = {}
                # Grab all the attributes for this variable.
                for varatt in rootgrp.variables[key].ncattrs():
                    attributes[key][varatt] = rootgrp.variables[key].getncattr(varatt)

            if datetimes and key in ('Times', 'time') and not done_datetimes:
                # Convert the time data to datetime objects. How we do this
                # depends on which we hit first - `Times' or `time'. For the
                # former, we need to parse the strings, for the latter we can
                # leverage num2date from the netCDF4 module and use the time
                # units attribute.
                if key == 'Times':
                    try:
                        FVCOM['datetime'] = [datetime.strptime(''.join(i), '%Y-%m-%dT%H:%M:%S.%f') for i in FVCOM[key].astype(str)]
                    except ValueError:
                        # Try a different format before bailing out.
                        FVCOM['datetime'] = [datetime.strptime(''.join(i), '%Y/%m/%d %H:%M:%S.%f') for i in FVCOM[key].astype(str)]
                    done_datetimes = True
                elif key == 'time':
                    FVCOM['datetime'] = num2date(FVCOM[key],
                                                 rootgrp.variables[key].units)
                    done_datetimes = True

            if noisy:
                if len(str(toExtract)) < 60:
                    print('(extracted {})'.format(str(toExtract).replace('\'', '')))
                else:
                    print('(extracted given indices)')
        elif noisy:
            print()

    # Close the open file.
    rootgrp.close()

    if atts:
        return FVCOM, attributes
    else:
        return FVCOM
def read_probes(files, noisy=False, locations=False):
    """
    Read in FVCOM probes output files. Reads both 1 and 2D outputs. Currently
    only sensible to import a single station with this function since all data
    is output in a single array.

    Parameters
    ----------
    files : list, tuple
        List of file paths to load.
    noisy : bool, optional
        Set to True to enable verbose output.
    locations : bool, optional
        Set to True to export position and depth data for the sites.

    Returns
    -------
    times : ndarray
        Modified Julian Day times for the extracted time series.
    values : ndarray
        Array of the extracted time series values.
    positions : ndarray, optional
        If locations has been set to True, return an array of the positions
        (lon, lat, depth) for each site.

    See Also
    --------
    ncread : read in FVCOM netCDF output.

    TODO
    ----

    Add support to multiple sites with a single call. Perhaps returning a dict
    with the keys based on the file name is most sensible here?

    """
    if len(files) == 0:
        raise Exception('No files provided.')
    # A single path is wrapped so the loop below works either way.
    if not (isinstance(files, list) or isinstance(files, tuple)):
        files = [files]
    for i, file in enumerate(files):
        if noisy:
            print('Loading file {} of {}...'.format(i + 1, len(files)), end=' ')
        # Get the header so we can extract the position data.
        with open(file, 'r') as f:
            # Latitude and longitude is stored at line 15 (14 in Python
            # counting). Eastings and northings are at 13 (12 in Python
            # indexing).
            lonlatz = [float(pos.strip()) for pos in filter(None, f.readlines()[14].split(' '))]
        # The fixed 18-line probe header is skipped.
        data = np.genfromtxt(file, skip_header=18)
        if i == 0:
            try:
                times = data[:, 0]
                values = data[:, 1:]
            except IndexError:
                # genfromtxt returned a 1D array (single time step).
                times = data[0]
                values = data[1:]
            positions = lonlatz
        else:
            # NOTE(review): this concatenation branch assumes 2D data; a
            # single-row file after the first would raise IndexError here.
            times = np.hstack((times, data[:, 0]))
            values = np.vstack((values, data[:, 1:]))
            positions = np.vstack((positions, lonlatz))
        if noisy:
            print('done.')
    # It may be the case that the files have been supplied in a random order,
    # so sort the values by time here.
    sidx = np.argsort(times)
    times = times[sidx]
    values = values[sidx, ...]  # support both 1 and 2D data
    if locations:
        return times, values, positions
    else:
        return times, values
def write_probes(file, mjd, timeseries, datatype, site, depth, sigma=(-1, -1), lonlat=(0, 0), xy=(0, 0), datestr=None):
    """
    Writes out an FVCOM-formatted time series at a specific location.

    Parameters
    ----------
    file : str
        Path of the output file to create.
    mjd : ndarray, list, tuple
        Date/time in Modified Julian Day
    timeseries : ndarray
        Data to write out (vector/array for 1D/2D). Shape should be
        [time, values], where values can be 1D or 2D.
    datatype : tuple, list
        List with the metadata. Give the long name (e.g. `Temperature') and the
        units (e.g. `Celsius').
    site : str
        Name of the output location.
    depth : float
        Depth at the time series location.
    sigma : ndarray, list, tuple, optional
        Start and end indices of the sigma layer of time series (if
        depth-resolved, -1 otherwise).
    lonlat : ndarray, list, optional
        Coordinates (spherical)
    xy : ndarray, list, optional
        Coordinates (cartesian)
    datestr : str, optional
        Date at which the model was run (contained in the main FVCOM netCDF
        output in the history global variable). If omitted, uses the current
        local date and time. Format is ISO 8601 (YYYY-MM-DDThh:mm:ss.mmmmmm)
        (e.g. 2005-05-25T12:09:56.553467).

    See Also
    --------
    read_probes : read in FVCOM probes output.
    ncread : read in FVCOM netCDF output.

    """
    if not datestr:
        datestr = datetime.now().isoformat()
    # Split the first time into whole days and the sub-day remainder
    # expressed in microseconds, as the FVCOM header expects.
    day = np.floor(mjd[0])
    usec = (mjd[0] - day) * 24.0 * 3600.0 * 1000.0 * 1000.0
    with open(file, 'w') as f:
        # Write the header.
        f.write('{} at {}\n'.format(datatype[0], site))
        f.write('{} ({})\n'.format(datatype[0], datatype[1]))
        f.write('\n')
        f.write(' !========MODEL START DATE==========\n')
        # BUG FIX: the day number was hard-coded as 57419 instead of
        # formatting the supplied start time.
        f.write(' ! Day # : {}\n'.format(day))
        f.write(' ! MicroSecond #: {}\n'.format(usec))
        f.write(' ! (Date Time={}Z)\n'.format(datestr))
        f.write(' !==========================\n')
        f.write(' \n')
        f.write(' K1 K2\n')
        f.write(' {} {}\n'.format(*sigma))
        f.write(' X(M) Y(M) DEPTH(M)\n')
        f.write(' {:.3f} {:.3f} {z:.3f}\n'.format(*xy, z=depth))
        f.write(' LON LAT DEPTH(M)\n')
        f.write(' {:.3f} {:.3f} {z:.3f}\n'.format(*lonlat, z=depth))
        f.write('\n')
        f.write(' DATA FOLLOWS:\n')
        f.write(' Time(days) Data...\n')
        # Generate the line format based on the data we've got.
        if np.max(sigma) < 0 or np.min(sigma) - np.max(sigma) == 0:
            # 1D data, so simple time, value format.
            fmt = '{:.5f} {:.3f}\n'
        else:
            # 2D data, so build the string to match the vertical layers.
            fmt = '{:.5f} '
            for sig in range(np.shape(timeseries)[-1]):
                fmt += '{:.3f} '
            fmt = '{}\n'.format(fmt.strip())
        # Dump the data (this may be slow).
        for line in np.column_stack((mjd, timeseries)):
            f.write(fmt.format(*line))
def elems2nodes(elems, tri, nvert=None):
    """
    Map element-based values onto the grid nodes by averaging over every
    element that shares each node. Because this is an average, applying
    nodes2elems followed by elems2nodes does not recover the original input.

    Parameters
    ----------
    elems : ndarray
        Element values to spread onto the nodes. May be 1D (one value per
        element) or N-D with elements along the last axis.
    tri : ndarray
        Connectivity array of shape (nelem, 3).
    nvert : int, optional
        Number of nodes (vertices) in the grid. Inferred from `tri` if
        omitted.

    Returns
    -------
    nodes : ndarray
        Values averaged onto the grid nodes.

    """
    if not nvert:
        nvert = np.max(tri) + 1

    # How many elements touch each node (used for the final average).
    shared = np.zeros(nvert, dtype=int)

    ndim = np.ndim(elems)
    if ndim == 1:
        nodes = np.zeros(nvert)
        for elem, verts in enumerate(tri):
            for vert in verts:
                nodes[vert] += elems[elem]
                shared[vert] += 1
    elif ndim > 1:
        # Keep all leading dimensions, swapping the trailing element
        # dimension for the node dimension.
        nodes = np.zeros(list(np.shape(elems)[:-1]) + [nvert])
        for elem, verts in enumerate(tri):
            for vert in verts:
                nodes[..., vert] += elems[..., elem]
                shared[vert] += 1

    # Turn the accumulated sums into per-node means.
    nodes /= shared

    return nodes
def nodes2elems(nodes, tri):
    """
    Map node-based values onto the element centres by averaging the three
    nodes of each element. Because this is an average, the round trip
    through nodes2elems and elems2nodes is not necessarily reversible.

    Parameters
    ----------
    nodes : ndarray
        Node values to average onto the element centres (1D or 2D with
        nodes along the last axis).
    tri : ndarray
        Connectivity array of shape (nelem, 3).

    Returns
    -------
    elems : ndarray
        Values averaged onto the element centres.

    """
    ndim = np.ndim(nodes)
    if ndim == 1:
        return nodes[tri].mean(axis=-1)
    if ndim == 2:
        return nodes[..., tri].mean(axis=-1)
    raise Exception('Too many dimensions (maximum of two)')
# For backwards compatibility.
def readFVCOM(file, varList=None, clipDims=False, noisy=False, atts=False):
    """Deprecated alias for ncread, kept for backwards compatibility."""
    warn('{} is deprecated. Use ncread instead.'.format(inspect.stack()[0][3]))
    return ncread(file, vars=varList, dims=clipDims, noisy=noisy, atts=atts)
def readProbes(*args, **kwargs):
    """Deprecated alias for read_probes, kept for backwards compatibility."""
    warn('{} is deprecated. Use read_probes instead.'.format(inspect.stack()[0][3]))
    return read_probes(*args, **kwargs)
def writeProbes(*args, **kwargs):
    """Deprecated alias for write_probes, kept for backwards compatibility."""
    warn('{} is deprecated. Use write_probes instead.'.format(inspect.stack()[0][3]))
    return write_probes(*args, **kwargs)
Fix handling of single-time-step probes files again.
from __future__ import print_function
import sys
import inspect
import numpy as np
from warnings import warn
from datetime import datetime
from netCDF4 import Dataset, MFDataset, num2date
class ncwrite():
    """
    Save data in a dict to a netCDF file.

    Notes
    -----
    1. Unlimited dimension (None) can only be time and MUST be the 1st
    dimension in the variable dimensions list (or tuple).
    2. Variable dimensions HAVE to BE lists ['time']

    Parameters
    ----------
    input_dict : dict
        Dict of dicts with keys 'dimensions', 'variables' and
        'global attributes' (these exact key names are what createNCDF
        looks up).
    filename_out : str
        Path to output file name.
    Quiet : bool, optional
        Set to True to suppress progress messages.

    Author(s)
    ---------
    Stephane Saux-Picart
    Pierre Cazenave

    Examples
    --------
    >>> lon = np.arange(-10, 10)
    >>> lat = np.arange(50, 60)
    >>> Times = ['2010-02-11 00:10:00.000000', '2010-02-21 00:10:00.000000']
    >>> p90 = np.sin(400).reshape(20, 10, 2)
    >>> data = {}
    >>> data['dimensions'] = {
    ...     'lat': np.size(lat),
    ...     'lon': np.size(lon),
    ...     'time': np.shape(timeStr)[1],
    ...     'DateStrLen': 26
    ... }
    >>> data['variables'] = {
    ...     'latitude': {'data': lat,
    ...                  'dimensions': ['lat'],
    ...                  'attributes': {'units': 'degrees north'}},
    ...     'longitude': {'data': lon,
    ...                   'dimensions': ['lon'],
    ...                   'attributes': {'units': 'degrees east'}},
    ...     'Times': {'data': timeStr,
    ...               'dimensions': ['time', 'DateStrLen'],
    ...               'attributes': {'units': 'degrees east'},
    ...               'fill_value': -999.0,
    ...               'data_type': 'c'},
    ...     'p90': {'data': data,
    ...             'dimensions': ['lat', 'lon'],
    ...             'attributes': {'units': 'mgC m-3'}}}
    ... data['global attributes'] = {
    ...     'description': 'P90 chlorophyll',
    ...     'source': 'netCDF3 python',
    ...     'history': 'Created {}'.format(time.ctime(time.time()))
    ... }
    >>> ncwrite(data, 'test.nc')

    """

    def __init__(self, input_dict, filename_out, Quiet=False):
        self.filename_out = filename_out
        self.input_dict = input_dict
        self.Quiet = Quiet
        # Writing happens immediately on construction.
        self.createNCDF()

    def createNCDF(self):
        """
        Function to create and write the data to the specified netCDF file.
        """
        rootgrp = Dataset(self.filename_out, 'w', format='NETCDF3_CLASSIC', clobber=True)

        # Create dimensions.
        if 'dimensions' in self.input_dict:
            # dict.items() rather than the Python 2-only dict.iteritems() so
            # this works under both Python 2 and 3.
            for k, v in self.input_dict['dimensions'].items():
                rootgrp.createDimension(k, v)
        else:
            if not self.Quiet:
                print('No netCDF created:')
                print(' No dimension key found (!! has to be \"dimensions\"!!!)')
            # Plain return (the old `return()` returned an empty tuple).
            return

        # Create global attributes.
        if 'global attributes' in self.input_dict:
            for k, v in self.input_dict['global attributes'].items():
                rootgrp.setncattr(k, v)
        else:
            if not self.Quiet:
                print(' No global attribute key found (!! has to be \"global attributes\"!!!)')

        # Create variables.
        for k, v in self.input_dict['variables'].items():
            dims = self.input_dict['variables'][k]['dimensions']
            data = v['data']
            # Create correct data type if provided, otherwise default to
            # 32-bit floats.
            if 'data_type' in self.input_dict['variables'][k]:
                data_type = self.input_dict['variables'][k]['data_type']
            else:
                data_type = 'f4'
            # Check whether we've been given a fill value.
            if 'fill_value' in self.input_dict['variables'][k]:
                fill_value = self.input_dict['variables'][k]['fill_value']
            else:
                fill_value = None
            # Create ncdf variable
            if not self.Quiet:
                print(' Creating variable: {} {} {}'.format(k, data_type, dims))
            var = rootgrp.createVariable(k, data_type, dims, fill_value=fill_value)
            if len(dims) > np.ndim(data):
                # If number of dimensions given to netCDF is greater than the
                # number of dimension of the data, then fill the netCDF
                # variable accordingly.
                if 'time' in dims:
                    # Check for presence of time dimension (which can be
                    # unlimited variable: defined by None).
                    try:
                        var[:] = data
                    except IndexError:
                        raise IndexError('Supplied data shape {} does not match the specified'
                                         ' dimensions {}, for variable \'{}\'.'.format(data.shape, var.shape, k))
                else:
                    if not self.Quiet:
                        print('Problem in the number of dimensions')
            else:
                try:
                    var[:] = data
                except IndexError:
                    raise IndexError('Supplied data shape {} does not match the specified'
                                     ' dimensions {}, for variable \'{}\'.'.format(data.shape, var.shape, k))

            # Create attributes for variables
            if 'attributes' in self.input_dict['variables'][k]:
                for ka, va in self.input_dict['variables'][k]['attributes'].items():
                    var.setncattr(ka, va)

        rootgrp.close()
def ncread(file, vars=None, dims=False, noisy=False, atts=False, datetimes=False):
    """
    Read in the FVCOM results file and spit out numpy arrays for each of the
    variables specified in the vars list.

    Optionally specify a dict with keys whose names match the dimension names
    in the netCDF file and whose values are strings specifying alternative
    ranges or lists of indices. For example, to extract the first hundred time
    steps, supply dims as:

        dims = {'time':'0:100'}

    To extract the first, 400th and 10,000th values of any array with nodes:

        dims = {'node':'[0, 3999, 9999]'}

    Any dimension not given in dims will be extracted in full.

    Specify atts=True to extract the variable attributes. Set datetimes=True
    to convert the FVCOM Modified Julian Day values to python datetime objects.

    Parameters
    ----------
    file : str, list
        If a string, the full path to an FVCOM netCDF output file. If a list,
        a series of files to be loaded. Data will be concatenated into a single
        dict.
    vars : list, optional
        List of variable names to be extracted. If omitted, all variables are
        returned.
    dims : dict, optional
        Dict whose keys are dimensions and whose values are a string of either
        a range (e.g. {'time':'0:100'}) or a list of individual indices (e.g.
        {'time':'[0, 1, 80, 100]'}). Slicing is supported (::5 for every fifth
        value).
    noisy : bool, optional
        Set to True to enable verbose output.
    atts : bool, optional
        Set to True to enable output of the attributes (defaults to False).
    datetimes : bool, optional
        Set to True to convert FVCOM Modified Julian Days to Python datetime
        objects (creates a new `datetime' key in the output dict. Only
        applies if `vars' includes either the `Times' or `time' variables.
        Note: if FVCOM has been run with single precision output, then the
        conversion of the `time' values to a datetime object suffers rounding
        errors. It's best to either run FVCOM in double precision or specify
        only the `Times' data in the `vars' list.

    Returns
    -------
    FVCOM : dict
        Dict of data extracted from the netCDF file. Keys are those given in
        vars and the data are stored as ndarrays. If `datetimes' is True,
        then this also includes a `datetime' key in which is the FVCOM
        Modified Julian Day time series converted to Python datetime objects.
    attributes : dict, optional
        If atts=True, returns the attributes as a dict for each
        variable in vars. The key `dims' contains the array dimensions (each
        variable contains the names of its dimensions) as well as the shape of
        the dimensions defined in the netCDF file. The key `global' contains
        the global attributes.

    See Also
    --------
    read_probes : read in FVCOM ASCII probes output files.

    """
    # Set to True when we've converted from Modified Julian Day so we don't
    # end up doing the conversion twice, once for `Times' and again for
    # `time' if both variables have been requested in `vars'.
    done_datetimes = False
    # Check whether we'll be able to fulfill the datetime request.
    if datetimes and vars and not list(set(vars) & set(('Times', 'time'))):
        raise ValueError("Conversion from Modified Julian Day to python "
                         "datetimes has been requested but no time variable "
                         "(`Times' or `time') has been requested in vars.")

    # If we have a list, assume it's lots of files and load them all.
    if isinstance(file, list):
        try:
            try:
                rootgrp = MFDataset(file, 'r')
            except IOError as msg:
                raise IOError('Unable to open file {} ({}). Aborting.'.format(file, msg))
        except:
            # Try aggregating along a 'time' dimension (for POLCOMS,
            # for example).
            try:
                rootgrp = MFDataset(file, 'r', aggdim='time')
            except IOError as msg:
                raise IOError('Unable to open file {} ({}). Aborting.'.format(file, msg))
    else:
        rootgrp = Dataset(file, 'r')

    # Create a dict of the dimension names and their current sizes
    read_dims = {}
    for key, var in list(rootgrp.dimensions.items()):
        # Make the dimensions ranges so we can use them to extract all the
        # values.
        read_dims[key] = '0:{}'.format(str(len(var)))

    # Compare the dimensions in the netCDF file with those provided. If we've
    # been given a dict of dimensions which differs from those in the netCDF
    # file, then use those.
    if dims:
        commonKeys = set(read_dims).intersection(list(dims.keys()))
        for k in commonKeys:
            read_dims[k] = dims[k]

    if noisy:
        print("File format: {}".format(rootgrp.file_format))

    if not vars:
        # NOTE(review): this is an iterator, and the `key in vars' membership
        # tests below consume it as the variables loop runs. It appears to
        # work because both iterate the same dict in the same order — confirm
        # before changing either loop.
        vars = iter(list(rootgrp.variables.keys()))

    FVCOM = {}

    # Save the dimensions in the attributes dict.
    if atts:
        attributes = {}
        attributes['dims'] = read_dims
        attributes['global'] = {}
        for g in rootgrp.ncattrs():
            attributes['global'][g] = getattr(rootgrp, g)

    for key, var in list(rootgrp.variables.items()):
        if noisy:
            print('Found {}'.format(key), end=' ')
            sys.stdout.flush()

        if key in vars:
            vDims = rootgrp.variables[key].dimensions

            toExtract = [read_dims[d] for d in vDims]

            # If we have no dimensions, we must have only a single value, in
            # which case set the dimensions to empty and append the function to
            # extract the value.
            if not toExtract:
                toExtract = '.getValue()'

            # Thought I'd finally figured out how to replace the eval approach,
            # but I still can't get past the indexing needed to be able to
            # subset the data.
            # FVCOM[key] = rootgrp.variables.get(key)[0:-1]

            # I know, I know, eval() is evil.
            # NOTE(review): the `dims' strings are substituted straight into
            # this eval() call, so they must come from a trusted caller.
            getData = 'rootgrp.variables[\'{}\']{}'.format(key, str(toExtract).replace('\'', ''))
            FVCOM[key] = eval(getData)

            # Get all attributes for this variable.
            if atts:
                attributes[key] = {}
                # Grab all the attributes for this variable.
                for varatt in rootgrp.variables[key].ncattrs():
                    attributes[key][varatt] = rootgrp.variables[key].getncattr(varatt)

            if datetimes and key in ('Times', 'time') and not done_datetimes:
                # Convert the time data to datetime objects. How we do this
                # depends on which we hit first - `Times' or `time'. For the
                # former, we need to parse the strings, for the latter we can
                # leverage num2date from the netCDF4 module and use the time
                # units attribute.
                if key == 'Times':
                    try:
                        FVCOM['datetime'] = [datetime.strptime(''.join(i), '%Y-%m-%dT%H:%M:%S.%f') for i in FVCOM[key].astype(str)]
                    except ValueError:
                        # Try a different format before bailing out.
                        FVCOM['datetime'] = [datetime.strptime(''.join(i), '%Y/%m/%d %H:%M:%S.%f') for i in FVCOM[key].astype(str)]
                    done_datetimes = True
                elif key == 'time':
                    FVCOM['datetime'] = num2date(FVCOM[key],
                                                 rootgrp.variables[key].units)
                    done_datetimes = True

            if noisy:
                if len(str(toExtract)) < 60:
                    print('(extracted {})'.format(str(toExtract).replace('\'', '')))
                else:
                    print('(extracted given indices)')

        elif noisy:
            print()

    # Close the open file.
    rootgrp.close()

    if atts:
        return FVCOM, attributes
    else:
        return FVCOM
def read_probes(files, noisy=False, locations=False):
    """
    Read in FVCOM probes output files. Reads both 1 and 2D outputs. Currently
    only sensible to import a single station with this function since all data
    is output in a single array.

    Parameters
    ----------
    files : list, tuple
        List of file paths to load.
    noisy : bool, optional
        Set to True to enable verbose output.
    locations : bool, optional
        Set to True to export position and depth data for the sites.

    Returns
    -------
    times : ndarray
        Modified Julian Day times for the extracted time series.
    values : ndarray
        Array of the extracted time series values.
    positions : ndarray, optional
        If locations has been set to True, return an array of the positions
        (lon, lat, depth) for each site.

    See Also
    --------
    ncread : read in FVCOM netCDF output.

    TODO
    ----

    Add support to multiple sites with a single call. Perhaps returning a dict
    with the keys based on the file name is most sensible here?

    """
    if len(files) == 0:
        raise Exception('No files provided.')

    # Allow a bare string/path to be passed in place of a one-element list.
    if not (isinstance(files, list) or isinstance(files, tuple)):
        files = [files]

    for i, file in enumerate(files):
        if noisy:
            print('Loading file {} of {}...'.format(i + 1, len(files)), end=' ')

        # Get the header so we can extract the position data.
        with open(file, 'r') as f:
            # Latitude and longitude is stored at line 15 (14 in Python
            # counting). Eastings and northings are at 13 (12 in Python
            # indexing).
            lonlatz = [float(pos.strip()) for pos in filter(None, f.readlines()[14].split(' '))]

        # The 18-line header is fixed by the FVCOM probes format.
        data = np.genfromtxt(file, skip_header=18)

        if i == 0:
            try:
                times = data[:, 0]
                values = data[:, 1:]
            except IndexError:
                # A single time step makes genfromtxt return a 1D array, so
                # index it accordingly.
                times = data[0]
                values = data[1:]
            positions = lonlatz
        else:
            try:
                times = np.hstack((times, data[:, 0]))
                values = np.vstack((values, data[:, 1:]))
            except IndexError:
                # Single time step again (see above).
                times = np.hstack((times, data[0]))
                values = np.vstack((values, data[1:]))

            positions = np.vstack((positions, lonlatz))

        if noisy:
            print('done.')

    # It may be the case that the files have been supplied in a random order,
    # so sort the values by time here.
    sidx = np.argsort(times)
    times = times[sidx]
    values = values[sidx, ...]  # support both 1 and 2D data

    if locations:
        return times, values, positions
    else:
        return times, values
def write_probes(file, mjd, timeseries, datatype, site, depth, sigma=(-1, -1), lonlat=(0, 0), xy=(0, 0), datestr=None):
    """
    Writes out an FVCOM-formatted time series at a specific location.

    Parameters
    ----------
    file : str
        Path of the probes file to create.
    mjd : ndarray, list, tuple
        Date/time in Modified Julian Day
    timeseries : ndarray
        Data to write out (vector/array for 1D/2D). Shape should be
        [time, values], where values can be 1D or 2D.
    datatype : tuple, list
        List with the metadata. Give the long name (e.g. `Temperature') and the
        units (e.g. `Celsius').
    site : str
        Name of the output location.
    depth : float
        Depth at the time series location.
    sigma : ndarray, list, tuple, optional
        Start and end indices of the sigma layer of time series (if
        depth-resolved, -1 otherwise).
    lonlat : ndarray, list, optional
        Coordinates (spherical)
    xy : ndarray, list, optional
        Coordinates (cartesian)
    datestr : str, optional
        Date at which the model was run (contained in the main FVCOM netCDF
        output in the history global variable). If omitted, uses the current
        local date and time. Format is ISO 8601 (YYYY-MM-DDThh:mm:ss.mmmmmm)
        (e.g. 2005-05-25T12:09:56.553467).

    See Also
    --------
    read_probes : read in FVCOM probes output.
    ncread : read in FVCOM netCDF output.

    """
    if not datestr:
        datestr = datetime.now().isoformat()

    # Split the first time into a whole-day count and the remainder in
    # microseconds, as the FVCOM probes header expects.
    day = np.floor(mjd[0])
    usec = (mjd[0] - day) * 24.0 * 3600.0 * 1000.0 * 1000.0

    with open(file, 'w') as f:
        # Write the header.
        f.write('{} at {}\n'.format(datatype[0], site))
        f.write('{} ({})\n'.format(datatype[0], datatype[1]))
        f.write('\n')
        f.write(' !========MODEL START DATE==========\n')
        # Write the actual start day (the old code hard-coded 57419 and
        # silently ignored `day').
        f.write(' ! Day # : {}\n'.format(int(day)))
        f.write(' ! MicroSecond #: {}\n'.format(usec))
        f.write(' ! (Date Time={}Z)\n'.format(datestr))
        f.write(' !==========================\n')
        f.write(' \n')
        f.write(' K1 K2\n')
        f.write(' {} {}\n'.format(*sigma))
        f.write(' X(M) Y(M) DEPTH(M)\n')
        f.write(' {:.3f} {:.3f} {z:.3f}\n'.format(*xy, z=depth))
        f.write(' LON LAT DEPTH(M)\n')
        f.write(' {:.3f} {:.3f} {z:.3f}\n'.format(*lonlat, z=depth))
        f.write('\n')
        f.write(' DATA FOLLOWS:\n')
        f.write(' Time(days) Data...\n')

        # Generate the line format based on the data we've got: a single
        # value per time step for 1D data, otherwise one column per
        # vertical layer.
        if np.max(sigma) < 0 or np.min(sigma) - np.max(sigma) == 0:
            fmt = '{:.5f} {:.3f}\n'
        else:
            fmt = '{:.5f} ' + '{:.3f} ' * np.shape(timeseries)[-1]
            fmt = '{}\n'.format(fmt.strip())

        # Dump the data (this may be slow).
        for line in np.column_stack((mjd, timeseries)):
            f.write(fmt.format(*line))
def elems2nodes(elems, tri, nvert=None):
    """
    Map element-based values onto the grid nodes by averaging over every
    element that shares each node. Because this is an average, applying
    nodes2elems followed by elems2nodes does not recover the original input.

    Parameters
    ----------
    elems : ndarray
        Element values to spread onto the nodes. May be 1D (one value per
        element) or N-D with elements along the last axis.
    tri : ndarray
        Connectivity array of shape (nelem, 3).
    nvert : int, optional
        Number of nodes (vertices) in the grid. Inferred from `tri` if
        omitted.

    Returns
    -------
    nodes : ndarray
        Values averaged onto the grid nodes.

    """
    if not nvert:
        nvert = np.max(tri) + 1

    # How many elements touch each node (used for the final average).
    shared = np.zeros(nvert, dtype=int)

    ndim = np.ndim(elems)
    if ndim == 1:
        nodes = np.zeros(nvert)
        for elem, verts in enumerate(tri):
            for vert in verts:
                nodes[vert] += elems[elem]
                shared[vert] += 1
    elif ndim > 1:
        # Keep all leading dimensions, swapping the trailing element
        # dimension for the node dimension.
        nodes = np.zeros(list(np.shape(elems)[:-1]) + [nvert])
        for elem, verts in enumerate(tri):
            for vert in verts:
                nodes[..., vert] += elems[..., elem]
                shared[vert] += 1

    # Turn the accumulated sums into per-node means.
    nodes /= shared

    return nodes
def nodes2elems(nodes, tri):
    """
    Map node-based values onto the element centres by averaging the three
    nodes of each element. Because this is an average, the round trip
    through nodes2elems and elems2nodes is not necessarily reversible.

    Parameters
    ----------
    nodes : ndarray
        Node values to average onto the element centres (1D or 2D with
        nodes along the last axis).
    tri : ndarray
        Connectivity array of shape (nelem, 3).

    Returns
    -------
    elems : ndarray
        Values averaged onto the element centres.

    """
    ndim = np.ndim(nodes)
    if ndim == 1:
        return nodes[tri].mean(axis=-1)
    if ndim == 2:
        return nodes[..., tri].mean(axis=-1)
    raise Exception('Too many dimensions (maximum of two)')
# For backwards compatibility.
def readFVCOM(file, varList=None, clipDims=False, noisy=False, atts=False):
    """Deprecated alias for ncread, kept for backwards compatibility."""
    warn('{} is deprecated. Use ncread instead.'.format(inspect.stack()[0][3]))
    return ncread(file, vars=varList, dims=clipDims, noisy=noisy, atts=atts)
def readProbes(*args, **kwargs):
    """Deprecated alias for read_probes, kept for backwards compatibility."""
    warn('{} is deprecated. Use read_probes instead.'.format(inspect.stack()[0][3]))
    return read_probes(*args, **kwargs)
def writeProbes(*args, **kwargs):
    """Deprecated alias for write_probes, kept for backwards compatibility."""
    warn('{} is deprecated. Use write_probes instead.'.format(inspect.stack()[0][3]))
    return write_probes(*args, **kwargs)
|
Open the FTP session at the last possible moment
Previously fp = open(file_path, 'wb') was called a few lines before it was needed. This shouldn't be too much of a problem, but opening the file only right before use keeps the session open for less time, so there is less chance of failure.
|
import logging
import os
from abc import ABCMeta, abstractmethod
from pathlib import Path
from yurl import URL
from tumdlr.downloader import sanitize_filename, download
"""
Post Containers
---
Classes that extend the TumblrPost class are used for parsing and storing post metadata using API response data.
They do not provide any methods for downloading posts directly. Instead, these classes should contain sub-container
objects for their associated post types, which are described in more detail below.
"""
class TumblrPost:
    """
    Base container for all Tumblr post types; holds the metadata that every
    post, regardless of type, provides.

    Supported post types may extend this class to parse additional,
    type-specific metadata.
    """
    def __init__(self, post, blog):
        """
        Args:
            post(dict): API response
            blog(tumdlr.api.TumblrBlog): Parent blog
        """
        self._post = post
        self.blog = blog
        self.log = logging.getLogger('tumdlr.containers.post')

        self.id = None  # type: int
        self.type = None  # type: str
        self.url = None  # type: URL
        self.tags = set()
        self.post_date = None  # type: str
        self.note_count = None  # type: int

        self.files = []

        self._parse_post()

    @property
    def is_text(self):
        """
        Returns:
            bool: True when this is a plain text post.
        """
        return self.type == 'text'

    @property
    def is_photo(self):
        """
        Returns:
            bool: True for both photo and photo-link posts.
        """
        return self.type in ('photo', 'link')

    @property
    def is_video(self):
        """
        Returns:
            bool: True when this is a video post.
        """
        return self.type == 'video'

    def _parse_post(self):
        # Pull the always-available metadata out of the raw API payload.
        raw = self._post

        self.id = raw['id']
        self.type = raw['type']
        self.tags = set(raw.get('tags', []))
        self.note_count = raw.get('note_count')
        self.post_date = raw['date']

        # Not every payload carries a post_url entry.
        self.url = URL(raw['post_url']) if 'post_url' in raw else None

    def __repr__(self):
        return "<TumblrPost id='{id}' type='{type}' url='{url}'>" \
            .format(id=self.id, type=self.type, url=self.url)

    def __str__(self):
        return self.url.as_string() if self.url else ''
class TumblrPhotoSet(TumblrPost):
    """
    Container class for Photo and Photo Link post types
    """
    def __init__(self, post, blog):
        """
        Args:
            post(dict): API response
            blog(tumdlr.api.blog.TumblrBlog): Parent blog
        """
        self.log = logging.getLogger('tumdlr.containers.post')
        # Initialise the attribute BEFORE the super() call: the parent
        # __init__ runs _parse_post(), which assigns the real title, so
        # setting it to None afterwards would clobber the parsed value.
        self.title = None
        super().__init__(post, blog)

    def _parse_post(self):
        """
        Parse all available photos using the best image sizes available
        """
        super()._parse_post()
        self.title = self._post.get('caption', self._post.get('title'))  # caption, else title, else None

        photos = self._post.get('photos', [])
        is_photoset = (len(photos) > 1)

        for page_no, photo in enumerate(photos, 1):
            # Prefer the original size; otherwise pick the widest alternative.
            # max() requires a callable key, not the string 'width'.
            best_size = photo.get('original_size') or max(photo['alt_sizes'], key=lambda size: size['width'])
            best_size['page_no'] = page_no if is_photoset else False
            self.files.append(TumblrPhoto(best_size, self))

    def __repr__(self):
        return "<TumblrPhotoSet title='{title}' id='{id}' photos='{count}'>" \
            .format(title=self.title.split("\n")[0].strip(), id=self.id, count=len(self.files))

    def __str__(self):
        return self.url.as_string()
class TumblrFile(metaclass=ABCMeta):
    """
    This is the base container class for all downloadable resources associated with Tumblr posts.
    """
    def __init__(self, data, container):
        """
        Args:
            data(dict): API response data
            container(TumblrPost): Parent container
        """
        self.log = logging.getLogger('tumdlr.containers.file')

        self._data = data
        self.container = container
        self.type = 'misc'
        self.url = URL(self._data['url'])

    def download(self, context, **kwargs):
        """
        Download this resource to the path derived from *context*.

        Args:
            context(tumdlr.main.Context): CLI request context
            kwargs(dict): Additional arguments to send with the download request

        Note(review): despite the historical docstring claiming a path is
        returned, this method returns None — confirm whether the save path
        should be propagated to callers.
        """
        download(self.url.as_string(), str(self.filepath(context, kwargs)), **kwargs)

    @abstractmethod
    def filepath(self, context, request_data):
        """
        Build the base directory the file will be saved under.

        Args:
            context(tumdlr.main.Context): CLI request context
            request_data(Optional[dict]): Additional arguments to send with the download request

        Returns:
            Path
        """
        # Construct the save basedir
        basedir = Path(context.config['Tumdlr']['SavePath'])

        # Are we categorizing by user?
        if context.config['Categorization']['User']:
            self.log.debug('Categorizing by user: %s', self.container.blog.name)
            basedir = basedir.joinpath(sanitize_filename(self.container.blog.name))

        # Are we categorizing by post type? Use this resource's actual type
        # rather than the previously hard-coded 'photos'.
        if context.config['Categorization']['PostType']:
            self.log.debug('Categorizing by type: %s', self.type)
            basedir = basedir.joinpath(self.type)

        self.log.debug('Basedir constructed: %s', basedir)
        return basedir
class TumblrPhoto(TumblrFile):
    def __init__(self, photo, photoset):
        """
        Args:
            photo(dict): Photo API data
            photoset(TumblrPhotoSet): Parent container
        """
        super().__init__(photo, photoset)
        self.type = 'photos'

        self.width = self._data.get('width')
        self.height = self._data.get('height')
        # page_no is False for standalone photos, 1-based for photoset pages.
        self.page_no = self._data.get('page_no', False)

    def filepath(self, context, request_data):
        """
        Get the full file path to save the downloaded file to

        Args:
            context(tumdlr.main.Context): CLI request context
            request_data(Optional[dict]): Additional arguments to send with the download request

        Returns:
            str: Full save path, including the file extension.
        """
        assert isinstance(self.container, TumblrPhotoSet)
        # The parent signature is filepath(context, request_data); the old
        # call omitted request_data and raised a TypeError.
        filepath = super().filepath(context, request_data)

        request_data['progress_data']['Caption'] = self.container.title

        # Are we categorizing by photosets?
        if self.page_no and context.config['Categorization']['Photosets']:
            self.log.debug('Categorizing by photoset: %s', self.container.id)
            filepath = filepath.joinpath(sanitize_filename(str(self.container.id)))

        # Prepend the page number for photosets
        if self.page_no:
            filepath = filepath.joinpath(sanitize_filename('p{pn}_{pt}'.format(pn=self.page_no,
                                                                               pt=self.container.title)))
            request_data['progress_data']['Photoset Page'] = '{cur} / {tot}' \
                .format(cur=self.page_no, tot=len(self.container.files))
        else:
            filepath = filepath.joinpath(sanitize_filename(self.container.title))

        # Work out the file extension and return
        return str(filepath) + os.path.splitext(self.url.as_string())[1]

    def __repr__(self):
        return "<TumblrPhoto url='{url}' width='{w}' height='{h}'>".format(url=self.url, w=self.width, h=self.height)

    def __str__(self):
        return self.url.as_string()
Change the TumblrFile type to a class constant; fix a variable typo.
import logging
import os
from abc import ABCMeta, abstractmethod
from pathlib import Path
from yurl import URL
from tumdlr.downloader import sanitize_filename, download
"""
Post Containers
---
Classes that extend the TumblrPost class are used for parsing and storing post metadata using API response data.
They do not provide any methods for downloading posts directly. Instead, these classes should contain sub-container
objects for their associated post types, which are described in more detail below.
"""
class TumblrPost:
    """
    Base container for all Tumblr post types; holds the metadata that every
    post, regardless of type, provides.

    Supported post types may extend this class to parse additional,
    type-specific metadata.
    """
    def __init__(self, post, blog):
        """
        Args:
            post(dict): API response
            blog(tumdlr.api.TumblrBlog): Parent blog
        """
        self._post = post
        self.blog = blog
        self.log = logging.getLogger('tumdlr.containers.post')

        self.id = None  # type: int
        self.type = None  # type: str
        self.url = None  # type: URL
        self.tags = set()
        self.post_date = None  # type: str
        self.note_count = None  # type: int

        self.files = []

        self._parse_post()

    @property
    def is_text(self):
        """
        Returns:
            bool: True when this is a plain text post.
        """
        return self.type == 'text'

    @property
    def is_photo(self):
        """
        Returns:
            bool: True for both photo and photo-link posts.
        """
        return self.type in ('photo', 'link')

    @property
    def is_video(self):
        """
        Returns:
            bool: True when this is a video post.
        """
        return self.type == 'video'

    def _parse_post(self):
        # Pull the always-available metadata out of the raw API payload.
        raw = self._post

        self.id = raw['id']
        self.type = raw['type']
        self.tags = set(raw.get('tags', []))
        self.note_count = raw.get('note_count')
        self.post_date = raw['date']

        # Not every payload carries a post_url entry.
        self.url = URL(raw['post_url']) if 'post_url' in raw else None

    def __repr__(self):
        return "<TumblrPost id='{id}' type='{type}' url='{url}'>" \
            .format(id=self.id, type=self.type, url=self.url)

    def __str__(self):
        return self.url.as_string() if self.url else ''
class TumblrPhotoSet(TumblrPost):
    """
    Container class for Photo and Photo Link post types
    """
    def __init__(self, post, blog):
        """
        Args:
            post(dict): API response
            blog(tumdlr.api.blog.TumblrBlog): Parent blog
        """
        self.log = logging.getLogger('tumdlr.containers.post')
        # Initialise the attribute BEFORE the super() call: the parent
        # __init__ runs _parse_post(), which assigns the real title, so
        # setting it to None afterwards would clobber the parsed value.
        self.title = None
        super().__init__(post, blog)

    def _parse_post(self):
        """
        Parse all available photos using the best image sizes available
        """
        super()._parse_post()
        self.title = self._post.get('caption', self._post.get('title'))  # caption, else title, else None

        photos = self._post.get('photos', [])
        is_photoset = (len(photos) > 1)

        for page_no, photo in enumerate(photos, 1):
            # Prefer the original size; otherwise pick the widest alternative.
            # max() requires a callable key, not the string 'width'.
            best_size = photo.get('original_size') or max(photo['alt_sizes'], key=lambda size: size['width'])
            best_size['page_no'] = page_no if is_photoset else False
            self.files.append(TumblrPhoto(best_size, self))

    def __repr__(self):
        return "<TumblrPhotoSet title='{title}' id='{id}' photos='{count}'>" \
            .format(title=self.title.split("\n")[0].strip(), id=self.id, count=len(self.files))

    def __str__(self):
        return self.url.as_string()
class TumblrFile(metaclass=ABCMeta):
    """
    Base container for every downloadable resource attached to a Tumblr post.
    """

    # Sub-directory name used when categorizing saves by post type;
    # subclasses override this.
    FILE_TYPE = 'misc'

    def __init__(self, data, container):
        """
        Args:
            data(dict): API response data
            container(TumblrPost): Parent container
        """
        self.log = logging.getLogger('tumdlr.containers.file')
        self._data = data
        self.container = container
        self.url = URL(self._data['url'])

    def download(self, context, **kwargs):
        """
        Download this resource to the path derived from *context*.

        Args:
            context(tumdlr.main.Context): CLI request context
            kwargs(dict): Additional arguments to send with the download request

        Note(review): despite the historical docstring claiming a path is
        returned, this method returns None — confirm whether the save path
        should be propagated to callers.
        """
        download(self.url.as_string(), str(self.filepath(context, kwargs)), **kwargs)

    @abstractmethod
    def filepath(self, context, request_data):
        """
        Build the base directory the file will be saved under.

        Args:
            context(tumdlr.main.Context): CLI request context
            request_data(Optional[dict]): Additional arguments to send with the download request

        Returns:
            Path
        """
        save_root = Path(context.config['Tumdlr']['SavePath'])
        categories = context.config['Categorization']

        # Optional per-user sub-directory.
        if categories['User']:
            self.log.debug('Categorizing by user: %s', self.container.blog.name)
            save_root = save_root.joinpath(sanitize_filename(self.container.blog.name))

        # Optional per-post-type sub-directory.
        if categories['PostType']:
            self.log.debug('Categorizing by type: %s', self.FILE_TYPE)
            save_root = save_root.joinpath(self.FILE_TYPE)

        self.log.debug('Basedir constructed: %s', save_root)
        return save_root
class TumblrPhoto(TumblrFile):
    FILE_TYPE = 'photos'

    def __init__(self, photo, photoset):
        """
        Args:
            photo(dict): Photo API data
            photoset(TumblrPhotoSet): Parent container
        """
        super().__init__(photo, photoset)

        self.width = self._data.get('width')
        self.height = self._data.get('height')
        # page_no is False for standalone photos, 1-based for photoset pages.
        self.page_no = self._data.get('page_no', False)

    def filepath(self, context, request_data):
        """
        Get the full file path to save the downloaded file to

        Args:
            context(tumdlr.main.Context): CLI request context
            request_data(Optional[dict]): Additional arguments to send with the download request

        Returns:
            str: Full save path, including the file extension.
        """
        assert isinstance(self.container, TumblrPhotoSet)
        # The parent signature is filepath(context, request_data); the old
        # call omitted request_data and raised a TypeError.
        filepath = super().filepath(context, request_data)

        request_data['progress_data']['Caption'] = self.container.title

        # Are we categorizing by photosets?
        if self.page_no and context.config['Categorization']['Photosets']:
            self.log.debug('Categorizing by photoset: %s', self.container.id)
            filepath = filepath.joinpath(sanitize_filename(str(self.container.id)))

        # Prepend the page number for photosets
        if self.page_no:
            filepath = filepath.joinpath(sanitize_filename('p{pn}_{pt}'.format(pn=self.page_no,
                                                                               pt=self.container.title)))
            request_data['progress_data']['Photoset Page'] = '{cur} / {tot}' \
                .format(cur=self.page_no, tot=len(self.container.files))
        else:
            filepath = filepath.joinpath(sanitize_filename(self.container.title))

        # Work out the file extension and return
        return str(filepath) + os.path.splitext(self.url.as_string())[1]

    def __repr__(self):
        return "<TumblrPhoto url='{url}' width='{w}' height='{h}'>".format(url=self.url, w=self.width, h=self.height)

    def __str__(self):
        return self.url.as_string()
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, six, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
    """
    General Boto Client error (error accessing AWS)
    """

    def __init__(self, reason, *args):
        # ``reason`` is kept as an attribute so handlers can inspect it
        # without parsing the stringified exception.
        super(BotoClientError, self).__init__(reason, *args)
        self.reason = reason

    def __repr__(self):
        return 'BotoClientError: %s' % self.reason

    def __str__(self):
        return 'BotoClientError: %s' % self.reason


class SDBPersistenceError(StandardError):
    """SDB persistence error."""
    pass


# Narrow subclasses exist so callers can catch specific services
# (see the module docstring).
class StoragePermissionsError(BotoClientError):
    """
    Permissions error when accessing a bucket or key on a storage service.
    """
    pass


class S3PermissionsError(StoragePermissionsError):
    """
    Permissions error when accessing a bucket or key on S3.
    """
    pass


class GSPermissionsError(StoragePermissionsError):
    """
    Permissions error when accessing a bucket or key on GS.
    """
    pass
class BotoServerError(StandardError):
    """
    Base class for errors returned by an AWS service.

    Parses the response body (XML or JSON, string or pre-parsed dict) to
    populate ``request_id``, ``error_code``, ``message`` and ``box_usage``.
    """

    def __init__(self, status, reason, body=None, *args):
        super(BotoServerError, self).__init__(status, reason, body, *args)
        self.status = status
        self.reason = reason
        self.body = body or ''
        self.request_id = None
        self.error_code = None
        self._error_message = None
        self.message = ''
        self.box_usage = None

        if isinstance(self.body, bytes):
            try:
                self.body = self.body.decode('utf-8')
            except UnicodeDecodeError:
                # Bug fix: was a bare ``except:`` which swallowed everything
                # (including KeyboardInterrupt); only a decode failure is
                # expected here -- anything else should propagate.
                boto.log.debug('Unable to decode body from bytes!')

        # Attempt to parse the error response. If body isn't present,
        # then just ignore the error response.
        if self.body:
            # Check if it looks like a ``dict``.
            if hasattr(self.body, 'items'):
                # It's not a string, so trying to parse it will fail.
                # But since it's data, we can work with that.
                self.request_id = self.body.get('RequestId', None)

                if 'Error' in self.body:
                    # XML-style
                    error = self.body.get('Error', {})
                    self.error_code = error.get('Code', None)
                    self.message = error.get('Message', None)
                else:
                    # JSON-style.
                    self.message = self.body.get('message', None)
            else:
                try:
                    h = handler.XmlHandlerWrapper(self, self)
                    h.parseString(self.body)
                except (TypeError, xml.sax.SAXParseException) as pe:
                    # What if it's JSON? Let's try that.
                    try:
                        parsed = json.loads(self.body)

                        if 'RequestId' in parsed:
                            self.request_id = parsed['RequestId']
                        if 'Error' in parsed:
                            if 'Code' in parsed['Error']:
                                self.error_code = parsed['Error']['Code']
                            if 'Message' in parsed['Error']:
                                self.message = parsed['Error']['Message']
                    except (TypeError, ValueError):
                        # Remove unparsable message body so we don't include garbage
                        # in exception. But first, save self.body in self.error_message
                        # because occasionally we get error messages from Eucalyptus
                        # that are just text strings that we want to preserve.
                        self.message = self.body
                        self.body = None

    def __getattr__(self, name):
        # ``error_message`` and ``code`` are read-aliases for ``message``
        # and ``error_code``.
        if name == 'error_message':
            return self.message
        if name == 'code':
            return self.error_code
        raise AttributeError

    def __setattr__(self, name, value):
        # Writes to ``error_message`` are redirected into ``message``.
        if name == 'error_message':
            self.message = value
        else:
            super(BotoServerError, self).__setattr__(name, value)

    def __repr__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    def __str__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    def startElement(self, name, attrs, connection):
        # SAX hook; no nested elements handled at this level.
        pass

    def endElement(self, name, value, connection):
        if name in ('RequestId', 'RequestID'):
            self.request_id = value
        elif name == 'Code':
            self.error_code = value
        elif name == 'Message':
            self.message = value
        elif name == 'BoxUsage':
            self.box_usage = value
        return None

    def _cleanupParsedProperties(self):
        # Reset everything populated by the SAX parse.
        self.request_id = None
        self.error_code = None
        self.message = None
        self.box_usage = None
class ConsoleOutput(object):
    """SAX result object holding parsed console-output fields."""

    def __init__(self, parent=None):
        self.parent = parent
        self.instance_id = None
        self.timestamp = None
        self.comment = None
        self.output = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'output':
            # The output element arrives base64-encoded; store raw bytes.
            self.output = base64.b64decode(value)
        elif name == 'instanceId':
            self.instance_id = value
        else:
            # Any other element maps straight onto an attribute.
            setattr(self, name, value)
class StorageCreateError(BotoServerError):
    """
    Error creating a bucket or key on a storage service.
    """

    def __init__(self, status, reason, body=None):
        # Declared before super().__init__ because the SAX parse triggered
        # there may populate it via endElement.
        self.bucket = None
        super(StorageCreateError, self).__init__(status, reason, body)

    def endElement(self, name, value, connection):
        if name == 'BucketName':
            self.bucket = value
        else:
            return super(StorageCreateError, self).endElement(name, value, connection)
# Service-specific create/copy errors; the subclasses exist so callers can
# catch narrowly (see the module docstring).
class S3CreateError(StorageCreateError):
    """
    Error creating a bucket or key on S3.
    """
    pass


class GSCreateError(StorageCreateError):
    """
    Error creating a bucket or key on GS.
    """
    pass


class StorageCopyError(BotoServerError):
    """
    Error copying a key on a storage service.
    """
    pass


class S3CopyError(StorageCopyError):
    """
    Error copying a key on S3.
    """
    pass


class GSCopyError(StorageCopyError):
    """
    Error copying a key on GS.
    """
    pass
class SQSError(BotoServerError):
    """
    General Error on Simple Queue Service.
    """

    def __init__(self, status, reason, body=None):
        # Declared before super().__init__ so the SAX parse can set them.
        self.detail = None
        self.type = None
        super(SQSError, self).__init__(status, reason, body)

    def startElement(self, name, attrs, connection):
        return super(SQSError, self).startElement(name, attrs, connection)

    def endElement(self, name, value, connection):
        if name == 'Detail':
            self.detail = value
        elif name == 'Type':
            self.type = value
        else:
            return super(SQSError, self).endElement(name, value, connection)

    def _cleanupParsedProperties(self):
        super(SQSError, self)._cleanupParsedProperties()
        for p in ('detail', 'type'):
            setattr(self, p, None)


class SQSDecodeError(BotoClientError):
    """
    Error when decoding an SQS message.
    """

    def __init__(self, reason, message):
        super(SQSDecodeError, self).__init__(reason, message)
        # Keep the undecodable message for inspection by the caller.
        self.message = message

    def __repr__(self):
        return 'SQSDecodeError: %s' % self.reason

    def __str__(self):
        return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
    """
    Error in response from a storage service.
    """

    def __init__(self, status, reason, body=None):
        # Declared before super().__init__ so the SAX parse can set it.
        self.resource = None
        super(StorageResponseError, self).__init__(status, reason, body)

    def startElement(self, name, attrs, connection):
        return super(StorageResponseError, self).startElement(name, attrs,
                                                              connection)

    def endElement(self, name, value, connection):
        if name == 'Resource':
            self.resource = value
        else:
            return super(StorageResponseError, self).endElement(name, value,
                                                                connection)

    def _cleanupParsedProperties(self):
        super(StorageResponseError, self)._cleanupParsedProperties()
        # Bug fix: ('resource') is just a parenthesized string -- iterating
        # it set one-letter attributes 'r', 'e', 's', ... instead of
        # resetting ``resource``. A one-element tuple is what was intended.
        for p in ('resource',):
            setattr(self, p, None)
# Service-specific response errors for narrow except clauses.
class S3ResponseError(StorageResponseError):
    """
    Error in response from S3.
    """
    pass


class GSResponseError(StorageResponseError):
    """
    Error in response from GS.
    """
    pass
class EC2ResponseError(BotoServerError):
    """
    Error in response from EC2.
    """

    def __init__(self, status, reason, body=None):
        self.errors = None
        self._errorResultSet = []
        super(EC2ResponseError, self).__init__(status, reason, body)
        # Flatten the parsed <Errors> collection into (code, message) pairs.
        self.errors = [(e.error_code, e.error_message)
                       for e in self._errorResultSet]
        if len(self.errors):
            # Surface the first error as the canonical code/message.
            self.error_code, self.error_message = self.errors[0]

    def startElement(self, name, attrs, connection):
        if name == 'Errors':
            self._errorResultSet = ResultSet([('Error', _EC2Error)])
            return self._errorResultSet
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'RequestID':
            self.request_id = value
        else:
            return None  # don't call subclass here

    def _cleanupParsedProperties(self):
        super(EC2ResponseError, self)._cleanupParsedProperties()
        self._errorResultSet = []
        # Bug fix: ('errors') is a plain string; iterating it set attributes
        # 'e', 'r', 'o', 's' instead of resetting ``errors``. A one-element
        # tuple is what was intended.
        for p in ('errors',):
            setattr(self, p, None)
class JSONResponseError(BotoServerError):
    """
    This exception expects the fully parsed and decoded JSON response
    body to be passed as the body parameter.

    :ivar status: The HTTP status code.
    :ivar reason: The HTTP reason message.
    :ivar body: The Python dict that represents the decoded JSON
        response body.
    :ivar error_message: The full description of the AWS error encountered.
    :ivar error_code: A short string that identifies the AWS error
        (e.g. ConditionalCheckFailedException)
    """

    def __init__(self, status, reason, body=None, *args):
        # NOTE(review): deliberately does not call super().__init__();
        # attribute writes still route through BotoServerError.__setattr__,
        # so assigning ``error_message`` actually stores into ``self.message``.
        self.status = status
        self.reason = reason
        self.body = body
        if self.body:
            self.error_message = self.body.get('message', None)
            self.error_code = self.body.get('__type', None)
            if self.error_code:
                # '__type' looks like 'prefix#ErrorName'; keep the short name.
                self.error_code = self.error_code.split('#')[-1]


class DynamoDBResponseError(JSONResponseError):
    pass


class SWFResponseError(JSONResponseError):
    pass


class EmrResponseError(BotoServerError):
    """
    Error in response from EMR
    """
    pass
class _EC2Error(object):
    """Single <Error> entry collected while parsing an EC2 <Errors> block."""

    def __init__(self, connection=None):
        self.connection = connection
        self.error_code = None
        self.error_message = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Message':
            self.error_message = value
        elif name == 'Code':
            self.error_code = value
        else:
            return None
class SDBResponseError(BotoServerError):
    """
    Error in responses from SDB.
    """
    pass


class AWSConnectionError(BotoClientError):
    """
    General error connecting to Amazon Web Services.
    """
    pass


class StorageDataError(BotoClientError):
    """
    Error receiving data from a storage service.
    """
    pass


class S3DataError(StorageDataError):
    """
    Error receiving data from S3.
    """
    pass


class GSDataError(StorageDataError):
    """
    Error receiving data from GS.
    """
    pass


class InvalidUriError(Exception):
    """Exception raised when URI is invalid."""

    def __init__(self, message):
        super(InvalidUriError, self).__init__(message)
        # Kept as an attribute for callers that read .message directly.
        self.message = message


class InvalidAclError(Exception):
    """Exception raised when ACL XML is invalid."""

    def __init__(self, message):
        super(InvalidAclError, self).__init__(message)
        self.message = message


class InvalidCorsError(Exception):
    """Exception raised when CORS XML is invalid."""

    def __init__(self, message):
        super(InvalidCorsError, self).__init__(message)
        self.message = message


class NoAuthHandlerFound(Exception):
    """Is raised when no auth handlers were found ready to authenticate."""
    pass


class InvalidLifecycleConfigError(Exception):
    """Exception raised when GCS lifecycle configuration XML is invalid."""

    def __init__(self, message):
        super(InvalidLifecycleConfigError, self).__init__(message)
        self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
    """Enum-like constants describing how a failed resumable transfer
    should be handled."""

    # START_OVER means an attempt to resume an existing transfer failed,
    # and a new resumable upload should be attempted (without delay).
    START_OVER = 'START_OVER'

    # WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
    # be retried after a time delay within the current process.
    WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'

    # ABORT_CUR_PROCESS means the resumable transfer failed and that
    # delaying/retrying within the current process will not help. If
    # resumable transfer included a state tracker file the upload can be
    # retried again later, in another process (e.g., a later run of gsutil).
    ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'

    # ABORT means the resumable transfer failed in a way that it does not
    # make sense to continue in the current process, and further that the
    # current tracker ID should not be preserved (in a tracker file if one
    # was specified at resumable upload start time). If the user tries again
    # later (e.g., a separate run of gsutil) it will get a new resumable
    # upload ID.
    ABORT = 'ABORT'
class ResumableUploadException(Exception):
    """
    Exception raised for various resumable upload problems.

    self.disposition is of type ResumableTransferDisposition.
    """

    def __init__(self, message, disposition):
        super(ResumableUploadException, self).__init__(message, disposition)
        self.message = message
        self.disposition = disposition

    def __repr__(self):
        return 'ResumableUploadException("%s", %s)' % (
            self.message, self.disposition)


class ResumableDownloadException(Exception):
    """
    Exception raised for various resumable download problems.

    self.disposition is of type ResumableTransferDisposition.
    """

    def __init__(self, message, disposition):
        super(ResumableDownloadException, self).__init__(message, disposition)
        self.message = message
        self.disposition = disposition

    def __repr__(self):
        return 'ResumableDownloadException("%s", %s)' % (
            self.message, self.disposition)


class TooManyRecordsException(Exception):
    """
    Exception raised when a search of Route53 records returns more
    records than requested.
    """

    def __init__(self, message):
        super(TooManyRecordsException, self).__init__(message)
        self.message = message
class PleaseRetryException(Exception):
    """
    Indicates a request should be retried.

    :ivar message: Human-readable description of why the retry is needed.
    :ivar response: The (optional) response that triggered the retry.
    """

    def __init__(self, message, response=None):
        # Bug fix: pass the message to Exception so str(exc) is meaningful;
        # the original stored it only on the instance, leaving str(exc) empty.
        super(PleaseRetryException, self).__init__(message)
        self.message = message
        self.response = response

    def __repr__(self):
        return 'PleaseRetryException("%s", %s)' % (
            self.message,
            self.response
        )
# Remove bare exception
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, six, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
    """
    General Boto Client error (error accessing AWS)
    """

    def __init__(self, reason, *args):
        # ``reason`` is kept as an attribute so handlers can inspect it
        # without parsing the stringified exception.
        super(BotoClientError, self).__init__(reason, *args)
        self.reason = reason

    def __repr__(self):
        return 'BotoClientError: %s' % self.reason

    def __str__(self):
        return 'BotoClientError: %s' % self.reason


class SDBPersistenceError(StandardError):
    """SDB persistence error."""
    pass


# Narrow subclasses exist so callers can catch specific services
# (see the module docstring).
class StoragePermissionsError(BotoClientError):
    """
    Permissions error when accessing a bucket or key on a storage service.
    """
    pass


class S3PermissionsError(StoragePermissionsError):
    """
    Permissions error when accessing a bucket or key on S3.
    """
    pass


class GSPermissionsError(StoragePermissionsError):
    """
    Permissions error when accessing a bucket or key on GS.
    """
    pass
class BotoServerError(StandardError):
    """
    Base class for errors returned by an AWS service.

    Parses the response body (XML or JSON, string or pre-parsed dict) to
    populate ``request_id``, ``error_code``, ``message`` and ``box_usage``.
    """

    def __init__(self, status, reason, body=None, *args):
        super(BotoServerError, self).__init__(status, reason, body, *args)
        self.status = status
        self.reason = reason
        self.body = body or ''
        self.request_id = None
        self.error_code = None
        self._error_message = None
        self.message = ''
        self.box_usage = None

        if isinstance(self.body, bytes):
            try:
                self.body = self.body.decode('utf-8')
            except UnicodeDecodeError:
                # Only a decode failure is expected here; anything else
                # propagates to the caller.
                boto.log.debug('Unable to decode body from bytes!')

        # Attempt to parse the error response. If body isn't present,
        # then just ignore the error response.
        if self.body:
            # Check if it looks like a ``dict``.
            if hasattr(self.body, 'items'):
                # It's not a string, so trying to parse it will fail.
                # But since it's data, we can work with that.
                self.request_id = self.body.get('RequestId', None)

                if 'Error' in self.body:
                    # XML-style
                    error = self.body.get('Error', {})
                    self.error_code = error.get('Code', None)
                    self.message = error.get('Message', None)
                else:
                    # JSON-style.
                    self.message = self.body.get('message', None)
            else:
                try:
                    h = handler.XmlHandlerWrapper(self, self)
                    h.parseString(self.body)
                except (TypeError, xml.sax.SAXParseException) as pe:
                    # What if it's JSON? Let's try that.
                    try:
                        parsed = json.loads(self.body)

                        if 'RequestId' in parsed:
                            self.request_id = parsed['RequestId']
                        if 'Error' in parsed:
                            if 'Code' in parsed['Error']:
                                self.error_code = parsed['Error']['Code']
                            if 'Message' in parsed['Error']:
                                self.message = parsed['Error']['Message']
                    except (TypeError, ValueError):
                        # Remove unparsable message body so we don't include garbage
                        # in exception. But first, save self.body in self.error_message
                        # because occasionally we get error messages from Eucalyptus
                        # that are just text strings that we want to preserve.
                        self.message = self.body
                        self.body = None

    def __getattr__(self, name):
        # ``error_message`` and ``code`` are read-aliases for ``message``
        # and ``error_code``.
        if name == 'error_message':
            return self.message
        if name == 'code':
            return self.error_code
        raise AttributeError

    def __setattr__(self, name, value):
        # Writes to ``error_message`` are redirected into ``message``.
        if name == 'error_message':
            self.message = value
        else:
            super(BotoServerError, self).__setattr__(name, value)

    def __repr__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    def __str__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    def startElement(self, name, attrs, connection):
        # SAX hook; no nested elements handled at this level.
        pass

    def endElement(self, name, value, connection):
        if name in ('RequestId', 'RequestID'):
            self.request_id = value
        elif name == 'Code':
            self.error_code = value
        elif name == 'Message':
            self.message = value
        elif name == 'BoxUsage':
            self.box_usage = value
        return None

    def _cleanupParsedProperties(self):
        # Reset everything populated by the SAX parse.
        self.request_id = None
        self.error_code = None
        self.message = None
        self.box_usage = None
class ConsoleOutput(object):
    """SAX result object holding parsed console-output fields."""

    def __init__(self, parent=None):
        self.parent = parent
        self.instance_id = None
        self.timestamp = None
        self.comment = None
        self.output = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Guard-clause style: handle the two special elements, then fall
        # through to a generic attribute assignment.
        if name == 'instanceId':
            self.instance_id = value
            return
        if name == 'output':
            # The output element arrives base64-encoded; store raw bytes.
            self.output = base64.b64decode(value)
            return
        setattr(self, name, value)
class StorageCreateError(BotoServerError):
    """
    Error creating a bucket or key on a storage service.
    """

    def __init__(self, status, reason, body=None):
        # Declared before super().__init__ because the SAX parse triggered
        # there may populate it via endElement.
        self.bucket = None
        super(StorageCreateError, self).__init__(status, reason, body)

    def endElement(self, name, value, connection):
        if name == 'BucketName':
            self.bucket = value
        else:
            return super(StorageCreateError, self).endElement(name, value, connection)


# Service-specific create/copy errors; the subclasses exist so callers can
# catch narrowly (see the module docstring).
class S3CreateError(StorageCreateError):
    """
    Error creating a bucket or key on S3.
    """
    pass


class GSCreateError(StorageCreateError):
    """
    Error creating a bucket or key on GS.
    """
    pass


class StorageCopyError(BotoServerError):
    """
    Error copying a key on a storage service.
    """
    pass


class S3CopyError(StorageCopyError):
    """
    Error copying a key on S3.
    """
    pass


class GSCopyError(StorageCopyError):
    """
    Error copying a key on GS.
    """
    pass
class SQSError(BotoServerError):
    """
    General Error on Simple Queue Service.
    """

    def __init__(self, status, reason, body=None):
        # Declared before super().__init__ so the SAX parse can set them.
        self.detail = None
        self.type = None
        super(SQSError, self).__init__(status, reason, body)

    def startElement(self, name, attrs, connection):
        return super(SQSError, self).startElement(name, attrs, connection)

    def endElement(self, name, value, connection):
        if name == 'Detail':
            self.detail = value
        elif name == 'Type':
            self.type = value
        else:
            return super(SQSError, self).endElement(name, value, connection)

    def _cleanupParsedProperties(self):
        super(SQSError, self)._cleanupParsedProperties()
        for p in ('detail', 'type'):
            setattr(self, p, None)


class SQSDecodeError(BotoClientError):
    """
    Error when decoding an SQS message.
    """

    def __init__(self, reason, message):
        super(SQSDecodeError, self).__init__(reason, message)
        # Keep the undecodable message for inspection by the caller.
        self.message = message

    def __repr__(self):
        return 'SQSDecodeError: %s' % self.reason

    def __str__(self):
        return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
    """
    Error in response from a storage service.
    """

    def __init__(self, status, reason, body=None):
        # Declared before super().__init__ so the SAX parse can set it.
        self.resource = None
        super(StorageResponseError, self).__init__(status, reason, body)

    def startElement(self, name, attrs, connection):
        return super(StorageResponseError, self).startElement(name, attrs,
                                                              connection)

    def endElement(self, name, value, connection):
        if name == 'Resource':
            self.resource = value
        else:
            return super(StorageResponseError, self).endElement(name, value,
                                                                connection)

    def _cleanupParsedProperties(self):
        super(StorageResponseError, self)._cleanupParsedProperties()
        # Bug fix: ('resource') is just a parenthesized string -- iterating
        # it set one-letter attributes 'r', 'e', 's', ... instead of
        # resetting ``resource``. A one-element tuple is what was intended.
        for p in ('resource',):
            setattr(self, p, None)
# Service-specific response errors for narrow except clauses.
class S3ResponseError(StorageResponseError):
    """
    Error in response from S3.
    """
    pass


class GSResponseError(StorageResponseError):
    """
    Error in response from GS.
    """
    pass
class EC2ResponseError(BotoServerError):
    """
    Error in response from EC2.
    """

    def __init__(self, status, reason, body=None):
        self.errors = None
        self._errorResultSet = []
        super(EC2ResponseError, self).__init__(status, reason, body)
        # Flatten the parsed <Errors> collection into (code, message) pairs.
        self.errors = [(e.error_code, e.error_message)
                       for e in self._errorResultSet]
        if len(self.errors):
            # Surface the first error as the canonical code/message.
            self.error_code, self.error_message = self.errors[0]

    def startElement(self, name, attrs, connection):
        if name == 'Errors':
            self._errorResultSet = ResultSet([('Error', _EC2Error)])
            return self._errorResultSet
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'RequestID':
            self.request_id = value
        else:
            return None  # don't call subclass here

    def _cleanupParsedProperties(self):
        super(EC2ResponseError, self)._cleanupParsedProperties()
        self._errorResultSet = []
        # Bug fix: ('errors') is a plain string; iterating it set attributes
        # 'e', 'r', 'o', 's' instead of resetting ``errors``. A one-element
        # tuple is what was intended.
        for p in ('errors',):
            setattr(self, p, None)
class JSONResponseError(BotoServerError):
    """
    This exception expects the fully parsed and decoded JSON response
    body to be passed as the body parameter.

    :ivar status: The HTTP status code.
    :ivar reason: The HTTP reason message.
    :ivar body: The Python dict that represents the decoded JSON
        response body.
    :ivar error_message: The full description of the AWS error encountered.
    :ivar error_code: A short string that identifies the AWS error
        (e.g. ConditionalCheckFailedException)
    """

    def __init__(self, status, reason, body=None, *args):
        # NOTE(review): deliberately does not call super().__init__();
        # attribute writes still route through BotoServerError.__setattr__,
        # so assigning ``error_message`` actually stores into ``self.message``.
        self.status = status
        self.reason = reason
        self.body = body
        if self.body:
            self.error_message = self.body.get('message', None)
            self.error_code = self.body.get('__type', None)
            if self.error_code:
                # '__type' looks like 'prefix#ErrorName'; keep the short name.
                self.error_code = self.error_code.split('#')[-1]


class DynamoDBResponseError(JSONResponseError):
    pass


class SWFResponseError(JSONResponseError):
    pass


class EmrResponseError(BotoServerError):
    """
    Error in response from EMR
    """
    pass
class _EC2Error(object):
    """Single <Error> entry collected while parsing an EC2 <Errors> block."""

    def __init__(self, connection=None):
        self.connection = connection
        self.error_code = None
        self.error_message = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Code':
            self.error_code = value
        elif name == 'Message':
            self.error_message = value
        else:
            return None
class SDBResponseError(BotoServerError):
    """
    Error in responses from SDB.
    """
    pass


class AWSConnectionError(BotoClientError):
    """
    General error connecting to Amazon Web Services.
    """
    pass


class StorageDataError(BotoClientError):
    """
    Error receiving data from a storage service.
    """
    pass


class S3DataError(StorageDataError):
    """
    Error receiving data from S3.
    """
    pass


class GSDataError(StorageDataError):
    """
    Error receiving data from GS.
    """
    pass


class InvalidUriError(Exception):
    """Exception raised when URI is invalid."""

    def __init__(self, message):
        super(InvalidUriError, self).__init__(message)
        # Kept as an attribute for callers that read .message directly.
        self.message = message


class InvalidAclError(Exception):
    """Exception raised when ACL XML is invalid."""

    def __init__(self, message):
        super(InvalidAclError, self).__init__(message)
        self.message = message


class InvalidCorsError(Exception):
    """Exception raised when CORS XML is invalid."""

    def __init__(self, message):
        super(InvalidCorsError, self).__init__(message)
        self.message = message


class NoAuthHandlerFound(Exception):
    """Is raised when no auth handlers were found ready to authenticate."""
    pass


class InvalidLifecycleConfigError(Exception):
    """Exception raised when GCS lifecycle configuration XML is invalid."""

    def __init__(self, message):
        super(InvalidLifecycleConfigError, self).__init__(message)
        self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
    """Enum-like constants describing how a failed resumable transfer
    should be handled."""

    # START_OVER means an attempt to resume an existing transfer failed,
    # and a new resumable upload should be attempted (without delay).
    START_OVER = 'START_OVER'

    # WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
    # be retried after a time delay within the current process.
    WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'

    # ABORT_CUR_PROCESS means the resumable transfer failed and that
    # delaying/retrying within the current process will not help. If
    # resumable transfer included a state tracker file the upload can be
    # retried again later, in another process (e.g., a later run of gsutil).
    ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'

    # ABORT means the resumable transfer failed in a way that it does not
    # make sense to continue in the current process, and further that the
    # current tracker ID should not be preserved (in a tracker file if one
    # was specified at resumable upload start time). If the user tries again
    # later (e.g., a separate run of gsutil) it will get a new resumable
    # upload ID.
    ABORT = 'ABORT'
class ResumableUploadException(Exception):
    """
    Exception raised for various resumable upload problems.

    self.disposition is of type ResumableTransferDisposition.
    """

    def __init__(self, message, disposition):
        super(ResumableUploadException, self).__init__(message, disposition)
        self.message = message
        self.disposition = disposition

    def __repr__(self):
        return 'ResumableUploadException("%s", %s)' % (
            self.message, self.disposition)


class ResumableDownloadException(Exception):
    """
    Exception raised for various resumable download problems.

    self.disposition is of type ResumableTransferDisposition.
    """

    def __init__(self, message, disposition):
        super(ResumableDownloadException, self).__init__(message, disposition)
        self.message = message
        self.disposition = disposition

    def __repr__(self):
        return 'ResumableDownloadException("%s", %s)' % (
            self.message, self.disposition)


class TooManyRecordsException(Exception):
    """
    Exception raised when a search of Route53 records returns more
    records than requested.
    """

    def __init__(self, message):
        super(TooManyRecordsException, self).__init__(message)
        self.message = message
class PleaseRetryException(Exception):
    """
    Indicates a request should be retried.

    :ivar message: Human-readable description of why the retry is needed.
    :ivar response: The (optional) response that triggered the retry.
    """

    def __init__(self, message, response=None):
        # Bug fix: pass the message to Exception so str(exc) is meaningful;
        # the original stored it only on the instance, leaving str(exc) empty.
        super(PleaseRetryException, self).__init__(message)
        self.message = message
        self.response = response

    def __repr__(self):
        return 'PleaseRetryException("%s", %s)' % (
            self.message,
            self.response
        )
|
import settings
import elife_fi_test_data as elife
import fluidinfo
import urllib
import json
import logging
from time import sleep
# Configure logging to console
logger = logging.getLogger('myapp')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Test connection
# NOTE(review): module-level side effect -- this logs in to Fluidinfo at
# import time using credentials from ``settings``.
fluidinfo.instance = settings.instance
fluidinfo.login(settings.username, settings.password)

# Object if you have one for testing already, otherwise leave blank and createObject
def main():
    """Entry point: create the elife_v1 component tags.

    The other maintenance steps (bulk tag creation, value updates and
    cleanup) are kept below, commented out, for manual use.
    """
    # Create tags, namespaces are created automatically
    #createTags()
    #updateValues(elife.objects)
    createTag('/gnott/elife_v1/component/doi', '')
    createTag('/gnott/elife_v1/component/type', '')
    createTag('/gnott/elife_v1/component/content', '')
    createTag('/gnott/elife_v1/component/article_doi', '')
    # Clean up by deleting tags and namespaces
    #deleteValues(elife.objects)
    #deleteTags()
    #deleteNamespaces()
    #deleteNamespace(namespace = '/' + settings.namespace)
# 1. Create object, with no about tag
def createObject():
    """Create an anonymous Fluidinfo object and return its id."""
    headers, content = fluidinfo.post('/objects')
    return content['id']
def findObject(key, value):
    """
    Look for the object(s) using the key and key value
    If found, return the ids
    """
    query = '%s/%s = "%s"' % (settings.namespace, key, value)
    headers, content = fluidinfo.get('/objects', query=query)
    matching_ids = content['ids']
    if matching_ids:
        return matching_ids
    return None
def createTag(tag, desc, indexed=True):
    """Create a Fluidinfo tag (parent namespaces are created automatically).

    Args:
        tag: Full tag path, e.g. '/user/ns/tagname'.
        desc: Tag description.
        indexed: Whether Fluidinfo should index values of this tag.
    """
    namespace, sep, tagname = tag.rpartition('/')
    headers, content = fluidinfo.post(
        '/tags' + namespace,
        body={"description": desc, "indexed": indexed, "name": tagname})
    # Hoist the repeated int() conversion of the response status.
    status = int(headers['status'])
    if status == 201:
        logger.info('created tag: ' + tag)
    elif status == 412:
        logger.info('tag exists: ' + tag)
    else:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning('unhandled HTTP status for create tag: ' + tag + ', ' + headers['status'])
def deleteTag(tag):
    """Delete a Fluidinfo tag; logs (does not raise) if it did not exist."""
    headers, content = fluidinfo.delete('/tags' + tag)
    # Hoist the repeated int() conversion of the response status.
    status = int(headers['status'])
    if status == 204:
        logger.info('deleted tag: ' + tag)
    elif status == 404:
        logger.info('tag did not exist: ' + tag)
    else:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning('unhandled HTTP status for delete tag: ' + tag + ', ' + headers['status'])
def deleteNamespace(namespace):
    """Delete a namespace; 404 (absent) and 412 (not empty) are logged, not raised."""
    headers, _content = fluidinfo.delete('/namespaces' + namespace)
    status = int(headers['status'])
    if status == 204:
        logger.info('deleted namespace: ' + namespace)
    elif status == 404:
        logger.info('namespace did not exist: ' + namespace)
    elif status == 412:
        logger.info('namespace was not empty, did not delete: ' + namespace)
    else:
        logger.warn('unhandled HTTP status for delete namespace: ' + namespace + ', ' + headers['status'])
def createTags():
    """Create every tag listed in elife.tags under the configured namespace.

    No existence check is made first; createTag logs HTTP 412 for tags that
    already exist.
    """
    for entry in elife.tags:
        createTag(tag = '/' + settings.namespace + '/' + entry['tag'], desc=entry['desc'])
def deleteTags():
    """Delete every tag listed in elife.tags under the configured namespace."""
    for entry in elife.tags:
        deleteTag(tag = '/' + settings.namespace + '/' + entry['tag'])
def deleteNamespaces():
    """Delete every namespace listed in elife.namespaces."""
    for entry in elife.namespaces:
        deleteNamespace(namespace = '/' + settings.namespace + '/' + entry['namespace'])
def updateValues(objects):
    """Load tag values for each object row via the Fluidinfo /values API.

    Each row supplies its primary key name ('key'), the key's value
    (row[key]), an optional known object id ('obj'), and a prebuilt
    /values payload ('query').  Rows without an object id are resolved by
    query (or a new object is created), then the code polls until the key
    tag is indexed before issuing the bulk value load.
    """
    # Update values of tags for the object
    for objrow in objects:
        # 1. If object ID is not specified, create one
        key = objrow['key']
        if(len(objrow['obj']) == 0):
            # First try to find an existing object based on a key query
            ids = findObject(key, objrow[key])
            if(ids == None):
                obj = createObject()
                logger.info('created object: ' + obj)
            else:
                # Use the first object found
                obj = ids[0]
                logger.info('using existing object: ' + obj)
        else:
            obj = objrow['obj']
            logger.info('using existing object: ' + obj)
        # 2. Update the primary key value, doi for articles, etc.
        objpath = '/objects/' + obj + '/' + settings.namespace + '/' + key
        headers, content = fluidinfo.put(objpath, objrow[key])
        if(int(headers['status']) == 204):
            logger.info('added ' + key + ': ' + objrow[key])
        else:
            logger.warn('unhandled HTTP status for added ' + key + ': ' + objpath + ', ' + headers['status'])
        # Wait time: if the object id is returned, then fluidinfo is aware of the object
        # if no object id is returned, then fluidinfo is not aware yet, and therefore
        # wait for a little while before issuing a query based on the tag
        # (polls up to maxTries times, sleeping sleepSeconds between attempts)
        loop = True
        maxTries = 20
        sleepSeconds = 2
        i = 0
        while(loop == True):
            ids = findObject(key, objrow[key])
            logger.info('checking for object: ' + objrow[key])
            if(ids == None):
                logger.info('sleeping: ' + str(sleepSeconds) + ' seconds (' + str(sleepSeconds * (i+1)) + ' sec total)' )
                sleep(sleepSeconds)
            else:
                logger.info('object found: ' + objrow[key])
                loop = False
            i = i+1
            if(i >= maxTries):
                loop = False
        # 3. Assuming success at this point, issue the single loader query
        headers, content = fluidinfo.put('/values', body=objrow['query'])
        if(int(headers['status']) == 204):
            logger.info('query successful on: ' + objpath + ' = ' + objrow[key])
        else:
            logger.warn('error in query on: ' + objpath + ' = ' + objrow[key])
def deleteValues(objects):
    """Delete all schema tag values from the objects matched by each row's key tag."""
    for objrow in objects:
        key = objrow['key']
        # match objects by the primary key tag, delete every schema tag on them
        query = settings.namespace + '/' + key + ' = "' + objrow[key] + '"'
        doomed = [settings.namespace + '/' + entry['tag'] for entry in elife.tags]
        headers, _content = fluidinfo.delete('/values', query=query, tags=doomed)
        if int(headers['status']) == 204:
            logger.info('delete query successful on: ' + key + ' = ' + objrow[key])
        else:
            logger.warn('error in delete query on: ' + key + ' = ' + objrow[key])
# Call main, since all in one file it must be at the bottom
# (runs at import/execution time; every helper above must already be defined)
main()
# Commit note: revert mess left in test script.
import settings
import elife_fi_test_data as elife
import fluidinfo
import urllib
import json
import logging
from time import sleep
# Configure logging to console (stderr) with a timestamped format.
logger = logging.getLogger('myapp')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Test connection: point the client at the configured instance and log in.
fluidinfo.instance = settings.instance
fluidinfo.login(settings.username, settings.password)
# Object if you have one for testing already, otherwise leave blank and createObject
def main():
    """Drive the load: load all tag values for the elife test objects.

    Cleanup calls stay commented out so a run is non-destructive by default;
    uncomment them to tear the test data back down.
    """
    # Create tags, namespaces are created automatically
    #createTags()
    updateValues(elife.objects)
    # Clean up by deleting tags and namespaces
    #deleteValues(elife.objects)
    #deleteTags()
    #deleteNamespaces()
    #deleteNamespace(namespace = '/' + settings.namespace)
# 1. Create object, with no about tag
def createObject():
    """Create a new Fluidinfo object with no "about" tag and return its id."""
    headers, content = fluidinfo.post('/objects')
    obj = content['id']
    return obj
def findObject(key, value):
    """
    Look for object(s) whose <settings.namespace>/<key> tag equals value.
    Returns the list of matching object ids, or None when nothing matches.
    """
    query = settings.namespace + '/' + key + ' = "' + value + '"'
    headers, content = fluidinfo.get('/objects', query = query)
    if(len(content['ids']) > 0):
        return content['ids']
    return None
def createTag(tag, desc, indexed=True):
    """Create the tag at the given full path; parent namespaces are implicit.

    Logs 201 as created and 412 as already-existing; any other status warns.
    """
    namespace, sep, tagname = tag.rpartition('/')
    headers, content = fluidinfo.post('/tags' + namespace, body={"description": desc, "indexed": indexed, "name": tagname})
    if(int(headers['status']) == 201):
        logger.info('created tag: ' + tag)
    elif (int(headers['status']) == 412):
        logger.info('tag exists: ' + tag)
    else:
        logger.warn('unhandled HTTP status for create tag: ' + tag + ', ' + headers['status'])
def deleteTag(tag):
    """Delete the tag at the given full path; 404 (absent) is logged, not raised."""
    headers, content = fluidinfo.delete('/tags' + tag)
    if(int(headers['status']) == 204):
        logger.info('deleted tag: ' + tag)
    elif (int(headers['status']) == 404):
        logger.info('tag did not exist: ' + tag)
    else:
        logger.warn('unhandled HTTP status for delete tag: ' + tag + ', ' + headers['status'])
def deleteNamespace(namespace):
    """Delete a namespace; 404 (absent) and 412 (not empty) are logged, not raised."""
    headers, content = fluidinfo.delete('/namespaces' + namespace)
    if(int(headers['status']) == 204):
        logger.info('deleted namespace: ' + namespace)
    elif (int(headers['status']) == 404):
        logger.info('namespace did not exist: ' + namespace)
    elif (int(headers['status']) == 412):
        logger.info('namespace was not empty, did not delete: ' + namespace)
    else:
        logger.warn('unhandled HTTP status for delete namespace: ' + namespace + ', ' + headers['status'])
def createTags():
    """ Create all tags required for the namespace.
    Does not check if they exist first before trying to create them;
    createTag logs HTTP 412 for tags that already exist.
    """
    for t in elife.tags:
        createTag(tag = '/' + settings.namespace + '/' + t['tag'], desc=t['desc'])
def deleteTags():
    """ Delete all tags defined in elife.tags under the configured namespace.
    """
    for t in elife.tags:
        deleteTag(tag = '/' + settings.namespace + '/' + t['tag'])
def deleteNamespaces():
    """ Delete all namespaces defined in elife.namespaces.
    """
    for t in elife.namespaces:
        deleteNamespace(namespace = '/' + settings.namespace + '/' + t['namespace'])
def updateValues(objects):
    """Load tag values for each object row via the Fluidinfo /values API.

    Each row supplies its primary key name ('key'), the key's value
    (row[key]), an optional known object id ('obj'), and a prebuilt
    /values payload ('query').  Rows without an object id are resolved by
    query (or a new object is created), then the code polls until the key
    tag is indexed before issuing the bulk value load.
    """
    # Update values of tags for the object
    for objrow in objects:
        # 1. If object ID is not specified, create one
        key = objrow['key']
        if(len(objrow['obj']) == 0):
            # First try to find an existing object based on a key query
            ids = findObject(key, objrow[key])
            if(ids == None):
                obj = createObject()
                logger.info('created object: ' + obj)
            else:
                # Use the first object found
                obj = ids[0]
                logger.info('using existing object: ' + obj)
        else:
            obj = objrow['obj']
            logger.info('using existing object: ' + obj)
        # 2. Update the primary key value, doi for articles, etc.
        objpath = '/objects/' + obj + '/' + settings.namespace + '/' + key
        headers, content = fluidinfo.put(objpath, objrow[key])
        if(int(headers['status']) == 204):
            logger.info('added ' + key + ': ' + objrow[key])
        else:
            logger.warn('unhandled HTTP status for added ' + key + ': ' + objpath + ', ' + headers['status'])
        # Wait time: if the object id is returned, then fluidinfo is aware of the object
        # if no object id is returned, then fluidinfo is not aware yet, and therefore
        # wait for a little while before issuing a query based on the tag
        # (polls up to maxTries times, sleeping sleepSeconds between attempts)
        loop = True
        maxTries = 20
        sleepSeconds = 2
        i = 0
        while(loop == True):
            ids = findObject(key, objrow[key])
            logger.info('checking for object: ' + objrow[key])
            if(ids == None):
                logger.info('sleeping: ' + str(sleepSeconds) + ' seconds (' + str(sleepSeconds * (i+1)) + ' sec total)' )
                sleep(sleepSeconds)
            else:
                logger.info('object found: ' + objrow[key])
                loop = False
            i = i+1
            if(i >= maxTries):
                loop = False
        # 3. Assuming success at this point, issue the single loader query
        headers, content = fluidinfo.put('/values', body=objrow['query'])
        if(int(headers['status']) == 204):
            logger.info('query successful on: ' + objpath + ' = ' + objrow[key])
        else:
            logger.warn('error in query on: ' + objpath + ' = ' + objrow[key])
def deleteValues(objects):
    """Delete every schema tag value from objects matched by each row's key tag."""
    # Delete values of tags for the object
    # matching about/doi, and using all schema tags as those to delete
    for objrow in objects:
        key = objrow['key']
        path = settings.namespace + '/' + key + ' = "' + objrow[key] + '"'
        tags = []
        for tag in elife.tags:
            tags.append(settings.namespace + '/' + tag['tag'])
        headers, content = fluidinfo.delete('/values', query=path, tags=tags)
        if(int(headers['status']) == 204):
            logger.info('delete query successful on: ' + key + ' = ' + objrow[key])
        else:
            logger.warn('error in delete query on: ' + key + ' = ' + objrow[key])
# Call main, since all in one file it must be at the bottom
# (runs at import/execution time; every helper above must already be defined)
main()
# ---
__author__ = 'jeddy'
import sys, os, re, argparse, time
"""generate_fc_batch_submit
This script is used to generate a batch submit file for Globus Genomics Galaxy,
with all parameters specified for the selected Workflow for each library
(sample).
Inputs:
-u / --unalignedDir : directory with unaligned FASTQs from BaseSpace for
current project libraries; expected folder hierarchy
is as follows:
<flowcell-directory>/
|----Unaligned/
|----<project-directory> # aka "unalignedDir"
|----<lib-directory>
|----<fastq.gz-file-for-each-lane>
-t / --workflowTemplate: empty batch submit file from Globus Genomics
Galaxy to be used as a template for specifying
parameters for the current project; each template
corresponds to a Workflow within Galaxy
Example:
$ python generateBatchSubmitParams.py \
-u /mnt/genomics/Illumina/150615_D00565_0087_AC6VG0ANXX/Unaligned/P43-12-23224208 \
-t /mnt/genomics/galaxyWorkflows/Galaxy-API-Workflow-alignCount_truSeq_single_GRCh38_v1.txt
"""
### FUNCTIONS
# Parse flowcell folder from filepath part to get flowcell ID
def get_fc_tag(fc_str):
    """Return '_<flowcellID>' parsed from a run-folder name, or '' if absent."""
    # Normalize 'EXTERNAL_A'/'EXTERNAL_B' prefixes so one lookbehind matches both.
    fc_str = re.sub('EXTERNAL_[A-B]', 'EXTERNAL_', fc_str)
    # Flowcell ID: text ending in 'XX', preceded by 'EXTERNAL_' or '_A'/'_B'.
    fc_re = re.compile('((?<=(EXTERNAL_))|(?<=(_[A-B]))).*XX')
    try:
        fc_tag = '_' + fc_re.search(fc_str).group()
        return fc_tag
    except AttributeError:
        # search() returned None: no flowcell ID in this folder name
        return ''
# Parse project folder from filepath part to get project ID
def get_proj(proj_str):
    """Return the project ID (e.g. 'P43-12'); fall back to the raw string."""
    # 'P' + digits, optionally followed by one '-digits' suffix
    proj_re = re.compile('P+[0-9]+(-[0-9]+){,1}')
    try:
        proj = proj_re.search(proj_str).group()
        return proj
    except AttributeError:
        # no project pattern found: return the folder name unchanged
        return proj_str
# Break unaligned filepath into relevant parts and parse to get flowcell and
# project IDs
def parse_unaligned_path(unaligned_dir):
    """Split .../<flowcell>/Unaligned/<project> into (fc_tag, proj)."""
    path_parts = re.split('/Unaligned/', unaligned_dir)
    # last path component before /Unaligned/ is the flowcell run folder
    fc_str = re.split('/', path_parts[0])[-1]
    proj_str = path_parts[-1]
    fc_tag = get_fc_tag(fc_str)
    proj = get_proj(proj_str)
    return (fc_tag, proj)
def get_unique_keys(keys, idfun=None):
    """Return keys with duplicates removed, preserving first-seen order.

    idfun maps each key to the identity used for duplicate detection
    (defaults to the key itself).
    """
    if idfun is None:
        def idfun(x): return x
    seen = {}
    result = []
    for item in keys:
        marker = idfun(item)
        if marker in seen: continue
        seen[marker] = 1
        result.append(item)
    return result
# Parse workflow template to get keys for parameters as well as all other
# metadata/comment lines from the template file
def parse_workflow_template(batch_workflow_template):
    """Parse a Galaxy batch template.

    Returns (unique header keys, lane order digits, raw template lines).
    NOTE(review): file() is Python 2 only and the handle is never closed.
    """
    template_lines = file(batch_workflow_template).readlines()
    header_line = [line for line in template_lines if 'SampleName' in line][0]
    headers = header_line.rstrip('\t').split('\t')
    # key is the text before any '##' annotation; parameter name follows '::'
    header_keys = [header.split('##')[0] for header in headers]
    param_names = [header.split('::')[-1] for header in headers]
    # lane order comes from the digit embedded in each 'from_pathN' parameter
    lane_order = [re.search('[1-9]', p).group() \
                  for p in param_names if re.search('from_path[1-8]', p) ]
    return get_unique_keys(header_keys), lane_order, template_lines
# Replace root directory with /~/ for compatibility with Globus transfer
def format_endpoint_dir(local_dir):
    """Rewrite everything before '/genomics' as '/~' (Globus endpoint path)."""
    # NOTE(review): under Python 3.7+ re.sub also replaces the empty match
    # that follows, yielding '/~/~/genomics/...'; fine on the Python 2 this
    # script targets, but worth a count=1 if ever ported.
    endpoint_dir = re.sub('.*(?=(/genomics))', '/~', local_dir)
    return endpoint_dir
# Specify appropriate reference/annotation files for corresponding parameters
def build_ref_path(param, build = 'GRCh38'):
    """Map an 'annotation_*' parameter name to its Galaxy library reference path.

    Supported builds: 'GRCh38' (human) and 'NCBIM37' (mouse).
    """
    ref_dict = {}
    ref_dict['GRCh38'] = dict([('gtf', 'GRCh38/Homo_sapiens.GRCh38.77.gtf'),
                               ('refflat', 'GRCh38/Homo_sapiens.GRCh38.77.refflat.txt'),
                               ('ribosomal_intervals',
                                'GRCh38/Homo_sapiens.GRCh38.77.ribosomalIntervalsWheader_reorder.txt'),
                               ('adapters', 'adapters/smarter_adapter_seqs_3p_5p.fasta')])
    ref_dict['NCBIM37'] = dict([('gtf', 'NCBIM37/Mus_musculus.NCBIM37.67.gtf'),
                               ('refflat', 'NCBIM37/Mus_musculus.NCBIM37.67.refflat.txt'),
                               ('ribosomal_intervals',
                                'NCBIM37/Mus_musculus.NCBIM37.67.ribosomalIntervalsWheader_reorder.txt'),
                               ('adapters', 'adapters/smarter_adapter_seqs_3p_5p.fasta')])
    # 'annotation_gtf' -> 'gtf', etc.
    ref_type = re.sub('^annotation_', '', param)
    ref_path = 'library::annotation::' + ref_dict[build].get(ref_type)
    return ref_path
# Create label for processed output folder
def prep_output_directory(unaligned_dir, proj):
    """Create the dated processed-output folder; return (target_dir, date_tag)."""
    path_parts = re.split('/Unaligned/', unaligned_dir)
    fc_dir = path_parts[0]
    # date tag (YYMMDD, UTC) distinguishes repeated submissions
    date_tag = time.strftime("%y%m%d", time.gmtime())
    target_dir = '%s/Project_%sProcessed_globus_%s' % (fc_dir, proj, date_tag)
    fastq_dir = os.path.join(target_dir, 'inputFastqs')
    if not os.path.isdir(fastq_dir):
        os.makedirs(fastq_dir)
    return (target_dir, date_tag)
# Create output subdirectories for each Workflow result type
def prep_output_subdir(target_dir, result_type):
    """Ensure <target_dir>/<result_type> exists and return its path."""
    result_subdir = os.path.join(target_dir, result_type)
    if not os.path.isdir(result_subdir):
        os.makedirs(result_subdir)
    return result_subdir
# Parse library folder from filepath to get library ID (libID)
def parse_lib_path(lib_dir):
    """Return 'lib<digits>' from a library folder path, else its 'Sample_*' name."""
    try:
        lib_id = re.search('lib[0-9]+', lib_dir).group()
        return lib_id
    except AttributeError:
        # no 'libNNNN' token; fall back to BaseSpace-style 'Sample_*' names
        # (raises AttributeError itself if neither pattern is present)
        return re.search('Sample_.*[0-9]+', lib_dir).group()
# Get the location of the gzipped FASTQ file for the current lib and lane
def get_lane_fastq(lib_dir, lane):
    """Return the endpoint path of this library's fastq.gz for the given lane.

    When no FASTQ exists for the lane, returns a placeholder path; actually
    creating the empty file is currently disabled (see commented lines).
    """
    lane_re = re.compile('L00' + lane)
    lane_fastq = [os.path.join(lib_dir, fastq)
                  for fastq in os.listdir(lib_dir)
                  if lane_re.search(fastq)]
    if len(lane_fastq):
        lane_fastq = lane_fastq[0]
    # create empty file if no FASTQ exists for current lane
    else:
        empty_fastq = 'empty_L00' + lane + '.fastq.gz'
        lane_fastq = os.path.join(lib_dir, empty_fastq)
        # if not os.path.exists(lane_fastq):
        # open(lane_fastq, 'a').close()
    return format_endpoint_dir(lane_fastq)
# Create output file path corresponding to the current parameter / result type
def build_result_path(lib, target_dir, param):
    """Build the output file path for one result parameter of one library."""
    result_types = ['trimmed', 'counts', 'alignments', 'metrics',
                    'QC', 'Trinity', 'log']
    # first known result type whose (lowercased) name appears in the param
    result_type = [r_type for r_type in result_types
                   if r_type.lower() in param][0]
    result_subdir = prep_output_subdir(target_dir, result_type)
    # 'counts_out' -> 'counts'; trailing '_ext' becomes a '.ext' suffix
    out_file = re.sub('_out$', '', param)
    out_file = re.sub('_(?=([a-z]+$))', '.', out_file)
    out_file = lib + '_' + out_file
    result_path = os.path.join(format_endpoint_dir(result_subdir), out_file)
    return result_path
# Fill in parameter values for current lib based on the keys from the template
def build_lib_param_list(lib, endpoint, target_dir, header_keys, lane_order, fc_tag, build='GRCh38'):
    """Build the tab-separated parameter values for one library row.

    Values are appended in header_keys order: sample name, input FASTQ
    endpoint/paths per lane, annotation reference paths, and endpoint/path
    pairs for each output parameter.
    """
    lib_params = []
    lib_id = parse_lib_path(lib)
    target_lib = lib_id + fc_tag
    for param in header_keys:
        if 'SampleName' in param:
            lib_params.append(target_lib)
        elif 'fastq_in' in param:
            lib_params.append(endpoint)
            for lane in lane_order:
                lib_params.append(get_lane_fastq(lib, lane))
        elif 'annotation' in param:
            ref_path = build_ref_path(param, build)
            lib_params.append(ref_path)
        elif 'out' in param:
            if re.search('^fastq_out', param):
                # the final trimmed FASTQ goes back beside the inputs
                final_fastq = '%s_R1-final.fastq.gz' % target_lib
                result_path = os.path.join(format_endpoint_dir(target_dir),
                                           'inputFastqs', final_fastq)
            else:
                result_path = build_result_path(target_lib, target_dir, param)
            lib_params.append(endpoint)
            lib_params.append(result_path)
    return lib_params
def get_project_params(endpoint, header_keys, lane_order, unaligned_dir,
                       project_lines=None, N=None, sort=False, build='GRCh38'):
    """Build one batch line per library in a project's Unaligned directory.

    Returns (proj, fc_tag, project_lines, date_tag).  'Undetermined' reads
    are skipped; N limits to the first N libraries; sort orders libraries
    by total on-disk size, smallest first.
    """
    if project_lines is None:
        project_lines = []
    fc_tag,proj = parse_unaligned_path(unaligned_dir)
    unaligned_libs = [os.path.join(unaligned_dir, entry)
                      for entry in os.listdir(unaligned_dir)
                      if os.path.isdir(os.path.join(unaligned_dir, entry))]
    # temp kluge to select just one lib
    # unaligned_libs = [ lib for lib in unaligned_libs if re.search('lib6(830|922)', lib) ] # for P43-12
    # unaligned_libs = [ lib for lib in unaligned_libs if re.search('lib6(830|822|605)', lib) ] # for P43-12/13 or P109-1
    # unaligned_libs = [ lib for lib in unaligned_libs if re.search('lib(9497|9555)', lib) ] # for P43-12/13 or P109-1
    # unalignedLibs = [ lib for lib in unalignedLibs if re.search('lib66(05|20)', lib) ] # for P109-1
    if sort:
        unaligned_libs = sorted(unaligned_libs,
                                key=lambda x: sum(os.path.getsize(os.path.join(x, f))
                                                  for f in os.listdir(x)))
    if N is not None:
        unaligned_libs = unaligned_libs[0:N] # first N libs, any project
    target_dir,date_tag = prep_output_directory(unaligned_dir, proj)
    for lib in unaligned_libs:
        if "Undetermined" not in lib:
            lib_params = build_lib_param_list(lib, endpoint, target_dir,
                                              header_keys, lane_order, fc_tag,
                                              build)
            project_lines.append(('\t').join(lib_params) + '\n')
    return (proj, fc_tag, project_lines, date_tag)
# Parse template and fill in appropriate parameter values for all project libs
def create_workflow_file(endpoint, workflow_template, project_list, N=None, sort=False):
    """Fill a workflow template with parameter lines for every project.

    Returns (workflow_lines, submit_tag).  NOTE(review): fc_tag and
    date_tag are taken from the *last* loop iteration, and an empty
    project_list would raise NameError here — confirm callers always pass
    at least one project.
    """
    header_keys,lane_order,template_lines = parse_workflow_template(workflow_template)
    workflow_lines = template_lines
    # ensure the final template line ends with a newline rather than a tab
    workflow_lines[-1] = re.sub('\t$', '\n', workflow_lines[-1])
    builds = ['GRCh38', 'NCBIM37']
    # the genome build is inferred from the template's file name
    build = [b for b in builds if re.search(b.lower(), workflow_template)][0]
    proj_list = []
    for unaligned_project in project_list:
        proj,fc_tag,proj_lines,date_tag = get_project_params(endpoint, header_keys,
                                                             lane_order, unaligned_project,
                                                             N=N, sort=sort, build=build)
        proj_list.append(proj)
        workflow_lines += proj_lines
    submit_tag = '%s_%s%s' % (date_tag, ('_').join(proj_list), fc_tag)
    proj_line_num = [idx for idx,line in enumerate(template_lines)
                     if 'Project Name' in line][0]
    workflow_lines[proj_line_num] = re.sub('<Your_project_name>', submit_tag,
                                           template_lines[proj_line_num])
    return (workflow_lines, submit_tag)
# Write completed batch workflow to file and return formatted path
def write_batch_workflow(workflow_lines, flowcell_dir, workflow_template, submit_tag):
    """Write the filled-in batch file under <flowcell_dir>/globus_batch_submission
    and print its Globus endpoint path."""
    target_dir = os.path.join(flowcell_dir, 'globus_batch_submission')
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    template_file = os.path.basename(workflow_template)
    workflow_file = submit_tag + '_' + template_file
    workflow_path = os.path.join(target_dir, workflow_file)
    w_file = open(workflow_path, 'w+')
    w_file.writelines(workflow_lines)
    w_file.close()
    print "Batch file path: \n%s" % format_endpoint_dir(workflow_path)
# Prompt user to specify workflow for each flowcell project
def build_submit_dict(flowcell_dir, workflow_dir, optimized_only=False):
    """Interactively map workflow templates to flowcell projects.

    Repeatedly lists projects (with current selections) and prompts for a
    project number, then a workflow number; an empty project entry ends the
    loop.  Returns {workflow_path: [project_path, ...]}.
    """
    flowcell_projects = [os.path.join(flowcell_dir, 'Unaligned', p)
                         for p in os.listdir(os.path.join(flowcell_dir, 'Unaligned'))
                         if '.' not in p]
    flowcell_projects.sort()
    workflow_choices = [os.path.join(workflow_dir, f)
                        for f in os.listdir(workflow_dir)
                        if 'Galaxy-API' not in f]
    workflow_choices.sort()
    if optimized_only:
        workflow_choices = [f for f in workflow_choices
                            if re.search('optimized', f)]
    ps_cont = True
    submit_dict = {}
    while ps_cont:
        print(submit_dict)
        print "\nFound the following projects: [current workflows selected]"
        for i, p in enumerate(flowcell_projects):
            workflow_nums = [w for w, k in enumerate(workflow_choices)
                             if p in submit_dict.get(k, [])]
            print "%3d : %s %s" % (i, os.path.basename(p), str(workflow_nums))
        p_i = raw_input("\nType the number of the project you wish to select or hit enter to finish: ")
        if len(p_i):
            selected_project = flowcell_projects[int(p_i)]
            for j, w in enumerate(workflow_choices):
                print "%3d : %s" % (j, os.path.basename(w))
            w_j = raw_input("\nSelect the number of the workflow to use for project %s: "
                            % os.path.basename(selected_project))
            selected_workflow = workflow_choices[int(w_j)]
            submit_dict.setdefault(selected_workflow, []).append(selected_project)
        else:
            ps_cont = False
    return submit_dict
# Parse input arguments and call above functions to generate batch submit file
def main(argv):
    """Parse CLI args, prompt for project/workflow pairs, and write batch files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--endpoint',
                        required=True,
                        default=None,
                        help=("specify the Globus Online endpoint "
                              "where input data is stored / results are "
                              "to be saved - e.g., "
                              "jeddy#srvgridftp01"))
    parser.add_argument('-f', '--flowcell_dir',
                        required=True,
                        default=None,
                        help=("specify the directory of unaligned library "
                              "FASTQs to be processed - e.g., "
                              "/mnt/genomics/Illumina/"
                              "150615_D00565_0087_AC6VG0ANXX/Unaligned"
                              "P43-12-23224208"))
    parser.add_argument('-w', '--workflow_dir',
                        default=None,
                        help=("specify batch submission template "
                              "for the Galaxy Workflow to be used for"
                              "processing the current project - e.g., "
                              "/mnt/genomics/galaxyWorkflows/"
                              "Galaxy-API-Workflow-alignCount_truSeq_"
                              "single_GRCh38_v1.txt"))
    # Fixed: a stray trailing comma here used to wrap the add_argument()
    # result in a throwaway one-element tuple; also added missing help text.
    parser.add_argument('-N', '--first_N',
                        type=int,
                        default=None,
                        help=("process only the first N libraries"))
    parser.add_argument('-s', '--sort_libs',
                        action='store_true',
                        help=("sort libraries from smallest to largest"))
    parser.add_argument('-o', '--optimized_only',
                        action='store_true',
                        help=("show only optimized workflows"))
    args = parser.parse_args()
    endpoint = args.endpoint
    flowcell_dir = args.flowcell_dir
    workflow_dir = args.workflow_dir
    N = args.first_N
    sort_libs = args.sort_libs
    optimized_only = args.optimized_only
    # one batch file per selected workflow, covering all projects mapped to it
    submit_dict = build_submit_dict(flowcell_dir, workflow_dir, optimized_only)
    for w in submit_dict:
        workflow_lines,submit_tag = create_workflow_file(endpoint,
                                                         w, submit_dict[w],
                                                         N, sort_libs)
        write_batch_workflow(workflow_lines, flowcell_dir, w, submit_tag)
# Script entry point: pass through CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
# Commit note: add empty FASTQ creation back for now; its removal seemed to cause an error with the Globus transfer.
__author__ = 'jeddy'
import sys, os, re, argparse, time
"""generate_fc_batch_submit
This script is used to generate a batch submit file for Globus Genomics Galaxy,
with all parameters specified for the selected Workflow for each library
(sample).
Inputs:
-u / --unalignedDir : directory with unaligned FASTQs from BaseSpace for
current project libraries; expected folder hierarchy
is as follows:
<flowcell-directory>/
|----Unaligned/
|----<project-directory> # aka "unalignedDir"
|----<lib-directory>
|----<fastq.gz-file-for-each-lane>
-t / --workflowTemplate: empty batch submit file from Globus Genomics
Galaxy to be used as a template for specifying
parameters for the current project; each template
corresponds to a Workflow within Galaxy
Example:
$ python generateBatchSubmitParams.py \
-u /mnt/genomics/Illumina/150615_D00565_0087_AC6VG0ANXX/Unaligned/P43-12-23224208 \
-t /mnt/genomics/galaxyWorkflows/Galaxy-API-Workflow-alignCount_truSeq_single_GRCh38_v1.txt
"""
### FUNCTIONS
# Parse flowcell folder from filepath part to get flowcell ID
def get_fc_tag(fc_str):
    """Return '_<flowcellID>' parsed from a run-folder name, or '' if absent."""
    # normalize 'EXTERNAL_A'/'EXTERNAL_B' prefixes so one lookbehind covers both
    normalized = re.sub('EXTERNAL_[A-B]', 'EXTERNAL_', fc_str)
    # flowcell ID: text ending in 'XX', preceded by 'EXTERNAL_' or '_A'/'_B'
    match = re.search('((?<=(EXTERNAL_))|(?<=(_[A-B]))).*XX', normalized)
    if match is None:
        return ''
    return '_' + match.group()
# Parse project folder from filepath part to get project ID
def get_proj(proj_str):
    """Return the project ID (e.g. 'P43-12'); fall back to the raw string."""
    # 'P' + digits, optionally followed by a single '-digits' suffix
    match = re.search('P+[0-9]+(-[0-9]+){,1}', proj_str)
    return match.group() if match else proj_str
# Break unaligned filepath into relevant parts and parse to get flowcell and
# project IDs
def parse_unaligned_path(unaligned_dir):
    """Split .../<flowcell>/Unaligned/<project> into (fc_tag, proj)."""
    parts = unaligned_dir.split('/Unaligned/')
    run_folder = parts[0].split('/')[-1]
    project_folder = parts[-1]
    return (get_fc_tag(run_folder), get_proj(project_folder))
def get_unique_keys(keys, idfun=None):
    """Return keys with duplicates removed, preserving first-seen order.

    idfun maps each key to the identity used for duplicate detection
    (defaults to the key itself).
    """
    if idfun is None:
        idfun = lambda item: item
    seen = set()
    unique = []
    for key in keys:
        token = idfun(key)
        if token not in seen:
            seen.add(token)
            unique.append(key)
    return unique
# Parse workflow template to get keys for parameters as well as all other
# metadata/comment lines from the template file
def parse_workflow_template(batch_workflow_template):
    """Parse a Galaxy batch template.

    Returns (unique header keys, lane order digits, raw template lines).

    Fixed: the original used the Python-2-only ``file()`` builtin and never
    closed the handle; a context-managed ``open()`` is equivalent on
    Python 2 and also closes the file deterministically.
    """
    with open(batch_workflow_template) as template:
        template_lines = template.readlines()
    header_line = [line for line in template_lines if 'SampleName' in line][0]
    headers = header_line.rstrip('\t').split('\t')
    # key is the text before any '##' annotation; parameter name follows '::'
    header_keys = [header.split('##')[0] for header in headers]
    param_names = [header.split('::')[-1] for header in headers]
    # lane order comes from the digit embedded in each 'from_pathN' parameter
    lane_order = [re.search('[1-9]', p).group()
                  for p in param_names if re.search('from_path[1-8]', p)]
    return get_unique_keys(header_keys), lane_order, template_lines
# Replace root directory with /~/ for compatibility with Globus transfer
def format_endpoint_dir(local_dir):
    """Rewrite everything before '/genomics' as '/~' for Globus transfer paths.

    Fixed: count=1 limits the substitution to the single prefix match.
    Without it, Python 3.7+ re.sub also replaces the empty match that
    immediately follows, producing '/~/~/genomics/...'; paths without a
    '/genomics' component are returned unchanged either way.
    """
    return re.sub('.*(?=(/genomics))', '/~', local_dir, count=1)
# Specify appropriate reference/annotation files for corresponding parameters
def build_ref_path(param, build = 'GRCh38'):
    """Map an 'annotation_*' parameter name to its Galaxy library reference path.

    Supported builds: 'GRCh38' (human) and 'NCBIM37' (mouse).
    """
    references = {
        'GRCh38': {
            'gtf': 'GRCh38/Homo_sapiens.GRCh38.77.gtf',
            'refflat': 'GRCh38/Homo_sapiens.GRCh38.77.refflat.txt',
            'ribosomal_intervals':
                'GRCh38/Homo_sapiens.GRCh38.77.ribosomalIntervalsWheader_reorder.txt',
            'adapters': 'adapters/smarter_adapter_seqs_3p_5p.fasta',
        },
        'NCBIM37': {
            'gtf': 'NCBIM37/Mus_musculus.NCBIM37.67.gtf',
            'refflat': 'NCBIM37/Mus_musculus.NCBIM37.67.refflat.txt',
            'ribosomal_intervals':
                'NCBIM37/Mus_musculus.NCBIM37.67.ribosomalIntervalsWheader_reorder.txt',
            'adapters': 'adapters/smarter_adapter_seqs_3p_5p.fasta',
        },
    }
    # 'annotation_gtf' -> 'gtf', etc.
    ref_type = re.sub('^annotation_', '', param)
    return 'library::annotation::' + references[build].get(ref_type)
# Create label for processed output folder
def prep_output_directory(unaligned_dir, proj):
    """Create the dated processed-output folder; return (target_dir, date_tag)."""
    fc_dir = re.split('/Unaligned/', unaligned_dir)[0]
    # date tag (YYMMDD, UTC) distinguishes repeated submissions
    date_tag = time.strftime("%y%m%d", time.gmtime())
    target_dir = '%s/Project_%sProcessed_globus_%s' % (fc_dir, proj, date_tag)
    fastq_dir = os.path.join(target_dir, 'inputFastqs')
    if not os.path.isdir(fastq_dir):
        os.makedirs(fastq_dir)
    return (target_dir, date_tag)
# Create output subdirectories for each Workflow result type
def prep_output_subdir(target_dir, result_type):
    """Ensure <target_dir>/<result_type> exists and return its path."""
    subdir = os.path.join(target_dir, result_type)
    if not os.path.isdir(subdir):
        os.makedirs(subdir)
    return subdir
# Parse library folder from filepath to get library ID (libID)
def parse_lib_path(lib_dir):
    """Return 'lib<digits>' from a library folder path, else its 'Sample_*' name."""
    lib_match = re.search('lib[0-9]+', lib_dir)
    if lib_match:
        return lib_match.group()
    # fall back to BaseSpace-style 'Sample_*' names
    # (raises AttributeError if neither pattern is present, as before)
    return re.search('Sample_.*[0-9]+', lib_dir).group()
# Get the location of the gzipped FASTQ file for the current lib and lane
def get_lane_fastq(lib_dir, lane):
    """Return the endpoint path of this library's fastq.gz for the given lane.

    When no FASTQ exists for the lane, an empty placeholder file is created
    so the downstream Globus transfer does not fail on a missing source.
    """
    lane_pattern = re.compile('L00' + lane)
    candidates = [os.path.join(lib_dir, name)
                  for name in os.listdir(lib_dir)
                  if lane_pattern.search(name)]
    if candidates:
        lane_fastq = candidates[0]
    else:
        # no FASTQ for this lane: fall back to an empty placeholder file
        lane_fastq = os.path.join(lib_dir, 'empty_L00' + lane + '.fastq.gz')
        if not os.path.exists(lane_fastq):
            open(lane_fastq, 'a').close()
    return format_endpoint_dir(lane_fastq)
# Create output file path corresponding to the current parameter / result type
def build_result_path(lib, target_dir, param):
    """Build the output file path for one result parameter of one library."""
    known_types = ['trimmed', 'counts', 'alignments', 'metrics',
                   'QC', 'Trinity', 'log']
    # first known result type whose (lowercased) name appears in the param
    result_type = [t for t in known_types if t.lower() in param][0]
    result_subdir = prep_output_subdir(target_dir, result_type)
    # 'counts_out' -> 'counts'; a trailing '_ext' becomes a '.ext' suffix
    file_name = re.sub('_out$', '', param)
    file_name = re.sub('_(?=([a-z]+$))', '.', file_name)
    file_name = lib + '_' + file_name
    return os.path.join(format_endpoint_dir(result_subdir), file_name)
# Fill in parameter values for current lib based on the keys from the template
def build_lib_param_list(lib, endpoint, target_dir, header_keys, lane_order, fc_tag, build='GRCh38'):
    """Build the tab-separated parameter values for one library row.

    Values are appended in header_keys order: sample name, input FASTQ
    endpoint/paths per lane, annotation reference paths, and endpoint/path
    pairs for each output parameter.
    """
    lib_params = []
    lib_id = parse_lib_path(lib)
    target_lib = lib_id + fc_tag
    for param in header_keys:
        if 'SampleName' in param:
            lib_params.append(target_lib)
        elif 'fastq_in' in param:
            lib_params.append(endpoint)
            for lane in lane_order:
                lib_params.append(get_lane_fastq(lib, lane))
        elif 'annotation' in param:
            ref_path = build_ref_path(param, build)
            lib_params.append(ref_path)
        elif 'out' in param:
            if re.search('^fastq_out', param):
                # the final trimmed FASTQ goes back beside the inputs
                final_fastq = '%s_R1-final.fastq.gz' % target_lib
                result_path = os.path.join(format_endpoint_dir(target_dir),
                                           'inputFastqs', final_fastq)
            else:
                result_path = build_result_path(target_lib, target_dir, param)
            lib_params.append(endpoint)
            lib_params.append(result_path)
    return lib_params
def get_project_params(endpoint, header_keys, lane_order, unaligned_dir,
                       project_lines=None, N=None, sort=False, build='GRCh38'):
    """Build one batch line per library in a project's Unaligned directory.

    Returns (proj, fc_tag, project_lines, date_tag).  'Undetermined' reads
    are skipped; N limits to the first N libraries; sort orders libraries
    by total on-disk size, smallest first.
    """
    if project_lines is None:
        project_lines = []
    fc_tag,proj = parse_unaligned_path(unaligned_dir)
    unaligned_libs = [os.path.join(unaligned_dir, entry)
                      for entry in os.listdir(unaligned_dir)
                      if os.path.isdir(os.path.join(unaligned_dir, entry))]
    # temp kluge to select just one lib
    # unaligned_libs = [ lib for lib in unaligned_libs if re.search('lib6(830|922)', lib) ] # for P43-12
    # unaligned_libs = [ lib for lib in unaligned_libs if re.search('lib6(830|822|605)', lib) ] # for P43-12/13 or P109-1
    # unaligned_libs = [ lib for lib in unaligned_libs if re.search('lib(9497|9555)', lib) ] # for P43-12/13 or P109-1
    # unalignedLibs = [ lib for lib in unalignedLibs if re.search('lib66(05|20)', lib) ] # for P109-1
    if sort:
        unaligned_libs = sorted(unaligned_libs,
                                key=lambda x: sum(os.path.getsize(os.path.join(x, f))
                                                  for f in os.listdir(x)))
    if N is not None:
        unaligned_libs = unaligned_libs[0:N] # first N libs, any project
    target_dir,date_tag = prep_output_directory(unaligned_dir, proj)
    for lib in unaligned_libs:
        if "Undetermined" not in lib:
            lib_params = build_lib_param_list(lib, endpoint, target_dir,
                                              header_keys, lane_order, fc_tag,
                                              build)
            project_lines.append(('\t').join(lib_params) + '\n')
    return (proj, fc_tag, project_lines, date_tag)
# Parse template and fill in appropriate parameter values for all project libs
def create_workflow_file(endpoint, workflow_template, project_list, N=None, sort=False):
    """Fill a workflow template with parameter lines for every project.

    Returns (workflow_lines, submit_tag).  NOTE(review): fc_tag and
    date_tag are taken from the *last* loop iteration, and an empty
    project_list would raise NameError here — confirm callers always pass
    at least one project.
    """
    header_keys,lane_order,template_lines = parse_workflow_template(workflow_template)
    workflow_lines = template_lines
    # ensure the final template line ends with a newline rather than a tab
    workflow_lines[-1] = re.sub('\t$', '\n', workflow_lines[-1])
    builds = ['GRCh38', 'NCBIM37']
    # the genome build is inferred from the template's file name
    build = [b for b in builds if re.search(b.lower(), workflow_template)][0]
    proj_list = []
    for unaligned_project in project_list:
        proj,fc_tag,proj_lines,date_tag = get_project_params(endpoint, header_keys,
                                                             lane_order, unaligned_project,
                                                             N=N, sort=sort, build=build)
        proj_list.append(proj)
        workflow_lines += proj_lines
    submit_tag = '%s_%s%s' % (date_tag, ('_').join(proj_list), fc_tag)
    proj_line_num = [idx for idx,line in enumerate(template_lines)
                     if 'Project Name' in line][0]
    workflow_lines[proj_line_num] = re.sub('<Your_project_name>', submit_tag,
                                           template_lines[proj_line_num])
    return (workflow_lines, submit_tag)
# Write completed batch workflow to file and return formatted path
def write_batch_workflow(workflow_lines, flowcell_dir, workflow_template, submit_tag):
    """Write the filled-in batch file under <flowcell_dir>/globus_batch_submission
    and print its Globus endpoint path."""
    target_dir = os.path.join(flowcell_dir, 'globus_batch_submission')
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    template_file = os.path.basename(workflow_template)
    workflow_file = submit_tag + '_' + template_file
    workflow_path = os.path.join(target_dir, workflow_file)
    w_file = open(workflow_path, 'w+')
    w_file.writelines(workflow_lines)
    w_file.close()
    print "Batch file path: \n%s" % format_endpoint_dir(workflow_path)
# Prompt user to specify workflow for each flowcell project
def build_submit_dict(flowcell_dir, workflow_dir, optimized_only=False):
    """Interactively pair each Unaligned project with a workflow template.

    Repeatedly prompts (raw_input) until the user submits an empty line.
    Returns {workflow_path: [project_path, ...]}.
    """
    flowcell_projects = [os.path.join(flowcell_dir, 'Unaligned', p)
                         for p in os.listdir(os.path.join(flowcell_dir, 'Unaligned'))
                         if '.' not in p]
    flowcell_projects.sort()
    # Candidate templates; 'Galaxy-API' files are excluded.
    workflow_choices = [os.path.join(workflow_dir, f)
                        for f in os.listdir(workflow_dir)
                        if 'Galaxy-API' not in f]
    workflow_choices.sort()
    if optimized_only:
        workflow_choices = [f for f in workflow_choices
                            if re.search('optimized', f)]
    ps_cont = True
    submit_dict = {}
    while ps_cont:
        print(submit_dict)
        print "\nFound the following projects: [current workflows selected]"
        for i, p in enumerate(flowcell_projects):
            # Indices of workflows already assigned to this project.
            workflow_nums = [w for w, k in enumerate(workflow_choices)
                             if p in submit_dict.get(k, [])]
            print "%3d : %s %s" % (i, os.path.basename(p), str(workflow_nums))
        p_i = raw_input("\nType the number of the project you wish to select or hit enter to finish: ")
        if len(p_i):
            selected_project = flowcell_projects[int(p_i)]
            for j, w in enumerate(workflow_choices):
                print "%3d : %s" % (j, os.path.basename(w))
            w_j = raw_input("\nSelect the number of the workflow to use for project %s: "
                            % os.path.basename(selected_project))
            selected_workflow = workflow_choices[int(w_j)]
            # One workflow may serve several projects.
            submit_dict.setdefault(selected_workflow, []).append(selected_project)
        else:
            # Empty input ends the selection loop.
            ps_cont = False
    return submit_dict
# Parse input arguments and call above functions to generate batch submit file
def main(argv):
    """Parse command-line arguments, interactively pair projects with
    workflows, and write one batch submission file per selected workflow.

    argv: argument list (normally sys.argv[1:]).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--endpoint',
                        required=True,
                        default=None,
                        help=("specify the Globus Online endpoint "
                              "where input data is stored / results are "
                              "to be saved - e.g., "
                              "jeddy#srvgridftp01"))
    parser.add_argument('-f', '--flowcell_dir',
                        required=True,
                        default=None,
                        help=("specify the directory of unaligned library "
                              "FASTQs to be processed - e.g., "
                              "/mnt/genomics/Illumina/"
                              "150615_D00565_0087_AC6VG0ANXX/Unaligned"
                              "P43-12-23224208"))
    parser.add_argument('-w', '--workflow_dir',
                        default=None,
                        help=("specify batch submission template "
                              "for the Galaxy Workflow to be used for"
                              "processing the current project - e.g., "
                              "/mnt/genomics/galaxyWorkflows/"
                              "Galaxy-API-Workflow-alignCount_truSeq_"
                              "single_GRCh38_v1.txt"))
    # Bug fix: a stray trailing comma after this call built a useless
    # one-element tuple; removed. A help string is also added.
    parser.add_argument('-N', '--first_N',
                        type=int,
                        default=None,
                        help=("only process the first N libraries "
                              "of each project"))
    parser.add_argument('-s', '--sort_libs',
                        action='store_true',
                        help=("sort libraries from smallest to largest"))
    parser.add_argument('-o', '--optimized_only',
                        action='store_true',
                        help=("show only optimized workflows"))
    # Bug fix: parse the argv passed in rather than re-reading sys.argv,
    # so main() honours its parameter (behaviour unchanged for __main__).
    args = parser.parse_args(argv)
    endpoint = args.endpoint
    flowcell_dir = args.flowcell_dir
    workflow_dir = args.workflow_dir
    N = args.first_N
    sort_libs = args.sort_libs
    optimized_only = args.optimized_only
    submit_dict = build_submit_dict(flowcell_dir, workflow_dir, optimized_only)
    for w in submit_dict:
        workflow_lines,submit_tag = create_workflow_file(endpoint,
                                                         w, submit_dict[w],
                                                         N, sort_libs)
        write_batch_workflow(workflow_lines, flowcell_dir, w, submit_tag)
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
# coding=UTF-8
from django.db.models import Q
from ladder.models import Challenge, Rank
from math import ceil
def _open_challenges_exist(user, ladder):
    """Return True if there are challenges open in the provided ladder for the user.

    A challenge counts as "open" when the user is on either side of it and its
    status is STATUS_ACCEPTED or STATUS_NOT_ACCEPTED.
    """
    involves_user = Q(challenger=user) | Q(challengee=user)
    open_status = (Q(accepted=Challenge.STATUS_ACCEPTED) |
                   Q(accepted=Challenge.STATUS_NOT_ACCEPTED))
    # exists() lets the database stop at the first match instead of
    # counting every row as count() > 0 did.
    return Challenge.objects.filter(involves_user & open_status & Q(ladder=ladder)).exists()
def _get_user_challenges(user, ladder = None, statuses = None):
    """Get all the challenges from a specified user (challenger or challengee). When no ladder statuses passed along, returns all challenges.
    user = User object
    ladder = Ladder object (optional)
    statuses = Tuple of challenge statuses (see ladder views)
    """
    # Grab the challenges from a user without filters
    open_challenges = Challenge.objects.filter(Q(challengee=user) | Q(challenger=user))
    # Narrow it down to a single ladder if provided.
    if ladder is not None:
        open_challenges = open_challenges.filter(ladder=ladder)
    # Narrow it down to the requested statuses.
    # Bug fix: the original chained .filter(accepted=status) once per status,
    # which ANDs the conditions together — a row can never equal two different
    # statuses at once, so any multi-status call returned an empty queryset.
    # accepted__in matches any of the statuses (OR), which is the intent.
    if statuses is not None:
        open_challenges = open_challenges.filter(accepted__in=statuses)
    return open_challenges
def _get_valid_targets(user, user_rank, allTargets, ladder):
    """Takes a Rank QueryObject and returns a list of challengable ranks in the ladder.
    You are allowed to challenge if:
    - User is on the ladder. (checked beforehand)
    - User has no open challenges in this ladder.
    - User's (/w ▲) target is within current rank - UPARROW range.
    - User's (/w ▼) target is within current rank + DNARROW range.
    - User has not challenged target since TIMEOUT time has passed. *NOT IMPLEMENTED
    """
    # list of ranks player can challenge
    challengables = []
    # user has no open challenges in this ladder
    # NOTE(review): this count is computed but never used, so the "no open
    # challenges" rule from the docstring is not actually enforced here —
    # confirm whether the caller performs that check or it is simply missing.
    open_challenges = _get_user_challenges(user, ladder, (Challenge.STATUS_NOT_ACCEPTED, Challenge.STATUS_ACCEPTED)).count()
    # Get user's arrow and rank
    user_arrow = user_rank.arrow
    user_nrank = user_rank.rank
    # get the constraints for this ladder
    up_distance = ladder.up_arrow
    dn_distance = ladder.down_arrow
    # Get the range of ranks to search between (inclusive bounds for rank__range)
    if user_arrow == Rank.ARROW_UP :
        r_range = (user_nrank - up_distance, user_nrank - 1)
    elif user_arrow == Rank.ARROW_DOWN :
        r_range = (user_nrank + 1, user_nrank + dn_distance)
    else :
        raise ValueError( 'Rank.arrow can be either "0" (Up Arrow) or "1" (Down Arrow), but was "{}"'.format( user_arrow ) )
    # Get all ranks on the ladder within our target range
    # NOTE(review): the allTargets parameter is unused — presumably superseded
    # by this direct query; confirm before removing it from the signature.
    for target_rank in Rank.objects.filter(ladder = ladder,rank__range = r_range) :
        challengables.append(target_rank.rank)
    return challengables
# TODO: test this
# It should wrap a view function with automatic paging support
class PagingInfo :
    """Paging state for a view: the current page, the page length, and the
    list of available page numbers (filled by set_item_count)."""

    def __init__( self, page, page_length ) :
        self.page = page
        self.page_length = page_length
        self.page_list = None

    def set_item_count( self, item_count ) :
        """Compute and return the 1-based list of page numbers for item_count items."""
        # We add 2 to the range end because ranges are exclusive and we're 1 based.
        # Bug fix: this previously added 3, producing one spurious extra page
        # (the comment above already documented the intended +2).
        range_end = ceil( item_count / self.page_length ) + 2
        self.page_list = range( 1, int( range_end ) )
        return self.page_list

    def get_item_slice( self ) :
        """Return a 0-based slice selecting the current page's items."""
        firstel = ( self.page - 1 ) * self.page_length
        return slice( firstel, firstel + self.page_length )
def paged( fn ) :
    """Decorator adding paging support to a view function.

    Reads the page number ('p') and page length ('l') from the GET query
    string, builds a PagingInfo, and passes it to the view as page_info.
    """
    def _paged_viewfn( request, *args, **kwargs ) :
        is_get = request.method == "GET"
        # Use the 'in' operator instead of dict.has_key() (removed in
        # Python 3), and convert the raw query-string values to int so
        # PagingInfo's arithmetic works — GET parameters arrive as strings.
        page = int( request.GET['p'] ) if is_get and 'p' in request.GET else 1
        page_length = int( request.GET['l'] ) if is_get and 'l' in request.GET else 25
        page_info = PagingInfo( page = page, page_length = page_length )
        return fn( request, page_info = page_info, *args, **kwargs )
    return _paged_viewfn
Fix off-by-one in PagingInfo.set_item_count: the range-end padding should be 2, not 3.
# coding=UTF-8
from django.db.models import Q
from ladder.models import Challenge, Rank
from math import ceil
def _open_challenges_exist(user, ladder):
    """Return True if there are challenges open in the provided ladder for the user.

    A challenge counts as "open" when the user is on either side of it and its
    status is STATUS_ACCEPTED or STATUS_NOT_ACCEPTED.
    """
    involves_user = Q(challenger=user) | Q(challengee=user)
    open_status = (Q(accepted=Challenge.STATUS_ACCEPTED) |
                   Q(accepted=Challenge.STATUS_NOT_ACCEPTED))
    # exists() lets the database stop at the first match instead of
    # counting every row as count() > 0 did.
    return Challenge.objects.filter(involves_user & open_status & Q(ladder=ladder)).exists()
def _get_user_challenges(user, ladder = None, statuses = None):
    """Get all the challenges from a specified user (challenger or challengee). When no ladder statuses passed along, returns all challenges.
    user = User object
    ladder = Ladder object (optional)
    statuses = Tuple of challenge statuses (see ladder views)
    """
    # Grab the challenges from a user without filters
    open_challenges = Challenge.objects.filter(Q(challengee=user) | Q(challenger=user))
    # Narrow it down to a single ladder if provided.
    if ladder is not None:
        open_challenges = open_challenges.filter(ladder=ladder)
    # Narrow it down to the requested statuses.
    # Bug fix: the original chained .filter(accepted=status) once per status,
    # which ANDs the conditions together — a row can never equal two different
    # statuses at once, so any multi-status call returned an empty queryset.
    # accepted__in matches any of the statuses (OR), which is the intent.
    if statuses is not None:
        open_challenges = open_challenges.filter(accepted__in=statuses)
    return open_challenges
def _get_valid_targets(user, user_rank, allTargets, ladder):
    """Takes a Rank QueryObject and returns a list of challengable ranks in the ladder.
    You are allowed to challenge if:
    - User is on the ladder. (checked beforehand)
    - User has no open challenges in this ladder.
    - User's (/w ▲) target is within current rank - UPARROW range.
    - User's (/w ▼) target is within current rank + DNARROW range.
    - User has not challenged target since TIMEOUT time has passed. *NOT IMPLEMENTED
    """
    # list of ranks player can challenge
    challengables = []
    # user has no open challenges in this ladder
    # NOTE(review): this count is computed but never used, so the "no open
    # challenges" rule from the docstring is not actually enforced here —
    # confirm whether the caller performs that check or it is simply missing.
    open_challenges = _get_user_challenges(user, ladder, (Challenge.STATUS_NOT_ACCEPTED, Challenge.STATUS_ACCEPTED)).count()
    # Get user's arrow and rank
    user_arrow = user_rank.arrow
    user_nrank = user_rank.rank
    # get the constraints for this ladder
    up_distance = ladder.up_arrow
    dn_distance = ladder.down_arrow
    # Get the range of ranks to search between (inclusive bounds for rank__range)
    if user_arrow == Rank.ARROW_UP :
        r_range = (user_nrank - up_distance, user_nrank - 1)
    elif user_arrow == Rank.ARROW_DOWN :
        r_range = (user_nrank + 1, user_nrank + dn_distance)
    else :
        raise ValueError( 'Rank.arrow can be either "0" (Up Arrow) or "1" (Down Arrow), but was "{}"'.format( user_arrow ) )
    # Get all ranks on the ladder within our target range
    # NOTE(review): the allTargets parameter is unused — presumably superseded
    # by this direct query; confirm before removing it from the signature.
    for target_rank in Rank.objects.filter(ladder = ladder,rank__range = r_range) :
        challengables.append(target_rank.rank)
    return challengables
# TODO: test this
# It should wrap a view function with automatic paging support
class PagingInfo :
    """Holds the paging state of a view: the current page, how long a page
    is, and (once set_item_count has run) the list of page numbers."""

    def __init__( self, page, page_length ) :
        self.page = page
        self.page_length = page_length
        self.page_list = None

    def set_item_count( self, item_count ) :
        """Derive the 1-based page-number list for item_count items."""
        # Ranges are exclusive and pages are 1-based, hence the +2 padding.
        last_page = ceil( item_count / self.page_length ) + 2
        self.page_list = range( 1, int( last_page ) )
        return self.page_list

    def get_item_slice( self ) :
        """Slice (0-based) covering the items of the current page."""
        start = ( self.page - 1 ) * self.page_length
        return slice( start, start + self.page_length )
def paged( fn ) :
    """Decorator adding paging support to a view function.

    Reads the page number ('p') and page length ('l') from the GET query
    string, builds a PagingInfo, and passes it to the view as page_info.
    """
    def _paged_viewfn( request, *args, **kwargs ) :
        is_get = request.method == "GET"
        # Use the 'in' operator instead of dict.has_key() (removed in
        # Python 3), and convert the raw query-string values to int so
        # PagingInfo's arithmetic works — GET parameters arrive as strings.
        page = int( request.GET['p'] ) if is_get and 'p' in request.GET else 1
        page_length = int( request.GET['l'] ) if is_get and 'l' in request.GET else 25
        page_info = PagingInfo( page = page, page_length = page_length )
        return fn( request, page_info = page_info, *args, **kwargs )
    return _paged_viewfn
from time import time
from app import db
from flask import render_template, g, request, session, url_for, Blueprint
mod_root = Blueprint('root', __name__, url_prefix='/')
@mod_root.route('/')
def index():
    """Render the site landing page."""
    return render_template('index.html')
@mod_root.before_app_request
def before_request():
    """Record the request start time for the execution-time footer."""
    # Save current time to be used after the request to display the time the request took to complete.
    g.start_time = time()
@mod_root.after_app_request
def after_request(response):
    """Replace the string __EXECUTION_TIME__ in the response body with the
    actual execution time, and remember this URL for post-error redirects."""
    # Remembered so the error handlers can send the user back where they were.
    session['previous_page'] = request.url
    diff = round((time() - g.start_time) * 1000)
    execution_time_string = "1 millisecond" if diff == 1 else "{} milliseconds".format(diff)
    if response.response:
        try:
            response.response[0] = response.response[0].replace('__EXECUTION_TIME__'.encode('utf-8'), execution_time_string.encode('utf-8'))
            # The body length changed, so keep Content-Length in sync.
            response.headers["content-length"] = len(response.response[0])
        except TypeError:
            # NOTE(review): replace() does not raise when the marker is absent;
            # TypeError presumably comes from non-bytes (e.g. streamed or
            # passthrough) bodies — confirm and tighten this comment.
            pass
    return response
def setup_error_handlers(app):
    """Register the 404 and 500 error handlers on the given Flask app.

    Both handlers offer a redirect back to the page the user was on
    (recorded in the session by after_request), falling back to the index.
    """
    def _redirect_target():
        # Where to send the user after showing the error page.
        try:
            return session['previous_page']
        except KeyError:
            return url_for('root.index')

    @app.errorhandler(404)
    def not_found_error(error):
        return render_template('404.html', title='404', redirect_url=_redirect_target()), 404

    @app.errorhandler(500)
    def internal_error(error):
        # A 500 may leave a broken transaction behind; roll it back first.
        db.session.rollback()
        return render_template('500.html', title='500', redirect_url=_redirect_target()), 500
Redirect HTTP requests to HTTPS when not running in debug mode.
from time import time
from app import db
from flask import render_template, g, request, session, url_for, Blueprint, redirect, current_app
mod_root = Blueprint('root', __name__, url_prefix='/')
@mod_root.route('/')
def index():
    """Render the site landing page."""
    return render_template('index.html')
@mod_root.before_app_request
def before_request():
    """Record the request start time and force HTTPS outside debug mode."""
    # Stored so after_request can report how long the request took.
    g.start_time = time()
    # Outside debug mode, permanently redirect plain-HTTP requests to HTTPS.
    if not current_app.debug and request.url.startswith('http://'):
        secure_url = request.url.replace('http://', 'https://', 1)
        return redirect(secure_url, code=301)
@mod_root.after_app_request
def after_request(response):
    """Replace the string __EXECUTION_TIME__ in the response body with the
    actual execution time, and remember this URL for post-error redirects."""
    # Remembered so the error handlers can send the user back where they were.
    session['previous_page'] = request.url
    diff = round((time() - g.start_time) * 1000)
    execution_time_string = "1 millisecond" if diff == 1 else "{} milliseconds".format(diff)
    if response.response:
        try:
            response.response[0] = response.response[0].replace('__EXECUTION_TIME__'.encode('utf-8'), execution_time_string.encode('utf-8'))
            # The body length changed, so keep Content-Length in sync.
            response.headers["content-length"] = len(response.response[0])
        except TypeError:
            # NOTE(review): replace() does not raise when the marker is absent;
            # TypeError presumably comes from non-bytes (e.g. streamed or
            # passthrough) bodies — confirm and tighten this comment.
            pass
    return response
def setup_error_handlers(app):
    """Register the 404 and 500 error handlers on the given Flask app.

    Both handlers offer a redirect back to the page the user was on
    (recorded in the session by after_request), falling back to the index.
    """
    def _redirect_target():
        # Where to send the user after showing the error page.
        try:
            return session['previous_page']
        except KeyError:
            return url_for('root.index')

    @app.errorhandler(404)
    def not_found_error(error):
        return render_template('404.html', title='404', redirect_url=_redirect_target()), 404

    @app.errorhandler(500)
    def internal_error(error):
        # A 500 may leave a broken transaction behind; roll it back first.
        db.session.rollback()
        return render_template('500.html', title='500', redirect_url=_redirect_target()), 500
|
"""
fs.errors: error class definitions for FS
"""
import sys
import errno
from fs.path import *
try:
    from functools import wraps
except ImportError:
    # Minimal fallback for Pythons without functools.wraps.
    def wraps(func):
        def decorator(wfunc):
            # Bug fix: these lines used '==' (a no-op comparison) instead of
            # '=' and therefore never copied the metadata.
            wfunc.__name__ = func.__name__
            wfunc.__doc__ = func.__doc__
            wfunc.__module__ = func.__module__
            # Bug fix: the decorator must return the wrapped function;
            # previously it implicitly returned None.
            return wfunc
        return decorator
class FSError(Exception):
    """Base exception class for the FS module.

    Subclasses override default_message, a %-format template that __str__
    expands with the exception's attribute dict.
    """
    default_message = "Unspecified error"

    def __init__(self, msg=None, details=None):
        if msg is None:
            msg = self.default_message
        self.msg = msg
        self.details = details

    def __str__(self):
        # items() works on both Python 2 and 3, unlike the old iteritems().
        keys = dict((k, str(v)) for k, v in self.__dict__.items())
        return self.msg % keys

    def __unicode__(self):
        # Python 2 only: 'unicode' does not exist on Python 3, where
        # __unicode__ is never called anyway.
        return unicode(str(self))

    def __getstate__(self):
        # Support pickling by exposing a copy of the attribute dict.
        return self.__dict__.copy()
class PathError(FSError):
    """Error relating to an invalid path string."""
    default_message = "Path is invalid: %(path)s"

    def __init__(self, path="", **kwds):
        # Record the offending path for the message template.
        self.path = path
        super(PathError, self).__init__(**kwds)
class OperationFailedError(FSError):
    """Base class for errors tied to a specific failed operation."""
    default_message = "Unable to %(opname)s: unspecified error [%(errno)s - %(details)s]"

    def __init__(self, opname="", path=None, **kwds):
        self.opname = opname
        self.path = path
        # Mirror the errno of the underlying error, when one was supplied
        # via the 'details' keyword.
        self.errno = getattr(kwds.get("details", None), "errno", None)
        super(OperationFailedError, self).__init__(**kwds)
class UnsupportedError(OperationFailedError):
    """Exception raised for operations that are not supported by the FS."""
    default_message = "Unable to %(opname)s: not supported by this filesystem"

class RemoteConnectionError(OperationFailedError):
    """Exception raised when operations encounter remote connection trouble."""
    # Bug fix: "errror" -> "error" in the user-visible message.
    default_message = "Unable to %(opname)s: remote connection error"

class StorageSpaceError(OperationFailedError):
    """Exception raised when operations encounter storage space trouble."""
    default_message = "Unable to %(opname)s: insufficient storage space"

class PermissionDeniedError(OperationFailedError):
    """Exception raised when an operation is denied due to permissions."""
    default_message = "Unable to %(opname)s: permission denied"
class ResourceError(FSError):
    """Base class for errors associated with a specific resource path."""
    default_message = "Unspecified resource error: %(path)s"

    def __init__(self, path="", **kwds):
        self.path = path
        # Resource errors may optionally carry the name of the operation.
        self.opname = kwds.pop("opname", None)
        super(ResourceError, self).__init__(**kwds)
# Concrete ResourceError subclasses: each only overrides the message template.
class NoSysPathError(ResourceError):
    """Exception raised when there is no syspath for a given path."""
    default_message = "No mapping to OS filesystem: %(path)s"
class ResourceNotFoundError(ResourceError):
    """Exception raised when a required resource is not found."""
    default_message = "Resource not found: %(path)s"
class ResourceInvalidError(ResourceError):
    """Exception raised when a resource is the wrong type."""
    default_message = "Resource is invalid: %(path)s"
class DestinationExistsError(ResourceError):
    """Exception raised when a target destination already exists."""
    default_message = "Destination exists: %(path)s"
class DirectoryNotEmptyError(ResourceError):
    """Exception raised when a directory to be removed is not empty."""
    default_message = "Directory is not empty: %(path)s"
class ParentDirectoryMissingError(ResourceError):
    """Exception raised when a parent directory is missing."""
    default_message = "Parent directory is missing: %(path)s"
class ResourceLockedError(ResourceError):
    """Exception raised when a resource can't be used because it is locked."""
    default_message = "Resource is locked: %(path)s"
def convert_fs_errors(func):
    """Function wrapper to convert FSError instances into OSErrors.

    Each FSError subclass is mapped to the closest matching errno; any other
    FSError falls through to EFAULT.
    """
    @wraps(func)
    def wrapper(*args,**kwds):
        try:
            return func(*args,**kwds)
        # 'except X as e' works on Python 2.6+ and Python 3, unlike the
        # old 'except X, e' form used previously.
        except ResourceNotFoundError as e:
            raise OSError(errno.ENOENT,str(e))
        except ResourceInvalidError as e:
            raise OSError(errno.EINVAL,str(e))
        except PermissionDeniedError as e:
            # Bug fix: errno.EACCESS does not exist; the constant is EACCES.
            raise OSError(errno.EACCES,str(e))
        except DirectoryNotEmptyError as e:
            raise OSError(errno.ENOTEMPTY,str(e))
        except DestinationExistsError as e:
            raise OSError(errno.EEXIST,str(e))
        except StorageSpaceError as e:
            raise OSError(errno.ENOSPC,str(e))
        except RemoteConnectionError as e:
            raise OSError(errno.ENONET,str(e))
        except UnsupportedError as e:
            raise OSError(errno.ENOSYS,str(e))
        except FSError as e:
            raise OSError(errno.EFAULT,str(e))
    return wrapper
def convert_os_errors(func):
"""Function wrapper to convert OSError/IOError instances into FSErrors."""
opname = func.__name__
@wraps(func)
def wrapper(self,*args,**kwds):
try:
return func(self,*args,**kwds)
except (OSError,IOError), e:
(exc_type,exc_inst,tb) = sys.exc_info()
path = getattr(e,"filename",None)
if path and path[0] == "/" and hasattr(self,"root_path"):
path = normpath(path)
if isprefix(self.root_path,path):
path = path[len(self.root_path):]
if not hasattr(e,"errno") or not e.errno:
raise OperationFailedError(opname,details=e),None,tb
if e.errno == errno.ENOENT:
raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
if e.errno == errno.ENOTEMPTY:
raise DirectoryNotEmptyError(path,opname=opname,details=e),None,tb
if e.errno == errno.EEXIST:
raise DestinationExistsError(path,opname=opname,details=e),None,tb
if e.errno == 183: # some sort of win32 equivalent to EEXIST
raise DestinationExistsError(path,opname=opname,details=e),None,tb
if e.errno == errno.ENOTDIR:
raise ResourceInvalidError(path,opname=opname,details=e),None,tb
if e.errno == errno.EISDIR:
raise ResourceInvalidError(path,opname=opname,details=e),None,tb
if e.errno == errno.EINVAL:
raise ResourceInvalidError(path,opname=opname,details=e),None,tb
if e.errno == errno.EOPNOTSUPP:
raise UnsupportedError(opname,details=e),None,tb
if e.errno == errno.ENOSPC:
raise StorageSpaceError(opname,details=e),None,tb
if e.errno == errno.EACCESS:
raise PermissionDeniedError(opname,details=e),None,tb
# Sometimes windows gives some random errors...
if sys.platform == "win32":
if e.errno in (13,):
raise ResourceInvalidError(path,opname=opname,details=e),None,tb
raise OperationFailedError(opname,details=e),None,tb
return wrapper
Correctly spell errno.EACCES (was misspelled EACCESS).
git-svn-id: 74b2def6592cf29d88d1a5d33b5c4a2732d8507c@267 67cdc799-7952-0410-af00-57a81ceafa0f
"""
fs.errors: error class definitions for FS
"""
import sys
import errno
from fs.path import *
try:
    from functools import wraps
except ImportError:
    # Minimal fallback for Pythons without functools.wraps.
    def wraps(func):
        def decorator(wfunc):
            # Bug fix: these lines used '==' (a no-op comparison) instead of
            # '=' and therefore never copied the metadata.
            wfunc.__name__ = func.__name__
            wfunc.__doc__ = func.__doc__
            wfunc.__module__ = func.__module__
            # Bug fix: the decorator must return the wrapped function;
            # previously it implicitly returned None.
            return wfunc
        return decorator
class FSError(Exception):
    """Base exception class for the FS module.

    Subclasses override default_message, a %-format template that __str__
    expands with the exception's attribute dict.
    """
    default_message = "Unspecified error"

    def __init__(self, msg=None, details=None):
        if msg is None:
            msg = self.default_message
        self.msg = msg
        self.details = details

    def __str__(self):
        # items() works on both Python 2 and 3, unlike the old iteritems().
        keys = dict((k, str(v)) for k, v in self.__dict__.items())
        return self.msg % keys

    def __unicode__(self):
        # Python 2 only: 'unicode' does not exist on Python 3, where
        # __unicode__ is never called anyway.
        return unicode(str(self))

    def __getstate__(self):
        # Support pickling by exposing a copy of the attribute dict.
        return self.__dict__.copy()
class PathError(FSError):
    """Error relating to an invalid path string."""
    default_message = "Path is invalid: %(path)s"

    def __init__(self, path="", **kwds):
        # Record the offending path for the message template.
        self.path = path
        super(PathError, self).__init__(**kwds)
class OperationFailedError(FSError):
    """Base class for errors tied to a specific failed operation."""
    default_message = "Unable to %(opname)s: unspecified error [%(errno)s - %(details)s]"

    def __init__(self, opname="", path=None, **kwds):
        self.opname = opname
        self.path = path
        # Mirror the errno of the underlying error, when one was supplied
        # via the 'details' keyword.
        self.errno = getattr(kwds.get("details", None), "errno", None)
        super(OperationFailedError, self).__init__(**kwds)
class UnsupportedError(OperationFailedError):
    """Exception raised for operations that are not supported by the FS."""
    default_message = "Unable to %(opname)s: not supported by this filesystem"

class RemoteConnectionError(OperationFailedError):
    """Exception raised when operations encounter remote connection trouble."""
    # Bug fix: "errror" -> "error" in the user-visible message.
    default_message = "Unable to %(opname)s: remote connection error"

class StorageSpaceError(OperationFailedError):
    """Exception raised when operations encounter storage space trouble."""
    default_message = "Unable to %(opname)s: insufficient storage space"

class PermissionDeniedError(OperationFailedError):
    """Exception raised when an operation is denied due to permissions."""
    default_message = "Unable to %(opname)s: permission denied"
class ResourceError(FSError):
    """Base class for errors associated with a specific resource path."""
    default_message = "Unspecified resource error: %(path)s"

    def __init__(self, path="", **kwds):
        self.path = path
        # Resource errors may optionally carry the name of the operation.
        self.opname = kwds.pop("opname", None)
        super(ResourceError, self).__init__(**kwds)
# Concrete ResourceError subclasses: each only overrides the message template.
class NoSysPathError(ResourceError):
    """Exception raised when there is no syspath for a given path."""
    default_message = "No mapping to OS filesystem: %(path)s"
class ResourceNotFoundError(ResourceError):
    """Exception raised when a required resource is not found."""
    default_message = "Resource not found: %(path)s"
class ResourceInvalidError(ResourceError):
    """Exception raised when a resource is the wrong type."""
    default_message = "Resource is invalid: %(path)s"
class DestinationExistsError(ResourceError):
    """Exception raised when a target destination already exists."""
    default_message = "Destination exists: %(path)s"
class DirectoryNotEmptyError(ResourceError):
    """Exception raised when a directory to be removed is not empty."""
    default_message = "Directory is not empty: %(path)s"
class ParentDirectoryMissingError(ResourceError):
    """Exception raised when a parent directory is missing."""
    default_message = "Parent directory is missing: %(path)s"
class ResourceLockedError(ResourceError):
    """Exception raised when a resource can't be used because it is locked."""
    default_message = "Resource is locked: %(path)s"
def convert_fs_errors(func):
    """Function wrapper to convert FSError instances into OSErrors.

    Each FSError subclass is mapped to the closest matching errno; any other
    FSError falls through to EFAULT.
    """
    @wraps(func)
    def wrapper(*args,**kwds):
        try:
            return func(*args,**kwds)
        # 'except X as e' works on Python 2.6+ and Python 3, unlike the
        # old 'except X, e' form used previously.
        except ResourceNotFoundError as e:
            raise OSError(errno.ENOENT,str(e))
        except ResourceInvalidError as e:
            raise OSError(errno.EINVAL,str(e))
        except PermissionDeniedError as e:
            raise OSError(errno.EACCES,str(e))
        except DirectoryNotEmptyError as e:
            raise OSError(errno.ENOTEMPTY,str(e))
        except DestinationExistsError as e:
            raise OSError(errno.EEXIST,str(e))
        except StorageSpaceError as e:
            raise OSError(errno.ENOSPC,str(e))
        except RemoteConnectionError as e:
            raise OSError(errno.ENONET,str(e))
        except UnsupportedError as e:
            raise OSError(errno.ENOSYS,str(e))
        except FSError as e:
            raise OSError(errno.EFAULT,str(e))
    return wrapper
def convert_os_errors(func):
    """Function wrapper to convert OSError/IOError instances into FSErrors.

    Dispatches on the caught error's errno to pick the FSError subclass.
    The Python-2 three-argument raise (raise X, None, tb) preserves the
    original traceback on the converted exception.
    """
    opname = func.__name__
    @wraps(func)
    def wrapper(self,*args,**kwds):
        try:
            return func(self,*args,**kwds)
        except (OSError,IOError), e:
            (exc_type,exc_inst,tb) = sys.exc_info()
            path = getattr(e,"filename",None)
            # Rewrite absolute paths relative to this FS's root, when known.
            if path and path[0] == "/" and hasattr(self,"root_path"):
                path = normpath(path)
                if isprefix(self.root_path,path):
                    path = path[len(self.root_path):]
            # No usable errno: report a generic operation failure.
            if not hasattr(e,"errno") or not e.errno:
                raise OperationFailedError(opname,details=e),None,tb
            if e.errno == errno.ENOENT:
                raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
            if e.errno == errno.ENOTEMPTY:
                raise DirectoryNotEmptyError(path,opname=opname,details=e),None,tb
            if e.errno == errno.EEXIST:
                raise DestinationExistsError(path,opname=opname,details=e),None,tb
            if e.errno == 183: # some sort of win32 equivalent to EEXIST
                raise DestinationExistsError(path,opname=opname,details=e),None,tb
            if e.errno == errno.ENOTDIR:
                raise ResourceInvalidError(path,opname=opname,details=e),None,tb
            if e.errno == errno.EISDIR:
                raise ResourceInvalidError(path,opname=opname,details=e),None,tb
            if e.errno == errno.EINVAL:
                raise ResourceInvalidError(path,opname=opname,details=e),None,tb
            if e.errno == errno.EOPNOTSUPP:
                raise UnsupportedError(opname,details=e),None,tb
            if e.errno == errno.ENOSPC:
                raise StorageSpaceError(opname,details=e),None,tb
            if e.errno == errno.EACCES:
                raise PermissionDeniedError(opname,details=e),None,tb
            # Sometimes windows gives some random errors...
            if sys.platform == "win32":
                if e.errno in (13,):
                    raise ResourceInvalidError(path,opname=opname,details=e),None,tb
            raise OperationFailedError(opname,details=e),None,tb
    return wrapper
|
'''
@author Bill Bollenbacher
@author Swarbhanu Chatterjee
@author David Stuebe
@file ion/services/dm/presentation/test/user_notification_test.py
@description Unit and Integration test implementations for the user notification service class.
'''
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.util.containers import DotDict, get_ion_ts
from pyon.public import IonObject, RT, OT, PRED, Container
from pyon.core.exception import NotFound, BadRequest
from pyon.core.bootstrap import get_sys_name, CFG
from ion.services.dm.utility.granule_utils import time_series_domain
from interface.services.coi.iidentity_management_service import IdentityManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.services.dm.iuser_notification_service import UserNotificationServiceClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from interface.services.dm.idiscovery_service import DiscoveryServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from ion.services.dm.presentation.user_notification_service import UserNotificationService
from interface.objects import UserInfo, DeliveryConfig, ComputedListValue, ComputedValueAvailability
from interface.objects import DeviceEvent, NotificationPreferences, NotificationDeliveryModeEnum
from pyon.util.context import LocalContextMixin
from interface.services.cei.ischeduler_service import SchedulerServiceProcessClient
from nose.plugins.attrib import attr
import unittest
from pyon.util.log import log
from pyon.event.event import EventPublisher, EventSubscriber
import gevent
from mock import Mock, mocksignature
from interface.objects import NotificationRequest, TemporalBounds
from ion.services.dm.inventory.index_management_service import IndexManagementService
from ion.services.dm.presentation.user_notification_service import EmailEventProcessor
from ion.processes.bootstrap.index_bootstrap import STD_INDEXES
import os, time, uuid
from gevent import event, queue
from gevent.timeout import Timeout
from gevent.event import Event
import elasticpy as ep
from datetime import datetime, timedelta
from sets import Set
# Whether ElasticSearch-backed tests are enabled for this deployment.
use_es = CFG.get_safe('system.elasticsearch',False)
def now():
    '''
    This method defines what the UNS uses as its "current" time
    '''
    return datetime.utcnow()
class FakeProcess(LocalContextMixin):
    """Minimal process stand-in handed to process clients in these tests."""
    name = 'scheduler_for_user_notification_test'
    id = 'scheduler_client'
    process_type = 'simple'
@attr('UNIT',group='dm')
class UserNotificationTest(PyonTestCase):
    def setUp(self):
        """Wire a UserNotificationService instance to mocked clients and a
        fake container so each unit test runs without real infrastructure."""
        mock_clients = self._create_service_mock('user_notification')
        self.user_notification = UserNotificationService()
        self.user_notification.clients = mock_clients
        # Fake just enough of the container for spawn/terminate bookkeeping.
        self.user_notification.container = DotDict()
        self.user_notification.container.node = Mock()
        self.user_notification.container['spawn_process'] = Mock()
        self.user_notification.container['id'] = 'mock_container_id'
        self.user_notification.container['proc_manager'] = DotDict()
        self.user_notification.container.proc_manager['terminate_process'] = Mock()
        self.user_notification.container.proc_manager['procs'] = {}
        # Convenience handles used by the tests below.
        self.mock_cc_spawn = self.user_notification.container.spawn_process
        self.mock_cc_terminate = self.user_notification.container.proc_manager.terminate_process
        self.mock_cc_procs = self.user_notification.container.proc_manager.procs
        self.mock_rr_client = self.user_notification.clients.resource_registry
        self.user_notification.smtp_server = 'smtp_server'
        self.user_notification.smtp_client = 'smtp_client'
        self.user_notification.event_publisher = EventPublisher()
        self.user_notification.event_processor = EmailEventProcessor()
    def test_create_notification(self):
        """create_notification persists the request and links it to the user."""
        # Test creating a notification
        user_id = 'user_id_1'
        # Stub out every resource-registry call the service makes.
        self.mock_rr_client.create = mocksignature(self.mock_rr_client.create)
        self.mock_rr_client.create.return_value = ('notification_id_1','rev_1')
        self.mock_rr_client.find_resources = mocksignature(self.mock_rr_client.find_resources)
        self.mock_rr_client.find_resources.return_value = [],[]
        self.mock_rr_client.read = mocksignature(self.mock_rr_client.read)
        self.mock_rr_client.read.return_value = 'notification'
        self.mock_rr_client.find_associations = mocksignature(self.mock_rr_client.find_associations)
        self.mock_rr_client.find_associations.return_value = []
        self.mock_rr_client.create_association = mocksignature(self.mock_rr_client.create_association)
        self.mock_rr_client.create_association.return_value = None
        self.user_notification.notifications = {}
        # Stub the event-processor and publisher side effects.
        self.user_notification.event_processor.add_notification_for_user = mocksignature(self.user_notification.event_processor.add_notification_for_user)
        self.user_notification.event_publisher.publish_event = mocksignature(self.user_notification.event_publisher.publish_event)
        self.user_notification._notification_in_notifications = mocksignature(self.user_notification._notification_in_notifications)
        self.user_notification._notification_in_notifications.return_value = None
        self.mock_rr_client.create_association = mocksignature(self.mock_rr_client.create_association)
        #-------------------------------------------------------------------------------------------------------------------
        # Create a notification object
        #-------------------------------------------------------------------------------------------------------------------
        notification_request = NotificationRequest(name='a name',
            origin = 'origin_1',
            origin_type = 'origin_type_1',
            event_type= 'event_type_1',
            event_subtype = 'event_subtype_1' )
        #-------------------------------------------------------------------------------------------------------------------
        # execution
        #-------------------------------------------------------------------------------------------------------------------
        notification_id = self.user_notification.create_notification(notification_request, user_id)
        #-------------------------------------------------------------------------------------------------------------------
        # assertions
        #-------------------------------------------------------------------------------------------------------------------
        self.assertEquals('notification_id_1', notification_id)
        self.mock_rr_client.create.assert_called_once_with(notification_request)
        self.user_notification.event_processor.add_notification_for_user.assert_called_once_with('notification', user_id)
def test_create_notification_validation(self):
    """Creating a notification without supplying a user_id must raise BadRequest."""
    # Build a request object that is otherwise valid -- only the user id
    # will be missing from the service call below.
    request = NotificationRequest(name='Setting_email',
                                  origin='origin',
                                  origin_type='origin_type',
                                  event_type='event_type',
                                  event_subtype='event_subtype')

    # Omitting user_id should be rejected with a descriptive message.
    with self.assertRaises(BadRequest) as context:
        self.user_notification.create_notification(notification=request)

    self.assertEquals(context.exception.message, '''User id not provided.''')
# def test_update_notification(self):
#
# # Test updating a notification
#
# notification = 'notification'
# user_id = 'user_id_1'
#
# self.mock_rr_client.read = mocksignature(self.mock_rr_client.read)
# self.mock_rr_client.read.return_value = notification
#
# self.user_notification.update_user_info_object = mocksignature(self.user_notification.update_user_info_object)
# self.user_notification.update_user_info_object.return_value = 'user'
#
# self.user_notification.notifications = []
#
# self.user_notification._update_notification_in_notifications_dict = mocksignature(self.user_notification._update_notification_in_notifications_dict)
#
# self.user_notification.event_publisher.publish_event = mocksignature(self.user_notification.event_publisher.publish_event)
#
# #-------------------------------------------------------------------------------------------------------------------
# # Create a notification object
# #-------------------------------------------------------------------------------------------------------------------
#
# notification_request = NotificationRequest(name='a name',
# origin = 'origin_1',
# origin_type = 'origin_type_1',
# event_type= 'event_type_1',
# event_subtype = 'event_subtype_1' )
#
# notification_request._id = 'an id'
#
# #-------------------------------------------------------------------------------------------------------------------
# # execution
# #-------------------------------------------------------------------------------------------------------------------
#
# self.user_notification.update_notification(notification_request, user_id)
#
# #-------------------------------------------------------------------------------------------------------------------
# # assertions
# #-------------------------------------------------------------------------------------------------------------------
#
# self.user_notification.update_user_info_object.assert_called_once_with(user_id, notification, notification)
def test_delete_user_notification(self):
    """Deleting a notification should retire it via registry read() + update()."""
    notification_id = 'notification_id_1'

    # Stub out event publishing and the user-info cache.
    self.user_notification.event_publisher.publish_event = mocksignature(self.user_notification.event_publisher.publish_event)
    self.user_notification.user_info = {}

    # The notification object that the resource registry will hand back.
    stored_notification = NotificationRequest(name='a name',
                                              origin='origin_1',
                                              origin_type='origin_type_1',
                                              event_type='event_type_1',
                                              event_subtype='event_subtype_1',
                                              temporal_bounds=TemporalBounds())
    stored_notification.temporal_bounds.start_datetime = ''

    # Wire the registry mocks: read returns the object, update succeeds,
    # and no subjects are associated with it.
    self.mock_rr_client.read = mocksignature(self.mock_rr_client.read)
    self.mock_rr_client.read.return_value = stored_notification
    self.mock_rr_client.update = mocksignature(self.mock_rr_client.update)
    self.mock_rr_client.update.return_value = ''
    self.mock_rr_client.find_subjects = mocksignature(self.mock_rr_client.find_subjects)
    self.mock_rr_client.find_subjects.return_value = [], ''

    # Exercise the service call under test.
    self.user_notification.delete_notification(notification_id=notification_id)

    # The service must have read the resource and written it back with its
    # temporal bounds closed out (same object identity, so the mock's
    # equality check passes after we stamp the end time here).
    self.mock_rr_client.read.assert_called_once_with(notification_id, '')
    stored_notification.temporal_bounds.end_datetime = get_ion_ts()
    self.mock_rr_client.update.assert_called_once_with(stored_notification)
@attr('INT', group='dm')
class UserNotificationIntTest(IonIntegrationTestCase):
def setUp(self):
    """Start a container with ElasticSearch enabled and build service clients."""
    super(UserNotificationIntTest, self).setUp()

    # Deployment configuration: ElasticSearch must be bootstrapped for the
    # discovery-based assertions in these tests.
    deploy_config = DotDict()
    deploy_config.bootstrap.use_es = True

    self._start_container()
    # Remove the ES indexes once the test finishes, regardless of outcome.
    self.addCleanup(UserNotificationIntTest.es_cleanup)
    self.container.start_rel_from_url('res/deploy/r2deploy.yml', deploy_config)

    # Service clients used throughout the integration tests.
    self.unsc = UserNotificationServiceClient()
    self.rrc = ResourceRegistryServiceClient()
    self.imc = IdentityManagementServiceClient()
    self.discovery = DiscoveryServiceClient()

    # Shared synchronization primitive and counter for event callbacks.
    self.event = Event()
    self.number_event_published = 0

    # The scheduler client needs a process context to issue requests from.
    fake_process = FakeProcess()
    self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=fake_process)

    self.ION_NOTIFICATION_EMAIL_ADDRESS = 'data_alerts@oceanobservatories.org'
def event_poll(self, poller, timeout):
    """Invoke poller() every 0.1 s until it returns truthy or timeout expires.

    Returns the (truthy) value produced by poller. A gevent Timeout is
    raised if nothing truthy is seen within `timeout` seconds.
    """
    with gevent.timeout.Timeout(timeout):
        outcome = False
        while not outcome:
            outcome = poller()
            # Yield this greenlet so sockets get a chance to close,
            # even after a successful poll.
            gevent.sleep(0.1)
    return outcome
@staticmethod
def es_cleanup():
    """Delete every test-created ElasticSearch index and its CouchDB river."""
    host = CFG.get_safe('server.elasticsearch.host', 'localhost')
    port = CFG.get_safe('server.elasticsearch.port', '9200')
    client = ep.ElasticSearch(
        host=host,
        port=port,
        timeout=10
    )

    # The standard indexes plus the per-sysname resource and event indexes.
    doomed = list(STD_INDEXES.keys())
    doomed.append('%s_resources_index' % get_sys_name().lower())
    doomed.append('%s_events_index' % get_sys_name().lower())

    for name in doomed:
        # Drop the CouchDB river first, then the index itself.
        IndexManagementService._es_call(client.river_couchdb_delete, name)
        IndexManagementService._es_call(client.index_delete, name)
def poll(self, tries, callback, *args, **kwargs):
    '''
    Polling wrapper for queries.

    Elasticsearch may not index and cache changes right away, so a result
    may only appear after a few retries with a short pause in between.

    :param tries: maximum number of attempts
    :param callback: callable invoked as callback(*args, **kwargs); its
        first truthy return value is the poll result
    :returns: the first truthy value returned by callback, or None if
        every attempt came back falsy
    '''
    # `range` (not Py2-only `xrange`): identical behavior here and portable.
    for attempt in range(tries):
        # Sleep *between* attempts only -- the previous version also slept
        # after the final failed try, wasting 0.2 s on every timeout path.
        if attempt:
            time.sleep(0.2)
        retval = callback(*args, **kwargs)
        if retval:
            return retval
    return None
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_pub_reload_user_info_event(self):
    """
    Verify that UNS publishes a ReloadUserInfoEvent for every
    create_notification() and delete_notification() call.

    (The update_notification() portion of this check is currently
    commented out below.)
    """
    # Test that the publishing of reload user info event occurs every time a create, update
    # or delete notification occurs.
    #--------------------------------------------------------------------------------------
    # Create subscribers for reload events
    #--------------------------------------------------------------------------------------
    queue = gevent.queue.Queue()

    # Subscriber callback: stash each received event so the main greenlet
    # can block on queue.get() with a timeout below.
    def reload_event_received(message, headers):
        queue.put(message)

    reload_event_subscriber = EventSubscriber(origin="UserNotificationService",
        event_type="ReloadUserInfoEvent",
        callback=reload_event_received)
    reload_event_subscriber.start()
    self.addCleanup(reload_event_subscriber.stop)

    #--------------------------------------------------------------------------------------
    # Make notification request objects
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest( name= 'notification_1',
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')

    notification_request_2 = NotificationRequest( name='notification_2',
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')

    #--------------------------------------------------------------------------------------
    # Create a user and get the user_id
    #--------------------------------------------------------------------------------------
    user = UserInfo()
    user.name = 'new_user'
    user.contact.email = 'new_user@yahoo.com'
    user_id, _ = self.rrc.create(user)

    #--------------------------------------------------------------------------------------
    # Create notification
    #--------------------------------------------------------------------------------------
    notification_id_1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
    notification_id_2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)

    notifications = set([notification_id_1, notification_id_2])

    #--------------------------------------------------------------------------------------
    # Check the publishing
    #--------------------------------------------------------------------------------------
    # One reload event is expected per create_notification() call; arrival
    # order is not guaranteed, hence the set comparison.
    received_event_1 = queue.get(timeout=10)
    received_event_2 = queue.get(timeout=10)

    notifications_received = set([received_event_1.notification_id, received_event_2.notification_id])

    self.assertEquals(notifications, notifications_received)

    # #--------------------------------------------------------------------------------------
    # # Update notification
    # #--------------------------------------------------------------------------------------
    # notification_request_correct = self.unsc.read_notification(notification_id_1)
    # notification_request_correct.origin = 'instrument_correct'
    #
    # notification_request_2 = self.unsc.read_notification(notification_id_2)
    # notification_request_2.origin = 'instrument_2_correct'
    #
    # self.unsc.update_notification(notification=notification_request_correct, user_id=user_id)
    # self.unsc.update_notification(notification=notification_request_2, user_id=user_id)
    #
    # #--------------------------------------------------------------------------------------
    # # Check that the correct events were published
    # #--------------------------------------------------------------------------------------
    #
    # received_event_1 = queue.get(timeout=10)
    # received_event_2 = queue.get(timeout=10)
    #
    # notifications_received = set([received_event_1.notification_id, received_event_2.notification_id])
    #
    # self.assertEquals(notifications, notifications_received)

    #--------------------------------------------------------------------------------------
    # Delete notification
    #--------------------------------------------------------------------------------------
    self.unsc.delete_notification(notification_id_1)
    self.unsc.delete_notification(notification_id_2)

    notifications = set([notification_id_1, notification_id_2])

    #--------------------------------------------------------------------------------------
    # Check that the correct events were published
    #--------------------------------------------------------------------------------------
    # Delete must also trigger one reload event per notification.
    received_event_1 = queue.get(timeout=10)
    received_event_2 = queue.get(timeout=10)

    notifications_received = set([received_event_1.notification_id, received_event_2.notification_id])

    self.assertEquals(notifications, notifications_received)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_notification_preferences(self):
    """
    Verify that per-user notification preferences stored in
    UserInfo.variables are loaded into the UNS process's user_info
    dictionary when notifications are created for those users.
    """
    #--------------------------------------------------------------------------------------
    # Make a notification request object
    #--------------------------------------------------------------------------------------
    notification_request = NotificationRequest( name= 'notification_1',
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')

    #--------------------------------------------------------------------------------------
    # Create user 1
    #--------------------------------------------------------------------------------------
    # user_1 prefers REALTIME delivery (delivery_enabled left at its default).
    notification_preferences_1 = NotificationPreferences()
    notification_preferences_1.delivery_mode = NotificationDeliveryModeEnum.REALTIME

    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@yahoo.com'
    user_1.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_1})
    user_id_1, _ = self.rrc.create(user_1)

    #--------------------------------------------------------------------------------------
    # user 2
    #--------------------------------------------------------------------------------------
    # user_2 prefers BATCH delivery with delivery explicitly disabled.
    notification_preferences_2 = NotificationPreferences()
    notification_preferences_2.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_2.delivery_enabled = False

    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@yahoo.com'
    user_2.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_2})
    user_id_2, _ = self.rrc.create(user_2)

    #--------------------------------------------------------------------------------------
    # Create notification
    #--------------------------------------------------------------------------------------
    notification_id_1 = self.unsc.create_notification(notification=notification_request, user_id=user_id_1)
    notification_id_2 = self.unsc.create_notification(notification=notification_request, user_id=user_id_2)

    notifications = set([notification_id_1, notification_id_2])

    proc1 = self.container.proc_manager.procs_by_name['user_notification']

    #--------------------------------------------------------------------------------------------------------------------------------------
    # check user_info dictionary to see that the notification preferences are properly loaded to the user info dictionaries
    #--------------------------------------------------------------------------------------------------------------------------------------
    self.assertEquals(proc1.user_info[user_id_1]['notification_preferences'], notification_preferences_1)
    self.assertEquals(proc1.user_info[user_id_2]['notification_preferences'], notification_preferences_2)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_user_info_UNS(self):
    """
    Verify that the UNS process reloads its user_info and reverse_user_info
    dictionaries on create_notification() and delete_notification().

    (The update_notification() portion of this check is currently
    commented out below.)
    """
    # Test that the user info dictionary maintained by the notification workers get updated when
    # a notification is created, updated, or deleted by UNS
    proc1 = self.container.proc_manager.procs_by_name['user_notification']

    #--------------------------------------------------------------------------------------
    # Make notification request objects
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest( name = 'notification_1',
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent',
        event_subtype = 'subtype_1')

    notification_request_2 = NotificationRequest( name = 'notification_2',
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent',
        event_subtype = 'subtype_2')

    #--------------------------------------------------------------------------------------
    # Create users and make user_ids
    #--------------------------------------------------------------------------------------
    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@gmail.com'

    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'

    user_id_1, _ = self.rrc.create(user_1)
    user_id_2, _ = self.rrc.create(user_2)

    #--------------------------------------------------------------------------------------
    # Create a notification
    #--------------------------------------------------------------------------------------
    notification_id_1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_1)
    notification_id_2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)

    #--------------------------------------------------------------------------------------
    # Check the user_info and reverse_user_info got reloaded
    #--------------------------------------------------------------------------------------
    # Check in UNS ------------>

    # read back the registered notification request objects
    notification_request_correct = self.rrc.read(notification_id_1)
    notification_request_2 = self.rrc.read(notification_id_2)

    # check user_info dictionary
    self.assertEquals(proc1.user_info[user_id_1]['user_contact'].email, 'user_1@gmail.com' )
    self.assertEquals(proc1.user_info[user_id_1]['notifications'], [notification_request_correct])

    self.assertEquals(proc1.user_info[user_id_2]['user_contact'].email, 'user_2@gmail.com' )
    self.assertEquals(proc1.user_info[user_id_2]['notifications'], [notification_request_2])

    # Reverse lookup tables (event attribute -> interested user ids) must
    # map each attribute of the two notifications to the right user.
    self.assertEquals(proc1.reverse_user_info['event_origin']['instrument_1'], [user_id_1])
    self.assertEquals(proc1.reverse_user_info['event_origin']['instrument_2'], [user_id_2])

    self.assertEquals(proc1.reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id_1])
    self.assertEquals(proc1.reverse_user_info['event_type']['DetectionEvent'], [user_id_2])

    self.assertEquals(proc1.reverse_user_info['event_subtype']['subtype_1'], [user_id_1])
    self.assertEquals(proc1.reverse_user_info['event_subtype']['subtype_2'], [user_id_2])

    self.assertEquals(proc1.reverse_user_info['event_origin_type']['type_1'], [user_id_1])
    self.assertEquals(proc1.reverse_user_info['event_origin_type']['type_2'], [user_id_2])

    log.debug("The event processor received the notification topics after a create_notification() for two users")
    log.debug("Verified that the event processor correctly updated its user info dictionaries")

    #--------------------------------------------------------------------------------------
    # Create another notification for the first user
    #--------------------------------------------------------------------------------------
    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_1)

    proc1 = self.container.proc_manager.procs_by_name['user_notification']

    # Poll callback: returns the reloaded dictionaries once user_1's
    # notifications cover both origins and both event types, else None so
    # self.poll() retries.
    def found_user_info_dicts(proc1,*args, **kwargs):
        reloaded_user_info = proc1.user_info
        reloaded_reverse_user_info = proc1.reverse_user_info

        notifications = proc1.user_info[user_id_1]['notifications']
        origins = []
        event_types = []

        log.debug("Within the poll, got notifications here :%s", notifications)

        if notifications:
            for notific in notifications:
                self.assertTrue(notific._id != '')
                origins.append(notific.origin)
                event_types.append(notific.event_type)

        if set(origins) == set(['instrument_1', 'instrument_2']) and set(event_types) == set(['ResourceLifecycleEvent', 'DetectionEvent']):
            return reloaded_user_info, reloaded_reverse_user_info
        else:
            return None

    reloaded_user_info, reloaded_reverse_user_info= self.poll(9, found_user_info_dicts, proc1)

    # Check in UNS ------------>
    # user_1 is now subscribed to instrument_2 / DetectionEvent / subtype_2 /
    # type_2 as well, so those reverse entries contain both users.
    self.assertEquals(reloaded_user_info[user_id_1]['user_contact'].email, 'user_1@gmail.com' )

    self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_1'], [user_id_1])
    self.assertEquals(set(reloaded_reverse_user_info['event_origin']['instrument_2']), set([user_id_2, user_id_1]))

    self.assertEquals(reloaded_reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id_1])
    self.assertEquals(set(reloaded_reverse_user_info['event_type']['DetectionEvent']), set([user_id_2, user_id_1]))

    self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_1'], [user_id_1])
    self.assertEquals(set(reloaded_reverse_user_info['event_subtype']['subtype_2']), set([user_id_2, user_id_1]))

    self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_1'], [user_id_1])
    self.assertEquals(set(reloaded_reverse_user_info['event_origin_type']['type_2']), set([user_id_2, user_id_1]))

    log.debug("The event processor received the notification topics after another create_notification() for the first user")
    log.debug("Verified that the event processor correctly updated its user info dictionaries")

    # #--------------------------------------------------------------------------------------
    # # Update notification and check that the user_info and reverse_user_info in UNS got reloaded
    # #--------------------------------------------------------------------------------------
    #
    # notification_request_correct.origin = "newly_changed_instrument"
    #
    # self.unsc.update_notification(notification=notification_request_correct, user_id=user_id_1)
    #
    # # Check for UNS ------->
    #
    # # user_info
    # notification_request_correct = self.rrc.read(notification_id_1)
    #
    # # check that the updated notification is in the user info dictionary
    # self.assertTrue(notification_request_correct in proc1.user_info[user_id_1]['notifications'] )
    #
    # # check that the notifications in the user info dictionary got updated
    # update_worked = False
    # for notification in proc1.user_info[user_id_1]['notifications']:
    #     if notification.origin == "newly_changed_instrument":
    #         update_worked = True
    #         break
    #
    # self.assertTrue(update_worked)
    #
    # # reverse_user_info
    # self.assertTrue(user_id_1 in proc1.reverse_user_info['event_origin']["newly_changed_instrument"])
    #
    # log.debug("Verified that the event processor correctly updated its user info dictionaries after an update_notification()")

    #--------------------------------------------------------------------------------------------------------------------------------------
    # Delete notification and check. Whether the user_info and reverse_user_info in UNS got reloaded is done in test_get_subscriptions()
    #--------------------------------------------------------------------------------------------------------------------------------------
    self.unsc.delete_notification(notification_id_2)

    notific = self.rrc.read(notification_id_2)
    # This checks that the notification has been retired.
    self.assertNotEquals(notific.temporal_bounds.end_datetime, '')

    log.debug("REQ: L4-CI-DM-RQ-56 was satisfied here for UNS")
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_user_info_notification_worker(self):
    """
    Verify that notification worker processes reload their user_info and
    reverse_user_info dictionaries when notifications are created, for
    users with REALTIME, BATCH, and delivery-disabled preferences.
    """
    # Test the user_info and reverse user info dictionary capability of the notification worker

    #--------------------------------------------------------------------------------------
    # Create a user subscribed to REALTIME notifications
    #--------------------------------------------------------------------------------------
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences.delivery_enabled = True

    user = UserInfo()
    user.name = 'new_user'
    user.contact.email = 'new_user@gmail.com'
    user.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})

    #--------------------------------------------------------------------------------------
    # Create a user subscribed to BATCH notifications
    #--------------------------------------------------------------------------------------
    notification_preferences_2 = NotificationPreferences()
    notification_preferences_2.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_2.delivery_enabled = True

    user_batch = UserInfo()
    user_batch.name = 'user_batch'
    user_batch.contact.email = 'user_batch@gmail.com'
    user_batch.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_2})

    #--------------------------------------------------------------------------------------
    # Create a user subscribed to REALTIME notifications but with delivery turned OFF
    #--------------------------------------------------------------------------------------
    notification_preferences_3 = NotificationPreferences()
    notification_preferences_3.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences_3.delivery_enabled = False

    user_disabled = UserInfo()
    user_disabled.name = 'user_disabled'
    user_disabled.contact.email = 'user_disabled@gmail.com'
    user_disabled.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_3})

    # this part of code is in the beginning to allow enough time for users_index creation
    user_id, _ = self.rrc.create(user)
    user_batch_id, _ = self.rrc.create(user_batch)
    user_disabled_id, _ = self.rrc.create(user_disabled)

    # confirm that users_index got created by discovery
    search_string = 'search "name" is "*" from "users_index"'

    results = self.poll(9, self.discovery.parse,search_string)
    self.assertIsNotNone(results, 'Results not found')

    #--------------------------------------------------------------------------------------
    # Create notification workers
    #--------------------------------------------------------------------------------------
    pids = self.unsc.create_worker(number_of_workers=1)
    self.assertIsNotNone(pids, 'No workers were created')

    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent',
        event_subtype = 'subtype_1')

    notification_request_2 = NotificationRequest( name = "notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent',
        event_subtype = 'subtype_2')

    #--------------------------------------------------------------------------------------
    # Create a notification
    #--------------------------------------------------------------------------------------
    notification_id_1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
    notification_id_batch = self.unsc.create_notification(notification=notification_request_correct, user_id=user_batch_id)
    notification_id_disabled = self.unsc.create_notification(notification=notification_request_correct, user_id=user_disabled_id)

    #--------------------------------------------------------------------------------------
    # Check the user_info and reverse_user_info got reloaded
    #--------------------------------------------------------------------------------------
    processes =self.container.proc_manager.procs

    # Poll callback: scan the running processes for a notification worker
    # whose internal queue has accumulated at least `qsize` reload
    # snapshots, pop the latest (user_info, reverse_user_info) pair, and
    # clear the queue for the next round.
    def found_user_info_dicts(processes, qsize,*args, **kwargs):
        for key in processes:
            if key.startswith('notification_worker'):
                proc1 = processes[key]

                queue = proc1.q
                if queue.qsize() >= qsize:
                    log.debug("the name of the process: %s" % key)
                    reloaded_user_info, reloaded_reverse_user_info = queue.get(timeout=10)
                    proc1.q.queue.clear()

                    return reloaded_user_info, reloaded_reverse_user_info

    # Three creates above -> expect at least three reload snapshots.
    reloaded_user_info, reloaded_reverse_user_info= self.poll(9, found_user_info_dicts, processes, 3)

    notification_id_2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)

    self.assertIsNotNone(reloaded_user_info)
    self.assertIsNotNone(reloaded_reverse_user_info)

    # read back the registered notification request objects
    notification_request_correct = self.rrc.read(notification_id_1)

    self.assertEquals(reloaded_user_info[user_id]['notifications'], [notification_request_correct] )
    self.assertEquals(reloaded_user_info[user_id]['notification_preferences'].delivery_mode, notification_preferences.delivery_mode )
    self.assertEquals(reloaded_user_info[user_id]['notification_preferences'].delivery_enabled, notification_preferences.delivery_enabled )

    self.assertEquals(reloaded_user_info[user_batch_id]['notification_preferences'].delivery_mode, notification_preferences_2.delivery_mode )
    self.assertEquals(reloaded_user_info[user_batch_id]['notification_preferences'].delivery_enabled, notification_preferences_2.delivery_enabled )

    self.assertEquals(reloaded_user_info[user_disabled_id]['notification_preferences'].delivery_mode, notification_preferences_3.delivery_mode )
    self.assertEquals(reloaded_user_info[user_disabled_id]['notification_preferences'].delivery_enabled, notification_preferences_3.delivery_enabled )

    self.assertEquals(reloaded_user_info[user_id]['user_contact'].email, 'new_user@gmail.com')

    self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_1'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_1'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_1'], [user_id] )

    log.debug("Verified that the notification worker correctly updated its user info dictionaries after a create_notification()")

    #--------------------------------------------------------------------------------------
    # Create another notification
    #--------------------------------------------------------------------------------------
    # One more create above -> wait for at least one more reload snapshot.
    reloaded_user_info, reloaded_reverse_user_info= self.poll(9, found_user_info_dicts, processes, 1)

    notification_request_2 = self.rrc.read(notification_id_2)

    #--------------------------------------------------------------------------------------------------------------------------
    # Check that the two notifications created for the same user got properly reloaded in the user_info dictionaries of the workers
    #--------------------------------------------------------------------------------------------------------------------------
    notifications = reloaded_user_info[user_id]['notifications']
    origins = []
    event_types = []
    for notific in notifications:
        origins.append(notific.origin)
        event_types.append(notific.event_type)

    shouldbe_origins = []
    shouldbe_event_types = []
    for notific in [notification_request_correct, notification_request_2]:
        shouldbe_origins.append(notific.origin)
        shouldbe_event_types.append(notific.event_type)

    self.assertEquals(set(origins), set(shouldbe_origins))
    self.assertEquals(set(event_types), set(shouldbe_event_types))

    self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_1'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_2'], [user_id] )

    self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_1'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_2'], [user_id] )

    self.assertEquals(reloaded_reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_type']['DetectionEvent'], [user_id] )

    self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_1'], [user_id] )
    self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_2'], [user_id] )

    log.debug("Verified that the notification worker correctly updated its user info dictionaries after another create_notification()")
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_process_batch(self):
    """
    Verify that process_batch() collects stored events and emails a digest,
    sending only to users whose preferences allow batch delivery.

    Expected recipients: user_1 (default preferences) and user_2 (explicit
    BATCH + delivery enabled). user_3 relies on default delivery_enabled,
    user_4 prefers REALTIME and user_5 disabled delivery -- assumption based
    on the final email-count assertion; confirm against UNS semantics.
    """
    test_start_time = get_ion_ts()                      # milliseconds
    test_end_time = str(int(get_ion_ts()) + 10000)      # + 10 seconds
    #--------------------------------------------------------------------------------------
    # Publish events corresponding to the notification requests just made
    # These events will get stored in the event repository allowing UNS to batch process
    # them later for batch notifications
    #--------------------------------------------------------------------------------------
    event_publisher = EventPublisher()
    # this part of code is in the beginning to allow enough time for the events_index creation
    for i in xrange(10):
        event_publisher.publish_event(
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent')
        event_publisher.publish_event(
            origin="instrument_3",
            origin_type="type_3",
            event_type='ResourceLifecycleEvent')
    #----------------------------------------------------------------------------------------
    # Create users and get the user_ids
    #----------------------------------------------------------------------------------------
    # user_1 -- default notification preferences
    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@gmail.com'
    # user_2 --- prefers BATCH notification
    notification_preferences_2 = NotificationPreferences()
    notification_preferences_2.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_2.delivery_enabled = True
    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'
    user_2.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_2})
    # user_3 --- delivery enabled at default
    notification_preferences_3 = NotificationPreferences()
    notification_preferences_3.delivery_mode = NotificationDeliveryModeEnum.BATCH
    user_3 = UserInfo()
    user_3.name = 'user_3'
    user_3.contact.email = 'user_3@gmail.com'
    user_3.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_3})
    # user_4 --- prefers REALTIME notification
    notification_preferences_4 = NotificationPreferences()
    notification_preferences_4.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences_4.delivery_enabled = True
    user_4 = UserInfo()
    user_4.name = 'user_4'
    user_4.contact.email = 'user_4@gmail.com'
    user_4.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_4})
    # user_5 --- delivery disabled
    notification_preferences_5 = NotificationPreferences()
    notification_preferences_5.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_5.delivery_enabled = False
    user_5 = UserInfo()
    user_5.name = 'user_5'
    user_5.contact.email = 'user_5@gmail.com'
    user_5.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_5})
    # this part of code is in the beginning to allow enough time for the users_index creation
    user_id_1, _ = self.rrc.create(user_1)
    user_id_2, _ = self.rrc.create(user_2)
    user_id_3, _ = self.rrc.create(user_3)
    user_id_4, _ = self.rrc.create(user_4)
    user_id_5, _ = self.rrc.create(user_5)
    #--------------------------------------------------------------------------------------
    # Grab the UNS process (it holds the fake smtp client we assert against)
    #--------------------------------------------------------------------------------------
    proc1 = self.container.proc_manager.procs_by_name['user_notification']
    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest(name="notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')
    notification_request_2 = NotificationRequest(name="notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')
    notification_request_3 = NotificationRequest(name="notification_3",
        origin="instrument_3",
        origin_type="type_3",
        event_type='ResourceLifecycleEvent')
    #--------------------------------------------------------------------------------------
    # Create a notification using UNS. This should cause the user_info to be updated
    #--------------------------------------------------------------------------------------
    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_1)
    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_1)
    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)
    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_3)
    self.unsc.create_notification(notification=notification_request_3, user_id=user_id_3)
    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_4)
    self.unsc.create_notification(notification=notification_request_3, user_id=user_id_4)
    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_5)
    self.unsc.create_notification(notification=notification_request_3, user_id=user_id_5)
    #--------------------------------------------------------------------------------------
    # Do a process_batch() in order to start the batch notifications machinery
    #--------------------------------------------------------------------------------------
    self.unsc.process_batch(start_time=test_start_time, end_time=test_end_time)
    #--------------------------------------------------------------------------------------
    # Check that the emails were sent to the users. This is done using the fake smtp client
    #--------------------------------------------------------------------------------------
    self.assertFalse(proc1.smtp_client.sent_mail.empty())
    email_list = []
    while not proc1.smtp_client.sent_mail.empty():
        email_tuple = proc1.smtp_client.sent_mail.get(timeout=10)
        email_list.append(email_tuple)
    self.assertEquals(len(email_list), 2)
    for email_tuple in email_list:
        msg_sender, msg_recipient, msg = email_tuple
        self.assertEquals(msg_sender, CFG.get_safe('server.smtp.sender'))
        self.assertTrue(msg_recipient in ['user_1@gmail.com', 'user_2@gmail.com', 'user_3@gmail.com'])
        # The message body is "key: value" pairs separated by commas and CRLFs;
        # extract the "Time of event" value.
        lines = msg.split("\r\n")
        entries = []
        for line in lines:
            entries.extend(line.split(','))
        event_time = ''
        for entry in entries:
            fields = entry.split(":")
            if fields[0].find("Time of event") > -1:
                event_time = fields[1].strip(" ")
                break
        # BUGFIX: assertIsNotNone('') always passed since event_time starts as '';
        # assert that a "Time of event" field was actually found in the email.
        self.assertNotEquals(event_time, '')
        # # Check that the events sent in the email had times within the user specified range
        # self.assertTrue(event_time >= test_start_time)
        # self.assertTrue(event_time <= test_end_time)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_worker_send_email(self):
    """
    Verify that the notification workers process published events and send
    email through the fake smtp client, honoring user delivery preferences
    (REALTIME enabled -> mail sent; disabled or BATCH -> no mail).
    """
    #-------------------------------------------------------
    # Create users and get the user_ids
    #-------------------------------------------------------
    # user_1 -- REALTIME, delivery enabled
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences.delivery_enabled = True
    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@gmail.com'
    user_1.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})
    # user_2 -- default preferences
    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'
    # user_3 -- REALTIME but delivery disabled
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences.delivery_enabled = False
    user_3 = UserInfo()
    user_3.name = 'user_3'
    user_3.contact.email = 'user_3@gmail.com'
    user_3.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})
    # user_4 -- BATCH, so no realtime mail expected
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences.delivery_enabled = True
    user_4 = UserInfo()
    user_4.name = 'user_4'
    user_4.contact.email = 'user_4@gmail.com'
    user_4.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})
    user_id_1, _ = self.rrc.create(user_1)
    user_id_2, _ = self.rrc.create(user_2)
    user_id_3, _ = self.rrc.create(user_3)
    user_id_4, _ = self.rrc.create(user_4)
    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_1 = NotificationRequest(name="notification_1",
        origin="instrument_1",
        event_type='ResourceLifecycleEvent',
        )
    notification_request_2 = NotificationRequest(name="notification_2",
        origin="instrument_2",
        event_type='DeviceStatusEvent',
        )
    notification_request_3 = NotificationRequest(name="notification_3",
        origin="instrument_3",
        event_type='DeviceCommsEvent',
        )
    #--------------------------------------------------------------------------------------
    # Create notification workers
    #--------------------------------------------------------------------------------------
    '''
    Since notification workers are being created in bootstrap, we dont need to generate any here
    '''
    # pids = self.unsc.create_worker(number_of_workers=1)
    # self.assertEquals(len(pids), 1)
    #--------------------------------------------------------------------------------------
    # Get the list of notification worker processes existing in the container
    # This will enable us to get the fake smtp client objects they are using,
    # which in turn will allow us to check what the notification emails they are sending
    #--------------------------------------------------------------------------------------
    procs = []
    for process_name in self.container.proc_manager.procs.iterkeys():
        # if the process is a notification worker process, add it to the list
        if process_name.find("notification_worker") != -1:
            proc = self.container.proc_manager.procs[process_name]
            log.debug("Got the following notification worker process with name: %s, process: %s" % (process_name, proc))
            procs.append(proc)
    #--------------------------------------------------------------------------------------
    # Create notifications using UNS.
    #--------------------------------------------------------------------------------------
    q = gevent.queue.Queue()
    id1 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_1)
    q.put(id1)
    id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)
    q.put(id2)
    id3 = self.unsc.create_notification(notification=notification_request_3, user_id=user_id_2)
    q.put(id3)
    id4 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_3)
    q.put(id4)
    id5 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_4)
    q.put(id5)
    # Wait till all the notifications have been created....
    for i in xrange(5):
        q.get(timeout=10)
    #--------------------------------------------------------------------------------------
    # Publish events
    #--------------------------------------------------------------------------------------
    event_publisher = EventPublisher()
    event_publisher.publish_event(
        event_type = "ResourceLifecycleEvent",
        origin="instrument_1")
    event_publisher.publish_event(
        event_type = "DeviceStatusEvent",
        origin="instrument_2",
        time_stamps = [get_ion_ts(), str(int(get_ion_ts()) + 60*20*1000)])
    event_publisher.publish_event(
        event_type = "DeviceCommsEvent",
        origin="instrument_3",
        time_stamp = get_ion_ts())
    #--------------------------------------------------------------------------------------
    # Check that the workers processed the events
    #--------------------------------------------------------------------------------------
    worker_that_sent_email = None
    for proc in procs:
        if not proc.smtp_client.sent_mail.empty():
            worker_that_sent_email = proc
            break
    # BUGFIX: previously, if no worker had sent mail this fell through to an
    # AttributeError on None below; fail with an explicit message instead.
    self.assertIsNotNone(worker_that_sent_email, "No notification worker sent any email")
    email_tuples = []
    while not worker_that_sent_email.smtp_client.sent_mail.empty():
        email_tuple = worker_that_sent_email.smtp_client.sent_mail.get(timeout=20)
        email_tuples.append(email_tuple)
        log.debug("size of sent_mail queue: %s" % worker_that_sent_email.smtp_client.sent_mail.qsize())
        log.debug("email tuple::: %s" % str(email_tuple))
    for email_tuple in email_tuples:
        # Parse the email sent and check and make assertions about email body.
        # Make assertions about the sender and recipient.
        msg_sender, msg_recipient, msg = email_tuple
        self.assertEquals(msg_sender, CFG.get_safe('server.smtp.sender'))
        self.assertTrue(msg_recipient in ['user_1@gmail.com', 'user_2@gmail.com'])
        # The below users did not want real time notifications or disabled delivery
        self.assertTrue(msg_recipient not in ['user_3@gmail.com', 'user_4@gmail.com'])
        entries = msg.split(",")
        event_type = ''
        for entry in entries:
            fields = entry.split(":")
            log.debug("fields::: %s" % fields)
            if fields[0].find("type_") > -1:
                event_type = fields[1].strip(" ").strip("'")
                break
        # if fields[0].find("Time stamp") > -1:
        #     event_time = int(fields[1].strip(" "))
        #     break
        if msg_recipient == 'user_1@gmail.com':
            self.assertTrue(event_type in ['ResourceLifecycleEvent', 'DeviceStatusEvent'])
        elif msg_recipient == 'user_2@gmail.com':
            self.assertTrue(event_type in ['DeviceCommsEvent', 'DeviceStatusEvent'])
        else:
            self.fail('Got email sent to msg recipient who did not set a correct notification preference.')
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_create_read_user_notifications(self):
    """
    Verify create_notification()/read_notification(): deduplication of
    identical requests, sharing of one notification resource between users,
    and consistency of the UNS user_info dict, UserInfo objects and
    hasNotification associations.
    """
    #--------------------------------------------------------------------------------------
    # create user with email address in RR
    #--------------------------------------------------------------------------------------
    user = UserInfo()
    user.name = 'user_1'
    user.contact.email = 'user_1@gmail.com'
    user_id, _ = self.rrc.create(user)
    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'
    user_id_2, _ = self.rrc.create(user_2)
    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_1 = NotificationRequest(name="notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')
    notification_request_2 = NotificationRequest(name="notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')
    #--------------------------------------------------------------------------------------
    # Create notifications using UNS. Both notifications belong to user 1.
    #--------------------------------------------------------------------------------------
    notification_id1 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id)
    notification_id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)
    #--------------------------------------------------------------------------------------
    # Check the resource registry
    #--------------------------------------------------------------------------------------
    n1 = self.unsc.read_notification(notification_id1)
    self.assertEquals(n1.event_type, notification_request_1.event_type)
    self.assertEquals(n1.origin, notification_request_1.origin)
    self.assertEquals(n1.origin_type, notification_request_1.origin_type)
    # Check the user notification service process
    proc = self.container.proc_manager.procs_by_name['user_notification']
    self.assertEquals(len(proc.notifications.values()), 2)
    # Check the user info dictionary of the UNS process
    user_info = proc.user_info
    notifications_held = user_info[user_id]['notifications']
    self.assertEquals(len(notifications_held), 2)
    def _compare_notifications(notifications):
        # Each held notification must match one of the two requests exactly.
        log.debug("notification insider here:: %s", notifications)
        for notif in notifications:
            self.assertTrue(notif._id == notification_id1 or notif._id == notification_id2)
            if notif._id == notification_id1:
                self.assertEquals(notif.event_type, notification_request_1.event_type)
                self.assertEquals(notif.origin, notification_request_1.origin)
                self.assertEquals(notif.origin_type, notification_request_1.origin_type)
                self.assertEquals(notif._id, notification_id1)
            else:
                self.assertEquals(notif.event_type, notification_request_2.event_type)
                self.assertEquals(notif.origin, notification_request_2.origin)
                self.assertEquals(notif.origin_type, notification_request_2.origin_type)
                self.assertEquals(notif._id, notification_id2)
    _compare_notifications(notifications_held)
    #--------------------------------------------------------------------------------------
    # Create the same notification request again using UNS. Check that no duplicate notification request is made
    #--------------------------------------------------------------------------------------
    notification_again_id = self.unsc.create_notification(notification=notification_request_1, user_id=user_id)
    notification_again = self.rrc.read(notification_again_id)
    # Check the resource registry to see that the common notification request is being used
    self.assertEquals(notification_again.event_type, notification_request_1.event_type)
    self.assertEquals(notification_again.origin, notification_request_1.origin)
    self.assertEquals(notification_again.origin_type, notification_request_1.origin_type)
    # assert that the old id is unchanged
    self.assertEquals(notification_again_id, notification_id1)
    # Check the user info object
    user = self.rrc.read(user_id)
    notifs_of_user = [item['value'] for item in user.variables if item['name'] == 'notifications'][0]
    # BUGFIX: was assertTrue(len(...), 2) which treated 2 as a message and
    # always passed for a non-empty list; assert the exact count instead.
    self.assertEquals(len(notifs_of_user), 2)
    _compare_notifications(notifs_of_user)
    #--------------------------------------------------------------------------------------
    # now have the other user subscribe to the same notification request
    #--------------------------------------------------------------------------------------
    notification_id_user_2 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_2)
    ##########-------------------------------------------------------------------------------------------------------
    # Now check if subscriptions of user 1 are getting overwritten because user 2 subscribed to the same notification
    ##########-------------------------------------------------------------------------------------------------------
    #--------------------------------------------------------------------------------------
    # Check the resource registry
    #--------------------------------------------------------------------------------------
    n2 = self.unsc.read_notification(notification_id_user_2)
    self.assertEquals(n2.event_type, notification_request_1.event_type)
    self.assertEquals(n2.origin, notification_request_1.origin)
    self.assertEquals(n2.origin_type, notification_request_1.origin_type)
    self.assertEquals(len(proc.notifications.values()), 2)
    #--------------------------------------------------------------------------------------
    # Check the user info dictionary of the UNS process
    #--------------------------------------------------------------------------------------
    user_info = proc.user_info
    # For the first user, his subscriptions should be unchanged
    notifications_held_1 = user_info[user_id]['notifications']
    self.assertEquals(len(notifications_held_1), 2)
    _compare_notifications(notifications_held_1)
    # For the second user, he should have got a new subscription
    notifications_held_2 = user_info[user_id_2]['notifications']
    self.assertEquals(len(notifications_held_2), 1)
    notif = notifications_held_2[0]
    self.assertTrue(notif._id == notification_id1 or notif._id == notification_id2)
    if notif._id == notification_id1:
        self.assertEquals(notif.event_type, notification_request_1.event_type)
        self.assertEquals(notif.origin, notification_request_1.origin)
        self.assertEquals(notif.origin_type, notification_request_1.origin_type)
        self.assertEquals(notif._id, notification_id1)
    #--------------------------------------------------------------------------------------
    # Check the user info objects
    #--------------------------------------------------------------------------------------
    # Check the first user's info object
    user = self.rrc.read(user_id)
    notifs_of_user = [item['value'] for item in user.variables if item['name'] == 'notifications'][0]
    # BUGFIX: same assertTrue-with-message bug as above.
    self.assertEquals(len(notifs_of_user), 2)
    _compare_notifications(notifs_of_user)
    # Check the second user's info object
    user = self.rrc.read(user_id_2)
    notifs_of_user = [item['value'] for item in user.variables if item['name'] == 'notifications'][0]
    # BUGFIX: same assertTrue-with-message bug as above.
    self.assertEquals(len(notifs_of_user), 1)
    notif = notifs_of_user[0]
    self.assertTrue(notif._id == notification_id1 or notif._id == notification_id2)
    if notif._id == notification_id1:
        self.assertEquals(notif.event_type, notification_request_1.event_type)
        self.assertEquals(notif.origin, notification_request_1.origin)
        self.assertEquals(notif.origin_type, notification_request_1.origin_type)
        self.assertEquals(notif._id, notification_id1)
    #--------------------------------------------------------------------------------------
    # Check the associations... check that user 1 is associated with the same two notifications as before
    # and that user 2 is associated with one notification
    #--------------------------------------------------------------------------------------
    not_ids, _ = self.rrc.find_objects(subject=user_id,
        predicate=PRED.hasNotification,
        id_only=True)
    log.debug("not_ids::: %s", not_ids)
    self.assertEquals(set(not_ids), set([notification_id1, notification_id2]))
    not_ids_2, _ = self.rrc.find_objects(subject=user_id_2,
        predicate=PRED.hasNotification,
        id_only=True)
    log.debug("not_ids_2::: %s", not_ids_2)
    self.assertEquals(set(not_ids_2), set([notification_id1]))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_delete_user_notifications(self):
    """
    Verify delete_notification() retires notifications (sets a non-empty
    temporal_bounds.end_datetime) rather than removing them, both in the
    resource registry and in the user's UserInfo variables.
    """
    #--------------------------------------------------------------------------------------
    # create user with email address in RR
    #--------------------------------------------------------------------------------------
    user = UserInfo()
    user.name = 'user_1'
    user.contact.email = 'user_1@gmail.com'
    user_id, _ = self.rrc.create(user)
    #--------------------------------------------------------------------------------------
    # Make notification request objects - Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest(name="notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')
    notification_request_2 = NotificationRequest(name="notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')
    notification_id1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
    notification_id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)
    # Now check the user info object has the notifications
    user = self.rrc.read(user_id)
    user_vars = user.variables          # renamed from `vars` (shadowed builtin)
    for var in user_vars:
        if var['name'] == 'notifications':
            self.assertEquals(len(var['value']), 2)
            for notif in var['value']:
                self.assertTrue(notif.name in ["notification_1", "notification_2"])
                self.assertTrue(notif.origin in ["instrument_1", "instrument_2"])
                self.assertTrue(notif.origin_type in ["type_1", "type_2"])
                # BUGFIX: was assertTrue(notif.event_type, [...]) which used the
                # list as a failure message and always passed; assert membership.
                self.assertIn(notif.event_type, ["ResourceLifecycleEvent", "DetectionEvent"])
    #--------------------------------------------------------------------------------------
    # Delete notification 2
    #--------------------------------------------------------------------------------------
    self.unsc.delete_notification(notification_id2)
    notific_2 = self.rrc.read(notification_id2)
    # This checks that the notifications have been retired.
    self.assertNotEquals(notific_2.temporal_bounds.end_datetime, '')
    # Now check the user info object still lists both, with #2 retired
    user = self.rrc.read(user_id)
    user_vars = user.variables
    for var in user_vars:
        if var['name'] == 'notifications':
            self.assertEquals(len(var['value']), 2)
            for notif in var['value']:
                self.assertTrue(notif.name in ["notification_1", "notification_2"])
                if notif.origin == "instrument_2":
                    self.assertNotEquals(notif.temporal_bounds.end_datetime, '')
                elif notif.origin == "instrument_1":
                    self.assertEquals(notif.temporal_bounds.end_datetime, '')
                else:
                    self.fail("ACHTUNG: A completely different notification is being stored in the user info object")
    #--------------------------------------------------------------------------------------
    # Delete notification 1
    #--------------------------------------------------------------------------------------
    self.unsc.delete_notification(notification_id1)
    notific_1 = self.rrc.read(notification_id1)
    self.assertNotEquals(notific_1.temporal_bounds.end_datetime, '')
    # Now both notifications must be retired in the user info object
    user = self.rrc.read(user_id)
    user_vars = user.variables
    for var in user_vars:
        if var['name'] == 'notifications':
            self.assertEquals(len(var['value']), 2)
            for notif in var['value']:
                self.assertTrue(notif.name in ["notification_1", "notification_2"])
                if notif.origin == "instrument_2":
                    self.assertNotEquals(notif.temporal_bounds.end_datetime, '')
                elif notif.origin == "instrument_1":
                    self.assertNotEquals(notif.temporal_bounds.end_datetime, '')
                else:
                    self.fail("ACHTUNG: A completely different notification is being stored in the user info object")
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_update_user_notification(self):
    """
    Test updating a user notification.

    NOTE(review): the actual update assertions are commented out below, so this
    test currently only exercises create_notification() and read_notification()
    -- presumably update_notification() was disabled pending a fix; confirm.
    """
    #--------------------------------------------------------------------------------------
    # create user with email address in RR
    #--------------------------------------------------------------------------------------
    user = UserInfo()
    user.name = 'user_1'
    user.contact.email = 'user_1@gmail.com'
    user_id, _ = self.rrc.create(user)
    #--------------------------------------------------------------------------------------
    # Make notification request objects
    #--------------------------------------------------------------------------------------
    #--------------------------------------------------------------------------------------
    # Make notification request objects - Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')
    notification_id = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
    # read back the notification and change it (the changed object is never
    # persisted while update_notification() below stays commented out)
    notification = self.unsc.read_notification(notification_id)
    notification.origin_type = 'new_type'
    # self.unsc.update_notification(notification, user_id)
    #
    # # read back the notification and check that it got changed
    # notification = self.unsc.read_notification(notification_id)
    #
    # # Assert that the notification resource in the datastore does not get overwritten
    # self.assertEquals(notification.origin_type, 'type_1')
    # self.assertEquals(notification.event_type, 'ResourceLifecycleEvent')
    # self.assertEquals(notification.origin, 'instrument_1')
    #
    # # Check that the UserInfo object is updated
    #
    # # Check that the user info dictionary is updated
@attr('LOCOINT')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_find_events(self):
    """Exercise UNS find_events(): publish events, then poll until enough show up."""
    platform_publisher = EventPublisher("PlatformEvent")
    reload_publisher = EventPublisher("ReloadUserInfoEvent")
    window_start = get_ion_ts()
    # Seed the event repository with ten events of each kind.
    for _ in xrange(10):
        platform_publisher.publish_event(origin='my_special_find_events_origin', ts_created=get_ion_ts())
        reload_publisher.publish_event(origin='another_origin', ts_created=get_ion_ts())
    window_end = get_ion_ts()
    def poller():
        # Query only the PlatformEvent origin within the published window.
        found = self.unsc.find_events(origin='my_special_find_events_origin',
                                      type='PlatformEvent',
                                      min_datetime=window_start,
                                      max_datetime=window_end)
        log.debug("(UNS) got events: %s", found)
        return len(found) >= 4
    self.assertTrue(self.event_poll(poller, 10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_find_events_extended(self):
    """Exercise UNS find_events_extended() over a time window of published events."""
    platform_publisher = EventPublisher("PlatformEvent")
    reload_publisher = EventPublisher("ReloadUserInfoEvent")
    window_start = get_ion_ts()
    for _ in xrange(10):
        platform_publisher.publish_event(origin='Some_Resource_Agent_ID1', ts_created=get_ion_ts())
        reload_publisher.publish_event(origin='Some_Resource_Agent_ID2', ts_created=get_ion_ts())
    window_end = get_ion_ts()
    # Poll so elastic search has time to index the events (and user_info to reload).
    def poller():
        found = self.unsc.find_events_extended(origin='Some_Resource_Agent_ID1',
                                               min_time=window_start,
                                               max_time=window_end)
        return len(found) >= 4
    self.assertTrue(self.event_poll(poller, 10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_create_several_workers(self):
    """Ask UNS for two notification workers and verify two pids come back."""
    worker_pids = self.unsc.create_worker(number_of_workers=2)
    self.assertEquals(len(worker_pids), 2)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_publish_event_object(self):
    """
    Verify UNS publish_event_object() actually publishes: a subscriber must
    receive both published events with the expected fields.
    """
    #--------------------------------------------------------------------------------
    # Create an event object
    #--------------------------------------------------------------------------------
    event_1 = DeviceEvent(origin="origin_1",
        origin_type='origin_type_1',
        sub_type='sub_type_1')
    # NOTE(review): this aliases event_1 (no copy), so setting ts_created
    # mutates the one object that is published twice -- preserved as-is.
    event_with_ts_created = event_1
    event_with_ts_created.ts_created = get_ion_ts()
    # create async result to wait on in test
    ar = gevent.event.AsyncResult()
    # BUGFIX: the original used an int counter captured by the closure;
    # `count += 1` only rebound a local, so the count never reached 2 and the
    # AsyncResult was never set. A mutable list makes the count persist.
    received = []
    #--------------------------------------------------------------------------------
    # Set up a subscriber to listen for that event
    #--------------------------------------------------------------------------------
    def received_event(result, event, headers):
        log.debug("received the event in the test: %s" % event)
        #--------------------------------------------------------------------------------
        # check that the event was published with the expected fields
        #--------------------------------------------------------------------------------
        self.assertEquals(event.origin, "origin_1")
        self.assertEquals(event.type_, 'DeviceEvent')
        self.assertEquals(event.origin_type, 'origin_type_1')
        self.assertNotEquals(event.ts_created, '')
        self.assertEquals(event.sub_type, 'sub_type_1')
        received.append(event)
        if len(received) == 2:
            result.set(True)
    event_subscriber = EventSubscriber(event_type='DeviceEvent',
        origin="origin_1",
        callback=lambda m, h: received_event(ar, m, h))
    event_subscriber.start()
    self.addCleanup(event_subscriber.stop)
    #--------------------------------------------------------------------------------
    # Use the UNS publish_event
    #--------------------------------------------------------------------------------
    self.unsc.publish_event_object(event=event_1)
    self.unsc.publish_event_object(event=event_with_ts_created)
    # BUGFIX: ar.wait() returns None on timeout without raising, so the test
    # previously passed even if no events arrived; assert the result.
    self.assertTrue(ar.wait(timeout=10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_publish_event(self):
    """
    Test the publish_event() method of UNS: publish a PlatformTelemetryEvent
    through the service and verify a subscriber receives it with the expected
    attributes.
    """
    # Renamed from `type`, which shadowed the builtin.
    event_type = "PlatformTelemetryEvent"
    origin = "origin_1"
    origin_type = 'origin_type_1'
    sub_type = 'sub_type_1'
    event_attrs = {'status': 'OK'}

    # create async result to wait on in test
    ar = gevent.event.AsyncResult()

    #--------------------------------------------------------------------------------
    # Set up a subscriber to listen for that event
    #--------------------------------------------------------------------------------
    def received_event(result, event, headers):
        log.debug("received the event in the test: %s" % event)
        # check that the event was published with the expected attributes
        self.assertEquals(event.origin, "origin_1")
        self.assertEquals(event.type_, 'PlatformTelemetryEvent')
        self.assertEquals(event.origin_type, 'origin_type_1')
        self.assertNotEquals(event.ts_created, '')
        self.assertEquals(event.sub_type, 'sub_type_1')
        result.set(True)

    event_subscriber = EventSubscriber(event_type='PlatformTelemetryEvent',
                                       origin="origin_1",
                                       callback=lambda m, h: received_event(ar, m, h))
    event_subscriber.start()
    self.addCleanup(event_subscriber.stop)

    #--------------------------------------------------------------------------------
    # Use the UNS publish_event
    #--------------------------------------------------------------------------------
    self.unsc.publish_event(
        event_type=event_type,
        origin=origin,
        origin_type=origin_type,
        sub_type=sub_type,
        description="a description",
        event_attrs=event_attrs
    )
    # Fail loudly on timeout instead of passing vacuously.
    self.assertTrue(ar.wait(timeout=10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_batch_notifications(self):
    """
    Test how the UNS listens to timer events and through the call back runs
    the process_batch() with the correct arguments.

    Flow: publish events, create a BATCH-mode user subscribed to them, set up
    a scheduler timer whose origin is the process-batch key, then monkeypatch
    the service's format_and_send_email to capture what would be emailed.
    """
    #--------------------------------------------------------------------------------------------
    # The operator sets up the process_batch_key. The UNS will listen for scheduler created
    # timer events with origin = process_batch_key
    #--------------------------------------------------------------------------------------------
    # generate a uuid so this run's batch key does not collide with other runs
    newkey = 'batch_processing_' + str(uuid.uuid4())
    self.unsc.set_process_batch_key(process_batch_key = newkey)
    #--------------------------------------------------------------------------------
    # Publish the events that the user will later be notified about
    #--------------------------------------------------------------------------------
    event_publisher = EventPublisher()
    # this part of code is in the beginning to allow enough time for the events_index creation
    times_of_events_published = Set()
    # NOTE(review): self.number_event_published and self.event are presumably
    # initialized in setUp (not visible here) -- verify.
    def publish_events():
        # Publish 3 pairs of events (one per origin) sharing a ts_created each.
        for i in xrange(3):
            t = get_ion_ts()
            event_publisher.publish_event( ts_created= t ,
                origin="instrument_1",
                origin_type="type_1",
                event_type='ResourceLifecycleEvent')
            event_publisher.publish_event( ts_created= t ,
                origin="instrument_2",
                origin_type="type_2",
                event_type='ResourceLifecycleEvent')
            times_of_events_published.add(t)
            self.number_event_published += 2
        self.event.set()
        # time.sleep(1)
        log.debug("Published events of origins = instrument_1, instrument_2 with ts_created: %s" % t)
    publish_events()
    self.assertTrue(self.event.wait(10))
    #----------------------------------------------------------------------------------------
    # Create users and get the user_ids
    #----------------------------------------------------------------------------------------
    # user_1: delivery mode BATCH so notifications are aggregated, not sent per-event
    notification_preferences_1 = NotificationPreferences()
    notification_preferences_1.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_1.delivery_enabled = True
    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@gmail.com'
    user_1.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_1})
    # this part of code is in the beginning to allow enough time for the users_index creation
    user_id_1, _ = self.rrc.create(user_1)
    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest(   name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')
    notification_request_2 = NotificationRequest(   name = "notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='ResourceLifecycleEvent')
    #--------------------------------------------------------------------------------------
    # Create a notification using UNS. This should cause the user_info to be updated
    #--------------------------------------------------------------------------------------
    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_1)
    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_1)
    #--------------------------------------------------------------------------------
    # Set up the scheduler to publish daily events that should kick off process_batch()
    #--------------------------------------------------------------------------------
    # Set up a time for the scheduler to trigger timer events
    # Trigger the timer event 15 seconds later from now
    time_now = datetime.utcnow() + timedelta(seconds=15)
    times_of_day =[{'hour': str(time_now.hour),'minute' : str(time_now.minute), 'second':str(time_now.second) }]
    sid = self.ssclient.create_time_of_day_timer(   times_of_day=times_of_day,
        expires=time.time()+25200+60,
        event_origin= newkey,
        event_subtype="")
    log.debug("created the timer id: %s", sid)
    def cleanup_timer(scheduler, schedule_id):
        """
        Do a friendly cancel of the scheduled event.
        If it fails, it's ok.
        """
        try:
            scheduler.cancel_timer(schedule_id)
        except:
            # Deliberate best-effort: the timer may already have fired/expired.
            log.warn("Couldn't cancel")
    self.addCleanup(cleanup_timer, self.ssclient, sid)
    #--------------------------------------------------------------------------------
    # Assert that emails were sent
    #--------------------------------------------------------------------------------
    # Monkeypatch the running UNS process so no real email is sent; instead the
    # arguments that would have been emailed are captured via AsyncResults.
    proc = self.container.proc_manager.procs_by_name['user_notification']
    ar_1 = gevent.event.AsyncResult()
    ar_2 = gevent.event.AsyncResult()
    def send_email(events_for_message, user_id, *args, **kwargs):
        log.warning("(in asyncresult) events_for_message: %s" % events_for_message)
        ar_1.set(events_for_message)
        ar_2.set(user_id)
    proc.format_and_send_email = send_email
    events_for_message = ar_1.get(timeout=20)
    user_id = ar_2.get(timeout=20)
    log.warning("user_id: %s" % user_id)
    origins_of_events = Set()
    times = Set()
    for event in events_for_message:
        origins_of_events.add(event.origin)
        times.add(event.ts_created)
    #--------------------------------------------------------------------------------
    # Make assertions on the events mentioned in the formatted email
    #--------------------------------------------------------------------------------
    self.assertEquals(len(events_for_message), self.number_event_published)
    self.assertEquals(times, times_of_events_published)
    self.assertEquals(origins_of_events, Set(['instrument_1', 'instrument_2']))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_get_user_notifications(self):
    """
    Verify that get_user_notifications() returns the notifications created
    for a user, and that a deleted notification is no longer returned.
    """
    # Create a user (with an email address) in the resource registry.
    user = UserInfo()
    user.name = 'user_1'
    user.contact.email = 'user_1@gmail.com'
    user_id, _ = self.rrc.create(user)

    # Build two notification requests -- remember to give them names.
    notification_request_1 = NotificationRequest(name="notification_1",
                                                 origin="instrument_1",
                                                 origin_type="type_1",
                                                 event_type='ResourceLifecycleEvent')
    notification_request_2 = NotificationRequest(name="notification_2",
                                                 origin="instrument_2",
                                                 origin_type="type_2",
                                                 event_type='DetectionEvent')

    # Register both notifications through UNS.
    notification_id1 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id)
    notification_id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)

    # Fetch the user's notifications and check that both come back.
    notifications = self.unsc.get_user_notifications(user_id)
    self.assertEquals(len(notifications), 2)

    names = [n.name for n in notifications]
    origins = [n.origin for n in notifications]
    origin_types = [n.origin_type for n in notifications]
    event_types = [n.event_type for n in notifications]

    self.assertEquals(Set(names), Set(['notification_1', 'notification_2']))
    self.assertEquals(Set(origins), Set(['instrument_1', 'instrument_2']))
    self.assertEquals(Set(origin_types), Set(['type_1', 'type_2']))
    self.assertEquals(Set(event_types), Set(['ResourceLifecycleEvent', 'DetectionEvent']))

    # Now delete one notification and verify it is no longer picked up
    # by get_user_notifications().
    self.unsc.delete_notification(notification_id=notification_id2)
    notifications = self.unsc.get_user_notifications(user_id)
    self.assertEquals(len(notifications), 1)

    remaining = notifications[0]
    self.assertEquals(remaining.name, 'notification_1')
    self.assertEquals(remaining.origin, 'instrument_1')
    self.assertEquals(remaining.origin_type, 'type_1')
    self.assertEquals(remaining.event_type, 'ResourceLifecycleEvent')
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_get_recent_events(self):
    """
    Test that get_recent_events(resource_id, limit) returns the events whose
    origin is the specified resource, honoring the optional limit.
    """
    # Publish some events for the event repository, 10 per origin.
    event_publisher_1 = EventPublisher("PlatformEvent")
    event_publisher_2 = EventPublisher("PlatformEvent")

    def publish_events():
        # (Removed an unused loop counter `x` from the original.)
        for i in xrange(10):
            t = get_ion_ts()
            event_publisher_1.publish_event(origin='my_unique_test_recent_events_origin', ts_created=t)
            event_publisher_2.publish_event(origin='Another_recent_events_origin', ts_created=t)
        self.event.set()

    publish_events()
    self.assertTrue(self.event.wait(10))

    #--------------------------------------------------------------------------------------
    # Test with specified limit
    #--------------------------------------------------------------------------------------
    # Distinct poller names: the original defined `poller` twice, with the
    # second definition silently shadowing the first.
    def poller_with_limit():
        ret = self.unsc.get_recent_events(resource_id='my_unique_test_recent_events_origin', limit=5)
        return len(ret.value) >= 5

    self.assertTrue(self.event_poll(poller_with_limit, 10))

    #--------------------------------------------------------------------------------------
    # Test without specified limit
    #--------------------------------------------------------------------------------------
    def poller_without_limit():
        ret = self.unsc.get_recent_events(resource_id='Another_recent_events_origin')
        return len(ret.value) >= 10

    self.assertTrue(self.event_poll(poller_without_limit, 10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_get_subscriptions(self):
    """
    Test that get_subscriptions() works correctly: it should return only the
    active notifications whose origin is the given resource when
    include_nonactive=False, and both active and retired ones when
    include_nonactive=True. Notifications on other origins are never returned.
    """
    #--------------------------------------------------------------------------------------
    # Create users
    #--------------------------------------------------------------------------------------
    user_ids = []
    for i in xrange(5):
        user = UserInfo()
        user.name = 'user_%s' % i
        user.contact.email = 'user_%s@gmail.com' % i
        user_id, _ = self.rrc.create(user)
        user_ids.append(user_id)
    #--------------------------------------------------------------------------------------
    # Make a data product (its id is used as the notification origin under test)
    #--------------------------------------------------------------------------------------
    data_product_management = DataProductManagementServiceClient()
    dataset_management = DatasetManagementServiceClient()
    pubsub = PubsubManagementServiceClient()
    pdict_id = dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
    streamdef_id = pubsub.create_stream_definition(name="test_subscriptions", parameter_dictionary_id=pdict_id)
    tdom, sdom = time_series_domain()
    tdom, sdom = tdom.dump(), sdom.dump()
    dp_obj = IonObject(RT.DataProduct,
        name='DP1',
        description='some new dp',
        temporal_domain = tdom,
        spatial_domain = sdom)
    data_product_id = data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=streamdef_id)
    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    # The *_3 requests use a wrong origin and must not show up in the results.
    #--------------------------------------------------------------------------------------
    notification_active_1 = NotificationRequest(   name = "notification_1",
        origin=data_product_id,
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')
    notification_active_2 = NotificationRequest(   name = "notification_2",
        origin=data_product_id,
        origin_type="type_2",
        event_type='ResourceLifecycleEvent')
    notification_active_3 = NotificationRequest(   name = "notification_2",
        origin='wrong_origin',
        origin_type="type_2",
        event_type='ResourceLifecycleEvent')
    notification_past_1 = NotificationRequest(   name = "notification_3_to_be_retired",
        origin=data_product_id,
        origin_type="type_3",
        event_type='DetectionEvent')
    notification_past_2 = NotificationRequest(   name = "notification_4_to_be_retired",
        origin=data_product_id,
        origin_type="type_4",
        event_type='DetectionEvent')
    notification_past_3 = NotificationRequest(   name = "notification_4_to_be_retired",
        origin='wrong_origin_2',
        origin_type="type_4",
        event_type='DetectionEvent')
    #--------------------------------------------------------------------------------------
    # Create notifications using UNS.
    # NOTE(review): duplicate requests across users are presumably deduplicated
    # by UNS into one notification resource id each -- the sets below rely on it.
    #--------------------------------------------------------------------------------------
    active_notification_ids = set()
    past_notification_ids = set()
    for user_id in user_ids:
        notification_id_active_1 =  self.unsc.create_notification(notification=notification_active_1, user_id=user_id)
        notification_id_active_2 =  self.unsc.create_notification(notification=notification_active_2, user_id=user_id)
        notification_id_active_3 =  self.unsc.create_notification(notification=notification_active_3, user_id=user_id)
        # Store the ids for the active notifications in a set
        active_notification_ids.add(notification_id_active_1)
        active_notification_ids.add(notification_id_active_2)
        active_notification_ids.add(notification_id_active_3)
        notification_id_past_1 =  self.unsc.create_notification(notification=notification_past_1, user_id=user_id)
        notification_id_past_2 =  self.unsc.create_notification(notification=notification_past_2, user_id=user_id)
        notification_id_past_3 =  self.unsc.create_notification(notification=notification_past_3, user_id=user_id)
        # Store the ids for the retired-to-be notifications in a set
        past_notification_ids.add(notification_id_past_1)
        past_notification_ids.add(notification_id_past_2)
        past_notification_ids.add(notification_id_past_3)
    log.debug("Number of active notification ids: %s" %  len(active_notification_ids))
    log.debug("Number of past notification ids: %s" %  len(past_notification_ids))
    # Retire the retired-to-be notifications
    for notific_id in past_notification_ids:
        self.unsc.delete_notification(notification_id=notific_id)
    #--------------------------------------------------------------------------------------
    # Use UNS to get the subscriptions (active only)
    #--------------------------------------------------------------------------------------
    res_notifs= self.unsc.get_subscriptions(resource_id=data_product_id, include_nonactive=False)
    log.debug("Result for subscriptions: %s" % res_notifs)
    log.debug("Number of subscriptions returned: %s" % len(res_notifs))
    # Only the 2 active notifications on the data product's origin qualify.
    self.assertEquals(len(res_notifs), 2)
    for notific in res_notifs:
        notific_in_db = self.rrc.read(notific._id)
        self.assertTrue(notific_in_db)
        self.assertEquals(notific.origin, data_product_id)
        # empty end_datetime means the notification is still active
        self.assertEquals(notific.temporal_bounds.end_datetime, '')
        self.assertTrue(notific.origin_type == 'type_1' or notific.origin_type =='type_2')
        self.assertEquals(notific.event_type, 'ResourceLifecycleEvent')
        self.assertEquals(notific_in_db.origin, data_product_id)
        self.assertEquals(notific_in_db.temporal_bounds.end_datetime, '')
        self.assertTrue(notific_in_db.origin_type == 'type_1' or notific_in_db.origin_type =='type_2')
        self.assertEquals(notific_in_db.event_type, 'ResourceLifecycleEvent')
    #--------------------------------------------------------------------------------------
    # Use UNS to get the all subscriptions --- including retired
    #--------------------------------------------------------------------------------------
    res_notifs = self.unsc.get_subscriptions(resource_id=data_product_id, include_nonactive=True)
    for notific in res_notifs:
        log.debug("notif.origin_type:: %s", notific.origin_type)
        notific_in_db = self.rrc.read(notific._id)
        self.assertTrue(notific_in_db)
        self.assertEquals(notific.origin, data_product_id)
        self.assertTrue(notific.origin_type in ['type_1', 'type_2', 'type_3', 'type_4'])
        self.assertTrue(notific.event_type in ['ResourceLifecycleEvent', 'DetectionEvent'])
        self.assertEquals(notific_in_db.origin, data_product_id)
        self.assertTrue(notific_in_db.origin_type in ['type_1', 'type_2', 'type_3', 'type_4'])
        self.assertTrue(notific_in_db.event_type in ['ResourceLifecycleEvent', 'DetectionEvent'])
    # 2 active + 2 retired on the data product's origin.
    self.assertEquals(len(res_notifs), 4)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_get_subscriptions_for_user(self):
    """
    Test that get_subscriptions() works correctly when scoped to a user:
    with include_nonactive=False only each user's active notification on the
    resource is returned; with include_nonactive=True the retired one is
    returned too. Notifications on other origins are never returned.
    """
    #--------------------------------------------------------------------------------------
    # Create 2 users
    #--------------------------------------------------------------------------------------
    user_ids = []
    for i in xrange(2):
        user = UserInfo()
        user.name = 'user_%s' % i
        user.contact.email = 'user_%s@gmail.com' % i
        user_id, _ = self.rrc.create(user)
        user_ids.append(user_id)
    #--------------------------------------------------------------------------------------
    # Make a data product (its id is used as the notification origin under test)
    #--------------------------------------------------------------------------------------
    data_product_management = DataProductManagementServiceClient()
    dataset_management = DatasetManagementServiceClient()
    pubsub = PubsubManagementServiceClient()
    pdict_id = dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
    streamdef_id = pubsub.create_stream_definition(name="test_subscriptions", parameter_dictionary_id=pdict_id)
    tdom, sdom = time_series_domain()
    tdom, sdom = tdom.dump(), sdom.dump()
    dp_obj = IonObject(RT.DataProduct,
        name='DP1',
        description='some new dp',
        temporal_domain = tdom,
        spatial_domain = sdom)
    data_product_id = data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=streamdef_id)
    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    # ACTIVE
    # user 1
    notification_active_1 = NotificationRequest(   name = "notification_1",
        origin=data_product_id,
        origin_type="active_1",
        event_type='ResourceLifecycleEvent')
    # user 2
    notification_active_2 = NotificationRequest(   name = "notification_2",
        origin=data_product_id,
        origin_type="active_2",
        event_type='ResourceLifecycleEvent')
    # wrong origin -- must never appear in results for data_product_id
    notification_active_3 = NotificationRequest(   name = "notification_2",
        origin='wrong_origin',
        origin_type="active_3",
        event_type='ResourceLifecycleEvent')
    # PAST
    # user 1 - past
    notification_past_1 = NotificationRequest(   name = "notification_3_to_be_retired",
        origin=data_product_id,
        origin_type="past_1",
        event_type='DetectionEvent')
    # user 2 - past
    notification_past_2 = NotificationRequest(   name = "notification_4_to_be_retired",
        origin=data_product_id,
        origin_type="past_2",
        event_type='DetectionEvent')
    # wrong origin - past
    notification_past_3 = NotificationRequest(   name = "notification_4_to_be_retired",
        origin='wrong_origin_2',
        origin_type="past_3",
        event_type='DetectionEvent')
    #--------------------------------------------------------------------------------------
    # Create notifications using UNS.
    #--------------------------------------------------------------------------------------
    active_notification_ids = set()
    past_notification_ids = set()
    user_id_1 = user_ids[0]
    user_id_2 = user_ids[1]
    #--------------------------------------------------------------------------------------
    # Create notifications that will stay active for the following users
    #--------------------------------------------------------------------------------------
    # user 1
    notification_id_active_1 =  self.unsc.create_notification(notification=notification_active_1, user_id=user_id_1)
    # and the notification below for the wrong origin
    notification_id_active_31 =  self.unsc.create_notification(notification=notification_active_3, user_id=user_id_1)
    #### Therefore, only one active notification for user_1 has been created so far
    #user 2
    notification_id_active_2 =  self.unsc.create_notification(notification=notification_active_2, user_id=user_id_2)
    # below we create notification for a different resource id
    notification_id_active_32 =  self.unsc.create_notification(notification=notification_active_3, user_id=user_id_2)
    #### Therefore, only one active notification for user_2 created so far
    # Store the ids for the active notifications in a set
    # NOTE(review): the set is expected to hold 3 ids, implying the shared
    # wrong-origin request yields the same notification id for both users
    # (UNS deduplication) -- verify against the service implementation.
    active_notification_ids.add(notification_id_active_1)
    active_notification_ids.add(notification_id_active_2)
    active_notification_ids.add(notification_id_active_31)
    active_notification_ids.add(notification_id_active_32)
    #--------------------------------------------------------------------------------------
    # Create notifications that will be RETIRED for the following users
    #--------------------------------------------------------------------------------------
    # user 1
    notification_id_past_1 =  self.unsc.create_notification(notification=notification_past_1, user_id=user_id_1)
    # the one below for a different resource id
    notification_id_past_31 =  self.unsc.create_notification(notification=notification_past_3, user_id=user_id_1)
    # user 2
    notification_id_past_2 =  self.unsc.create_notification(notification=notification_past_2, user_id=user_id_2)
    # the one below for a different resource id
    notification_id_past_32 =  self.unsc.create_notification(notification=notification_past_3, user_id=user_id_2)
    # Store the ids for the retired-to-be notifications in a set
    past_notification_ids.add(notification_id_past_1)
    past_notification_ids.add(notification_id_past_2)
    past_notification_ids.add(notification_id_past_31)
    past_notification_ids.add(notification_id_past_32)
    log.debug("Number of active notification ids: %s" %  len(active_notification_ids)) # should be 3
    log.debug("Number of past notification ids: %s" %  len(past_notification_ids)) # should be 3
    self.assertEquals(len(active_notification_ids), 3)
    self.assertEquals(len(past_notification_ids), 3)
    # Retire the retired-to-be notifications
    for notific_id in past_notification_ids:
        self.unsc.delete_notification(notification_id=notific_id)
    # now we should be left wih 1 active and 1 past notification FOR THE RELEVANT RESOURCE ID AS ORIGIN for each user
    #--------------------------------------------------------------------------------------
    # Use UNS to get the subscriptions (active only, per user)
    #--------------------------------------------------------------------------------------
    n_for_user_1 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_1, include_nonactive=False)
    n_for_user_2 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_2, include_nonactive=False)
    self.assertEquals(len(n_for_user_1), 1)
    self.assertEquals(len(n_for_user_2), 1)
    for notif in n_for_user_1:
        notific_in_db = self.rrc.read(notif._id)
        self.assertTrue(notific_in_db)
        self.assertEquals(notif.origin, data_product_id)
        # empty end_datetime means the notification is still active
        self.assertEquals(notif.temporal_bounds.end_datetime, '')
        self.assertEquals(notif.origin_type, 'active_1')
        self.assertEquals(notif.event_type, 'ResourceLifecycleEvent')
        self.assertEquals(notific_in_db.origin, data_product_id)
        self.assertEquals(notific_in_db.temporal_bounds.end_datetime, '')
        self.assertEquals(notific_in_db.origin_type, 'active_1')
        self.assertEquals(notific_in_db.event_type, 'ResourceLifecycleEvent')
    for notif in n_for_user_2:
        notific_in_db = self.rrc.read(notif._id)
        self.assertTrue(notific_in_db)
        self.assertEquals(notif.origin, data_product_id)
        self.assertEquals(notif.temporal_bounds.end_datetime, '')
        self.assertEquals(notif.origin_type, 'active_2')
        self.assertEquals(notif.event_type, 'ResourceLifecycleEvent')
        self.assertEquals(notific_in_db.origin, data_product_id)
        self.assertEquals(notific_in_db.temporal_bounds.end_datetime, '')
        self.assertEquals(notific_in_db.origin_type, 'active_2')
        self.assertEquals(notific_in_db.event_type, 'ResourceLifecycleEvent')
    #--------------------------------------------------------------------------------------
    # Use UNS to get the all subscriptions --- including retired
    #--------------------------------------------------------------------------------------
    notifs_for_user_1 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_1, include_nonactive=True)
    notifs_for_user_2 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_2, include_nonactive=True)
    log.debug("number of returned notif object: %s", len(notifs_for_user_1))
    self.assertEquals(len(notifs_for_user_1), 2)
    log.debug("number of returned notif object for user 2: %s", len(notifs_for_user_2))
    self.assertEquals(len(notifs_for_user_2), 2)
    for notif in notifs_for_user_1:
        notific_in_db = self.rrc.read(notif._id)
        self.assertTrue(notific_in_db)
        self.assertEquals(notif.origin, data_product_id)
        self.assertTrue(notif.origin_type == 'active_1' or notif.origin_type == 'past_1')
        self.assertTrue(notif.event_type== 'ResourceLifecycleEvent' or notif.event_type=='DetectionEvent')
        self.assertEquals(notific_in_db.origin, data_product_id)
        self.assertTrue(notific_in_db.origin_type == 'active_1' or notific_in_db.origin_type == 'past_1')
        self.assertTrue(notific_in_db.event_type== 'ResourceLifecycleEvent' or notific_in_db.event_type=='DetectionEvent')
    for notif in notifs_for_user_2:
        self.assertEquals(notif.origin, data_product_id)
        notific_in_db = self.rrc.read(notif._id)
        self.assertTrue(notific_in_db)
        self.assertTrue(notif.origin_type == 'active_2' or notif.origin_type == 'past_2')
        self.assertTrue(notif.event_type== 'ResourceLifecycleEvent' or notif.event_type=='DetectionEvent')
        self.assertTrue(notific_in_db.origin_type == 'active_2' or notific_in_db.origin_type == 'past_2')
        self.assertTrue(notific_in_db.event_type== 'ResourceLifecycleEvent' or notific_in_db.event_type=='DetectionEvent')
Increased the number of polling tries; this may help fix a test that fails sporadically.
'''
@author Bill Bollenbacher
@author Swarbhanu Chatterjee
@author David Stuebe
@file ion/services/dm/presentation/test/user_notification_test.py
@description Unit and Integration test implementations for the user notification service class.
'''
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.util.containers import DotDict, get_ion_ts
from pyon.public import IonObject, RT, OT, PRED, Container
from pyon.core.exception import NotFound, BadRequest
from pyon.core.bootstrap import get_sys_name, CFG
from ion.services.dm.utility.granule_utils import time_series_domain
from interface.services.coi.iidentity_management_service import IdentityManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.services.dm.iuser_notification_service import UserNotificationServiceClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from interface.services.dm.idiscovery_service import DiscoveryServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from ion.services.dm.presentation.user_notification_service import UserNotificationService
from interface.objects import UserInfo, DeliveryConfig, ComputedListValue, ComputedValueAvailability
from interface.objects import DeviceEvent, NotificationPreferences, NotificationDeliveryModeEnum
from pyon.util.context import LocalContextMixin
from interface.services.cei.ischeduler_service import SchedulerServiceProcessClient
from nose.plugins.attrib import attr
import unittest
from pyon.util.log import log
from pyon.event.event import EventPublisher, EventSubscriber
import gevent
from mock import Mock, mocksignature
from interface.objects import NotificationRequest, TemporalBounds
from ion.services.dm.inventory.index_management_service import IndexManagementService
from ion.services.dm.presentation.user_notification_service import EmailEventProcessor
from ion.processes.bootstrap.index_bootstrap import STD_INDEXES
import os, time, uuid
from gevent import event, queue
from gevent.timeout import Timeout
from gevent.event import Event
import elasticpy as ep
from datetime import datetime, timedelta
from sets import Set
# Whether ElasticSearch support is enabled in the container configuration;
# several integration tests below are skipped when it is disabled.
use_es = CFG.get_safe('system.elasticsearch',False)
def now():
    """
    Return the instant the UNS treats as the "current" time (naive UTC).
    """
    current_utc = datetime.utcnow()
    return current_utc
class FakeProcess(LocalContextMixin):
    """
    Minimal stand-in process supplying the context attributes (name, id,
    process_type) that process-scoped service clients expect.
    """
    name = 'scheduler_for_user_notification_test'
    id = 'scheduler_client'
    process_type = 'simple'
@attr('UNIT',group='dm')
class UserNotificationTest(PyonTestCase):
def setUp(self):
    """Wire up a UserNotificationService instance with mocked clients and a fake container."""
    mocked_clients = self._create_service_mock('user_notification')

    self.user_notification = UserNotificationService()
    self.user_notification.clients = mocked_clients

    # Fake container exposing only the pieces the service touches.
    container = DotDict()
    container.node = Mock()
    container['spawn_process'] = Mock()
    container['id'] = 'mock_container_id'
    container['proc_manager'] = DotDict()
    container.proc_manager['terminate_process'] = Mock()
    container.proc_manager['procs'] = {}
    self.user_notification.container = container

    # Convenience handles to the mocks for the individual tests.
    self.mock_cc_spawn = self.user_notification.container.spawn_process
    self.mock_cc_terminate = self.user_notification.container.proc_manager.terminate_process
    self.mock_cc_procs = self.user_notification.container.proc_manager.procs
    self.mock_rr_client = self.user_notification.clients.resource_registry

    self.user_notification.smtp_server = 'smtp_server'
    self.user_notification.smtp_client = 'smtp_client'
    self.user_notification.event_publisher = EventPublisher()
    self.user_notification.event_processor = EmailEventProcessor()
def test_create_notification(self):
# Test creating a notification
user_id = 'user_id_1'
self.mock_rr_client.create = mocksignature(self.mock_rr_client.create)
self.mock_rr_client.create.return_value = ('notification_id_1','rev_1')
self.mock_rr_client.find_resources = mocksignature(self.mock_rr_client.find_resources)
self.mock_rr_client.find_resources.return_value = [],[]
self.mock_rr_client.read = mocksignature(self.mock_rr_client.read)
self.mock_rr_client.read.return_value = 'notification'
self.mock_rr_client.find_associations = mocksignature(self.mock_rr_client.find_associations)
self.mock_rr_client.find_associations.return_value = []
self.mock_rr_client.create_association = mocksignature(self.mock_rr_client.create_association)
self.mock_rr_client.create_association.return_value = None
self.user_notification.notifications = {}
self.user_notification.event_processor.add_notification_for_user = mocksignature(self.user_notification.event_processor.add_notification_for_user)
self.user_notification.event_publisher.publish_event = mocksignature(self.user_notification.event_publisher.publish_event)
self.user_notification._notification_in_notifications = mocksignature(self.user_notification._notification_in_notifications)
self.user_notification._notification_in_notifications.return_value = None
self.mock_rr_client.create_association = mocksignature(self.mock_rr_client.create_association)
#-------------------------------------------------------------------------------------------------------------------
# Create a notification object
#-------------------------------------------------------------------------------------------------------------------
notification_request = NotificationRequest(name='a name',
origin = 'origin_1',
origin_type = 'origin_type_1',
event_type= 'event_type_1',
event_subtype = 'event_subtype_1' )
#-------------------------------------------------------------------------------------------------------------------
# execution
#-------------------------------------------------------------------------------------------------------------------
notification_id = self.user_notification.create_notification(notification_request, user_id)
#-------------------------------------------------------------------------------------------------------------------
# assertions
#-------------------------------------------------------------------------------------------------------------------
self.assertEquals('notification_id_1', notification_id)
self.mock_rr_client.create.assert_called_once_with(notification_request)
self.user_notification.event_processor.add_notification_for_user.assert_called_once_with('notification', user_id)
def test_create_notification_validation(self):
# Test that creating a notification without a providing a user_id results in an error
#------------------------------------------------------------------------------------------------------
# Test with no user provided
#------------------------------------------------------------------------------------------------------
# Create a notification object
notification_request = NotificationRequest(name='Setting_email',
origin = 'origin',
origin_type = 'origin_type',
event_type= 'event_type',
event_subtype = 'event_subtype')
with self.assertRaises(BadRequest) as br:
notification_id = self.user_notification.create_notification(notification=notification_request)
self.assertEquals(
br.exception.message,
'''User id not provided.'''
)
# def test_update_notification(self):
#
# # Test updating a notification
#
# notification = 'notification'
# user_id = 'user_id_1'
#
# self.mock_rr_client.read = mocksignature(self.mock_rr_client.read)
# self.mock_rr_client.read.return_value = notification
#
# self.user_notification.update_user_info_object = mocksignature(self.user_notification.update_user_info_object)
# self.user_notification.update_user_info_object.return_value = 'user'
#
# self.user_notification.notifications = []
#
# self.user_notification._update_notification_in_notifications_dict = mocksignature(self.user_notification._update_notification_in_notifications_dict)
#
# self.user_notification.event_publisher.publish_event = mocksignature(self.user_notification.event_publisher.publish_event)
#
# #-------------------------------------------------------------------------------------------------------------------
# # Create a notification object
# #-------------------------------------------------------------------------------------------------------------------
#
# notification_request = NotificationRequest(name='a name',
# origin = 'origin_1',
# origin_type = 'origin_type_1',
# event_type= 'event_type_1',
# event_subtype = 'event_subtype_1' )
#
# notification_request._id = 'an id'
#
# #-------------------------------------------------------------------------------------------------------------------
# # execution
# #-------------------------------------------------------------------------------------------------------------------
#
# self.user_notification.update_notification(notification_request, user_id)
#
# #-------------------------------------------------------------------------------------------------------------------
# # assertions
# #-------------------------------------------------------------------------------------------------------------------
#
# self.user_notification.update_user_info_object.assert_called_once_with(user_id, notification, notification)
def test_delete_user_notification(self):
# Test deleting a notification
notification_id = 'notification_id_1'
self.user_notification.event_publisher.publish_event = mocksignature(self.user_notification.event_publisher.publish_event)
self.user_notification.user_info = {}
#-------------------------------------------------------------------------------------------------------------------
# Create a notification object
#-------------------------------------------------------------------------------------------------------------------
notification_request = NotificationRequest(name='a name',
origin = 'origin_1',
origin_type = 'origin_type_1',
event_type= 'event_type_1',
event_subtype = 'event_subtype_1',
temporal_bounds = TemporalBounds())
notification_request.temporal_bounds.start_datetime = ''
self.mock_rr_client.read = mocksignature(self.mock_rr_client.read)
self.mock_rr_client.read.return_value = notification_request
self.mock_rr_client.update = mocksignature(self.mock_rr_client.update)
self.mock_rr_client.update.return_value = ''
self.mock_rr_client.find_subjects = mocksignature(self.mock_rr_client.find_subjects)
self.mock_rr_client.find_subjects.return_value = [], ''
#-------------------------------------------------------------------------------------------------------------------
# execution
#-------------------------------------------------------------------------------------------------------------------
self.user_notification.delete_notification(notification_id=notification_id)
#-------------------------------------------------------------------------------------------------------------------
# assertions
#-------------------------------------------------------------------------------------------------------------------
self.mock_rr_client.read.assert_called_once_with(notification_id, '')
notification_request.temporal_bounds.end_datetime = get_ion_ts()
self.mock_rr_client.update.assert_called_once_with(notification_request)
@attr('INT', group='dm')
class UserNotificationIntTest(IonIntegrationTestCase):
    def setUp(self):
        """Start a container with ElasticSearch enabled and build the service clients."""
        super(UserNotificationIntTest, self).setUp()
        config = DotDict()
        # Turn on the ES-backed bootstrap so discovery/index tests can run.
        config.bootstrap.use_es = True
        self._start_container()
        # Ensure the ES indexes created for this run are removed afterwards.
        self.addCleanup(UserNotificationIntTest.es_cleanup)
        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)
        self.unsc = UserNotificationServiceClient()
        self.rrc = ResourceRegistryServiceClient()
        self.imc = IdentityManagementServiceClient()
        self.discovery = DiscoveryServiceClient()
        self.event = Event()
        self.number_event_published = 0
        # Scheduler process client bound to a fake process identity.
        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.ION_NOTIFICATION_EMAIL_ADDRESS = 'data_alerts@oceanobservatories.org'
def event_poll(self, poller, timeout):
success = False
with gevent.timeout.Timeout(timeout):
while not success:
success = poller()
gevent.sleep(0.1) # Let the sockets close by yielding this greenlet
return success
@staticmethod
def es_cleanup():
es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
es_port = CFG.get_safe('server.elasticsearch.port', '9200')
es = ep.ElasticSearch(
host=es_host,
port=es_port,
timeout=10
)
indexes = STD_INDEXES.keys()
indexes.append('%s_resources_index' % get_sys_name().lower())
indexes.append('%s_events_index' % get_sys_name().lower())
for index in indexes:
IndexManagementService._es_call(es.river_couchdb_delete,index)
IndexManagementService._es_call(es.index_delete,index)
def poll(self, tries, callback, *args, **kwargs):
'''
Polling wrapper for queries
Elasticsearch may not index and cache the changes right away so we may need
a couple of tries and a little time to go by before the results show.
'''
for i in xrange(tries):
retval = callback(*args, **kwargs)
if retval:
return retval
time.sleep(0.2)
return None
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_pub_reload_user_info_event(self):
        # Test that the publishing of reload user info event occurs every time a create, update
        # or delete notification occurs.
        #--------------------------------------------------------------------------------------
        # Create subscribers for reload events
        #--------------------------------------------------------------------------------------
        queue = gevent.queue.Queue()

        def reload_event_received(message, headers):
            # Collect every ReloadUserInfoEvent for later inspection.
            queue.put(message)

        reload_event_subscriber = EventSubscriber(origin="UserNotificationService",
            event_type="ReloadUserInfoEvent",
            callback=reload_event_received)
        reload_event_subscriber.start()
        self.addCleanup(reload_event_subscriber.stop)
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------
        notification_request_correct = NotificationRequest( name= 'notification_1',
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent')
        notification_request_2 = NotificationRequest( name='notification_2',
            origin="instrument_2",
            origin_type="type_2",
            event_type='DetectionEvent')
        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------
        user = UserInfo()
        user.name = 'new_user'
        user.contact.email = 'new_user@yahoo.com'
        user_id, _ = self.rrc.create(user)
        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------
        notification_id_1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
        notification_id_2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)
        notifications = set([notification_id_1, notification_id_2])
        #--------------------------------------------------------------------------------------
        # Check the publishing
        #--------------------------------------------------------------------------------------
        # One ReloadUserInfoEvent is expected per create_notification() call.
        received_event_1 = queue.get(timeout=10)
        received_event_2 = queue.get(timeout=10)
        notifications_received = set([received_event_1.notification_id, received_event_2.notification_id])
        self.assertEquals(notifications, notifications_received)
        # #--------------------------------------------------------------------------------------
        # # Update notification
        # #--------------------------------------------------------------------------------------
        # notification_request_correct = self.unsc.read_notification(notification_id_1)
        # notification_request_correct.origin = 'instrument_correct'
        #
        # notification_request_2 = self.unsc.read_notification(notification_id_2)
        # notification_request_2.origin = 'instrument_2_correct'
        #
        # self.unsc.update_notification(notification=notification_request_correct, user_id=user_id)
        # self.unsc.update_notification(notification=notification_request_2, user_id=user_id)
        #
        # #--------------------------------------------------------------------------------------
        # # Check that the correct events were published
        # #--------------------------------------------------------------------------------------
        #
        # received_event_1 = queue.get(timeout=10)
        # received_event_2 = queue.get(timeout=10)
        #
        # notifications_received = set([received_event_1.notification_id, received_event_2.notification_id])
        #
        # self.assertEquals(notifications, notifications_received)
        #--------------------------------------------------------------------------------------
        # Delete notification
        #--------------------------------------------------------------------------------------
        self.unsc.delete_notification(notification_id_1)
        self.unsc.delete_notification(notification_id_2)
        notifications = set([notification_id_1, notification_id_2])
        #--------------------------------------------------------------------------------------
        # Check that the correct events were published
        #--------------------------------------------------------------------------------------
        # Deleting each notification should likewise publish a reload event.
        received_event_1 = queue.get(timeout=10)
        received_event_2 = queue.get(timeout=10)
        notifications_received = set([received_event_1.notification_id, received_event_2.notification_id])
        self.assertEquals(notifications, notifications_received)
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_notification_preferences(self):
        """Verify notification preferences stored on UserInfo end up in the UNS user_info dict."""
        #--------------------------------------------------------------------------------------
        # Make a notification request object
        #--------------------------------------------------------------------------------------
        notification_request = NotificationRequest( name= 'notification_1',
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent')
        #--------------------------------------------------------------------------------------
        # Create user 1
        #--------------------------------------------------------------------------------------
        # user_1 prefers REALTIME delivery.
        notification_preferences_1 = NotificationPreferences()
        notification_preferences_1.delivery_mode = NotificationDeliveryModeEnum.REALTIME
        user_1 = UserInfo()
        user_1.name = 'user_1'
        user_1.contact.email = 'user_1@yahoo.com'
        user_1.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_1})
        user_id_1, _ = self.rrc.create(user_1)
        #--------------------------------------------------------------------------------------
        # user 2
        #--------------------------------------------------------------------------------------
        # user_2 prefers BATCH delivery with delivery disabled.
        notification_preferences_2 = NotificationPreferences()
        notification_preferences_2.delivery_mode = NotificationDeliveryModeEnum.BATCH
        notification_preferences_2.delivery_enabled = False
        user_2 = UserInfo()
        user_2.name = 'user_2'
        user_2.contact.email = 'user_2@yahoo.com'
        user_2.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_2})
        user_id_2, _ = self.rrc.create(user_2)
        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------
        notification_id_1 = self.unsc.create_notification(notification=notification_request, user_id=user_id_1)
        notification_id_2 = self.unsc.create_notification(notification=notification_request, user_id=user_id_2)
        notifications = set([notification_id_1, notification_id_2])
        proc1 = self.container.proc_manager.procs_by_name['user_notification']
        #--------------------------------------------------------------------------------------------------------------------------------------
        # check user_info dictionary to see that the notification preferences are properly loaded to the user info dictionaries
        #--------------------------------------------------------------------------------------------------------------------------------------
        self.assertEquals(proc1.user_info[user_id_1]['notification_preferences'], notification_preferences_1)
        self.assertEquals(proc1.user_info[user_id_2]['notification_preferences'], notification_preferences_2)
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_user_info_UNS(self):
        # Test that the user info dictionary maintained by the notification workers get updated when
        # a notification is created, updated, or deleted by UNS
        proc1 = self.container.proc_manager.procs_by_name['user_notification']
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------
        notification_request_correct = NotificationRequest( name = 'notification_1',
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent',
            event_subtype = 'subtype_1')
        notification_request_2 = NotificationRequest( name = 'notification_2',
            origin="instrument_2",
            origin_type="type_2",
            event_type='DetectionEvent',
            event_subtype = 'subtype_2')
        #--------------------------------------------------------------------------------------
        # Create users and make user_ids
        #--------------------------------------------------------------------------------------
        user_1 = UserInfo()
        user_1.name = 'user_1'
        user_1.contact.email = 'user_1@gmail.com'
        user_2 = UserInfo()
        user_2.name = 'user_2'
        user_2.contact.email = 'user_2@gmail.com'
        user_id_1, _ = self.rrc.create(user_1)
        user_id_2, _ = self.rrc.create(user_2)
        #--------------------------------------------------------------------------------------
        # Create a notification
        #--------------------------------------------------------------------------------------
        notification_id_1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_1)
        notification_id_2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)
        #--------------------------------------------------------------------------------------
        # Check the user_info and reverse_user_info got reloaded
        #--------------------------------------------------------------------------------------
        # Check in UNS ------------>
        # read back the registered notification request objects
        notification_request_correct = self.rrc.read(notification_id_1)
        notification_request_2 = self.rrc.read(notification_id_2)
        # check user_info dictionary
        self.assertEquals(proc1.user_info[user_id_1]['user_contact'].email, 'user_1@gmail.com' )
        self.assertEquals(proc1.user_info[user_id_1]['notifications'], [notification_request_correct])
        self.assertEquals(proc1.user_info[user_id_2]['user_contact'].email, 'user_2@gmail.com' )
        self.assertEquals(proc1.user_info[user_id_2]['notifications'], [notification_request_2])
        # reverse_user_info maps each event attribute value to the interested user ids
        self.assertEquals(proc1.reverse_user_info['event_origin']['instrument_1'], [user_id_1])
        self.assertEquals(proc1.reverse_user_info['event_origin']['instrument_2'], [user_id_2])
        self.assertEquals(proc1.reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id_1])
        self.assertEquals(proc1.reverse_user_info['event_type']['DetectionEvent'], [user_id_2])
        self.assertEquals(proc1.reverse_user_info['event_subtype']['subtype_1'], [user_id_1])
        self.assertEquals(proc1.reverse_user_info['event_subtype']['subtype_2'], [user_id_2])
        self.assertEquals(proc1.reverse_user_info['event_origin_type']['type_1'], [user_id_1])
        self.assertEquals(proc1.reverse_user_info['event_origin_type']['type_2'], [user_id_2])
        log.debug("The event processor received the notification topics after a create_notification() for two users")
        log.debug("Verified that the event processor correctly updated its user info dictionaries")
        #--------------------------------------------------------------------------------------
        # Create another notification for the first user
        #--------------------------------------------------------------------------------------
        self.unsc.create_notification(notification=notification_request_2, user_id=user_id_1)
        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        def found_user_info_dicts(proc1,*args, **kwargs):
            # Poll predicate: returns the reloaded dicts once user_1's
            # notifications cover both origins and both event types.
            reloaded_user_info = proc1.user_info
            reloaded_reverse_user_info = proc1.reverse_user_info
            notifications = proc1.user_info[user_id_1]['notifications']
            origins = []
            event_types = []
            log.debug("Within the poll, got notifications here :%s", notifications)
            if notifications:
                for notific in notifications:
                    self.assertTrue(notific._id != '')
                    origins.append(notific.origin)
                    event_types.append(notific.event_type)
            if set(origins) == set(['instrument_1', 'instrument_2']) and set(event_types) == set(['ResourceLifecycleEvent', 'DetectionEvent']):
                return reloaded_user_info, reloaded_reverse_user_info
            else:
                return None

        reloaded_user_info, reloaded_reverse_user_info= self.poll(9, found_user_info_dicts, proc1)
        # Check in UNS ------------>
        self.assertEquals(reloaded_user_info[user_id_1]['user_contact'].email, 'user_1@gmail.com' )
        self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_1'], [user_id_1])
        self.assertEquals(set(reloaded_reverse_user_info['event_origin']['instrument_2']), set([user_id_2, user_id_1]))
        self.assertEquals(reloaded_reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id_1])
        self.assertEquals(set(reloaded_reverse_user_info['event_type']['DetectionEvent']), set([user_id_2, user_id_1]))
        self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_1'], [user_id_1])
        self.assertEquals(set(reloaded_reverse_user_info['event_subtype']['subtype_2']), set([user_id_2, user_id_1]))
        self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_1'], [user_id_1])
        self.assertEquals(set(reloaded_reverse_user_info['event_origin_type']['type_2']), set([user_id_2, user_id_1]))
        log.debug("The event processor received the notification topics after another create_notification() for the first user")
        log.debug("Verified that the event processor correctly updated its user info dictionaries")
        # #--------------------------------------------------------------------------------------
        # # Update notification and check that the user_info and reverse_user_info in UNS got reloaded
        # #--------------------------------------------------------------------------------------
        #
        # notification_request_correct.origin = "newly_changed_instrument"
        #
        # self.unsc.update_notification(notification=notification_request_correct, user_id=user_id_1)
        #
        # # Check for UNS ------->
        #
        # # user_info
        # notification_request_correct = self.rrc.read(notification_id_1)
        #
        # # check that the updated notification is in the user info dictionary
        # self.assertTrue(notification_request_correct in proc1.user_info[user_id_1]['notifications'] )
        #
        # # check that the notifications in the user info dictionary got updated
        # update_worked = False
        # for notification in proc1.user_info[user_id_1]['notifications']:
        #     if notification.origin == "newly_changed_instrument":
        #         update_worked = True
        #         break
        #
        # self.assertTrue(update_worked)
        #
        # # reverse_user_info
        # self.assertTrue(user_id_1 in proc1.reverse_user_info['event_origin']["newly_changed_instrument"])
        #
        # log.debug("Verified that the event processor correctly updated its user info dictionaries after an update_notification()")
        #--------------------------------------------------------------------------------------------------------------------------------------
        # Delete notification and check. Whether the user_info and reverse_user_info in UNS got reloaded is done in test_get_subscriptions()
        #--------------------------------------------------------------------------------------------------------------------------------------
        self.unsc.delete_notification(notification_id_2)
        notific = self.rrc.read(notification_id_2)
        # This checks that the notification has been retired.
        self.assertNotEquals(notific.temporal_bounds.end_datetime, '')
        log.debug("REQ: L4-CI-DM-RQ-56 was satisfied here for UNS")
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_user_info_notification_worker(self):
        # Test the user_info and reverse user info dictionary capability of the notification worker
        #--------------------------------------------------------------------------------------
        # Create a user subscribed to REALTIME notifications
        #--------------------------------------------------------------------------------------
        notification_preferences = NotificationPreferences()
        notification_preferences.delivery_mode = NotificationDeliveryModeEnum.REALTIME
        notification_preferences.delivery_enabled = True
        user = UserInfo()
        user.name = 'new_user'
        user.contact.email = 'new_user@gmail.com'
        user.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})
        #--------------------------------------------------------------------------------------
        # Create a user subscribed to BATCH notifications
        #--------------------------------------------------------------------------------------
        notification_preferences_2 = NotificationPreferences()
        notification_preferences_2.delivery_mode = NotificationDeliveryModeEnum.BATCH
        notification_preferences_2.delivery_enabled = True
        user_batch = UserInfo()
        user_batch.name = 'user_batch'
        user_batch.contact.email = 'user_batch@gmail.com'
        user_batch.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_2})
        #--------------------------------------------------------------------------------------
        # Create a user subscribed to REALTIME notifications but with delivery turned OFF
        #--------------------------------------------------------------------------------------
        notification_preferences_3 = NotificationPreferences()
        notification_preferences_3.delivery_mode = NotificationDeliveryModeEnum.REALTIME
        notification_preferences_3.delivery_enabled = False
        user_disabled = UserInfo()
        user_disabled.name = 'user_disabled'
        user_disabled.contact.email = 'user_disabled@gmail.com'
        user_disabled.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_3})
        # this part of code is in the beginning to allow enough time for users_index creation
        user_id, _ = self.rrc.create(user)
        user_batch_id, _ = self.rrc.create(user_batch)
        user_disabled_id, _ = self.rrc.create(user_disabled)
        # confirm that users_index got created by discovery
        search_string = 'search "name" is "*" from "users_index"'
        results = self.poll(9, self.discovery.parse,search_string)
        self.assertIsNotNone(results, 'Results not found')
        #--------------------------------------------------------------------------------------
        # Create notification workers
        #--------------------------------------------------------------------------------------
        pids = self.unsc.create_worker(number_of_workers=1)
        self.assertIsNotNone(pids, 'No workers were created')
        #--------------------------------------------------------------------------------------
        # Make notification request objects -- Remember to put names
        #--------------------------------------------------------------------------------------
        notification_request_correct = NotificationRequest( name = "notification_1",
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent',
            event_subtype = 'subtype_1')
        notification_request_2 = NotificationRequest( name = "notification_2",
            origin="instrument_2",
            origin_type="type_2",
            event_type='DetectionEvent',
            event_subtype = 'subtype_2')
        #--------------------------------------------------------------------------------------
        # Create a notification
        #--------------------------------------------------------------------------------------
        notification_id_1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
        notification_id_batch = self.unsc.create_notification(notification=notification_request_correct, user_id=user_batch_id)
        notification_id_disabled = self.unsc.create_notification(notification=notification_request_correct, user_id=user_disabled_id)
        #--------------------------------------------------------------------------------------
        # Check the user_info and reverse_user_info got reloaded
        #--------------------------------------------------------------------------------------
        processes =self.container.proc_manager.procs

        def found_user_info_dicts(processes, qsize,*args, **kwargs):
            # Poll predicate: scan the notification worker processes and pull
            # the reloaded dicts off a worker queue once it holds >= qsize items.
            for key in processes:
                if key.startswith('notification_worker'):
                    proc1 = processes[key]
                    queue = proc1.q
                    if queue.qsize() >= qsize:
                        log.debug("the name of the process: %s" % key)
                        reloaded_user_info, reloaded_reverse_user_info = queue.get(timeout=10)
                        proc1.q.queue.clear()
                        return reloaded_user_info, reloaded_reverse_user_info

        reloaded_user_info, reloaded_reverse_user_info= self.poll(20, found_user_info_dicts, processes, 3)
        notification_id_2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)
        self.assertIsNotNone(reloaded_user_info)
        self.assertIsNotNone(reloaded_reverse_user_info)
        # read back the registered notification request objects
        notification_request_correct = self.rrc.read(notification_id_1)
        self.assertEquals(reloaded_user_info[user_id]['notifications'], [notification_request_correct] )
        # Each user's stored preferences must round-trip into the worker dicts.
        self.assertEquals(reloaded_user_info[user_id]['notification_preferences'].delivery_mode, notification_preferences.delivery_mode )
        self.assertEquals(reloaded_user_info[user_id]['notification_preferences'].delivery_enabled, notification_preferences.delivery_enabled )
        self.assertEquals(reloaded_user_info[user_batch_id]['notification_preferences'].delivery_mode, notification_preferences_2.delivery_mode )
        self.assertEquals(reloaded_user_info[user_batch_id]['notification_preferences'].delivery_enabled, notification_preferences_2.delivery_enabled )
        self.assertEquals(reloaded_user_info[user_disabled_id]['notification_preferences'].delivery_mode, notification_preferences_3.delivery_mode )
        self.assertEquals(reloaded_user_info[user_disabled_id]['notification_preferences'].delivery_enabled, notification_preferences_3.delivery_enabled )
        self.assertEquals(reloaded_user_info[user_id]['user_contact'].email, 'new_user@gmail.com')
        self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_1'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_1'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_1'], [user_id] )
        log.debug("Verified that the notification worker correctly updated its user info dictionaries after a create_notification()")
        #--------------------------------------------------------------------------------------
        # Create another notification
        #--------------------------------------------------------------------------------------
        reloaded_user_info, reloaded_reverse_user_info= self.poll(20, found_user_info_dicts, processes, 1)
        notification_request_2 = self.rrc.read(notification_id_2)
        #--------------------------------------------------------------------------------------------------------------------------
        # Check that the two notifications created for the same user got properly reloaded in the user_info dictionaries of the workers
        #--------------------------------------------------------------------------------------------------------------------------
        notifications = reloaded_user_info[user_id]['notifications']
        origins = []
        event_types = []
        for notific in notifications:
            origins.append(notific.origin)
            event_types.append(notific.event_type)
        shouldbe_origins = []
        shouldbe_event_types = []
        for notific in [notification_request_correct, notification_request_2]:
            shouldbe_origins.append(notific.origin)
            shouldbe_event_types.append(notific.event_type)
        self.assertEquals(set(origins), set(shouldbe_origins))
        self.assertEquals(set(event_types), set(shouldbe_event_types))
        self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_1'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_origin']['instrument_2'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_1'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_subtype']['subtype_2'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_type']['ResourceLifecycleEvent'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_type']['DetectionEvent'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_1'], [user_id] )
        self.assertEquals(reloaded_reverse_user_info['event_origin_type']['type_2'], [user_id] )
        log.debug("Verified that the notification worker correctly updated its user info dictionaries after another create_notification()")
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_process_batch(self):
    """
    Test that process_batch() sends batch email notifications only to the users
    whose notification preferences allow it: BATCH mode with delivery enabled
    (user_2), BATCH mode with delivery_enabled left at its default (user_3),
    and the default preferences (user_1). REALTIME users (user_4) and users
    with delivery disabled (user_5) must not receive batch emails.
    """
    test_start_time = get_ion_ts()  # Note this time is in milliseconds
    test_end_time = str(int(get_ion_ts()) + 10000)  # Adding 10 seconds

    #--------------------------------------------------------------------------------------
    # Publish events corresponding to the notification requests just made
    # These events will get stored in the event repository allowing UNS to batch process
    # them later for batch notifications
    #--------------------------------------------------------------------------------------
    event_publisher = EventPublisher()

    # this part of code is in the beginning to allow enough time for the events_index creation
    for i in xrange(10):
        event_publisher.publish_event(
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent')
        event_publisher.publish_event(
            origin="instrument_3",
            origin_type="type_3",
            event_type='ResourceLifecycleEvent')

    #----------------------------------------------------------------------------------------
    # Create users and get the user_ids
    #----------------------------------------------------------------------------------------

    # user_1 -- default notification preferences
    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@gmail.com'

    # user_2 --- prefers BATCH notification
    notification_preferences_2 = NotificationPreferences()
    notification_preferences_2.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_2.delivery_enabled = True

    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'
    user_2.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_2})

    # user_3 --- delivery enabled at default
    notification_preferences_3 = NotificationPreferences()
    notification_preferences_3.delivery_mode = NotificationDeliveryModeEnum.BATCH

    user_3 = UserInfo()
    user_3.name = 'user_3'
    user_3.contact.email = 'user_3@gmail.com'
    user_3.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_3})

    # user_4 --- prefers REALTIME notification
    notification_preferences_4 = NotificationPreferences()
    notification_preferences_4.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences_4.delivery_enabled = True

    user_4 = UserInfo()
    user_4.name = 'user_4'
    user_4.contact.email = 'user_4@gmail.com'
    user_4.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_4})

    # user_5 --- delivery disabled
    notification_preferences_5 = NotificationPreferences()
    notification_preferences_5.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences_5.delivery_enabled = False

    user_5 = UserInfo()
    user_5.name = 'user_5'
    user_5.contact.email = 'user_5@gmail.com'
    user_5.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_5})

    # this part of code is in the beginning to allow enough time for the users_index creation
    user_id_1, _ = self.rrc.create(user_1)
    user_id_2, _ = self.rrc.create(user_2)
    user_id_3, _ = self.rrc.create(user_3)
    user_id_4, _ = self.rrc.create(user_4)
    user_id_5, _ = self.rrc.create(user_5)

    #--------------------------------------------------------------------------------------
    # Grab the UNS process
    #--------------------------------------------------------------------------------------
    proc1 = self.container.proc_manager.procs_by_name['user_notification']

    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')

    notification_request_2 = NotificationRequest( name = "notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')

    notification_request_3 = NotificationRequest( name = "notification_3",
        origin="instrument_3",
        origin_type="type_3",
        event_type='ResourceLifecycleEvent')

    #--------------------------------------------------------------------------------------
    # Create a notification using UNS. This should cause the user_info to be updated
    #--------------------------------------------------------------------------------------
    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_1)
    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_1)

    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)

    self.unsc.create_notification(notification=notification_request_2, user_id=user_id_3)
    self.unsc.create_notification(notification=notification_request_3, user_id=user_id_3)

    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_4)
    self.unsc.create_notification(notification=notification_request_3, user_id=user_id_4)

    self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_5)
    self.unsc.create_notification(notification=notification_request_3, user_id=user_id_5)

    #--------------------------------------------------------------------------------------
    # Do a process_batch() in order to start the batch notifications machinery
    #--------------------------------------------------------------------------------------
    self.unsc.process_batch(start_time=test_start_time, end_time= test_end_time)

    #--------------------------------------------------------------------------------------
    # Check that the emails were sent to the users. This is done using the fake smtp client
    # Make assertions....
    #--------------------------------------------------------------------------------------
    self.assertFalse(proc1.smtp_client.sent_mail.empty())

    email_list = []

    while not proc1.smtp_client.sent_mail.empty():
        email_tuple = proc1.smtp_client.sent_mail.get(timeout=10)
        email_list.append(email_tuple)

    self.assertEquals(len(email_list), 2)

    for email_tuple in email_list:
        msg_sender, msg_recipient, msg = email_tuple

        self.assertEquals(msg_sender, CFG.get_safe('server.smtp.sender') )
        self.assertTrue(msg_recipient in ['user_1@gmail.com', 'user_2@gmail.com', 'user_3@gmail.com'])

        # The email body is a set of comma separated "key: value" pairs spread
        # over CRLF-terminated lines; flatten it and pull out "Time of event"
        lines = msg.split("\r\n")
        maps = []

        for line in lines:
            maps.extend(line.split(','))

        # BUG FIX: event_time used to be initialised to '' which made the
        # assertIsNotNone() below vacuous -- it could never fail even when the
        # "Time of event" field was missing. Initialise to None instead.
        event_time = None
        for pair in maps:
            fields = pair.split(":")
            if fields[0].find("Time of event") > -1:
                event_time = fields[1].strip(" ")
                break

        self.assertIsNotNone(event_time)

        # # Check that the events sent in the email had times within the user specified range
        # self.assertTrue(event_time >= test_start_time)
        # self.assertTrue(event_time <= test_end_time)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_worker_send_email(self):
    """
    Test that the notification workers process published events and send email
    (via the fake smtp client) only to users with REALTIME delivery enabled
    (user_1) or default preferences (user_2) -- never to a user who disabled
    delivery (user_3) or who prefers BATCH (user_4).
    """
    #-------------------------------------------------------
    # Create users and get the user_ids
    #-------------------------------------------------------

    # user_1 -- REALTIME, delivery enabled
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences.delivery_enabled = True

    user_1 = UserInfo()
    user_1.name = 'user_1'
    user_1.contact.email = 'user_1@gmail.com'
    user_1.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})

    # user_2 -- default preferences
    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'

    # user_3 -- REALTIME but delivery disabled
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.REALTIME
    notification_preferences.delivery_enabled = False

    user_3 = UserInfo()
    user_3.name = 'user_3'
    user_3.contact.email = 'user_3@gmail.com'
    user_3.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})

    # user_4 -- BATCH, so no realtime email expected
    notification_preferences = NotificationPreferences()
    notification_preferences.delivery_mode = NotificationDeliveryModeEnum.BATCH
    notification_preferences.delivery_enabled = True

    user_4 = UserInfo()
    user_4.name = 'user_4'
    user_4.contact.email = 'user_4@gmail.com'
    user_4.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences})

    user_id_1, _ = self.rrc.create(user_1)
    user_id_2, _ = self.rrc.create(user_2)
    user_id_3, _ = self.rrc.create(user_3)
    user_id_4, _ = self.rrc.create(user_4)

    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_1 = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        event_type='ResourceLifecycleEvent',
        )

    notification_request_2 = NotificationRequest( name = "notification_2",
        origin="instrument_2",
        event_type='DeviceStatusEvent',
        )

    notification_request_3 = NotificationRequest( name = "notification_3",
        origin="instrument_3",
        event_type='DeviceCommsEvent',
        )

    #--------------------------------------------------------------------------------------
    # Create notification workers
    #--------------------------------------------------------------------------------------
    # Since notification workers are being created in bootstrap, we dont need to generate any here
    # pids = self.unsc.create_worker(number_of_workers=1)
    # self.assertEquals(len(pids), 1)

    #--------------------------------------------------------------------------------------
    # Get the list of notification worker processes existing in the container
    # This will enable us to get the fake smtp client objects they are using,
    # which in turn will allow us to check what the notification emails they are sending
    #--------------------------------------------------------------------------------------
    procs = []

    for process_name in self.container.proc_manager.procs.iterkeys():
        # if the process is a notification worker process, add its pid to the list of pids
        if process_name.find("notification_worker") != -1:
            proc = self.container.proc_manager.procs[process_name]

            log.debug("Got the following notification worker process with name: %s, process: %s" % (process_name, proc))

            procs.append(proc)

    #--------------------------------------------------------------------------------------
    # Create notifications using UNS.
    #--------------------------------------------------------------------------------------
    q = gevent.queue.Queue()

    id1 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_1)
    q.put(id1)
    id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)
    q.put(id2)
    id3 = self.unsc.create_notification(notification=notification_request_3, user_id=user_id_2)
    q.put(id3)
    id4 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_3)
    q.put(id4)
    id5 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_4)
    q.put(id5)

    # Wait till all the notifications have been created....
    for i in xrange(5):
        q.get(timeout = 10)

    #--------------------------------------------------------------------------------------
    # Publish events
    #--------------------------------------------------------------------------------------
    event_publisher = EventPublisher()

    event_publisher.publish_event(
        event_type = "ResourceLifecycleEvent",
        origin="instrument_1")

    event_publisher.publish_event(
        event_type = "DeviceStatusEvent",
        origin="instrument_2",
        time_stamps = [get_ion_ts(), str(int(get_ion_ts()) + 60*20*1000)])

    event_publisher.publish_event(
        event_type = "DeviceCommsEvent",
        origin="instrument_3",
        time_stamp = get_ion_ts())

    #--------------------------------------------------------------------------------------
    # Check that the workers processed the events
    #--------------------------------------------------------------------------------------
    worker_that_sent_email = None
    for proc in procs:
        if not proc.smtp_client.sent_mail.empty():
            worker_that_sent_email = proc
            break

    # BUG FIX: previously a missing email produced an AttributeError on None
    # below; fail with a clear assertion instead
    self.assertIsNotNone(worker_that_sent_email)

    email_tuples = []

    while not worker_that_sent_email.smtp_client.sent_mail.empty():
        email_tuple = worker_that_sent_email.smtp_client.sent_mail.get(timeout=20)
        email_tuples.append(email_tuple)
        log.debug("size of sent_mail queue: %s" % worker_that_sent_email.smtp_client.sent_mail.qsize())
        log.debug("email tuple::: %s" % str(email_tuple))

    for email_tuple in email_tuples:
        # Parse the email sent and check and make assertions about email body. Make assertions about the sender and recipient
        msg_sender, msg_recipient, msg = email_tuple

        self.assertEquals(msg_sender, CFG.get_safe('server.smtp.sender') )
        self.assertTrue(msg_recipient in ['user_1@gmail.com', 'user_2@gmail.com'])
        # The below users did not want real time notifications or disabled delivery
        self.assertTrue(msg_recipient not in ['user_3@gmail.com', 'user_4@gmail.com'])

        # The email body is comma separated "key: value" pairs; find the event type
        maps = msg.split(",")

        event_type = ''
        for pair in maps:
            fields = pair.split(":")
            log.debug("fields::: %s" % fields)
            if fields[0].find("type_") > -1:
                event_type = fields[1].strip(" ").strip("'")
                break
        # if fields[0].find("Time stamp") > -1:
        #     event_time = int(fields[1].strip(" "))
        #     break

        if msg_recipient == 'user_1@gmail.com':
            self.assertTrue(event_type in ['ResourceLifecycleEvent', 'DeviceStatusEvent'])
        elif msg_recipient == 'user_2@gmail.com':
            self.assertTrue(event_type in ['DeviceCommsEvent', 'DeviceStatusEvent'])
        else:
            self.fail('Got email sent to msg recipient who did not set a correct notification preference.')
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_create_read_user_notifications(self):
    """
    Test the create and read notification methods: notifications land in the
    resource registry, in the UNS process state, and in the user info objects;
    duplicate creates reuse the existing notification; a second user
    subscribing to the same notification does not disturb the first user's
    subscriptions.
    """
    #--------------------------------------------------------------------------------------
    # create user with email address in RR
    #--------------------------------------------------------------------------------------
    user = UserInfo()
    user.name = 'user_1'
    user.contact.email = 'user_1@gmail.com'

    user_id, _ = self.rrc.create(user)

    user_2 = UserInfo()
    user_2.name = 'user_2'
    user_2.contact.email = 'user_2@gmail.com'

    user_id_2, _ = self.rrc.create(user_2)

    #--------------------------------------------------------------------------------------
    # Make notification request objects -- Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_1 = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')

    notification_request_2 = NotificationRequest( name = "notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')

    #--------------------------------------------------------------------------------------
    # Create notifications using UNS.
    #--------------------------------------------------------------------------------------
    # a notification is created for user 1
    notification_id1 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id)
    notification_id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)

    #--------------------------------------------------------------------------------------
    # Check the resource registry
    #--------------------------------------------------------------------------------------
    n1 = self.unsc.read_notification(notification_id1)

    self.assertEquals(n1.event_type, notification_request_1.event_type)
    self.assertEquals(n1.origin, notification_request_1.origin)
    self.assertEquals(n1.origin_type, notification_request_1.origin_type)

    # Check the user notification service process
    proc = self.container.proc_manager.procs_by_name['user_notification']
    self.assertEquals(len(proc.notifications.values()), 2)

    # Check the user info dictionary of the UNS process
    user_info = proc.user_info
    notifications_held = user_info[user_id]['notifications']
    self.assertEquals(len(notifications_held), 2)

    def _compare_notifications(notifications):
        # Each held notification must be one of the two created above,
        # with all fields matching the originating request
        log.debug("notification insider here:: %s", notifications)
        for notif in notifications:
            self.assertTrue(notif._id==notification_id1 or notif._id==notification_id2)
            if notif._id==notification_id1:
                self.assertEquals(notif.event_type, notification_request_1.event_type)
                self.assertEquals(notif.origin, notification_request_1.origin)
                self.assertEquals(notif.origin_type, notification_request_1.origin_type)
                self.assertEquals(notif._id, notification_id1)
            else:
                self.assertEquals(notif.event_type, notification_request_2.event_type)
                self.assertEquals(notif.origin, notification_request_2.origin)
                self.assertEquals(notif.origin_type, notification_request_2.origin_type)
                self.assertEquals(notif._id, notification_id2)

    _compare_notifications(notifications_held)

    #--------------------------------------------------------------------------------------
    # Create the same notification request again using UNS. Check that no duplicate notification request is made
    #--------------------------------------------------------------------------------------
    notification_again_id = self.unsc.create_notification(notification=notification_request_1, user_id=user_id)
    notification_again = self.rrc.read(notification_again_id)

    # Check the resource registry to see that the common notification request is being used
    self.assertEquals(notification_again.event_type, notification_request_1.event_type)
    self.assertEquals(notification_again.origin, notification_request_1.origin)
    self.assertEquals(notification_again.origin_type, notification_request_1.origin_type)

    # assert that the old id is unchanged
    self.assertEquals(notification_again_id, notification_id1)

    # Check the user info object
    user = self.rrc.read(user_id)
    notifs_of_user = [item['value'] for item in user.variables if item['name']=='notifications'][0]
    # BUG FIX: was assertTrue(len(...), 2) which only checked truthiness
    # (the second argument of assertTrue is the failure message)
    self.assertEquals(len(notifs_of_user), 2)
    _compare_notifications(notifs_of_user)

    #--------------------------------------------------------------------------------------
    # now have the other user subscribe to the same notification request
    #--------------------------------------------------------------------------------------
    notification_id_user_2 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_2)

    ##########-------------------------------------------------------------------------------------------------------
    # Now check if subscriptions of user 1 are getting overwritten because user 2 subscribed to the same notification
    ##########-------------------------------------------------------------------------------------------------------

    #--------------------------------------------------------------------------------------
    # Check the resource registry
    #--------------------------------------------------------------------------------------
    n2 = self.unsc.read_notification(notification_id_user_2)

    self.assertEquals(n2.event_type, notification_request_1.event_type)
    self.assertEquals(n2.origin, notification_request_1.origin)
    self.assertEquals(n2.origin_type, notification_request_1.origin_type)

    self.assertEquals(len(proc.notifications.values()), 2)

    #--------------------------------------------------------------------------------------
    # Check the user info dictionary of the UNS process
    #--------------------------------------------------------------------------------------
    user_info = proc.user_info

    # For the first user, his subscriptions should be unchanged
    notifications_held_1 = user_info[user_id]['notifications']
    self.assertEquals(len(notifications_held_1), 2)
    _compare_notifications(notifications_held_1)

    # For the second user, he should have got a new subscription
    notifications_held_2 = user_info[user_id_2]['notifications']
    self.assertEquals(len(notifications_held_2), 1)

    notif = notifications_held_2[0]
    self.assertTrue(notif._id==notification_id1 or notif._id==notification_id2)
    if notif._id==notification_id1:
        self.assertEquals(notif.event_type, notification_request_1.event_type)
        self.assertEquals(notif.origin, notification_request_1.origin)
        self.assertEquals(notif.origin_type, notification_request_1.origin_type)
        self.assertEquals(notif._id, notification_id1)

    #--------------------------------------------------------------------------------------
    # Check the user info objects
    #--------------------------------------------------------------------------------------
    # Check the first user's info object
    user = self.rrc.read(user_id)
    notifs_of_user = [item['value'] for item in user.variables if item['name']=='notifications'][0]
    # BUG FIX: was assertTrue(len(...), 2) -- vacuous; see above
    self.assertEquals(len(notifs_of_user), 2)
    _compare_notifications(notifs_of_user)

    # Check the second user's info object
    user = self.rrc.read(user_id_2)
    notifs_of_user = [item['value'] for item in user.variables if item['name']=='notifications'][0]
    # BUG FIX: was assertTrue(len(...), 1) -- vacuous; see above
    self.assertEquals(len(notifs_of_user), 1)

    notif = notifs_of_user[0]
    self.assertTrue(notif._id==notification_id1 or notif._id==notification_id2)
    if notif._id==notification_id1:
        self.assertEquals(notif.event_type, notification_request_1.event_type)
        self.assertEquals(notif.origin, notification_request_1.origin)
        self.assertEquals(notif.origin_type, notification_request_1.origin_type)
        self.assertEquals(notif._id, notification_id1)

    #--------------------------------------------------------------------------------------
    # Check the associations... check that user 1 is associated with the same two notifications as before
    # and that user 2 is associated with one notification
    #--------------------------------------------------------------------------------------
    not_ids, _ = self.rrc.find_objects(subject=user_id,
        predicate=PRED.hasNotification,
        id_only=True)
    log.debug("not_ids::: %s", not_ids)
    self.assertEquals(set(not_ids), set([notification_id1,notification_id2]))

    not_ids_2, _ = self.rrc.find_objects(subject=user_id_2,
        predicate=PRED.hasNotification,
        id_only=True)
    log.debug("not_ids_2::: %s", not_ids_2)
    self.assertEquals(set(not_ids_2), set([notification_id1]))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_delete_user_notifications(self):
    """
    Test deleting notifications: a deleted notification is retired (its
    temporal_bounds.end_datetime is set) both in the resource registry and in
    the copies held in the user info object, rather than being removed.
    """
    #--------------------------------------------------------------------------------------
    # create user with email address in RR
    #--------------------------------------------------------------------------------------
    user = UserInfo()
    user.name = 'user_1'
    user.contact.email = 'user_1@gmail.com'

    user_id, _ = self.rrc.create(user)

    #--------------------------------------------------------------------------------------
    # Make notification request objects - Remember to put names
    #--------------------------------------------------------------------------------------
    notification_request_correct = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')

    notification_request_2 = NotificationRequest( name = "notification_2",
        origin="instrument_2",
        origin_type="type_2",
        event_type='DetectionEvent')

    notification_id1 = self.unsc.create_notification(notification=notification_request_correct, user_id=user_id)
    notification_id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)

    # Now check the user info object has the notifications
    user = self.rrc.read(user_id)
    user_vars = user.variables  # renamed from 'vars' to avoid shadowing the builtin
    for var in user_vars:
        if var['name'] == 'notifications':
            self.assertEquals(len(var['value']), 2)
            for notif in var['value']:
                self.assertTrue(notif.name in ["notification_1", "notification_2"])
                self.assertTrue(notif.origin in ["instrument_1", "instrument_2"])
                self.assertTrue(notif.origin_type in ["type_1", "type_2"])
                # BUG FIX: was assertTrue(notif.event_type, [...]) which treats
                # the list as the failure message and always passes; do a real
                # membership check instead
                self.assertTrue(notif.event_type in ["ResourceLifecycleEvent", "DetectionEvent"])

    #--------------------------------------------------------------------------------------
    # Delete notification 2
    #--------------------------------------------------------------------------------------
    self.unsc.delete_notification(notification_id2)

    notific_2 = self.rrc.read(notification_id2)
    # This checks that the notifications have been retired.
    self.assertNotEquals(notific_2.temporal_bounds.end_datetime, '')

    # Now check the user info object has the notifications
    user = self.rrc.read(user_id)
    user_vars = user.variables
    for var in user_vars:
        if var['name'] == 'notifications':
            self.assertEquals(len(var['value']), 2)
            for notif in var['value']:
                self.assertTrue(notif.name in ["notification_1", "notification_2"])
                if notif.origin == "instrument_2":
                    # the deleted notification is retired...
                    self.assertNotEquals(notif.temporal_bounds.end_datetime, '')
                elif notif.origin == "instrument_1":
                    # ... while the other is still active
                    self.assertEquals(notif.temporal_bounds.end_datetime, '')
                else:
                    self.fail("ACHTUNG: A completely different notification is being stored in the user info object")

    #--------------------------------------------------------------------------------------
    # Delete notification 1
    #--------------------------------------------------------------------------------------
    self.unsc.delete_notification(notification_id1)

    notific_1 = self.rrc.read(notification_id1)
    self.assertNotEquals(notific_1.temporal_bounds.end_datetime, '')

    # Now check the user info object has the notifications
    user = self.rrc.read(user_id)
    user_vars = user.variables
    for var in user_vars:
        if var['name'] == 'notifications':
            self.assertEquals(len(var['value']), 2)
            for notif in var['value']:
                self.assertTrue(notif.name in ["notification_1", "notification_2"])
                if notif.origin == "instrument_2":
                    self.assertNotEquals(notif.temporal_bounds.end_datetime, '')
                elif notif.origin == "instrument_1":
                    self.assertNotEquals(notif.temporal_bounds.end_datetime, '')
                else:
                    self.fail("ACHTUNG: A completely different notification is being stored in the user info object")
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_update_user_notification(self):
    """
    Test updating a user notification. Only the create/read/mutate steps run;
    the actual update round-trip assertions below are disabled pending the
    update_notification() behavior being finalised.
    """
    # A user with an email address must exist in the resource registry first
    test_user = UserInfo()
    test_user.name = 'user_1'
    test_user.contact.email = 'user_1@gmail.com'
    test_user_id, _ = self.rrc.create(test_user)

    # Build a named notification request and register it through UNS
    notif_request = NotificationRequest( name = "notification_1",
        origin="instrument_1",
        origin_type="type_1",
        event_type='ResourceLifecycleEvent')

    notif_id = self.unsc.create_notification(notification=notif_request, user_id=test_user_id)

    # Read the notification back and mutate a field locally
    notif = self.unsc.read_notification(notif_id)
    notif.origin_type = 'new_type'

    # self.unsc.update_notification(notif, test_user_id)
    #
    # # read back the notification and check that it got changed
    # notif = self.unsc.read_notification(notif_id)
    #
    # # Assert that the notification resource in the datastore does not get overwritten
    # self.assertEquals(notif.origin_type, 'type_1')
    # self.assertEquals(notif.event_type, 'ResourceLifecycleEvent')
    # self.assertEquals(notif.origin, 'instrument_1')
    #
    # # Check that the UserInfo object is updated
    #
    # # Check that the user info dictionary is updated
@attr('LOCOINT')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_find_events(self):
    """
    Test the find events functionality of UNS: publish a batch of events into
    the event repository, then poll find_events() until the expected origin
    and type show up within the recorded time window.
    """
    platform_publisher = EventPublisher("PlatformEvent")
    reload_publisher = EventPublisher("ReloadUserInfoEvent")

    min_datetime = get_ion_ts()
    for _ in xrange(10):
        platform_publisher.publish_event(origin='my_special_find_events_origin', ts_created = get_ion_ts())
        reload_publisher.publish_event(origin='another_origin', ts_created = get_ion_ts())
    max_datetime = get_ion_ts()

    def poller():
        # Keep querying until enough matching events have been indexed
        found = self.unsc.find_events(origin='my_special_find_events_origin', type = 'PlatformEvent', min_datetime= min_datetime, max_datetime=max_datetime)
        log.debug("(UNS) got events: %s", found)
        return len(found) >= 4

    self.assertTrue(self.event_poll(poller, 10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_find_events_extended(self):
    """
    Test find_events_extended(): publish events from two origins, then poll
    until the query for one origin within the recorded time window returns
    enough events.
    """
    agent1_publisher = EventPublisher("PlatformEvent")
    agent2_publisher = EventPublisher("ReloadUserInfoEvent")

    min_time = get_ion_ts()
    for _ in xrange(10):
        agent1_publisher.publish_event(origin='Some_Resource_Agent_ID1', ts_created = get_ion_ts())
        agent2_publisher.publish_event(origin='Some_Resource_Agent_ID2', ts_created = get_ion_ts())
    max_time = get_ion_ts()

    # allow elastic search to populate the indexes. This gives enough time for the reload of user_info
    def poller():
        found = self.unsc.find_events_extended(origin='Some_Resource_Agent_ID1', min_time=min_time, max_time=max_time)
        return len(found) >= 4

    self.assertTrue(self.event_poll(poller, 10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_create_several_workers(self):
    """Spawn two notification workers via UNS and verify two pids come back."""
    worker_pids = self.unsc.create_worker(number_of_workers=2)
    self.assertEquals(len(worker_pids), 2)
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_publish_event_object(self):
    """
    Test the publish_event_object() method of UNS: publish the same event
    object twice and verify, via a subscriber, that both deliveries arrive
    with the expected fields and a non-empty ts_created.
    """
    #--------------------------------------------------------------------------------
    # Create an event object
    #--------------------------------------------------------------------------------
    event_1 = DeviceEvent( origin= "origin_1",
        origin_type='origin_type_1',
        sub_type= 'sub_type_1')
    # NOTE: event_with_ts_created aliases event_1, so both publishes carry the
    # ts_created assigned here
    event_with_ts_created = event_1
    event_with_ts_created.ts_created = get_ion_ts()

    # create async result to wait on in test
    ar = gevent.event.AsyncResult()

    # BUG FIX: the received-event count was an int passed by value into the
    # callback, so '+= 1' only rebound a local and the count never reached 2 --
    # ar was never set and ar.wait() silently timed out. Use a one-element
    # list so the callback mutates shared state.
    event_recvd_count = [0]

    #--------------------------------------------------------------------------------
    # Set up a subscriber to listen for that event
    #--------------------------------------------------------------------------------
    def received_event(result, counter, event, headers):
        log.debug("received the event in the test: %s" % event)

        #--------------------------------------------------------------------------------
        # check that the event was published
        #--------------------------------------------------------------------------------
        self.assertEquals(event.origin, "origin_1")
        self.assertEquals(event.type_, 'DeviceEvent')
        self.assertEquals(event.origin_type, 'origin_type_1')
        self.assertNotEquals(event.ts_created, '')
        self.assertEquals(event.sub_type, 'sub_type_1')

        counter[0] += 1
        if counter[0] == 2:
            result.set(True)

    event_subscriber = EventSubscriber( event_type = 'DeviceEvent',
        origin="origin_1",
        callback=lambda m, h: received_event(ar, event_recvd_count, m, h))
    event_subscriber.start()
    self.addCleanup(event_subscriber.stop)

    #--------------------------------------------------------------------------------
    # Use the UNS publish_event
    #--------------------------------------------------------------------------------
    self.unsc.publish_event_object(event=event_1)
    self.unsc.publish_event_object(event=event_with_ts_created)

    # Fail the test if both events were not received in time
    self.assertTrue(ar.wait(timeout=10))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_publish_event(self):
# Test the publish_event() method of UNS
type = "PlatformTelemetryEvent"
origin= "origin_1"
origin_type='origin_type_1'
sub_type= 'sub_type_1'
event_attrs = {'status': 'OK'}
# create async result to wait on in test
ar = gevent.event.AsyncResult()
#--------------------------------------------------------------------------------
# Set up a subscriber to listen for that event
#--------------------------------------------------------------------------------
def received_event(result, event, headers):
log.debug("received the event in the test: %s" % event)
#--------------------------------------------------------------------------------
# check that the event was published
#--------------------------------------------------------------------------------
self.assertEquals(event.origin, "origin_1")
self.assertEquals(event.type_, 'PlatformTelemetryEvent')
self.assertEquals(event.origin_type, 'origin_type_1')
self.assertNotEquals(event.ts_created, '')
self.assertEquals(event.sub_type, 'sub_type_1')
result.set(True)
event_subscriber = EventSubscriber( event_type = 'PlatformTelemetryEvent',
origin="origin_1",
callback=lambda m, h: received_event(ar, m, h))
event_subscriber.start()
self.addCleanup(event_subscriber.stop)
#--------------------------------------------------------------------------------
# Use the UNS publish_event
#--------------------------------------------------------------------------------
self.unsc.publish_event(
event_type=type,
origin=origin,
origin_type=origin_type,
sub_type=sub_type,
description="a description",
event_attrs = event_attrs
)
ar.wait(timeout=10)
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_batch_notifications(self):
        """
        Verify batch (digest) notification delivery end to end.

        The UNS listens for scheduler-generated timer events whose origin is
        the operator-configured process_batch_key; on receipt it runs
        process_batch(), which gathers the events of interest for each user
        and emails them.  This test publishes events, registers a BATCH-mode
        user with matching notifications, schedules a timer, and
        monkey-patches format_and_send_email to assert on the batched events.
        """
        # Test how the UNS listens to timer events and through the call back runs the process_batch()
        # with the correct arguments.

        #--------------------------------------------------------------------------------------------
        # The operator sets up the process_batch_key. The UNS will listen for scheduler created
        # timer events with origin = process_batch_key
        #--------------------------------------------------------------------------------------------
        # generate a uuid so the batch key is unique per test run
        newkey = 'batch_processing_' + str(uuid.uuid4())
        self.unsc.set_process_batch_key(process_batch_key = newkey)

        #--------------------------------------------------------------------------------
        # Publish the events that the user will later be notified about
        #--------------------------------------------------------------------------------
        event_publisher = EventPublisher()

        # this part of code is in the beginning to allow enough time for the events_index creation
        times_of_events_published = Set()

        def publish_events():
            # Publish 3 pairs of ResourceLifecycleEvents (one per instrument
            # origin), recording each pair's ts_created for later assertions.
            for i in xrange(3):
                t = get_ion_ts()
                event_publisher.publish_event( ts_created= t ,
                    origin="instrument_1",
                    origin_type="type_1",
                    event_type='ResourceLifecycleEvent')
                event_publisher.publish_event( ts_created= t ,
                    origin="instrument_2",
                    origin_type="type_2",
                    event_type='ResourceLifecycleEvent')
                times_of_events_published.add(t)
                self.number_event_published += 2
                self.event.set()
                # time.sleep(1)
                log.debug("Published events of origins = instrument_1, instrument_2 with ts_created: %s" % t)

        publish_events()
        self.assertTrue(self.event.wait(10))

        #----------------------------------------------------------------------------------------
        # Create users and get the user_ids
        #----------------------------------------------------------------------------------------

        # user_1 opts into BATCH delivery so process_batch() will email him
        notification_preferences_1 = NotificationPreferences()
        notification_preferences_1.delivery_mode = NotificationDeliveryModeEnum.BATCH
        notification_preferences_1.delivery_enabled = True

        user_1 = UserInfo()
        user_1.name = 'user_1'
        user_1.contact.email = 'user_1@gmail.com'
        user_1.variables.append({'name' : 'notification_preferences', 'value' : notification_preferences_1})

        # this part of code is in the beginning to allow enough time for the users_index creation
        user_id_1, _ = self.rrc.create(user_1)

        #--------------------------------------------------------------------------------------
        # Make notification request objects -- Remember to put names
        #--------------------------------------------------------------------------------------
        notification_request_correct = NotificationRequest( name = "notification_1",
            origin="instrument_1",
            origin_type="type_1",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest( name = "notification_2",
            origin="instrument_2",
            origin_type="type_2",
            event_type='ResourceLifecycleEvent')

        #--------------------------------------------------------------------------------------
        # Create a notification using UNS. This should cause the user_info to be updated
        #--------------------------------------------------------------------------------------
        self.unsc.create_notification(notification=notification_request_correct, user_id=user_id_1)
        self.unsc.create_notification(notification=notification_request_2, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # Set up the scheduler to publish daily events that should kick off process_batch()
        #--------------------------------------------------------------------------------

        # Set up a time for the scheduler to trigger timer events
        # Trigger the timer event 15 seconds later from now
        time_now = datetime.utcnow() + timedelta(seconds=15)
        times_of_day =[{'hour': str(time_now.hour),'minute' : str(time_now.minute), 'second':str(time_now.second) }]

        # NOTE(review): expires looks like now + 7h1min — presumably just
        # "comfortably after the trigger"; confirm units are seconds.
        sid = self.ssclient.create_time_of_day_timer( times_of_day=times_of_day,
            expires=time.time()+25200+60,
            event_origin= newkey,
            event_subtype="")
        log.debug("created the timer id: %s", sid)

        def cleanup_timer(scheduler, schedule_id):
            """
            Do a friendly cancel of the scheduled event.
            If it fails, it's ok.
            """
            try:
                scheduler.cancel_timer(schedule_id)
            except:
                log.warn("Couldn't cancel")

        self.addCleanup(cleanup_timer, self.ssclient, sid)

        #--------------------------------------------------------------------------------
        # Assert that emails were sent
        #--------------------------------------------------------------------------------
        proc = self.container.proc_manager.procs_by_name['user_notification']

        ar_1 = gevent.event.AsyncResult()
        ar_2 = gevent.event.AsyncResult()

        def send_email(events_for_message, user_id, *args, **kwargs):
            # Stand-in for the real email formatter; captures the arguments
            # process_batch() would have emailed so the test can assert on them.
            log.warning("(in asyncresult) events_for_message: %s" % events_for_message)
            ar_1.set(events_for_message)
            ar_2.set(user_id)

        # Monkey-patch the UNS process so no real email is sent
        proc.format_and_send_email = send_email

        events_for_message = ar_1.get(timeout=20)
        user_id = ar_2.get(timeout=20)
        log.warning("user_id: %s" % user_id)

        origins_of_events = Set()
        times = Set()

        for event in events_for_message:
            origins_of_events.add(event.origin)
            times.add(event.ts_created)

        #--------------------------------------------------------------------------------
        # Make assertions on the events mentioned in the formatted email
        #--------------------------------------------------------------------------------
        self.assertEquals(len(events_for_message), self.number_event_published)
        self.assertEquals(times, times_of_events_published)
        self.assertEquals(origins_of_events, Set(['instrument_1', 'instrument_2']))
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_get_user_notifications(self):
# Test that the get_user_notifications() method returns the notifications for a user
#--------------------------------------------------------------------------------------
# create user with email address in RR
#--------------------------------------------------------------------------------------
user = UserInfo()
user.name = 'user_1'
user.contact.email = 'user_1@gmail.com'
user_id, _ = self.rrc.create(user)
#--------------------------------------------------------------------------------------
# Make notification request objects -- Remember to put names
#--------------------------------------------------------------------------------------
notification_request_1 = NotificationRequest( name = "notification_1",
origin="instrument_1",
origin_type="type_1",
event_type='ResourceLifecycleEvent')
notification_request_2 = NotificationRequest( name = "notification_2",
origin="instrument_2",
origin_type="type_2",
event_type='DetectionEvent')
#--------------------------------------------------------------------------------------
# Create notifications using UNS.
#--------------------------------------------------------------------------------------
notification_id1 = self.unsc.create_notification(notification=notification_request_1, user_id=user_id)
notification_id2 = self.unsc.create_notification(notification=notification_request_2, user_id=user_id)
#--------------------------------------------------------------------------------------
# Get the notifications for the user
#--------------------------------------------------------------------------------------
notifications= self.unsc.get_user_notifications(user_id)
self.assertEquals(len(notifications),2)
names = []
origins = []
origin_types = []
event_types = []
for notification in notifications:
names.append(notification.name)
origins.append(notification.origin)
origin_types.append(notification.origin_type)
event_types.append(notification.event_type)
self.assertEquals(Set(names), Set(['notification_1', 'notification_2']) )
self.assertEquals(Set(origins), Set(['instrument_1', 'instrument_2']) )
self.assertEquals(Set(origin_types), Set(['type_1', 'type_2']) )
self.assertEquals(Set(event_types), Set(['ResourceLifecycleEvent', 'DetectionEvent']) )
#--------------------------------------------------------------------------------------
# Now delete a notification and verify that it wont get picked up by get_user_notifications()
#--------------------------------------------------------------------------------------
self.unsc.delete_notification(notification_id=notification_id2)
# Get the notifications for the user
notifications = self.unsc.get_user_notifications(user_id)
self.assertEquals(len(notifications),1)
notification = notifications[0]
self.assertEquals(notification.name, 'notification_1' )
self.assertEquals(notification.origin, 'instrument_1' )
self.assertEquals(notification.origin_type, 'type_1')
self.assertEquals(notification.event_type, 'ResourceLifecycleEvent' )
@attr('LOCOINT')
@unittest.skipIf(not use_es, 'No ElasticSearch')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_get_recent_events(self):
# Test that the get_recent_events(resource_id, limit) method returns the events whose origin is
# the specified resource.
#--------------------------------------------------------------------------------------
# create user with email address in RR
#--------------------------------------------------------------------------------------
# publish some events for the event repository
event_publisher_1 = EventPublisher("PlatformEvent")
event_publisher_2 = EventPublisher("PlatformEvent")
def publish_events():
x = 0
for i in xrange(10):
t = get_ion_ts()
event_publisher_1.publish_event(origin='my_unique_test_recent_events_origin', ts_created = t)
event_publisher_2.publish_event(origin='Another_recent_events_origin', ts_created = t)
x += 1
self.event.set()
publish_events()
self.assertTrue(self.event.wait(10))
#--------------------------------------------------------------------------------------
# Test with specified limit
#--------------------------------------------------------------------------------------
def poller():
ret = self.unsc.get_recent_events(resource_id='my_unique_test_recent_events_origin', limit = 5)
events = ret.value
return len(events) >= 5
success = self.event_poll(poller, 10)
self.assertTrue(success)
#--------------------------------------------------------------------------------------
# Test without specified limit
#--------------------------------------------------------------------------------------
def poller():
ret = self.unsc.get_recent_events(resource_id='Another_recent_events_origin')
events = ret.value
return len(events) >= 10
success = self.event_poll(poller, 10)
self.assertTrue(success)
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_get_subscriptions(self):
        """
        get_subscriptions(resource_id, include_nonactive) returns the
        notifications whose origin is the given resource: only the active
        ones when include_nonactive is False, retired ones as well when it
        is True.  Notifications on other origins are never returned.
        """
        # Test that the get_subscriptions works correctly

        #--------------------------------------------------------------------------------------
        # Create users
        #--------------------------------------------------------------------------------------
        user_ids = []
        for i in xrange(5):
            user = UserInfo()
            user.name = 'user_%s' % i
            user.contact.email = 'user_%s@gmail.com' % i
            user_id, _ = self.rrc.create(user)
            user_ids.append(user_id)

        #--------------------------------------------------------------------------------------
        # Make a data product (its id serves as the notification origin)
        #--------------------------------------------------------------------------------------
        data_product_management = DataProductManagementServiceClient()
        dataset_management = DatasetManagementServiceClient()
        pubsub = PubsubManagementServiceClient()
        pdict_id = dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        streamdef_id = pubsub.create_stream_definition(name="test_subscriptions", parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()
        tdom, sdom = tdom.dump(), sdom.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id = data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=streamdef_id)

        #--------------------------------------------------------------------------------------
        # Make notification request objects -- Remember to put names
        #--------------------------------------------------------------------------------------
        # Two active notifications on the data product, one on a wrong origin
        notification_active_1 = NotificationRequest( name = "notification_1",
            origin=data_product_id,
            origin_type="type_1",
            event_type='ResourceLifecycleEvent')

        notification_active_2 = NotificationRequest( name = "notification_2",
            origin=data_product_id,
            origin_type="type_2",
            event_type='ResourceLifecycleEvent')

        notification_active_3 = NotificationRequest( name = "notification_2",
            origin='wrong_origin',
            origin_type="type_2",
            event_type='ResourceLifecycleEvent')

        # Two to-be-retired notifications on the data product, one on a wrong origin
        notification_past_1 = NotificationRequest( name = "notification_3_to_be_retired",
            origin=data_product_id,
            origin_type="type_3",
            event_type='DetectionEvent')

        notification_past_2 = NotificationRequest( name = "notification_4_to_be_retired",
            origin=data_product_id,
            origin_type="type_4",
            event_type='DetectionEvent')

        notification_past_3 = NotificationRequest( name = "notification_4_to_be_retired",
            origin='wrong_origin_2',
            origin_type="type_4",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create notifications using UNS.
        #--------------------------------------------------------------------------------------
        # Duplicate requests across users collapse to the same notification
        # resource, so the id sets below stay small.
        active_notification_ids = set()
        past_notification_ids = set()

        for user_id in user_ids:
            notification_id_active_1 = self.unsc.create_notification(notification=notification_active_1, user_id=user_id)
            notification_id_active_2 = self.unsc.create_notification(notification=notification_active_2, user_id=user_id)
            notification_id_active_3 = self.unsc.create_notification(notification=notification_active_3, user_id=user_id)

            # Store the ids for the active notifications in a set
            active_notification_ids.add(notification_id_active_1)
            active_notification_ids.add(notification_id_active_2)
            active_notification_ids.add(notification_id_active_3)

            notification_id_past_1 = self.unsc.create_notification(notification=notification_past_1, user_id=user_id)
            notification_id_past_2 = self.unsc.create_notification(notification=notification_past_2, user_id=user_id)
            notification_id_past_3 = self.unsc.create_notification(notification=notification_past_3, user_id=user_id)

            # Store the ids for the retired-to-be notifications in a set
            past_notification_ids.add(notification_id_past_1)
            past_notification_ids.add(notification_id_past_2)
            past_notification_ids.add(notification_id_past_3)

        log.debug("Number of active notification ids: %s" % len(active_notification_ids))
        log.debug("Number of past notification ids: %s" % len(past_notification_ids))

        # Retire the retired-to-be notifications
        for notific_id in past_notification_ids:
            self.unsc.delete_notification(notification_id=notific_id)

        #--------------------------------------------------------------------------------------
        # Use UNS to get the subscriptions
        #--------------------------------------------------------------------------------------
        # Active only: expect exactly the two active notifications on the
        # data product (the wrong-origin one is filtered out).
        res_notifs= self.unsc.get_subscriptions(resource_id=data_product_id, include_nonactive=False)

        log.debug("Result for subscriptions: %s" % res_notifs)
        log.debug("Number of subscriptions returned: %s" % len(res_notifs))

        self.assertEquals(len(res_notifs), 2)

        for notific in res_notifs:
            notific_in_db = self.rrc.read(notific._id)
            self.assertTrue(notific_in_db)
            # An empty end_datetime marks the notification as still active
            self.assertEquals(notific.origin, data_product_id)
            self.assertEquals(notific.temporal_bounds.end_datetime, '')
            self.assertTrue(notific.origin_type == 'type_1' or notific.origin_type =='type_2')
            self.assertEquals(notific.event_type, 'ResourceLifecycleEvent')

            self.assertEquals(notific_in_db.origin, data_product_id)
            self.assertEquals(notific_in_db.temporal_bounds.end_datetime, '')
            self.assertTrue(notific_in_db.origin_type == 'type_1' or notific_in_db.origin_type =='type_2')
            self.assertEquals(notific_in_db.event_type, 'ResourceLifecycleEvent')

        #--------------------------------------------------------------------------------------
        # Use UNS to get the all subscriptions --- including retired
        #--------------------------------------------------------------------------------------
        res_notifs = self.unsc.get_subscriptions(resource_id=data_product_id, include_nonactive=True)

        for notific in res_notifs:
            log.debug("notif.origin_type:: %s", notific.origin_type)
            notific_in_db = self.rrc.read(notific._id)
            self.assertTrue(notific_in_db)
            self.assertEquals(notific.origin, data_product_id)
            self.assertTrue(notific.origin_type in ['type_1', 'type_2', 'type_3', 'type_4'])
            self.assertTrue(notific.event_type in ['ResourceLifecycleEvent', 'DetectionEvent'])

            self.assertEquals(notific_in_db.origin, data_product_id)
            self.assertTrue(notific_in_db.origin_type in ['type_1', 'type_2', 'type_3', 'type_4'])
            self.assertTrue(notific_in_db.event_type in ['ResourceLifecycleEvent', 'DetectionEvent'])

        # Active (2) plus retired (2) notifications on this origin
        self.assertEquals(len(res_notifs), 4)
    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_get_subscriptions_for_user(self):
        """
        get_subscriptions(resource_id, user_id, include_nonactive) scopes the
        result both to the resource origin and to the requesting user: each
        user sees only his own notifications on that resource, active-only or
        active+retired depending on include_nonactive.
        """
        # Test that the get_subscriptions works correctly

        #--------------------------------------------------------------------------------------
        # Create 2 users
        #--------------------------------------------------------------------------------------
        user_ids = []
        for i in xrange(2):
            user = UserInfo()
            user.name = 'user_%s' % i
            user.contact.email = 'user_%s@gmail.com' % i
            user_id, _ = self.rrc.create(user)
            user_ids.append(user_id)

        #--------------------------------------------------------------------------------------
        # Make a data product (its id serves as the notification origin)
        #--------------------------------------------------------------------------------------
        data_product_management = DataProductManagementServiceClient()
        dataset_management = DatasetManagementServiceClient()
        pubsub = PubsubManagementServiceClient()
        pdict_id = dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        streamdef_id = pubsub.create_stream_definition(name="test_subscriptions", parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()
        tdom, sdom = tdom.dump(), sdom.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id = data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=streamdef_id)

        #--------------------------------------------------------------------------------------
        # Make notification request objects -- Remember to put names
        #--------------------------------------------------------------------------------------
        # origin_type encodes which bucket each request belongs to
        # (active_N / past_N) so the assertions below can tell them apart.

        # ACTIVE
        # user 1
        notification_active_1 = NotificationRequest( name = "notification_1",
            origin=data_product_id,
            origin_type="active_1",
            event_type='ResourceLifecycleEvent')

        # user 2
        notification_active_2 = NotificationRequest( name = "notification_2",
            origin=data_product_id,
            origin_type="active_2",
            event_type='ResourceLifecycleEvent')

        # wrong origin
        notification_active_3 = NotificationRequest( name = "notification_2",
            origin='wrong_origin',
            origin_type="active_3",
            event_type='ResourceLifecycleEvent')

        # PAST
        # user 1 - past
        notification_past_1 = NotificationRequest( name = "notification_3_to_be_retired",
            origin=data_product_id,
            origin_type="past_1",
            event_type='DetectionEvent')

        # user 2 - past
        notification_past_2 = NotificationRequest( name = "notification_4_to_be_retired",
            origin=data_product_id,
            origin_type="past_2",
            event_type='DetectionEvent')

        # wrong origin - past
        notification_past_3 = NotificationRequest( name = "notification_4_to_be_retired",
            origin='wrong_origin_2',
            origin_type="past_3",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create notifications using UNS.
        #--------------------------------------------------------------------------------------
        active_notification_ids = set()
        past_notification_ids = set()

        user_id_1 = user_ids[0]
        user_id_2 = user_ids[1]

        #--------------------------------------------------------------------------------------
        # Create notifications that will stay active for the following users
        #--------------------------------------------------------------------------------------
        # user 1
        notification_id_active_1 = self.unsc.create_notification(notification=notification_active_1, user_id=user_id_1)
        # and the notification below for the wrong origin
        notification_id_active_31 = self.unsc.create_notification(notification=notification_active_3, user_id=user_id_1)
        #### Therefore, only one active notification for user_1 has been created so far

        #user 2
        notification_id_active_2 = self.unsc.create_notification(notification=notification_active_2, user_id=user_id_2)
        # below we create notification for a different resource id
        notification_id_active_32 = self.unsc.create_notification(notification=notification_active_3, user_id=user_id_2)
        #### Therefore, only one active notification for user_2 created so far

        # Store the ids for the active notifications in a set
        # (the shared wrong-origin request yields the same id for both users,
        # so the set ends up with 3 entries, as asserted below)
        active_notification_ids.add(notification_id_active_1)
        active_notification_ids.add(notification_id_active_2)
        active_notification_ids.add(notification_id_active_31)
        active_notification_ids.add(notification_id_active_32)

        #--------------------------------------------------------------------------------------
        # Create notifications that will be RETIRED for the following users
        #--------------------------------------------------------------------------------------
        # user 1
        notification_id_past_1 = self.unsc.create_notification(notification=notification_past_1, user_id=user_id_1)
        # the one below for a different resource id
        notification_id_past_31 = self.unsc.create_notification(notification=notification_past_3, user_id=user_id_1)

        # user 2
        notification_id_past_2 = self.unsc.create_notification(notification=notification_past_2, user_id=user_id_2)
        # the one below for a different resource id
        notification_id_past_32 = self.unsc.create_notification(notification=notification_past_3, user_id=user_id_2)

        # Store the ids for the retired-to-be notifications in a set
        past_notification_ids.add(notification_id_past_1)
        past_notification_ids.add(notification_id_past_2)
        past_notification_ids.add(notification_id_past_31)
        past_notification_ids.add(notification_id_past_32)

        log.debug("Number of active notification ids: %s" % len(active_notification_ids)) # should be 3
        log.debug("Number of past notification ids: %s" % len(past_notification_ids)) # should be 3

        self.assertEquals(len(active_notification_ids), 3)
        self.assertEquals(len(past_notification_ids), 3)

        # Retire the retired-to-be notifications
        for notific_id in past_notification_ids:
            self.unsc.delete_notification(notification_id=notific_id)

        # now we should be left wih 1 active and 1 past notification FOR THE RELEVANT RESOURCE ID AS ORIGIN for each user

        #--------------------------------------------------------------------------------------
        # Use UNS to get the subscriptions
        #--------------------------------------------------------------------------------------
        # Active only, per user: exactly one notification each
        n_for_user_1 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_1, include_nonactive=False)
        n_for_user_2 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_2, include_nonactive=False)

        self.assertEquals(len(n_for_user_1), 1)
        self.assertEquals(len(n_for_user_2), 1)

        for notif in n_for_user_1:
            notific_in_db = self.rrc.read(notif._id)
            self.assertTrue(notific_in_db)
            # An empty end_datetime marks the notification as still active
            self.assertEquals(notif.origin, data_product_id)
            self.assertEquals(notif.temporal_bounds.end_datetime, '')
            self.assertEquals(notif.origin_type, 'active_1')
            self.assertEquals(notif.event_type, 'ResourceLifecycleEvent')

            self.assertEquals(notific_in_db.origin, data_product_id)
            self.assertEquals(notific_in_db.temporal_bounds.end_datetime, '')
            self.assertEquals(notific_in_db.origin_type, 'active_1')
            self.assertEquals(notific_in_db.event_type, 'ResourceLifecycleEvent')

        for notif in n_for_user_2:
            notific_in_db = self.rrc.read(notif._id)
            self.assertTrue(notific_in_db)
            self.assertEquals(notif.origin, data_product_id)
            self.assertEquals(notif.temporal_bounds.end_datetime, '')
            self.assertEquals(notif.origin_type, 'active_2')
            self.assertEquals(notif.event_type, 'ResourceLifecycleEvent')

            self.assertEquals(notific_in_db.origin, data_product_id)
            self.assertEquals(notific_in_db.temporal_bounds.end_datetime, '')
            self.assertEquals(notific_in_db.origin_type, 'active_2')
            self.assertEquals(notific_in_db.event_type, 'ResourceLifecycleEvent')

        #--------------------------------------------------------------------------------------
        # Use UNS to get the all subscriptions --- including retired
        #--------------------------------------------------------------------------------------
        # Active + retired, per user: one of each
        notifs_for_user_1 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_1, include_nonactive=True)
        notifs_for_user_2 = self.unsc.get_subscriptions(resource_id=data_product_id, user_id = user_id_2, include_nonactive=True)

        log.debug("number of returned notif object: %s", len(notifs_for_user_1))
        self.assertEquals(len(notifs_for_user_1), 2)

        log.debug("number of returned notif object for user 2: %s", len(notifs_for_user_2))
        self.assertEquals(len(notifs_for_user_2), 2)

        for notif in notifs_for_user_1:
            notific_in_db = self.rrc.read(notif._id)
            self.assertTrue(notific_in_db)
            self.assertEquals(notif.origin, data_product_id)
            self.assertTrue(notif.origin_type == 'active_1' or notif.origin_type == 'past_1')
            self.assertTrue(notif.event_type== 'ResourceLifecycleEvent' or notif.event_type=='DetectionEvent')

            self.assertEquals(notific_in_db.origin, data_product_id)
            self.assertTrue(notific_in_db.origin_type == 'active_1' or notific_in_db.origin_type == 'past_1')
            self.assertTrue(notific_in_db.event_type== 'ResourceLifecycleEvent' or notific_in_db.event_type=='DetectionEvent')

        for notif in notifs_for_user_2:
            self.assertEquals(notif.origin, data_product_id)
            notific_in_db = self.rrc.read(notif._id)
            self.assertTrue(notific_in_db)
            self.assertTrue(notif.origin_type == 'active_2' or notif.origin_type == 'past_2')
            self.assertTrue(notif.event_type== 'ResourceLifecycleEvent' or notif.event_type=='DetectionEvent')

            self.assertTrue(notific_in_db.origin_type == 'active_2' or notific_in_db.origin_type == 'past_2')
            self.assertTrue(notific_in_db.event_type== 'ResourceLifecycleEvent' or notific_in_db.event_type=='DetectionEvent')
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
from neo4j.exceptions import ConstraintError
except ImportError:
from neo4j.v1.api import CypherError as ConstraintError # Backwards compatability with version <1.2
from norduniclient.testing import Neo4jTestCase
from norduniclient import core
from norduniclient import exceptions
from norduniclient import models
__author__ = 'lundberg'
class CoreTests(Neo4jTestCase):
def setUp(self):
super(CoreTests, self).setUp()
core.create_node(self.neo4jdb, name='Test Node 1', meta_type_label='Logical',
type_label='Test_Node', handle_id='1')
core.create_node(self.neo4jdb, name='Test Node 2', meta_type_label='Logical',
type_label='Test_Node', handle_id='2')
def test_create_and_get_node(self):
core.create_node(self.neo4jdb, name='Test Node 3', meta_type_label='Logical',
type_label='Test_Node', handle_id='3')
node = core.get_node(self.neo4jdb, handle_id='3')
self.assertEqual(node.get('handle_id'), '3')
def test_create_node_existing_node_handle(self):
self.assertRaises(ConstraintError, core.create_node, self.neo4jdb, name='Test Node 1',
meta_type_label='Logical', type_label='Test_Node', handle_id='1')
def test_create_node_bad_meta_type(self):
self.assertRaises(exceptions.MetaLabelNamingError, core.create_node, self.neo4jdb, name='Test Node 1',
meta_type_label='No_Such_Label', type_label='Test_Node', handle_id='1')
def test_get_node_bundle(self):
node_bundle = core.get_node_bundle(self.neo4jdb, handle_id='1')
self.assertIsInstance(node_bundle, dict)
node_data = node_bundle.get('data')
self.assertEqual(node_data.get('handle_id'), '1')
self.assertEqual(node_bundle.get('meta_type'), 'Logical')
self.assertIsInstance(node_bundle.get('labels'), list)
self.assertIn('Test_Node', node_bundle.get('labels'))
def test_failing_get_node_bundle(self):
self.assertRaises(exceptions.NodeNotFound, core.get_node_bundle, self.neo4jdb, handle_id='3')
def test_delete_node(self):
core.delete_node(self.neo4jdb, handle_id='1')
self.assertRaises(exceptions.NodeNotFound, core.get_node, self.neo4jdb, handle_id='1')
def test_create_and_get_relationship(self):
relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests')
self.assertIsInstance(relationship_id, int)
relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id)
self.assertEqual(relationship.id, relationship_id)
def test_failing_get_relationship(self):
self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship, self.neo4jdb, relationship_id=1)
def test_get_relationship_bundle(self):
relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests')
relationship_bundle = core.get_relationship_bundle(self.neo4jdb, relationship_id=relationship_id)
self.assertIsInstance(relationship_bundle, dict)
relationship = relationship_bundle.get('data')
self.assertIsNotNone(relationship)
self.assertEqual(relationship_bundle.get('id'), relationship_id)
self.assertEqual(relationship_bundle.get('start')['handle_id'], '1')
self.assertEqual(relationship_bundle.get('end')['handle_id'], '2')
self.assertEqual(relationship_bundle.get('type'), 'Tests')
def test_failing_get_relationship_bundle(self):
self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship_bundle, self.neo4jdb,
relationship_id=1)
def test_delete_relationship(self):
relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests')
relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id)
self.assertEqual(relationship.id, relationship_id)
core.delete_relationship(self.neo4jdb, relationship_id=relationship_id)
self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship, self.neo4jdb,
relationship_id=relationship_id)
def test_create_location_relationship(self):
    """Location 'Has' Location is a valid relationship."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Location',
                     type_label='Test_Node', handle_id='4')
    relationship_id = core.create_location_relationship(self.neo4jdb, location_handle_id='3', other_handle_id='4',
                                                        rel_type='Has')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_location_relationship(self):
    """Location 'Has' Logical is rejected with NoRelationshipPossible."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_location_relationship, self.neo4jdb,
                      location_handle_id='3', other_handle_id='4', rel_type='Has')
def test_create_logical_relationship(self):
    """Logical Depends_on Physical/Logical and Part_of Physical are valid."""
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='5')
    relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='4',
                                                       rel_type='Depends_on')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='5',
                                                       rel_type='Depends_on')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='4',
                                                       rel_type='Part_of')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_logical_relationship(self):
    """Invalid logical relationships raise NoRelationshipPossible.

    Bug fix: this test previously called ``core.create_location_relationship``
    with ``location_handle_id`` (a copy-paste from the location test), so the
    logical helper was never exercised at all.
    """
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='5')
    # 'Has' is never a valid rel_type from a Logical start node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
                      logical_handle_id='3', other_handle_id='4', rel_type='Has')
    # Part_of must point at a Physical node, not another Logical one.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
                      logical_handle_id='3', other_handle_id='5', rel_type='Part_of')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
                      logical_handle_id='3', other_handle_id='5', rel_type='Has')
def test_create_relation_relationship(self):
    """Relation Uses/Provides Logical, Responsible_for Location, Owns/Provides Physical are valid."""
    core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='6')
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='4',
                                                        rel_type='Uses')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='4',
                                                        rel_type='Provides')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='5',
                                                        rel_type='Responsible_for')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='6',
                                                        rel_type='Owns')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='6',
                                                        rel_type='Provides')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_relation_relationship(self):
    """Invalid relation relationships raise NoRelationshipPossible.

    Bug fix: the second and third assertions were byte-identical duplicates
    (Responsible_for -> Physical twice); the Logical end-node case was never
    tested. The duplicate now targets the Logical node instead.
    """
    core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='6')
    # Uses may not point at a Location node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='5', rel_type='Uses')
    # Responsible_for may point at neither a Physical nor a Logical node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='6', rel_type='Responsible_for')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship,
                      self.neo4jdb, relation_handle_id='3', other_handle_id='4', rel_type='Responsible_for')
    # Owns and Provides may not point at a Location node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='5', rel_type='Owns')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='5', rel_type='Provides')
def test_create_physical_relationship(self):
    """Physical Has/Connected_to Physical and Located_in Location are valid."""
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='4',
                                                        rel_type='Has')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='4',
                                                        rel_type='Connected_to')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='5',
                                                        rel_type='Located_in')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_physical_relationship(self):
    """Invalid physical relationships raise NoRelationshipPossible."""
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    # Located_in must target a Location node, not a Physical one.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, self.neo4jdb,
                      physical_handle_id='3', other_handle_id='4', rel_type='Located_in')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, self.neo4jdb,
                      physical_handle_id='3', other_handle_id='4', rel_type='Responsible_for')
    # Has may not target a Location node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship,
                      self.neo4jdb, physical_handle_id='3', other_handle_id='5', rel_type='Has')
def test_create_relationship(self):
    """The generic create_relationship dispatches on the start node's meta type."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Location',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
                     type_label='Test_Node', handle_id='5')
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='6')
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='3', other_handle_id='4',
                                               rel_type='Has')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='5', other_handle_id='4',
                                               rel_type='Responsible_for')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='6', other_handle_id='4',
                                               rel_type='Located_in')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_relationship(self):
    """The generic helper also rejects impossible combinations."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relationship, self.neo4jdb,
                      handle_id='3', other_handle_id='4', rel_type='Has')
def test_get_relationships(self):
    """get_relationships finds a relationship with and without a rel_type filter."""
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
                                               rel_type='Depends_on')
    relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='2')
    self.assertIn(relationship_id, [r.id for r in relationships])
    relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='2', rel_type='Depends_on')
    self.assertIn(relationship_id, [r.id for r in relationships])
    # No relationship
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='3')
    self.assertEqual(relationships, [])
def test_set_node_properties(self):
    """Properties set on a node are readable back from the node."""
    new_properties = {'test': 'hello world'}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    node = core.get_node(self.neo4jdb, handle_id='1')
    new_properties.update({'handle_id': '1'})
    self.assertEqual(node['test'], new_properties['test'])
# def test_fail_set_node_properties(self):
#     new_properties = {'test': set([])}
#     self.assertRaises(exceptions.BadProperties, core.set_node_properties, self.neo4jdb,
#                       handle_id='1', new_properties=new_properties)
def test_set_relationship_properties(self):
    """Properties set on a relationship are readable back from it."""
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
                                               rel_type='Depends_on')
    new_properties = {'test': 'hello world'}
    core.set_relationship_properties(self.neo4jdb, relationship_id=relationship_id, new_properties=new_properties)
    relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id)
    self.assertEqual(relationship['test'], new_properties['test'])
# def test_fail_set_relationship_properties(self):
#     relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
#                                                rel_type='Depends_on')
#     new_properties = {'test': set([])}
#     self.assertRaises(exceptions.BadProperties, core.set_relationship_properties, self.neo4jdb,
#                       relationship_id=relationship_id, new_properties=new_properties)
def test_get_node_model(self):
    """A Logical node is wrapped in a LogicalModel instance."""
    node_model = core.get_node_model(self.neo4jdb, handle_id='1')
    self.assertIsInstance(node_model, models.LogicalModel)
def test_get_relationship_model(self):
    """A relationship is wrapped in a BaseRelationshipModel instance."""
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
                                               rel_type='Depends_on')
    relationship_model = core.get_relationship_model(self.neo4jdb, relationship_id=relationship_id)
    self.assertIsInstance(relationship_model, models.BaseRelationshipModel)
def test_get_nodes_by_value_and_property(self):
    """Exact-value lookup on a property returns exactly the node that was set.

    Fix: the old loop-and-return only inspected the first result and ignored
    the rest; materialize and assert exactly one match instead.
    """
    new_properties = {'test': 'hello world'}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.get_nodes_by_value(self.neo4jdb, value='hello world', prop='test'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), 'hello world')
def test_get_nodes_by_value_and_property_list(self):
    """Exact-value lookup works for list-valued properties.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': ['hello', 'world']}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.get_nodes_by_value(self.neo4jdb, value=['hello', 'world'], prop='test'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), ['hello', 'world'])
def test_get_nodes_by_value_and_property_bool(self):
    """Exact-value lookup works for boolean-valued properties.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': False}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.get_nodes_by_value(self.neo4jdb, value=False, prop='test'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), False)
def test_get_nodes_by_value_and_property_int(self):
    """Exact-value lookup works for integer-valued properties.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': 3}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.get_nodes_by_value(self.neo4jdb, value=3, prop='test'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), 3)
def test_search_nodes_by_value(self):
    """Substring search across all properties finds the matching node.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': 'hello world'}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.search_nodes_by_value(self.neo4jdb, value='world'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), 'hello world')
def test_search_nodes_by_value_and_property(self):
    """Substring search restricted to one property finds the matching node.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': 'hello world'}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.search_nodes_by_value(self.neo4jdb, value='world', prop='test'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), 'hello world')
def test_search_nodes_by_value_in_list(self):
    """Substring search matches inside list-valued properties.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': ['hello', 'world']}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.search_nodes_by_value(self.neo4jdb, value='hel'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), ['hello', 'world'])
def test_search_nodes_by_value_and_property_in_list(self):
    """Substring search on one list-valued property finds the matching node.

    Fix: replaced first-item-only loop with a check of the full result set.
    """
    new_properties = {'test': ['hello', 'world']}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = list(core.search_nodes_by_value(self.neo4jdb, value='hel', prop='test'))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].get('test'), ['hello', 'world'])
def test_get_nodes_by_type(self):
    """Every node returned for a type label carries that label.

    Fix: the old loop returned after the first node, so remaining results were
    never checked; materialize, require a non-empty result, then check all.
    """
    result = list(core.get_nodes_by_type(self.neo4jdb, 'Test_Node'))
    self.assertTrue(result, 'Nothing found')
    for node in result:
        self.assertIn('Test_Node', node.labels)
def test_get_nodes_by_name(self):
    """Every node returned for a name actually has that name.

    Fix: the old loop returned after the first node; materialize, require a
    non-empty result, then check all.
    """
    result = list(core.get_nodes_by_name(self.neo4jdb, 'Test Node 1'))
    self.assertTrue(result, 'Nothing found')
    for node in result:
        self.assertEqual(node['name'], 'Test Node 1')
def test_get_unique_node_by_name(self):
    """A unique name/type pair resolves to a single model instance."""
    node_model = core.get_unique_node_by_name(self.neo4jdb, node_name='Test Node 1', node_type='Test_Node')
    self.assertIsInstance(node_model, models.LogicalModel)
def test_failing_get_unique_node_by_name(self):
    """Two nodes with the same name/type make the unique lookup raise."""
    core.create_node(self.neo4jdb, name='Test Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    self.assertRaises(exceptions.MultipleNodesReturned, core.get_unique_node_by_name, self.neo4jdb,
                      node_name='Test Node 1', node_type='Test_Node')
# Don't use for loops if not checking all items
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
from neo4j.exceptions import ConstraintError
except ImportError:
from neo4j.v1.api import CypherError as ConstraintError  # Backwards compatibility with version <1.2
from norduniclient.testing import Neo4jTestCase
from norduniclient import core
from norduniclient import exceptions
from norduniclient import models
__author__ = 'lundberg'
class CoreTests(Neo4jTestCase):
def setUp(self):
    """Seed the test database with two Logical Test_Node nodes ('1' and '2')."""
    super(CoreTests, self).setUp()
    core.create_node(self.neo4jdb, name='Test Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='1')
    core.create_node(self.neo4jdb, name='Test Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='2')
def test_create_and_get_node(self):
    """A created node can be fetched by its handle_id."""
    core.create_node(self.neo4jdb, name='Test Node 3', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    node = core.get_node(self.neo4jdb, handle_id='3')
    self.assertEqual(node.get('handle_id'), '3')
def test_create_node_existing_node_handle(self):
    """Reusing an existing handle_id violates the uniqueness constraint."""
    self.assertRaises(ConstraintError, core.create_node, self.neo4jdb, name='Test Node 1',
                      meta_type_label='Logical', type_label='Test_Node', handle_id='1')
def test_create_node_bad_meta_type(self):
    """An unknown meta type label raises MetaLabelNamingError."""
    self.assertRaises(exceptions.MetaLabelNamingError, core.create_node, self.neo4jdb, name='Test Node 1',
                      meta_type_label='No_Such_Label', type_label='Test_Node', handle_id='1')
def test_get_node_bundle(self):
    """The node bundle dict exposes data, meta_type and labels."""
    node_bundle = core.get_node_bundle(self.neo4jdb, handle_id='1')
    self.assertIsInstance(node_bundle, dict)
    node_data = node_bundle.get('data')
    self.assertEqual(node_data.get('handle_id'), '1')
    self.assertEqual(node_bundle.get('meta_type'), 'Logical')
    self.assertIsInstance(node_bundle.get('labels'), list)
    self.assertIn('Test_Node', node_bundle.get('labels'))
def test_failing_get_node_bundle(self):
    """Bundling a nonexistent node raises NodeNotFound."""
    self.assertRaises(exceptions.NodeNotFound, core.get_node_bundle, self.neo4jdb, handle_id='3')
def test_delete_node(self):
    """Deleting a node makes subsequent lookups raise NodeNotFound."""
    core.delete_node(self.neo4jdb, handle_id='1')
    self.assertRaises(exceptions.NodeNotFound, core.get_node, self.neo4jdb, handle_id='1')
def test_create_and_get_relationship(self):
    """A created relationship gets an int id and can be fetched by that id."""
    relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests')
    self.assertIsInstance(relationship_id, int)
    relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id)
    self.assertEqual(relationship.id, relationship_id)
def test_failing_get_relationship(self):
    """Fetching a nonexistent relationship id raises RelationshipNotFound."""
    self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship, self.neo4jdb, relationship_id=1)
def test_get_relationship_bundle(self):
    """The relationship bundle dict exposes data, id, start/end nodes and type."""
    relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests')
    relationship_bundle = core.get_relationship_bundle(self.neo4jdb, relationship_id=relationship_id)
    self.assertIsInstance(relationship_bundle, dict)
    relationship = relationship_bundle.get('data')
    self.assertIsNotNone(relationship)
    self.assertEqual(relationship_bundle.get('id'), relationship_id)
    self.assertEqual(relationship_bundle.get('start')['handle_id'], '1')
    self.assertEqual(relationship_bundle.get('end')['handle_id'], '2')
    self.assertEqual(relationship_bundle.get('type'), 'Tests')
def test_failing_get_relationship_bundle(self):
    """Bundling a nonexistent relationship raises RelationshipNotFound."""
    self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship_bundle, self.neo4jdb,
                      relationship_id=1)
def test_delete_relationship(self):
    """A deleted relationship can no longer be fetched by its id."""
    relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests')
    relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id)
    self.assertEqual(relationship.id, relationship_id)
    core.delete_relationship(self.neo4jdb, relationship_id=relationship_id)
    self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship, self.neo4jdb,
                      relationship_id=relationship_id)
def test_create_location_relationship(self):
    """Location 'Has' Location is a valid relationship."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Location',
                     type_label='Test_Node', handle_id='4')
    relationship_id = core.create_location_relationship(self.neo4jdb, location_handle_id='3', other_handle_id='4',
                                                        rel_type='Has')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_location_relationship(self):
    """Location 'Has' Logical is rejected with NoRelationshipPossible."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_location_relationship, self.neo4jdb,
                      location_handle_id='3', other_handle_id='4', rel_type='Has')
def test_create_logical_relationship(self):
    """Logical Depends_on Physical/Logical and Part_of Physical are valid."""
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='5')
    relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='4',
                                                       rel_type='Depends_on')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='5',
                                                       rel_type='Depends_on')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='4',
                                                       rel_type='Part_of')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_logical_relationship(self):
    """Invalid logical relationships raise NoRelationshipPossible.

    Bug fix: this test previously called ``core.create_location_relationship``
    with ``location_handle_id`` (a copy-paste from the location test), so the
    logical helper was never exercised at all.
    """
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='5')
    # 'Has' is never a valid rel_type from a Logical start node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
                      logical_handle_id='3', other_handle_id='4', rel_type='Has')
    # Part_of must point at a Physical node, not another Logical one.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
                      logical_handle_id='3', other_handle_id='5', rel_type='Part_of')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
                      logical_handle_id='3', other_handle_id='5', rel_type='Has')
def test_create_relation_relationship(self):
    """Relation Uses/Provides Logical, Responsible_for Location, Owns/Provides Physical are valid."""
    core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='6')
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='4',
                                                        rel_type='Uses')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='4',
                                                        rel_type='Provides')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='5',
                                                        rel_type='Responsible_for')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='6',
                                                        rel_type='Owns')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='6',
                                                        rel_type='Provides')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_relation_relationship(self):
    """Invalid relation relationships raise NoRelationshipPossible.

    Bug fix: the second and third assertions were byte-identical duplicates
    (Responsible_for -> Physical twice); the Logical end-node case was never
    tested. The duplicate now targets the Logical node instead.
    """
    core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='6')
    # Uses may not point at a Location node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='5', rel_type='Uses')
    # Responsible_for may point at neither a Physical nor a Logical node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='6', rel_type='Responsible_for')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship,
                      self.neo4jdb, relation_handle_id='3', other_handle_id='4', rel_type='Responsible_for')
    # Owns and Provides may not point at a Location node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='5', rel_type='Owns')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
                      relation_handle_id='3', other_handle_id='5', rel_type='Provides')
def test_create_physical_relationship(self):
    """Physical Has/Connected_to Physical and Located_in Location are valid."""
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='4',
                                                        rel_type='Has')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='4',
                                                        rel_type='Connected_to')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='5',
                                                        rel_type='Located_in')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_physical_relationship(self):
    """Invalid physical relationships raise NoRelationshipPossible."""
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='5')
    # Located_in must target a Location node, not a Physical one.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, self.neo4jdb,
                      physical_handle_id='3', other_handle_id='4', rel_type='Located_in')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, self.neo4jdb,
                      physical_handle_id='3', other_handle_id='4', rel_type='Responsible_for')
    # Has may not target a Location node.
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship,
                      self.neo4jdb, physical_handle_id='3', other_handle_id='5', rel_type='Has')
def test_create_relationship(self):
    """The generic create_relationship dispatches on the start node's meta type."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Location',
                     type_label='Test_Node', handle_id='4')
    core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
                     type_label='Test_Node', handle_id='5')
    core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
                     type_label='Test_Node', handle_id='6')
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='3', other_handle_id='4',
                                               rel_type='Has')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='5', other_handle_id='4',
                                               rel_type='Responsible_for')
    self.assertIsInstance(relationship_id, int)
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='6', other_handle_id='4',
                                               rel_type='Located_in')
    self.assertIsInstance(relationship_id, int)
def test_failing_create_relationship(self):
    """The generic helper also rejects impossible combinations."""
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='4')
    self.assertRaises(exceptions.NoRelationshipPossible, core.create_relationship, self.neo4jdb,
                      handle_id='3', other_handle_id='4', rel_type='Has')
def test_get_relationships(self):
    """get_relationships finds a relationship with and without a rel_type filter."""
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
                                               rel_type='Depends_on')
    relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='2')
    self.assertIn(relationship_id, [r.id for r in relationships])
    relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='2', rel_type='Depends_on')
    self.assertIn(relationship_id, [r.id for r in relationships])
    # No relationship
    core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
                     type_label='Test_Node', handle_id='3')
    relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='3')
    self.assertEqual(relationships, [])
def test_set_node_properties(self):
    """Properties set on a node are readable back from the node."""
    new_properties = {'test': 'hello world'}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    node = core.get_node(self.neo4jdb, handle_id='1')
    new_properties.update({'handle_id': '1'})
    self.assertEqual(node['test'], new_properties['test'])
# def test_fail_set_node_properties(self):
#     new_properties = {'test': set([])}
#     self.assertRaises(exceptions.BadProperties, core.set_node_properties, self.neo4jdb,
#                       handle_id='1', new_properties=new_properties)
def test_set_relationship_properties(self):
    """Properties set on a relationship are readable back from it."""
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
                                               rel_type='Depends_on')
    new_properties = {'test': 'hello world'}
    core.set_relationship_properties(self.neo4jdb, relationship_id=relationship_id, new_properties=new_properties)
    relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id)
    self.assertEqual(relationship['test'], new_properties['test'])
# def test_fail_set_relationship_properties(self):
#     relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
#                                                rel_type='Depends_on')
#     new_properties = {'test': set([])}
#     self.assertRaises(exceptions.BadProperties, core.set_relationship_properties, self.neo4jdb,
#                       relationship_id=relationship_id, new_properties=new_properties)
def test_get_node_model(self):
    """A Logical node is wrapped in a LogicalModel instance."""
    node_model = core.get_node_model(self.neo4jdb, handle_id='1')
    self.assertIsInstance(node_model, models.LogicalModel)
def test_get_relationship_model(self):
    """A relationship is wrapped in a BaseRelationshipModel instance."""
    relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2',
                                               rel_type='Depends_on')
    relationship_model = core.get_relationship_model(self.neo4jdb, relationship_id=relationship_id)
    self.assertIsInstance(relationship_model, models.BaseRelationshipModel)
def test_get_nodes_by_value_and_property(self):
    """Exact-value lookup on a property returns exactly the node that was set."""
    new_properties = {'test': 'hello world'}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = core.get_nodes_by_value(self.neo4jdb, value='hello world', prop='test')
    # list() instead of a copy comprehension (ruff PERF402).
    all_results = list(result)
    self.assertEqual(len(all_results), 1)
    self.assertEqual(all_results[0].get('test'), 'hello world')
def test_get_nodes_by_value_and_property_list(self):
    """Exact-value lookup works for list-valued properties."""
    new_properties = {'test': ['hello', 'world']}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = core.get_nodes_by_value(self.neo4jdb, value=['hello', 'world'], prop='test')
    # list() instead of a copy comprehension (ruff PERF402).
    all_results = list(result)
    self.assertEqual(len(all_results), 1)
    self.assertEqual(all_results[0].get('test'), ['hello', 'world'])
def test_get_nodes_by_value_and_property_bool(self):
    """Exact-value lookup works for boolean-valued properties."""
    new_properties = {'test': False}
    core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties)
    result = core.get_nodes_by_value(self.neo4jdb, value=False, prop='test')
    # list() instead of a copy comprehension (ruff PERF402).
    all_results = list(result)
    self.assertEqual(len(all_results), 1)
    self.assertEqual(all_results[0].get('test'), False)
def test_get_nodes_by_value_and_property_int(self):
    """Integer property values must be matchable by exact value."""
    core.set_node_properties(self.neo4jdb, handle_id='1',
                             new_properties={'test': 3})
    hits = list(core.get_nodes_by_value(self.neo4jdb, value=3, prop='test'))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0].get('test'), 3)
def test_search_nodes_by_value(self):
    """Substring search across all properties finds the tagged node."""
    core.set_node_properties(self.neo4jdb, handle_id='1',
                             new_properties={'test': 'hello world'})
    hits = list(core.search_nodes_by_value(self.neo4jdb, value='world'))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0].get('test'), 'hello world')
def test_search_nodes_by_value_and_property(self):
    """Substring search restricted to one property finds the tagged node."""
    core.set_node_properties(self.neo4jdb, handle_id='1',
                             new_properties={'test': 'hello world'})
    hits = list(core.search_nodes_by_value(self.neo4jdb, value='world', prop='test'))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0].get('test'), 'hello world')
def test_search_nodes_by_value_in_list(self):
    """Substring search must look inside list-valued properties too."""
    tagged = ['hello', 'world']
    core.set_node_properties(self.neo4jdb, handle_id='1',
                             new_properties={'test': tagged})
    hits = list(core.search_nodes_by_value(self.neo4jdb, value='hel'))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0].get('test'), tagged)
def test_search_nodes_by_value_and_property_in_list(self):
    """Property-scoped substring search must look inside list values too."""
    tagged = ['hello', 'world']
    core.set_node_properties(self.neo4jdb, handle_id='1',
                             new_properties={'test': tagged})
    hits = list(core.search_nodes_by_value(self.neo4jdb, value='hel', prop='test'))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0].get('test'), tagged)
def test_get_nodes_by_type(self):
    """Every node returned by a type query must actually carry that label."""
    for returned in core.get_nodes_by_type(self.neo4jdb, 'Test_Node'):
        self.assertIn('Test_Node', returned.labels)
def test_get_nodes_by_name(self):
    """Name lookup returns exactly the one node named 'Test Node 1'."""
    hits = list(core.get_nodes_by_name(self.neo4jdb, 'Test Node 1'))
    self.assertEqual(len(hits), 1)
    self.assertEqual(hits[0]['name'], 'Test Node 1')
def test_get_unique_node_by_name(self):
    """Unique name+type lookup yields a LogicalModel when exactly one node matches."""
    fetched = core.get_unique_node_by_name(
        self.neo4jdb, node_name='Test Node 1', node_type='Test_Node')
    self.assertIsInstance(fetched, models.LogicalModel)
def test_failing_get_unique_node_by_name(self):
    """A duplicate name/type pair must make the unique lookup raise."""
    core.create_node(self.neo4jdb, name='Test Node 1', meta_type_label='Logical',
                     type_label='Test_Node', handle_id='3')
    with self.assertRaises(exceptions.MultipleNodesReturned):
        core.get_unique_node_by_name(self.neo4jdb, node_name='Test Node 1',
                                     node_type='Test_Node')
|
# -*- encoding: utf-8 -*-
#################################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#################################################################################
import time
import netsvc
from osv import fields, osv
import decimal_precision as dp
import pooler
from tools import config
from tools.translate import _
import re, string
from unicodedata import normalize
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
from datetime import datetime
##############################################################################
# Fatura (Nota Fiscal) Personalizado
##############################################################################
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def _amount_all(self, cr, uid, ids, name, args, context=None):
    """Functional-field computation of all invoice totals.

    For each invoice, sums the per-line Brazilian tax bases/values
    (ICMS, ICMS ST, IPI, PIS, COFINS), the untaxed amount and the tax
    discount, then derives amount_tax and amount_total. Returns
    {invoice_id: {field_name: value}} for the multi='all' fields.
    """
    precision_obj = self.pool.get('decimal.precision')
    prec = precision_obj.precision_get(cr, uid, 'Account')
    # Per-line tax fields that are accumulated straight from the lines.
    line_fields = ('icms_base', 'icms_value', 'icms_st_base', 'icms_st_value',
                   'ipi_base', 'ipi_value', 'pis_base', 'pis_value',
                   'cofins_base', 'cofins_value')
    result = {}
    for invoice in self.browse(cr, uid, ids, context=context):
        totals = dict.fromkeys(
            ('amount_untaxed', 'amount_tax', 'amount_tax_discount',
             'amount_total') + line_fields, 0.0)
        for line in invoice.invoice_line:
            totals['amount_untaxed'] += line.price_total
            totals['amount_tax_discount'] += line.price_total - line.price_subtotal
            for field_name in line_fields:
                totals[field_name] += getattr(line, field_name)
        for tax_line in invoice.tax_line:
            totals['amount_tax'] += tax_line.amount
        if totals['amount_tax_discount'] > 0 and totals['amount_tax'] > 0:
            # FIXME (kept from original): should probably be
            # round(totals['amount_tax'] - totals['amount_tax_discount'], prec)
            totals['amount_tax'] = totals['ipi_value']
        totals['amount_total'] = (totals['amount_tax'] + totals['amount_untaxed']
                                  + totals['icms_st_value'])
        result[invoice.id] = totals
    return result
def _get_invoice_line(self, cr, uid, ids, context=None):
    """Store trigger: map invoice-line ids to their parent invoice ids."""
    invoice_ids = {}
    line_obj = self.pool.get('account.invoice.line')
    for invoice_line in line_obj.browse(cr, uid, ids, context=context):
        invoice_ids[invoice_line.invoice_id.id] = True
    return invoice_ids.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
    """Store trigger: map invoice-tax-line ids to their parent invoice ids."""
    invoice_ids = {}
    tax_obj = self.pool.get('account.invoice.tax')
    for tax_line in tax_obj.browse(cr, uid, ids, context=context):
        invoice_ids[tax_line.invoice_id.id] = True
    return invoice_ids.keys()
_columns = {
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('sefaz_export','Enviar para Receita'),
('sefaz_exception','Erro de autorização da Receita'),
('paid','Paid'),
('cancel','Cancelled')
],'State', select=True, readonly=True,
help=' * The \'Draft\' state is used when a user is encoding a new and unconfirmed Invoice. \
\n* The \'Pro-forma\' when invoice is in Pro-forma state,invoice does not have an invoice number. \
\n* The \'Open\' state is used when user create invoice,a invoice number is generated.Its in open state till user does not pay invoice. \
\n* The \'Paid\' state is set automatically when invoice is paid.\
\n* The \'sefaz_out\' Gerado aquivo de exportação para sistema daReceita.\
\n* The \'sefaz_aut\' Recebido arquivo de autolização da Receita.\
\n* The \'Cancelled\' state is used when user cancel invoice.'),
'nfe_access_key': fields.char('Chave de Acesso NFE', size=44, readonly=True, states={'draft':[('readonly',False)]}),
'nfe_status': fields.char('Status na Sefaz', size=44, readonly=True),
'nfe_date': fields.datetime('Data do Status NFE', readonly=True, states={'draft':[('readonly',False)]}),
'nfe_export_date': fields.datetime('Exportação NFE', readonly=True),
'fiscal_document_id': fields.many2one('l10n_br_account.fiscal.document', 'Documento', readonly=True, states={'draft':[('readonly',False)]}),
'fiscal_document_nfe': fields.related('fiscal_document_id', 'nfe', type='boolean', readonly=True, size=64, relation='l10n_br_account.fiscal.document', store=True, string='NFE'),
'document_serie_id': fields.many2one('l10n_br_account.document.serie', 'Serie', domain="[('fiscal_document_id','=',fiscal_document_id)]", readonly=True, states={'draft':[('readonly',False)]}),
'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft':[('readonly',False)]}),
'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id','=',fiscal_operation_category_id)]", readonly=True, states={'draft':[('readonly',False)]}),
'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP', readonly=True, states={'draft':[('readonly',False)]}),
'vendor_number': fields.char('NF Entrada', size=12, readonly=True, states={'draft':[('readonly',False)]}, help="Número da Nota Fiscal do Fornecedor"),
'vendor_serie': fields.char('Série NF Entrada', size=12, readonly=True, states={'draft':[('readonly',False)]}, help="Série do número da Nota Fiscal do Fornecedor"),
'amount_untaxed': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Untaxed',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
#'account.invoice.tax': (_get_invoice_tax, None, 20),
#'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_st_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS ST',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
#'account.invoice.tax': (_get_invoice_tax, None, 20),
#'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_st_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS ST',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'ipi_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base IPI',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'ipi_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor IPI',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'pis_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base PIS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'pis_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor PIS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'cofins_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base COFINS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'cofins_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor COFINS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
}
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate an invoice, resetting all NFE control fields.

    The copy must not inherit the fiscal number, access key, SEFAZ
    status or export dates of the original, since those identify a
    specific submitted document.

    Bug fix: the original signature used a mutable default argument
    (``default={}``) and mutated it in place, so the reset values
    leaked into every later call sharing that default; it also mutated
    any dict passed in by the caller. We now take ``None`` as the
    default and work on a private copy.
    """
    default = dict(default or {})
    default.update({
        'internal_number': False,
        'nfe_access_key': False,
        'nfe_status': False,
        'nfe_date': False,
        'nfe_export_date': False,
    })
    return super(account_invoice, self).copy(cr, uid, id, default, context)
def action_internal_number(self, cr, uid, ids, context=None):
    """Assign each invoice the next number from its journal's internal sequence."""
    if context is None:
        context = {}
    sequence_obj = self.pool.get('ir.sequence')
    for invoice in self.browse(cr, uid, ids):
        next_number = sequence_obj.get_id(
            cr, uid, invoice.journal_id.internal_sequence.id, context=context)
        self.write(cr, uid, invoice.id, {'internal_number': next_number})
    return True
def action_number(self, cr, uid, ids, context=None):
    """Workflow step run at validation: propagate the invoice reference.

    For each invoice, derives a reference (``ref``) from the invoice
    number (or keeps the supplier reference on incoming documents) and
    writes it, via raw SQL, onto the related account move, its move
    lines and the linked analytic lines — but only where no ref was
    set yet. Finally logs a "validated" message on the invoice.
    """
    if context is None:
        context = {}
    # TODO: not a correct fix, but required to get fresh values before reading them.
    self.write(cr, uid, ids, {})
    for obj_inv in self.browse(cr, uid, ids):
        id = obj_inv.id
        invtype = obj_inv.type
        number = obj_inv.number
        move_id = obj_inv.move_id and obj_inv.move_id.id or False
        reference = obj_inv.reference or ''
        #self.write(cr, uid, ids, {'internal_number':number})
        # Incoming documents keep the supplier's reference when present;
        # everything else uses the converted invoice number.
        if invtype in ('in_invoice', 'in_refund'):
            if not reference:
                ref = self._convert_ref(cr, uid, number)
            else:
                ref = reference
        else:
            ref = self._convert_ref(cr, uid, number)
        # Raw SQL on purpose: only fills in refs that are still empty,
        # without triggering ORM write side effects.
        cr.execute('UPDATE account_move SET ref=%s ' \
                'WHERE id=%s AND (ref is null OR ref = \'\')',
                (ref, move_id))
        cr.execute('UPDATE account_move_line SET ref=%s ' \
                'WHERE move_id=%s AND (ref is null OR ref = \'\')',
                (ref, move_id))
        cr.execute('UPDATE account_analytic_line SET ref=%s ' \
                'FROM account_move_line ' \
                'WHERE account_move_line.move_id = %s ' \
                'AND account_analytic_line.move_id = account_move_line.id',
                (ref, move_id))
        # Log a validation message on each invoice (customer documents
        # get the extra logging context).
        for inv_id, name in self.name_get(cr, uid, [id]):
            ctx = context.copy()
            if obj_inv.type in ('out_invoice', 'out_refund'):
                ctx = self.get_log_context(cr, uid, context=ctx)
            message = _('Invoice ') + " '" + name + "' "+ _("is validated.")
            self.log(cr, uid, inv_id, message, context=ctx)
    return True
def nfe_dv(self, key):
    """Compute the check digit (cDV) for a 43-digit NFe access key.

    Implements the 'modulo 11' scheme from the SEFAZ NFe layout:
    weights 2..9 are applied cyclically starting from the rightmost
    digit; if the weighted sum modulo 11 is 0 or 1 the check digit is
    0, otherwise it is 11 minus the remainder. Non-digit characters in
    *key* are ignored.

    Bug fix: the original was a placeholder that always returned '2',
    which would produce invalid access keys for almost every invoice.
    Returns the check digit as a one-character string.
    """
    digits = [int(c) for c in str(key) if c.isdigit()]
    total = 0
    weight = 2
    for digit in reversed(digits):
        total += digit * weight
        # Weights cycle 2, 3, ..., 9, then restart at 2.
        weight = weight + 1 if weight < 9 else 2
    remainder = total % 11
    if remainder < 2:
        return '0'
    return str(11 - remainder)
def nfe_check(self, cr, uid, ids, context=None):
    """Validate that each invoice has every field required to build an NFe.

    Walks the invoice header, issuer (Emitente), recipient
    (Destinatário), delivery address, product lines, carrier, vehicle
    and totals, collecting one message per missing field. Raises
    osv.except_osv with the full message list if anything is missing;
    returns True otherwise.

    Bug fix: the original overwrote ``strErro`` with ``=`` on every
    failed check instead of accumulating with ``+=``, so only the LAST
    failing check was ever reported even though each message ends in
    '\\n' precisely so that multiple errors can be listed together.
    """
    strErro = ''
    if context is None:
        context = {}
    for inv in self.browse(cr, uid, ids):
        # Invoice header
        company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
        company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
        if not inv.document_serie_id:
            strErro += 'Nota Fiscal - Série da nota fiscal\n'
        if not inv.fiscal_document_id:
            strErro += 'Nota Fiscal - Tipo de documento fiscal\n'
        #if not inv.date_invoice:
        #    strErro += 'Nota Fiscal - Data da nota fiscal\n'
        if not inv.journal_id.internal_sequence:
            strErro += 'Nota Fiscal - Número da nota fiscal, o diário deve ter uma sequência interna\n'
        if not inv.cfop_id:
            strErro += 'Nota Fiscal - CFOP\n'
        else:
            if not inv.cfop_id.small_name:
                strErro += 'Nota Fiscal - Descrição reduzida do CFOP\n'
        # Issuer (Emitente)
        if not inv.company_id.partner_id.legal_name:
            strErro += 'Emitente - Razão Social\n'
        if not inv.company_id.partner_id.name:
            strErro += 'Emitente - Fantasia\n'
        if not inv.company_id.partner_id.cnpj_cpf:
            strErro += 'Emitente - CNPJ/CPF\n'
        if not company_addr_default.street:
            strErro += 'Emitente / Endereço - Logradouro\n'
        if not company_addr_default.number:
            strErro += 'Emitente / Endereço - Número\n'
        if not company_addr_default.zip:
            strErro += 'Emitente / Endereço - CEP\n'
        if not inv.company_id.cnae_main:
            strErro += 'Emitente / CNAE Principal\n'
        if not inv.company_id.partner_id.inscr_est:
            strErro += 'Emitente / Inscrição Estadual\n'
        if not company_addr_default.state_id:
            strErro += 'Emitente / Endereço - Estado\n'
        else:
            if not company_addr_default.state_id.ibge_code:
                strErro += 'Emitente / Endereço - Código do IBGE do estado\n'
            if not company_addr_default.state_id.name:
                strErro += 'Emitente / Endereço - Nome do estado\n'
        if not company_addr_default.city_id:
            strErro += 'Emitente / Endereço - municipio\n'
        else:
            if not company_addr_default.city_id.name:
                strErro += 'Emitente / Endereço - Nome do municipio\n'
            if not company_addr_default.city_id.ibge_code:
                strErro += 'Emitente / Endereço - Código do IBGE do municipio\n'
        if not company_addr_default.country_id:
            strErro += 'Emitente / Endereço - país\n'
        else:
            if not company_addr_default.country_id.name:
                strErro += 'Emitente / Endereço - Nome do país\n'
            if not company_addr_default.country_id.bc_code:
                strErro += 'Emitente / Endereço - Código do BC do país\n'
        # NOTE(review): this duplicates the country_id test above but reports
        # the tax-regime message — it probably should check
        # inv.company_id.fiscal_type instead; condition kept as-is to
        # preserve behavior, confirm with the module author.
        if not company_addr_default.country_id:
            strErro += 'Emitente / Regime Tributário\n'
        # Recipient (Destinatário)
        if not inv.partner_id.legal_name:
            strErro += 'Destinatário - Razão Social\n'
        if not inv.partner_id.cnpj_cpf:
            strErro += 'Destinatário - CNPJ/CPF\n'
        if not inv.address_invoice_id.street:
            strErro += 'Destinatário / Endereço - Logradouro\n'
        if not inv.address_invoice_id.number:
            strErro += 'Destinatário / Endereço - Número\n'
        if not inv.address_invoice_id.zip:
            strErro += 'Destinatário / Endereço - CEP\n'
        if not inv.address_invoice_id.state_id:
            strErro += 'Destinatário / Endereço - Estado\n'
        else:
            if not inv.address_invoice_id.state_id.ibge_code:
                strErro += 'Destinatário / Endereço - Código do IBGE do estado\n'
            if not inv.address_invoice_id.state_id.name:
                strErro += 'Destinatário / Endereço - Nome do estado\n'
        if not inv.address_invoice_id.city_id:
            strErro += 'Destinatário / Endereço - Municipio\n'
        else:
            if not inv.address_invoice_id.city_id.name:
                strErro += 'Destinatário / Endereço - Nome do municipio\n'
            if not inv.address_invoice_id.city_id.ibge_code:
                strErro += 'Destinatário / Endereço - Código do IBGE do municipio\n'
        if not inv.address_invoice_id.country_id:
            strErro += 'Destinatário / Endereço - País\n'
        else:
            if not inv.address_invoice_id.country_id.name:
                strErro += 'Destinatário / Endereço - Nome do país\n'
            if not inv.address_invoice_id.country_id.bc_code:
                strErro += 'Destinatário / Endereço - Código do BC do país\n'
        # Delivery address, only when it differs from the billing address
        if inv.partner_shipping_id:
            if inv.address_invoice_id != inv.partner_shipping_id:
                if not inv.partner_shipping_id.street:
                    strErro += 'Destinatário / Endereço de Entrega - Logradouro\n'
                if not inv.partner_shipping_id.number:
                    strErro += 'Destinatário / Endereço de Entrega - Número\n'
                if not inv.address_invoice_id.zip:
                    strErro += 'Destinatário / Endereço de Entrega - CEP\n'
                if not inv.partner_shipping_id.state_id:
                    strErro += 'Destinatário / Endereço de Entrega - Estado\n'
                else:
                    if not inv.partner_shipping_id.state_id.ibge_code:
                        strErro += 'Destinatário / Endereço de Entrega - Código do IBGE do estado\n'
                    if not inv.partner_shipping_id.state_id.name:
                        strErro += 'Destinatário / Endereço de Entrega - Nome do estado\n'
                if not inv.partner_shipping_id.city_id:
                    strErro += 'Destinatário / Endereço - Municipio\n'
                else:
                    if not inv.partner_shipping_id.city_id.name:
                        strErro += 'Destinatário / Endereço de Entrega - Nome do municipio\n'
                    if not inv.partner_shipping_id.city_id.ibge_code:
                        strErro += 'Destinatário / Endereço de Entrega - Código do IBGE do municipio\n'
                if not inv.partner_shipping_id.country_id:
                    strErro += 'Destinatário / Endereço de Entrega - País\n'
                else:
                    if not inv.partner_shipping_id.country_id.name:
                        strErro += 'Destinatário / Endereço de Entrega - Nome do país\n'
                    if not inv.partner_shipping_id.country_id.bc_code:
                        strErro += 'Destinatário / Endereço de Entrega - Código do BC do país\n'
        # Products
        for inv_line in inv.invoice_line:
            if inv_line.product_id:
                if not inv_line.product_id.code:
                    strErro += 'Produtos e Servicos: %s, Qtde: %s - Código do produto\n' % (inv_line.product_id.name,inv_line.quantity)
                if not inv_line.product_id.name:
                    strErro += 'Produtos e Servicos: %s, Qtde: %s - Nome do produto\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.cfop_id:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - CFOP\n' % (inv_line.product_id.name,inv_line.quantity)
            else:
                if not inv_line.cfop_id.code:
                    strErro += 'Produtos e Servicos: %s, Qtde: %s - Código do CFOP\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.uos_id:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - Unidade de medida\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.quantity:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - Quantidade\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.price_unit:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - Preço unitário\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.icms_cst:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - CST do ICMS\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.ipi_cst:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - CST do IPI\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.pis_cst:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - CST do PIS\n' % (inv_line.product_id.name,inv_line.quantity)
            if not inv_line.cofins_cst:
                strErro += 'Produtos e Servicos: %s, Qtde: %s - CST do COFINS\n' % (inv_line.product_id.name,inv_line.quantity)
        # Carrier
        if inv.carrier_id:
            if not inv.carrier_id.partner_id.legal_name:
                strErro += 'Transportadora - Razão Social\n'
            if not inv.carrier_id.partner_id.cnpj_cpf:
                strErro += 'Transportadora - CNPJ/CPF\n'
        # Vehicle data
        if inv.vehicle_id:
            if not inv.vehicle_id.plate:
                strErro += 'Transportadora / Veículo - Placa\n'
            # NOTE(review): reads state_id through the plate field —
            # looks like it should be inv.vehicle_id.state_id; kept
            # as-is to preserve behavior, confirm with the module author.
            if not inv.vehicle_id.plate.state_id.code:
                strErro += 'Transportadora / Veículo - UF da Placa\n'
            if not inv.vehicle_id.rntc_code:
                strErro += 'Transportadora / Veículo - RNTC\n'
        # Totals
        if inv.number_of_packages:
            if not inv.weight_net:
                strErro += 'Totais - Peso Liquido\n'
            if not inv.weight:
                strErro += 'Totais - Peso Bruto\n'
    if strErro:
        raise osv.except_osv(_('Error !'),_("Validação da Nota fiscal:\n '%s'") % (strErro,))
    return True
def nfe_export_txt(self, cr, uid, ids, context=False):
StrFile = ''
StrNF = 'NOTA FISCAL|%s|\n' % len(ids)
StrFile = StrNF
for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}):
#Endereço do company
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
#nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0')
#nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0')
#nfe_key += re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or '')
#nfe_key += inv.fiscal_document_id.code
#nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0')
#nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0')
#fe_key += unicode('1').strip().rjust(1, u'0') # Homologação
#nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0')
#nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0')
StrA = 'A|%s|%s|\n' % ('2.00', '')
StrFile += StrA
StrRegB = {
'cUF': company_addr_default.state_id.ibge_code,
'cNF': '',
'NatOp': normalize('NFKD',unicode(inv.cfop_id.small_name or '')).encode('ASCII','ignore'),
'intPag': '2',
'mod': inv.fiscal_document_id.code,
'serie': inv.document_serie_id.code,
'nNF': inv.internal_number or '',
'dEmi': inv.date_invoice or '',
'dSaiEnt': inv.date_invoice or '',
'hSaiEnt': '',
'tpNF': '',
'cMunFG': ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code),
'TpImp': '1',
'TpEmis': '1',
'cDV': '',
'tpAmb': '2',
'finNFe': '1',
'procEmi': '0',
'VerProc': '2.0.4',
'dhCont': '',
'xJust': '',
}
if inv.cfop_id.type in ("input"):
StrRegB['tpNF'] = '0'
else:
StrRegB['tpNF'] = '1'
StrB = 'B|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegB['cUF'], StrRegB['cNF'], StrRegB['NatOp'], StrRegB['intPag'],
StrRegB['mod'], StrRegB['serie'], StrRegB['nNF'], StrRegB['dEmi'], StrRegB['dSaiEnt'],
StrRegB['hSaiEnt'], StrRegB['tpNF'], StrRegB['cMunFG'], StrRegB['TpImp'], StrRegB['TpEmis'],
StrRegB['cDV'], StrRegB['tpAmb'], StrRegB['finNFe'], StrRegB['procEmi'], StrRegB['VerProc'],
StrRegB['dhCont'], StrRegB['xJust'])
StrFile += StrB
StrRegC = {
'XNome': normalize('NFKD',unicode(inv.company_id.partner_id.legal_name or '')).encode('ASCII','ignore'),
'XFant': normalize('NFKD',unicode(inv.company_id.partner_id.name or '')).encode('ASCII','ignore'),
'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_est or ''),
'IEST': '',
'IM': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_mun or ''),
'CNAE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.cnae_main or ''),
'CRT': inv.company_id.fiscal_type or '',
}
StrC = 'C|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC['XNome'], StrRegC['XFant'], StrRegC['IE'], StrRegC['IEST'],
StrRegC['IM'],StrRegC['CNAE'],StrRegC['CRT'])
StrFile += StrC
if inv.company_id.partner_id.tipo_pessoa == 'J':
StrC02 = 'C02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or ''))
else:
StrC02 = 'C02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or ''))
StrFile += StrC02
StrRegC05 = {
'XLgr': normalize('NFKD',unicode(company_addr_default.street or '')).encode('ASCII','ignore'),
'Nro': company_addr_default.number or '',
'Cpl': normalize('NFKD',unicode(company_addr_default.street2 or '')).encode('ASCII','ignore'),
'Bairro': normalize('NFKD',unicode(company_addr_default.district or 'Sem Bairro')).encode('ASCII','ignore'),
'CMun': '%s%s' % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code),
'XMun': normalize('NFKD',unicode(company_addr_default.city_id.name or '')).encode('ASCII','ignore'),
'UF': company_addr_default.state_id.code or '',
'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.zip or '').replace(' ','')),
'cPais': company_addr_default.country_id.bc_code or '',
'xPais': normalize('NFKD',unicode(company_addr_default.country_id.name or '')).encode('ASCII','ignore'),
'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.phone or '').replace(' ','')),
}
StrC05 = 'C05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC05['XLgr'], StrRegC05['Nro'], StrRegC05['Cpl'], StrRegC05['Bairro'],
StrRegC05['CMun'], StrRegC05['XMun'], StrRegC05['UF'], StrRegC05['CEP'],
StrRegC05['cPais'], StrRegC05['xPais'], StrRegC05['fone'])
StrFile += StrC05
StrRegE = {
'xNome': normalize('NFKD',unicode(inv.partner_id.legal_name or '')).encode('ASCII','ignore'),
'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.inscr_est or ''),
'ISUF': '',
'email': inv.partner_id.email or '',
}
StrE = 'E|%s|%s|%s|%s|\n' % (StrRegE['xNome'], StrRegE['IE'], StrRegE['ISUF'], StrRegE['email'])
StrFile += StrE
if inv.partner_id.tipo_pessoa == 'J':
StrE0 = 'E02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
else:
StrE0 = 'E03|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrFile += StrE0
StrRegE05 = {
'xLgr': normalize('NFKD',unicode(inv.address_invoice_id.street or '')).encode('ASCII','ignore'),
'nro': normalize('NFKD',unicode(inv.address_invoice_id.number or '')).encode('ASCII','ignore'),
'xCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD',unicode(inv.address_invoice_id.street2 or '' )).encode('ASCII','ignore')),
'xBairro': normalize('NFKD',unicode(inv.address_invoice_id.district or 'Sem Bairro')).encode('ASCII','ignore'),
'cMun': ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.city_id.ibge_code),
'xMun': normalize('NFKD',unicode(inv.address_invoice_id.city_id.name or '')).encode('ASCII','ignore'),
'UF': inv.address_invoice_id.state_id.code,
'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.zip or '').replace(' ','')),
'cPais': inv.address_invoice_id.country_id.bc_code,
'xPais': normalize('NFKD',unicode(inv.address_invoice_id.country_id.name or '')).encode('ASCII','ignore'),
'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.phone or '').replace(' ','')),
}
StrE05 = 'E05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegE05['xLgr'], StrRegE05['nro'], StrRegE05['xCpl'], StrRegE05['xBairro'],
StrRegE05['cMun'], StrRegE05['xMun'], StrRegE05['UF'], StrRegE05['CEP'],
StrRegE05['cPais'],StrRegE05['xPais'], StrRegE05['fone'],)
StrFile += StrE05
if inv.partner_shipping_id:
if inv.address_invoice_id != inv.partner_shipping_id:
StrRegG = {
'XLgr': normalize('NFKD',unicode(inv.partner_shipping_id.street or '',)).encode('ASCII','ignore'),
'Nro': normalize('NFKD',unicode(inv.partner_shipping_id.number or '')).encode('ASCII','ignore'),
'XCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD',unicode(inv.partner_shipping_id.street2 or '' )).encode('ASCII','ignore')),
'XBairro': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD',unicode(inv.partner_shipping_id.district or 'Sem Bairro' )).encode('ASCII','ignore')),
'CMun': ('%s%s') % (inv.partner_shipping_id.state_id.ibge_code, inv.partner_shipping_id.city_id.ibge_code),
'XMun': normalize('NFKD',unicode(inv.partner_shipping_id.city_id.name or '')).encode('ASCII','ignore'),
'UF': inv.address_invoice_id.state_id.code,
}
StrG = 'G|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegG['XLgr'],StrRegG['Nro'],StrRegG['XCpl'],StrRegG['XBairro'],StrRegG['CMun'],StrRegG['XMun'],StrRegG['UF'])
StrFile += StrG
if inv.partner_id.tipo_pessoa == 'J':
StrG0 = 'G02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
else:
StrG0 = 'G02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrFile += StrG0
i = 0
for inv_line in inv.invoice_line:
i += 1
StrH = 'H|%s||\n' % (i)
StrFile += StrH
StrRegI = {
'CProd': normalize('NFKD',unicode(inv_line.product_id.code or '',)).encode('ASCII','ignore'),
'CEAN': inv_line.product_id.ean13 or '',
'XProd': normalize('NFKD',unicode(inv_line.product_id.name or '')).encode('ASCII','ignore'),
'NCM': re.sub('[%s]' % re.escape(string.punctuation), '', inv_line.product_id.property_fiscal_classification.name or ''),
'EXTIPI': '',
'CFOP': inv_line.cfop_id.code,
'UCom': normalize('NFKD',unicode(inv_line.uos_id.name or '',)).encode('ASCII','ignore'),
'QCom': str("%.4f" % inv_line.quantity),
'VUnCom': str("%.2f" % (inv_line.price_unit * (1-(inv_line.discount or 0.0)/100.0))),
'VProd': str("%.2f" % inv_line.price_total),
'CEANTrib': '',
'UTrib': inv_line.uos_id.name,
'QTrib': str("%.4f" % inv_line.quantity),
'VUnTrib': str("%.2f" % inv_line.price_unit),
'VFrete': '',
'VSeg': '',
'VDesc': '',
'vOutro': '',
'indTot': '1',
'xPed': '',
'nItemPed': '',
}
if inv_line.product_id.code:
StrRegI['CProd'] = inv_line.product_id.code
else:
StrRegI['CProd'] = unicode(i).strip().rjust(4, u'0')
#No OpenERP já traz o valor unitário como desconto
#if inv_line.discount > 0:
# StrRegI['VDesc'] = str("%.2f" % (inv_line.quantity * (inv_line.price_unit * (1-(inv_line.discount or 0.0)/100.0))))
StrI = 'I|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegI['CProd'], StrRegI['CEAN'], StrRegI['XProd'], StrRegI['NCM'],
StrRegI['EXTIPI'], StrRegI['CFOP'], StrRegI['UCom'], StrRegI['QCom'],
StrRegI['VUnCom'], StrRegI['VProd'], StrRegI['CEANTrib'], StrRegI['UTrib'],
StrRegI['QTrib'], StrRegI['VUnTrib'], StrRegI['VFrete'], StrRegI['VSeg'],
StrRegI['VDesc'], StrRegI['vOutro'], StrRegI['indTot'], StrRegI['xPed'],
StrRegI['nItemPed'])
StrFile += StrI
StrM = 'M|\n'
StrFile += StrM
StrN = 'N|\n'
#TODO - Fazer alteração para cada tipo de cst
StrFile += StrN
StrRegN02 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN02 = 'N02|%s|%s|%s|%s|%s|%s|\n' % (StrRegN02['Orig'], StrRegN02['CST'], StrRegN02['ModBC'], StrRegN02['VBC'], StrRegN02['PICMS'],
StrRegN02['VICMS'])
StrRegN03 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': '4', #TODO
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN03 = 'N03|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN03['Orig'], StrRegN03['CST'], StrRegN03['ModBC'], StrRegN03['VBC'], StrRegN03['PICMS'],
StrRegN03['VICMS'], StrRegN03['ModBCST'], StrRegN03['PMVAST'], StrRegN03['PRedBCST'], StrRegN03['VBCST'],
StrRegN03['PICMSST'], StrRegN03['VICMSST'])
StrRegN04 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN04 = 'N04|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN02['Orig'], StrRegN02['CST'], StrRegN02['ModBC'], StrRegN02['PRedBC'], StrRegN02['VBC'], StrRegN02['PICMS'],
StrRegN02['VICMS'])
StrRegN06 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'vICMS': str("%.2f" % inv_line.icms_value),
'motDesICMS': '9', #FIXME
}
StrN06 = 'N06|%s|%s|%s|%s|\n' % (StrRegN06['Orig'], StrRegN06['CST'], StrRegN06['vICMS'], StrRegN06['motDesICMS'])
StrRegN09 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': '4', #TODO
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN09 = 'N09|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN09['Orig'], StrRegN09['CST'], StrRegN09['ModBC'], StrRegN09['PRedBC'], StrRegN09['VBC'], StrRegN09['PICMS'], StrRegN09['VICMS'], StrRegN09['ModBCST'], StrRegN09['PMVAST'], StrRegN09['PRedBCST'], StrRegN09['VBCST'], StrRegN09['PICMSST'], StrRegN09['VICMSST'])
StrRegN08 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'VBCST': str("%.2f" % 0.00),
'VICMSST': str("%.2f" % 0.00),
}
StrN08 = 'N08|%s|%s|%s|%s|\n' % (StrRegN08['Orig'], StrRegN08['CST'], StrRegN08['VBCST'], StrRegN08['VICMSST'])
#TODO - Fazer alteração para cada tipo de cst
if inv_line.icms_cst in ('00'):
StrFile += StrN02
if inv_line.icms_cst in ('20'):
StrFile += StrN04
if inv_line.icms_cst in ('10'):
StrFile += StrN03
if inv_line.icms_cst in ('40', '41', '50', '51'):
StrFile += StrN06
if inv_line.icms_cst in ('60'):
StrFile += StrN08
if inv_line.icms_cst in ('70'):
StrFile += StrN09
StrRegO = {
'ClEnq': '',
'CNPJProd': '',
'CSelo': '',
'QSelo': '',
'CEnq': '999',
}
StrO = 'O|%s|%s|%s|%s|%s|\n' % (StrRegO['ClEnq'], StrRegO['CNPJProd'], StrRegO['CSelo'], StrRegO['QSelo'], StrRegO['CEnq'])
StrFile += StrO
if inv_line.ipi_percent > 0:
StrRegO07 = {
'CST': inv_line.ipi_cst,
'VIPI': str("%.2f" % inv_line.ipi_value),
}
StrO07 = 'O07|%s|%s|\n' % (StrRegO07['CST'], StrRegO07['VIPI'])
StrFile += StrO07
if inv_line.ipi_type == 'percent':
StrRegO10 = {
'VBC': str("%.2f" % inv_line.ipi_base),
'PIPI': str("%.2f" % inv_line.ipi_percent),
}
StrO1 = 'O10|%s|%s|\n' % (StrRegO10['VBC'], StrRegO10['PIPI'])
if inv_line.ipi_type == 'quantity':
pesol = 0
if inv_line.product_id:
pesol = inv_line.product_id.weight_net
StrRegO11 = {
'QUnid': str("%.4f" % (inv_line.quantity * pesol)),
'VUnid': str("%.4f" % inv_line.ipi_percent),
}
StrO1 = 'O11|%s|%s|\n' % (StrRegO11['QUnid'], StrRegO11['VUnid'])
StrFile += StrO1
else:
StrO1 = 'O08|%s|\n' % inv_line.ipi_cst
StrFile += StrO1
StrQ = 'Q|\n'
StrFile += StrQ
if inv_line.pis_percent > 0:
StrRegQ02 = {
'CST': inv_line.pis_cst,
'VBC': str("%.2f" % inv_line.pis_base),
'PPIS': str("%.2f" % inv_line.pis_percent),
'VPIS': str("%.2f" % inv_line.pis_value),
}
StrQ02 = ('Q02|%s|%s|%s|%s|\n') % (StrRegQ02['CST'], StrRegQ02['VBC'], StrRegQ02['PPIS'], StrRegQ02['VPIS'])
else:
StrQ02 = 'Q04|%s|\n' % inv_line.pis_cst
StrFile += StrQ02
StrQ = 'S|\n'
StrFile += StrQ
if inv_line.cofins_percent > 0:
StrRegS02 = {
'CST': inv_line.cofins_cst,
'VBC': str("%.2f" % inv_line.cofins_base),
'PCOFINS': str("%.2f" % inv_line.cofins_percent),
'VCOFINS': str("%.2f" % inv_line.cofins_value),
}
StrS02 = ('S02|%s|%s|%s|%s|\n') % (StrRegS02['CST'], StrRegS02['VBC'], StrRegS02['PCOFINS'], StrRegS02['VCOFINS'])
else:
StrS02 = 'S04|%s|\n' % inv_line.cofins_cst
StrFile += StrS02
StrW = 'W|\n'
StrFile += StrW
StrRegW02 = {
'vBC': str("%.2f" % inv.icms_base),
'vICMS': str("%.2f" % inv.icms_value),
'vBCST': str("%.2f" % inv.icms_st_base),
'vST': str("%.2f" % inv.icms_st_value),
'vProd': str("%.2f" % inv.amount_untaxed),
'vFrete': str("%.2f" % inv.amount_freight),
'vSeg': str("%.2f" % inv.amount_insurance),
'vDesc': '0.00',
'vII': '0.00',
'vIPI': str("%.2f" % inv.ipi_value),
'vPIS': str("%.2f" % inv.pis_value),
'vCOFINS': str("%.2f" % inv.cofins_value),
'vOutro': str("%.2f" % inv.amount_costs),
'vNF': str("%.2f" % inv.amount_total),
}
StrW02 = 'W02|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegW02['vBC'], StrRegW02['vICMS'], StrRegW02['vBCST'], StrRegW02['vST'], StrRegW02['vProd'],
StrRegW02['vFrete'], StrRegW02['vSeg'], StrRegW02['vDesc'], StrRegW02['vII'], StrRegW02['vIPI'],
StrRegW02['vPIS'], StrRegW02['vCOFINS'], StrRegW02['vOutro'], StrRegW02['vNF'])
StrFile += StrW02
# Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0)
StrRegX0 = '0'
if inv.incoterm.code == 'FOB':
StrRegX0 = '0'
if inv.incoterm.code == 'CIF':
StrRegX0 = '1'
StrX = 'X|%s|\n' % (StrRegX0)
StrFile += StrX
StrRegX03 = {
'XNome': '',
'IE': '',
'XEnder': '',
'UF': '',
'XMun': '',
}
StrX0 = ''
if inv.carrier_id:
#Endereço da transportadora
carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0]
if inv.carrier_id.partner_id.legal_name:
StrRegX03['XNome'] = normalize('NFKD',unicode(inv.carrier_id.partner_id.legal_name or '')).encode('ASCII','ignore')
else:
StrRegX03['XNome'] = normalize('NFKD',unicode(inv.carrier_id.partner_id.name or '')).encode('ASCII','ignore')
StrRegX03['IE'] = inv.carrier_id.partner_id.inscr_est or ''
StrRegX03['xEnder'] = normalize('NFKD',unicode(carrier_addr_default.street or '')).encode('ASCII','ignore')
StrRegX03['UF'] = carrier_addr_default.state_id.code or ''
if carrier_addr_default.city_id:
StrRegX03['xMun'] = normalize('NFKD',unicode(carrier_addr_default.city_id.name or '')).encode('ASCII','ignore')
if inv.carrier_id.partner_id.tipo_pessoa == 'J':
StrX0 = 'X04|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or ''))
else:
StrX0 = 'X05|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or ''))
StrX03 = 'X03|%s|%s|%s|%s|%s|\n' % (StrRegX03['XNome'], StrRegX03['IE'], StrRegX03['XEnder'], StrRegX03['UF'], StrRegX03['XMun'])
StrFile += StrX03
StrFile += StrX0
StrRegX18 = {
'Placa': '',
'UF': '',
'RNTC': '',
}
if inv.vehicle_id:
StrRegX18['Placa'] = inv.vehicle_id.plate or ''
StrRegX18['UF'] = inv.vehicle_id.plate.state_id.code or ''
StrRegX18['RNTC'] = inv.vehicle_id.rntc_code or ''
StrX18 = 'X18|%s|%s|%s|\n' % (StrRegX18['Placa'], StrRegX18['UF'], StrRegX18['RNTC'])
StrFile += StrX18
StrRegX26 = {
'QVol': '',
'Esp': '',
'Marca': '',
'NVol': '',
'PesoL': '',
'PesoB': '',
}
if inv.number_of_packages:
StrRegX26['QVol'] = inv.number_of_packages
StrRegX26['Esp'] = 'Volume' #TODO
StrRegX26['Marca']
StrRegX26['NVol']
StrRegX26['PesoL'] = str("%.3f" % inv.weight_net)
StrRegX26['PesoB'] = str("%.3f" % inv.weight)
StrX26 = 'X26|%s|%s|%s|%s|%s|%s|\n' % (StrRegX26['QVol'], StrRegX26['Esp'], StrRegX26['Marca'], StrRegX26['NVol'], StrRegX26['PesoL'], StrRegX26['PesoB'])
StrFile += StrX26
StrRegZ = {
'InfAdFisco': '',
'InfCpl': normalize('NFKD',unicode(inv.comment or '')).encode('ASCII','ignore'),
}
StrZ = 'Z|%s|%s|\n' % (StrRegZ['InfAdFisco'], StrRegZ['InfCpl'])
StrFile += StrZ
self.write(cr, uid, [inv.id], {'nfe_export_date': datetime.now()})
return unicode(StrFile.encode('utf-8'))
def nfe_export_xml(self, cr, uid, ids, context=False):
    """Serialize the invoices in *ids* as an NF-e (layout 2.00) XML string.

    Builds the ``nfeProc/NFe/infNFe`` tree (ide, emit, dest, one ``det``
    per invoice line, total, transp groups) from the browse records and
    returns the document produced by ``ElementTree.tostring``.

    :param ids: ids of the ``account.invoice`` records to export
    :return: the serialized XML (utf-8)
    """
    nfeProc = Element('nfeProc', {'versao': '2.00', 'xmlns': 'http://www.portalfiscal.inf.br/nfe'})
    for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}):
        # Default address of the issuing company
        company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
        company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
        # Build the 44-digit NF-e access key:
        # UF(2) + AAMM(4) + CNPJ(14) + mod(2) + serie(3) + nNF(9) + tpEmis(1) + cNF(8) + cDV(1)
        nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0')
        nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0')
        # FIXME: hardcoded homologation CNPJ left in place; should be
        # inv.company_id.partner_id.cnpj_cpf (digits only, 14 chars).
        nfe_key += '08478495000170'
        nfe_key += inv.fiscal_document_id.code
        nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0')
        nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0')
        nfe_key += unicode('1').strip().rjust(1, u'0')  # Homologação
        nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0')
        nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0')
        NFe = SubElement(nfeProc, 'NFe', {'xmlns': 'http://www.portalfiscal.inf.br/nfe'})
        infNFe = SubElement(NFe, 'infNFe', {'versao': '2.00', 'Id': nfe_key})
        # --- ide: identification of the fiscal document ---
        ide = SubElement(infNFe, 'ide')
        ide_cUF = SubElement(ide, 'cUF')
        ide_cUF.text = company_addr_default.state_id.ibge_code
        ide_cNF = SubElement(ide, 'cNF')
        ide_cNF.text = unicode(inv.internal_number).strip().rjust(8, u'0')
        ide_natOp = SubElement(ide, 'natOp')
        ide_natOp.text = inv.cfop_id.name
        ide_indPag = SubElement(ide, 'indPag')
        ide_indPag.text = "2"
        ide_mod = SubElement(ide, 'mod')
        ide_mod.text = inv.fiscal_document_id.code
        ide_serie = SubElement(ide, 'serie')
        ide_serie.text = inv.document_serie_id.code
        ide_nNF = SubElement(ide, 'nNF')
        ide_nNF.text = inv.internal_number
        ide_dEmi = SubElement(ide, 'dEmi')
        ide_dEmi.text = inv.date_invoice
        ide_dSaiEnt = SubElement(ide, 'dSaiEnt')
        ide_dSaiEnt.text = inv.date_invoice
        # tpNF: 0 - entrada; 1 - saida
        ide_tpNF = SubElement(ide, 'tpNF')
        # BUG FIX: "in_refuld" was a typo for the OpenERP invoice type
        # "in_refund", so incoming refunds never matched this branch.
        if inv.type in ("out_invoice", "in_refund"):
            ide_tpNF.text = '0'
        else:
            ide_tpNF.text = '1'
        ide_cMunFG = SubElement(ide, 'cMunFG')
        ide_cMunFG.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code)
        ide_tpImp = SubElement(ide, 'tpImp')
        ide_tpImp.text = "1"
        ide_tpEmis = SubElement(ide, 'tpEmis')
        ide_tpEmis.text = "1"
        ide_cDV = SubElement(ide, 'cDV')
        ide_cDV.text = self.nfe_dv(nfe_key)
        # Tipo de ambiente: 1 - Produção; 2 - Homologação
        ide_tpAmb = SubElement(ide, 'tpAmb')
        ide_tpAmb.text = "2"
        # Finalidade da emissão da NF-e: 1 - NFe normal 2 - NFe complementar 3 - NFe de ajuste
        ide_finNFe = SubElement(ide, 'finNFe')
        ide_finNFe.text = "1"
        ide_procEmi = SubElement(ide, 'procEmi')
        ide_procEmi.text = "0"
        ide_verProc = SubElement(ide, 'verProc')
        ide_verProc.text = "2.0.4"
        # --- emit: issuer data ---
        emit = SubElement(infNFe, 'emit')
        emit_CNPJ = SubElement(emit, 'CNPJ')
        emit_CNPJ.text = inv.company_id.partner_id.cnpj_cpf
        emit_xNome = SubElement(emit, 'xNome')
        emit_xNome.text = inv.company_id.partner_id.legal_name
        emit_xFant = SubElement(emit, 'xFant')
        emit_xFant.text = inv.company_id.partner_id.name
        enderEmit = SubElement(emit, 'enderEmit')
        enderEmit_xLgr = SubElement(enderEmit, 'xLgr')
        enderEmit_xLgr.text = company_addr_default.street
        enderEmit_nro = SubElement(enderEmit, 'nro')
        enderEmit_nro.text = company_addr_default.number
        enderEmit_xBairro = SubElement(enderEmit, 'xBairro')
        enderEmit_xBairro.text = company_addr_default.district
        enderEmit_cMun = SubElement(enderEmit, 'cMun')
        enderEmit_cMun.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code)
        enderEmit_xMun = SubElement(enderEmit, 'xMun')
        enderEmit_xMun.text = company_addr_default.city_id.name
        enderEmit_UF = SubElement(enderEmit, 'UF')
        enderEmit_UF.text = company_addr_default.state_id.code
        enderEmit_CEP = SubElement(enderEmit, 'CEP')
        enderEmit_CEP.text = company_addr_default.zip
        enderEmit_cPais = SubElement(enderEmit, 'cPais')
        enderEmit_cPais.text = company_addr_default.country_id.bc_code
        enderEmit_xPais = SubElement(enderEmit, 'xPais')
        enderEmit_xPais.text = company_addr_default.country_id.name
        enderEmit_fone = SubElement(enderEmit, 'fone')
        enderEmit_fone.text = company_addr_default.phone
        emit_IE = SubElement(emit, 'IE')
        emit_IE.text = inv.company_id.partner_id.inscr_est
        emit_IEST = SubElement(emit, 'IEST')
        emit_IEST.text = '0000000000'  # FIXME
        emit_IM = SubElement(emit, 'IM')
        emit_IM.text = '0000000000'  # FIXME
        emit_CNAE = SubElement(emit, 'CNAE')
        emit_CNAE.text = '0111301'  # FIXME
        emit_CRT = SubElement(emit, 'CRT')
        emit_CRT.text = '3'  # FIXME
        # --- dest: recipient data ---
        dest = SubElement(infNFe, 'dest')
        dest_CNPJ = SubElement(dest, 'CNPJ')
        dest_CNPJ.text = inv.partner_id.cnpj_cpf
        dest_xNome = SubElement(dest, 'xNome')
        dest_xNome.text = inv.partner_id.legal_name
        enderDest = SubElement(dest, 'enderDest')
        enderDest_xLgr = SubElement(enderDest, 'xLgr')
        enderDest_xLgr.text = inv.address_invoice_id.street
        enderDest_nro = SubElement(enderDest, 'nro')
        enderDest_nro.text = inv.address_invoice_id.number
        enderDest_xBairro = SubElement(enderDest, 'xBairro')
        enderDest_xBairro.text = inv.address_invoice_id.district
        enderDest_cMun = SubElement(enderDest, 'cMun')
        enderDest_cMun.text = ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.city_id.ibge_code)
        enderDest_xMun = SubElement(enderDest, 'xMun')
        enderDest_xMun.text = inv.address_invoice_id.city_id.name
        enderDest_UF = SubElement(enderDest, 'UF')
        enderDest_UF.text = inv.address_invoice_id.state_id.code
        enderDest_CEP = SubElement(enderDest, 'CEP')
        enderDest_CEP.text = inv.address_invoice_id.zip
        enderDest_cPais = SubElement(enderDest, 'cPais')
        enderDest_cPais.text = inv.address_invoice_id.country_id.bc_code
        enderDest_xPais = SubElement(enderDest, 'xPais')
        enderDest_xPais.text = inv.address_invoice_id.country_id.name
        enderDest_fone = SubElement(enderDest, 'fone')
        enderDest_fone.text = inv.address_invoice_id.phone
        dest_IE = SubElement(dest, 'IE')
        dest_IE.text = inv.partner_id.inscr_est
        # --- det: one group per invoice line ---
        # BUG FIX: the counter was never initialized and was written as
        # "i =+ 1" (i.e. "i = +1"), so every det got nItem="1".
        i = 0
        for inv_line in inv.invoice_line:
            i += 1
            det = SubElement(infNFe, 'det', {'nItem': str(i)})
            det_prod = SubElement(det, 'prod')
            prod_cProd = SubElement(det_prod, 'cProd')
            if inv_line.product_id.code:
                prod_cProd.text = inv_line.product_id.code
            else:
                # No product code: fall back to the zero-padded item number
                prod_cProd.text = unicode(i).strip().rjust(4, u'0')
            prod_cEAN = SubElement(det_prod, 'cEAN')
            prod_cEAN.text = inv_line.product_id.ean13
            prod_xProd = SubElement(det_prod, 'xProd')
            prod_xProd.text = inv_line.product_id.name
            prod_NCM = SubElement(det_prod, 'NCM')
            prod_NCM.text = inv_line.product_id.property_fiscal_classification.name
            prod_CFOP = SubElement(det_prod, 'CFOP')
            prod_CFOP.text = inv_line.cfop_id.code
            prod_uCom = SubElement(det_prod, 'uCom')
            prod_uCom.text = inv_line.uos_id.name
            prod_qCom = SubElement(det_prod, 'qCom')
            prod_qCom.text = str("%.4f" % inv_line.quantity)
            prod_vUnCom = SubElement(det_prod, 'vUnCom')
            prod_vUnCom.text = str("%.4f" % inv_line.price_unit)
            prod_vProd = SubElement(det_prod, 'vProd')
            prod_vProd.text = str("%.2f" % inv_line.price_subtotal)
            prod_cEANTrib = SubElement(det_prod, 'cEANTrib')
            prod_uTrib = SubElement(det_prod, 'uTrib')
            prod_uTrib.text = inv_line.uos_id.name
            prod_qTrib = SubElement(det_prod, 'qTrib')
            prod_qTrib.text = '0.0000'  # TODO
            prod_vUnTrib = SubElement(det_prod, 'vUnTrib')
            prod_vUnTrib.text = '0.00'  # TODO
            prod_vFrete = SubElement(det_prod, 'vFrete')
            prod_vFrete.text = '0.00'  # TODO - freight value
            prod_vSeg = SubElement(det_prod, 'vSeg')
            prod_vSeg.text = '0.00'  # TODO - insurance value
            prod_vDesc = SubElement(det_prod, 'vDesc')
            prod_vDesc.text = str("%.2f" % inv_line.discount)  # TODO
            prod_vOutro = SubElement(det_prod, 'vOutro')
            prod_vOutro.text = '0.0000'  # TODO
            prod_indTot = SubElement(det_prod, 'indTot')
            prod_indTot.text = '1'  # TODO
            # Taxes of the line: ICMS, IPI, PIS, COFINS
            prod_imposto = SubElement(det, 'imposto')
            imposto_icms = SubElement(prod_imposto, 'ICMS')
            imposto_icms_cst = SubElement(imposto_icms, 'ICMS%s' % (inv_line.icms_cst))
            icms_orig = SubElement(imposto_icms_cst, 'orig')
            icms_orig.text = inv_line.product_id.origin
            icms_CST = SubElement(imposto_icms_cst, 'CST')
            icms_CST.text = inv_line.icms_cst
            icms_modBC = SubElement(imposto_icms_cst, 'modBC')
            icms_modBC.text = '0'  # TODO
            icms_vBC = SubElement(imposto_icms_cst, 'vBC')
            icms_vBC.text = str("%.2f" % inv_line.icms_base)
            icms_pICMS = SubElement(imposto_icms_cst, 'pICMS')
            icms_pICMS.text = str("%.2f" % inv_line.icms_percent)
            icms_vICMS = SubElement(imposto_icms_cst, 'vICMS')
            icms_vICMS.text = str("%.2f" % inv_line.icms_value)
            imposto_ipi = SubElement(prod_imposto, 'IPI')
            icms_cEnq = SubElement(imposto_ipi, 'cEnq')
            icms_cEnq.text = '999'
            # IPI not taxed (IPINT)
            ipi_IPINT = SubElement(imposto_ipi, 'IPINT')
            ipi_CST = SubElement(ipi_IPINT, 'CST')
            ipi_CST.text = inv_line.ipi_cst
            imposto_pis = SubElement(prod_imposto, 'PIS')
            pis_PISAliq = SubElement(imposto_pis, 'PISAliq')
            pis_CST = SubElement(pis_PISAliq, 'CST')
            pis_CST.text = inv_line.pis_cst
            pis_vBC = SubElement(pis_PISAliq, 'vBC')
            pis_vBC.text = str("%.2f" % inv_line.pis_base)
            pis_pPIS = SubElement(pis_PISAliq, 'pPIS')
            pis_pPIS.text = str("%.2f" % inv_line.pis_percent)
            pis_vPIS = SubElement(pis_PISAliq, 'vPIS')
            pis_vPIS.text = str("%.2f" % inv_line.pis_value)
            imposto_cofins = SubElement(prod_imposto, 'COFINS')
            cofins_COFINSAliq = SubElement(imposto_cofins, 'COFINSAliq')
            cofins_CST = SubElement(cofins_COFINSAliq, 'CST')
            # BUG FIX: the COFINS CST was copied from pis_cst; all other
            # COFINS fields (and the TXT S02 record) use cofins_*.
            cofins_CST.text = inv_line.cofins_cst
            cofins_vBC = SubElement(cofins_COFINSAliq, 'vBC')
            cofins_vBC.text = str("%.2f" % inv_line.cofins_base)
            cofins_pCOFINS = SubElement(cofins_COFINSAliq, 'pCOFINS')
            cofins_pCOFINS.text = str("%.2f" % inv_line.cofins_percent)
            cofins_vCOFINS = SubElement(cofins_COFINSAliq, 'vCOFINS')
            cofins_vCOFINS.text = str("%.2f" % inv_line.cofins_value)
        # --- total: ICMS totals of the document ---
        total = SubElement(infNFe, 'total')
        total_ICMSTot = SubElement(total, 'ICMSTot')
        ICMSTot_vBC = SubElement(total_ICMSTot, 'vBC')
        ICMSTot_vBC.text = str("%.2f" % inv.icms_base)
        ICMSTot_vICMS = SubElement(total_ICMSTot, 'vICMS')
        ICMSTot_vICMS.text = str("%.2f" % inv.icms_value)
        ICMSTot_vBCST = SubElement(total_ICMSTot, 'vBCST')
        ICMSTot_vBCST.text = '0.00'  # TODO
        ICMSTot_vST = SubElement(total_ICMSTot, 'vST')
        ICMSTot_vST.text = '0.00'  # TODO
        ICMSTot_vProd = SubElement(total_ICMSTot, 'vProd')
        ICMSTot_vProd.text = str("%.2f" % inv.amount_untaxed)
        ICMSTot_vFrete = SubElement(total_ICMSTot, 'vFrete')
        ICMSTot_vFrete.text = '0.00'  # TODO
        ICMSTot_vSeg = SubElement(total_ICMSTot, 'vSeg')
        ICMSTot_vSeg.text = str("%.2f" % inv.amount_insurance)
        ICMSTot_vDesc = SubElement(total_ICMSTot, 'vDesc')
        ICMSTot_vDesc.text = '0.00'  # TODO
        ICMSTot_II = SubElement(total_ICMSTot, 'vII')
        ICMSTot_II.text = '0.00'  # TODO
        ICMSTot_vIPI = SubElement(total_ICMSTot, 'vIPI')
        ICMSTot_vIPI.text = str("%.2f" % inv.ipi_value)
        ICMSTot_vPIS = SubElement(total_ICMSTot, 'vPIS')
        ICMSTot_vPIS.text = str("%.2f" % inv.pis_value)
        ICMSTot_vCOFINS = SubElement(total_ICMSTot, 'vCOFINS')
        ICMSTot_vCOFINS.text = str("%.2f" % inv.cofins_value)
        ICMSTot_vOutro = SubElement(total_ICMSTot, 'vOutro')
        ICMSTot_vOutro.text = str("%.2f" % inv.amount_costs)
        ICMSTot_vNF = SubElement(total_ICMSTot, 'vNF')
        ICMSTot_vNF.text = str("%.2f" % inv.amount_total)
        # --- transp: carrier / freight data ---
        transp = SubElement(infNFe, 'transp')
        # Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0)
        transp_modFrete = SubElement(transp, 'modFrete')
        transp_modFrete.text = '0'  # TODO
        if inv.carrier_id:
            # Carrier default address
            carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
            carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0]
            transp_transporta = SubElement(transp, 'transporta')
            if inv.carrier_id.partner_id.tipo_pessoa == 'J':
                transporta_CNPJ = SubElement(transp_transporta, 'CNPJ')
                transporta_CNPJ.text = inv.carrier_id.partner_id.cnpj_cpf
            else:
                transporta_CPF = SubElement(transp_transporta, 'CPF')
                transporta_CPF.text = inv.carrier_id.partner_id.cnpj_cpf
            transporta_xNome = SubElement(transp_transporta, 'xNome')
            if inv.carrier_id.partner_id.legal_name:
                transporta_xNome.text = inv.carrier_id.partner_id.legal_name
            else:
                transporta_xNome.text = inv.carrier_id.partner_id.name
            transporta_IE = SubElement(transp_transporta, 'IE')
            transporta_IE.text = inv.carrier_id.partner_id.inscr_est
            transporta_xEnder = SubElement(transp_transporta, 'xEnder')
            transporta_xEnder.text = carrier_addr_default.street
            transporta_xMun = SubElement(transp_transporta, 'xMun')
            # NOTE(review): xMun is filled with the IBGE state+city code;
            # elsewhere xMun carries the city *name* — confirm intent.
            transporta_xMun.text = ('%s%s') % (carrier_addr_default.state_id.ibge_code, carrier_addr_default.city_id.ibge_code)
            transporta_UF = SubElement(transp_transporta, 'UF')
            transporta_UF.text = carrier_addr_default.state_id.code
        if inv.number_of_packages:
            transp_vol = SubElement(transp, 'vol')
            vol_qVol = SubElement(transp_vol, 'qVol')
            # BUG FIX: ElementTree .text must be a string; the raw
            # numeric fields raised TypeError on serialization.
            vol_qVol.text = str(inv.number_of_packages)
            vol_esp = SubElement(transp_vol, 'esp')
            vol_esp.text = 'volume'  # TODO
            vol_pesoL = SubElement(transp_vol, 'pesoL')
            vol_pesoL.text = str("%.3f" % inv.weight_net)
            vol_pesoB = SubElement(transp_vol, 'pesoB')
            vol_pesoB.text = str("%.3f" % inv.weight)
    xml_string = ElementTree.tostring(nfeProc, 'utf-8')
    return xml_string
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
        date_invoice=False, payment_term=False, partner_bank_id=False,
        company_id=False, fiscal_operation_category_id=False):
    """Extend the standard partner onchange with the Brazilian fiscal
    mapping.

    Looks up the ``account.fiscal.position.rule`` matching the
    company -> partner route (country/state, invoice usage, operation
    category, optionally the partner fiscal type) and fills in
    ``fiscal_position``, ``fiscal_operation_id``, ``cfop_id`` and
    ``fiscal_document_id`` in the onchange result.
    """
    result = super(account_invoice, self).onchange_partner_id(
        cr, uid, ids, type, partner_id, date_invoice, payment_term,
        partner_bank_id, company_id)
    result['value']['fiscal_operation_id'] = False
    result['value']['cfop_id'] = False
    result['value']['fiscal_document_id'] = False
    # BUG FIX: use .get() — the parent onchange may not have set
    # address_invoice_id at all, which raised KeyError here.
    if not partner_id or not company_id or not result['value'].get('address_invoice_id'):
        return result
    # Origin: the company's default address
    obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
    company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
    company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
    from_country = company_addr_default.country_id.id
    from_state = company_addr_default.state_id.id
    # Destination: the partner's invoice address
    obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
    partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
    partner_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [result['value']['address_invoice_id']])[0]
    to_country = partner_addr_default.country_id.id
    to_state = partner_addr_default.state_id.id
    # First try an exact match including the partner fiscal type, then
    # fall back to a rule without it.
    base_domain = [('company_id', '=', company_id),
                   ('from_country', '=', from_country), ('from_state', '=', from_state),
                   ('to_country', '=', to_country), ('to_state', '=', to_state),
                   ('use_invoice', '=', True),
                   ('fiscal_operation_category_id', '=', fiscal_operation_category_id)]
    rule_obj = self.pool.get('account.fiscal.position.rule')
    fsc_pos_id = rule_obj.search(cr, uid, base_domain + [('partner_fiscal_type_id', '=', partner_fiscal_type)])
    if not fsc_pos_id:
        fsc_pos_id = rule_obj.search(cr, uid, base_domain)
    if fsc_pos_id:
        obj_fpo_rule = rule_obj.browse(cr, uid, fsc_pos_id)[0]
        obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
        obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
        result['value']['fiscal_position'] = obj_fpo.id
        result['value']['fiscal_operation_id'] = obj_foperation.id
        result['value']['cfop_id'] = obj_foperation.cfop_id.id
        result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
    return result
def onchange_company_id(self, cr, uid, ids, company_id, partner_id, type,
        invoice_line, currency_id, address_invoice_id,
        fiscal_operation_category_id=False):
    """Extend the standard company onchange with the Brazilian fiscal
    mapping (fiscal position rule -> fiscal operation -> CFOP/document).

    If the partner carries an explicit fiscal position, that one wins;
    otherwise the matching ``account.fiscal.position.rule`` is searched.
    """
    result = super(account_invoice, self).onchange_company_id(
        cr, uid, ids, company_id, partner_id, type, invoice_line,
        currency_id, address_invoice_id)
    result['value']['fiscal_operation_id'] = False
    result['value']['cfop_id'] = False
    result['value']['fiscal_document_id'] = False
    if not partner_id or not company_id or not address_invoice_id:
        return result
    # Origin: the company's default address
    obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
    company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
    company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
    from_country = company_addr_default.country_id.id
    from_state = company_addr_default.state_id.id
    obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
    partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
    if obj_partner.property_account_position.id:
        # BUG FIX: the original browsed [obj_fpo_rule.fiscal_position_id.id]
        # here, but obj_fpo_rule is not defined at this point (NameError).
        # Use the partner's own fiscal position instead.
        obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_partner.property_account_position.id])[0]
        obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
        result['value']['fiscal_position'] = obj_fpo.id
        result['value']['fiscal_operation_id'] = obj_foperation.id
        result['value']['cfop_id'] = obj_foperation.cfop_id.id
        result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
        return result
    # Destination: the invoice address
    partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [address_invoice_id])[0]
    to_country = partner_addr_invoice.country_id.id
    to_state = partner_addr_invoice.state_id.id
    # Exact match (with partner fiscal type) first, then a generic rule.
    base_domain = [('company_id', '=', company_id),
                   ('from_country', '=', from_country), ('from_state', '=', from_state),
                   ('to_country', '=', to_country), ('to_state', '=', to_state),
                   ('use_invoice', '=', True),
                   ('fiscal_operation_category_id', '=', fiscal_operation_category_id)]
    rule_obj = self.pool.get('account.fiscal.position.rule')
    fsc_pos_id = rule_obj.search(cr, uid, base_domain + [('partner_fiscal_type_id', '=', partner_fiscal_type)])
    if not fsc_pos_id:
        fsc_pos_id = rule_obj.search(cr, uid, base_domain)
    if fsc_pos_id:
        obj_fpo_rule = rule_obj.browse(cr, uid, fsc_pos_id)[0]
        obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
        obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
        result['value']['fiscal_position'] = obj_fpo.id
        result['value']['fiscal_operation_id'] = obj_foperation.id
        result['value']['cfop_id'] = obj_foperation.cfop_id.id
        result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
        for inv in self.browse(cr, uid, ids):
            for line in inv.invoice_line:
                # NOTE(review): assigning to a browse-record attribute does
                # not persist; probably meant a write() — confirm intent.
                line.cfop_id = obj_foperation.cfop_id.id
    return result
def onchange_address_invoice_id(self, cr, uid, ids, cpy_id, ptn_id,
        ptn_invoice_id, fiscal_operation_category_id=False):
    """Extend the standard invoice-address onchange with the Brazilian
    fiscal mapping (fiscal position rule -> fiscal operation -> CFOP /
    fiscal document).

    If the partner carries an explicit fiscal position, that one wins;
    otherwise the matching ``account.fiscal.position.rule`` is searched.
    """
    result = super(account_invoice, self).onchange_address_invoice_id(
        cr, uid, ids, cpy_id, ptn_id, ptn_invoice_id)
    result['value']['fiscal_operation_id'] = False
    result['value']['cfop_id'] = False
    result['value']['fiscal_document_id'] = False
    if not ptn_id or not cpy_id or not ptn_invoice_id:
        return result
    # Origin: the company's default address
    obj_company = self.pool.get('res.company').browse(cr, uid, [cpy_id])[0]
    company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
    company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
    from_country = company_addr_default.country_id.id
    from_state = company_addr_default.state_id.id
    obj_partner = self.pool.get('res.partner').browse(cr, uid, [ptn_id])[0]
    partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
    if obj_partner.property_account_position.id:
        # BUG FIX: the original browsed [obj_fpo_rule.fiscal_position_id.id]
        # here, but obj_fpo_rule is not defined at this point (NameError).
        # Use the partner's own fiscal position instead.
        obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_partner.property_account_position.id])[0]
        obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
        result['value']['fiscal_position'] = obj_fpo.id
        result['value']['fiscal_operation_id'] = obj_foperation.id
        result['value']['cfop_id'] = obj_foperation.cfop_id.id
        result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
        return result
    # Destination: the new invoice address
    partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [ptn_invoice_id])[0]
    to_country = partner_addr_invoice.country_id.id
    to_state = partner_addr_invoice.state_id.id
    # Exact match (with partner fiscal type) first, then a generic rule.
    base_domain = [('company_id', '=', cpy_id),
                   ('from_country', '=', from_country), ('from_state', '=', from_state),
                   ('to_country', '=', to_country), ('to_state', '=', to_state),
                   ('use_invoice', '=', True),
                   ('fiscal_operation_category_id', '=', fiscal_operation_category_id)]
    rule_obj = self.pool.get('account.fiscal.position.rule')
    fsc_pos_id = rule_obj.search(cr, uid, base_domain + [('partner_fiscal_type_id', '=', partner_fiscal_type)])
    if not fsc_pos_id:
        fsc_pos_id = rule_obj.search(cr, uid, base_domain)
    if fsc_pos_id:
        obj_fpo_rule = rule_obj.browse(cr, uid, fsc_pos_id)[0]
        obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
        obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
        result['value']['fiscal_position'] = obj_fpo.id
        result['value']['fiscal_operation_id'] = obj_foperation.id
        result['value']['cfop_id'] = obj_foperation.cfop_id.id
        result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
        for inv in self.browse(cr, uid, ids):
            for line in inv.invoice_line:
                # NOTE(review): assigning to a browse-record attribute does
                # not persist; probably meant a write() — confirm intent.
                line.cfop_id = obj_foperation.cfop_id.id
    return result
def onchange_cfop_id(self, cr, uid, ids, cfop_id):
if not cfop_id:
return False
for inv in self.browse(cr, uid, ids):
for inv_line in inv.invoice_line:
self.pool.get('account.invoice.line').write(cr, uid, inv_line.id, {'cfop_id': inv.fiscal_operation_id.cfop_id.id})
return {'value': {'cfop_id': cfop_id}}
account_invoice()
class account_invoice_line(osv.osv):
    """Brazilian localisation (l10n_br) of invoice lines.

    Adds per-line fiscal data (CFOP, fiscal operation) and computes, for
    every line, base/amount/rate/CST of the Brazilian taxes ICMS, ICMS-ST,
    IPI, PIS and COFINS via a single multi-value functional field.
    """
    _inherit = 'account.invoice.line'

    def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Compute every fiscal amount of the lines in ``ids``.

        Returns ``{line_id: {field_name: value}}`` covering all fields
        declared below with ``multi='all'``.
        """
        res = {}
        tax_obj = self.pool.get('account.tax')
        fsc_op_line_obj = self.pool.get('l10n_br_account.fiscal.operation.line')
        cur_obj = self.pool.get('res.currency')
        for line in self.browse(cr, uid, ids):
            # Neutral defaults; the CST codes mark the line as exempt when
            # no tax of the corresponding domain applies.
            res[line.id] = {
                'price_subtotal': 0.0,
                'price_total': 0.0,
                'icms_base': 0.0,
                'icms_base_other': 0.0,
                'icms_value': 0.0,
                'icms_percent': 0.0,
                'icms_percent_reduction': 0.0,
                'icms_st_value': 0.0,
                'icms_st_base': 0.0,
                'icms_st_percent': 0.0,
                'icms_st_mva': 0.0,
                'icms_st_base_other': 0.0,
                'icms_cst': '40',  # default: exempt when the line has no ICMS
                'ipi_type': 'percent',
                'ipi_base': 0.0,
                'ipi_base_other': 0.0,
                'ipi_value': 0.0,
                'ipi_percent': 0.0,
                'ipi_cst': '53',  # default: exempt when the line has no IPI
                'pis_base': 0.0,
                'pis_base_other': 0.0,
                'pis_value': 0.0,
                'pis_percent': 0.0,
                'pis_cst': '99',  # default: exempt when the line has no PIS
                'cofins_base': 0.0,
                'cofins_base_other': 0.0,
                'cofins_value': 0.0,
                'cofins_percent': 0.0,
                'cofins_cst': '99',  # default: exempt when the line has no COFINS
            }
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            # NOTE(review): ``address_id`` receives a browse record here,
            # while account_invoice_tax.compute() below passes a plain id -
            # confirm which form account.tax.compute_all() really expects.
            taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, address_id=line.invoice_id.address_invoice_id, partner=line.invoice_id.partner_id)
            icms_base = 0.0
            icms_base_other = 0.0
            icms_value = 0.0
            icms_percent = 0.0
            icms_percent_reduction = 0.0
            icms_st_value = 0.0
            icms_st_base = 0.0
            icms_st_percent = 0.0
            icms_st_mva = 0.0
            icms_st_base_other = 0.0
            icms_cst = ''
            ipi_type = ''
            ipi_base = 0.0
            ipi_base_other = 0.0
            ipi_value = 0.0
            ipi_percent = 0.0
            ipi_cst = ''
            pis_base = 0.0
            pis_base_other = 0.0
            pis_value = 0.0
            pis_percent = 0.0
            pis_cst = ''
            cofins_base = 0.0
            cofins_base_other = 0.0
            cofins_value = 0.0
            cofins_percent = 0.0
            cofins_cst = ''
            if line.fiscal_operation_id:
                # Generic CSTs of the fiscal operation (entries without an
                # NCM / fiscal classification restriction)...
                fiscal_operation_ids = fsc_op_line_obj.search(cr, uid, [('company_id', '=', line.company_id.id), ('fiscal_operation_id', '=', line.fiscal_operation_id.id), ('fiscal_classification_id', '=', False)])
                for fo_line in fsc_op_line_obj.browse(cr, uid, fiscal_operation_ids):
                    if fo_line.tax_code_id.domain == 'icms':
                        icms_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'ipi':
                        ipi_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'pis':
                        pis_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'cofins':
                        cofins_cst = fo_line.cst_id.code
                if line.product_id:
                    # ...then overridden by entries specific to the
                    # product's fiscal classification (NCM).
                    fo_ids_ncm = fsc_op_line_obj.search(cr, uid, [('company_id', '=', line.company_id.id), ('fiscal_operation_id', '=', line.fiscal_operation_id.id), ('fiscal_classification_id', '=', line.product_id.property_fiscal_classification.id)])
                    for fo_line_ncm in fsc_op_line_obj.browse(cr, uid, fo_ids_ncm):
                        if fo_line_ncm.tax_code_id.domain == 'icms':
                            icms_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'ipi':
                            ipi_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'pis':
                            pis_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'cofins':
                            cofins_cst = fo_line_ncm.cst_id.code
            # Split the computed taxes by fiscal domain and accumulate.
            for tax in taxes['taxes']:
                tax_brw = tax_obj.browse(cr, uid, tax['id'])
                if tax_brw.domain == 'icms':
                    icms_base += tax['total_base']
                    icms_base_other += taxes['total'] - tax['total_base']
                    icms_value += tax['amount']
                    icms_percent += tax_brw.amount * 100
                    icms_percent_reduction += tax_brw.base_reduction * 100
                if tax_brw.domain == 'ipi':
                    ipi_type = tax_brw.type
                    ipi_base += tax['total_base']
                    ipi_value += tax['amount']
                    ipi_percent += tax_brw.amount * 100
                if tax_brw.domain == 'pis':
                    pis_base += tax['total_base']
                    pis_base_other += taxes['total'] - tax['total_base']
                    pis_value += tax['amount']
                    pis_percent += tax_brw.amount * 100
                if tax_brw.domain == 'cofins':
                    cofins_base += tax['total_base']
                    cofins_base_other += taxes['total'] - tax['total_base']
                    cofins_value += tax['amount']
                    cofins_percent += tax_brw.amount * 100
                if tax_brw.domain == 'icmsst':
                    icms_st_value += tax['amount']
                    icms_st_base += tax['total_base']
                    # NOTE(review): the ICMS *amount* is accumulated into a
                    # percentage field here - looks wrong, kept as-is.
                    icms_st_percent += icms_value
                    icms_st_mva += tax_brw.amount_mva * 100
                    icms_st_base_other += 0
            res[line.id] = {
                'price_subtotal': taxes['total'] - taxes['total_tax_discount'],
                'price_total': taxes['total'],
                'icms_base': icms_base,
                'icms_base_other': icms_base_other,
                'icms_value': icms_value,
                'icms_percent': icms_percent,
                'icms_percent_reduction': icms_percent_reduction,
                'icms_st_value': icms_st_value,
                'icms_st_base': icms_st_base,
                'icms_st_percent': icms_st_percent,
                'icms_st_mva': icms_st_mva,
                'icms_st_base_other': icms_st_base_other,
                'icms_cst': icms_cst,
                'ipi_type': ipi_type,
                'ipi_base': ipi_base,
                'ipi_base_other': ipi_base_other,
                'ipi_value': ipi_value,
                'ipi_percent': ipi_percent,
                'ipi_cst': ipi_cst,
                'pis_base': pis_base,
                'pis_base_other': pis_base_other,
                'pis_value': pis_value,
                'pis_percent': pis_percent,
                'pis_cst': pis_cst,
                'cofins_base': cofins_base,
                'cofins_base_other': cofins_base_other,
                'cofins_value': cofins_value,
                'cofins_percent': cofins_percent,
                'cofins_cst': cofins_cst,
            }
            if line.invoice_id:
                # Round monetary values in the invoice currency
                # (percentages and CSTs are left untouched).
                cur = line.invoice_id.currency_id
                res[line.id] = {
                    'price_subtotal': cur_obj.round(cr, uid, cur, res[line.id]['price_subtotal']),
                    'price_total': cur_obj.round(cr, uid, cur, res[line.id]['price_total']),
                    'icms_base': cur_obj.round(cr, uid, cur, icms_base),
                    'icms_base_other': cur_obj.round(cr, uid, cur, icms_base_other),
                    'icms_value': cur_obj.round(cr, uid, cur, icms_value),
                    'icms_percent': icms_percent,
                    'icms_percent_reduction': icms_percent_reduction,
                    'icms_st_value': cur_obj.round(cr, uid, cur, icms_st_value),
                    'icms_st_base': cur_obj.round(cr, uid, cur, icms_st_base),
                    'icms_st_percent': icms_st_percent,
                    'icms_st_mva': icms_st_mva,
                    'icms_st_base_other': cur_obj.round(cr, uid, cur, icms_st_base_other),
                    'icms_cst': icms_cst,
                    'ipi_type': ipi_type,
                    'ipi_base': cur_obj.round(cr, uid, cur, ipi_base),
                    'ipi_base_other': cur_obj.round(cr, uid, cur, ipi_base_other),
                    'ipi_value': cur_obj.round(cr, uid, cur, ipi_value),
                    'ipi_percent': ipi_percent,
                    'ipi_cst': ipi_cst,
                    'pis_base': cur_obj.round(cr, uid, cur, pis_base),
                    'pis_base_other': cur_obj.round(cr, uid, cur, pis_base_other),
                    'pis_value': cur_obj.round(cr, uid, cur, pis_value),
                    'pis_percent': pis_percent,
                    'pis_cst': pis_cst,
                    'cofins_base': cur_obj.round(cr, uid, cur, cofins_base),
                    'cofins_base_other': cur_obj.round(cr, uid, cur, cofins_base_other),
                    'cofins_value': cur_obj.round(cr, uid, cur, cofins_value),
                    'cofins_percent': cofins_percent,
                    'cofins_cst': cofins_cst,
                }
        return res

    _columns = {
        'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft': [('readonly', False)]}),
        'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id','=',fiscal_operation_category_id)]", readonly=True, states={'draft': [('readonly', False)]}),
        'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP'),
        'price_subtotal': fields.function(_amount_line, method=True, string='Subtotal', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'price_total': fields.function(_amount_line, method=True, string='Total', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_base': fields.function(_amount_line, method=True, string='Base ICMS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_base_other': fields.function(_amount_line, method=True, string='Base ICMS Outras', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_value': fields.function(_amount_line, method=True, string='Valor ICMS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_percent': fields.function(_amount_line, method=True, string='Perc ICMS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_percent_reduction': fields.function(_amount_line, method=True, string='Perc Redução de Base ICMS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_value': fields.function(_amount_line, method=True, string='Valor ICMS ST', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_base': fields.function(_amount_line, method=True, string='Base ICMS ST', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_percent': fields.function(_amount_line, method=True, string='Percentual ICMS ST', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_mva': fields.function(_amount_line, method=True, string='MVA ICMS ST', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_base_other': fields.function(_amount_line, method=True, string='Base ICMS ST Outras', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_cst': fields.function(_amount_line, method=True, string='CST ICMS', type="char", size=2,
            store=True, multi='all'),
        'ipi_type': fields.function(_amount_line, method=True, string='Tipo do IPI', type="char", size=64,
            store=True, multi='all'),
        'ipi_base': fields.function(_amount_line, method=True, string='Base IPI', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_base_other': fields.function(_amount_line, method=True, string='Base IPI Outras', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_value': fields.function(_amount_line, method=True, string='Valor IPI', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_percent': fields.function(_amount_line, method=True, string='Perc IPI', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_cst': fields.function(_amount_line, method=True, string='CST IPI', type="char", size=2,
            store=True, multi='all'),
        'pis_base': fields.function(_amount_line, method=True, string='Base PIS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_base_other': fields.function(_amount_line, method=True, string='Base PIS Outras', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_value': fields.function(_amount_line, method=True, string='Valor PIS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_percent': fields.function(_amount_line, method=True, string='Perc PIS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_cst': fields.function(_amount_line, method=True, string='CST PIS', type="char", size=2,
            store=True, multi='all'),
        'cofins_base': fields.function(_amount_line, method=True, string='Base COFINS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'cofins_base_other': fields.function(_amount_line, method=True, string='Base COFINS Outras', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'cofins_value': fields.function(_amount_line, method=True, string='Valor COFINS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'cofins_percent': fields.function(_amount_line, method=True, string='Perc COFINS', type="float",
            digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        # Fixed: the label read 'Valor COFINS' (copy-paste from the value
        # field); this is the CST code field, matching pis_cst/ipi_cst.
        'cofins_cst': fields.function(_amount_line, method=True, string='CST COFINS', type="char", size=2,
            store=True, multi='all'),
    }

    def product_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, address_invoice_id=False, currency_id=False, context=None, cfop_id=False):
        """Extend the standard product on-change with CFOP propagation.

        When a ``cfop_id`` is supplied it is copied into the returned
        ``value`` dict; otherwise the standard result is returned untouched.
        """
        result = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, address_invoice_id, currency_id, context)
        if not cfop_id:
            return result
        result['value']['cfop_id'] = cfop_id
        # NOTE(review): the CFOP id is also written into the fiscal operation
        # category/operation many2one fields below - these are different
        # models, so the ids almost certainly do not correspond. Kept as-is
        # to preserve behaviour; confirm against the form views.
        result['value']['fiscal_operation_category_id'] = cfop_id
        result['value']['fiscal_operation_id'] = cfop_id
        return result
account_invoice_line()
class account_invoice_tax(osv.osv):
    """Invoice tax lines, recomputed with Brazilian tax grouping."""
    _inherit = "account.invoice.tax"
    _description = "Invoice Tax"

    def compute(self, cr, uid, invoice_id, context=None):
        """Compute the tax lines of ``invoice_id``.

        Taxes are grouped by (tax_code, base_code, account) and returned as
        a dict keyed by that tuple; amounts are rounded in the invoice
        currency at the end.

        Fixed: ``context`` previously used the mutable default ``{}``,
        which is shared between calls; it now defaults to ``None``.
        """
        if context is None:
            context = {}
        tax_grouped = {}
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context)
        cur = inv.currency_id
        company_currency = inv.company_id.currency_id.id
        for line in inv.invoice_line:
            taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit * (1 - (line.discount or 0.0) / 100.0)), line.quantity, inv.address_invoice_id.id, line.product_id, inv.partner_id)
            for tax in taxes['taxes']:
                val = {}
                val['invoice_id'] = inv.id
                val['name'] = tax['name']
                val['amount'] = tax['amount']
                val['manual'] = False
                val['sequence'] = tax['sequence']
                val['base'] = tax['total_base']
                # Regular invoices use the normal tax codes/signs; refunds
                # use the "ref_" counterparts.
                if inv.type in ('out_invoice', 'in_invoice'):
                    val['base_code_id'] = tax['base_code_id']
                    val['tax_code_id'] = tax['tax_code_id']
                    val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['account_id'] = tax['account_collected_id'] or line.account_id.id
                else:
                    val['base_code_id'] = tax['ref_base_code_id']
                    val['tax_code_id'] = tax['ref_tax_code_id']
                    val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['account_id'] = tax['account_paid_id'] or line.account_id.id
                key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if not key in tax_grouped:
                    tax_grouped[key] = val
                else:
                    tax_grouped[key]['amount'] += val['amount']
                    tax_grouped[key]['base'] += val['base']
                    tax_grouped[key]['base_amount'] += val['base_amount']
                    tax_grouped[key]['tax_amount'] += val['tax_amount']
        # Round the grouped totals in the invoice currency.
        for t in tax_grouped.values():
            t['base'] = cur_obj.round(cr, uid, cur, t['base'])
            t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
            t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
            t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
        return tax_grouped
account_invoice_tax()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
l10n_br_account - Corrigida a rotina de exportação de NF-e na seção da CST do ICMS
# -*- encoding: utf-8 -*-
#################################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#################################################################################
import time
import netsvc
from osv import fields, osv
import decimal_precision as dp
import pooler
from tools import config
from tools.translate import _
import re, string
from unicodedata import normalize
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
from datetime import datetime
##############################################################################
# Fatura (Nota Fiscal) Personalizado
##############################################################################
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def _amount_all(self, cr, uid, ids, name, args, context=None):
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
res[invoice.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_tax_discount': 0.0,
'amount_total': 0.0,
'icms_base': 0.0,
'icms_value': 0.0,
'icms_st_base': 0.0,
'icms_st_value': 0.0,
'ipi_base': 0.0,
'ipi_value': 0.0,
'pis_base': 0.0,
'pis_value': 0.0,
'cofins_base': 0.0,
'cofins_value': 0.0,
}
for line in invoice.invoice_line:
res[invoice.id]['amount_untaxed'] += line.price_total
res[invoice.id]['amount_tax_discount'] += line.price_total - line.price_subtotal
res[invoice.id]['icms_base'] += line.icms_base
res[invoice.id]['icms_value'] += line.icms_value
res[invoice.id]['icms_st_base'] += line.icms_st_base
res[invoice.id]['icms_st_value'] += line.icms_st_value
res[invoice.id]['ipi_base'] += line.ipi_base
res[invoice.id]['ipi_value'] += line.ipi_value
res[invoice.id]['pis_base'] += line.pis_base
res[invoice.id]['pis_value'] += line.pis_value
res[invoice.id]['cofins_base'] += line.cofins_base
res[invoice.id]['cofins_value'] += line.cofins_value
for invoice_tax in invoice.tax_line:
res[invoice.id]['amount_tax'] += invoice_tax.amount
if res[invoice.id]['amount_tax_discount'] > 0 and res[invoice.id]['amount_tax'] > 0:
res[invoice.id]['amount_tax'] = res[invoice.id]['ipi_value'] #FIXME round(res[invoice.id]['amount_tax'] - res[invoice.id]['amount_tax_discount'], prec)
res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed'] + res[invoice.id]['icms_st_value']
return res
def _get_invoice_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context):
result[line.invoice_id.id] = True
return result.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
result = {}
for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context):
result[tax.invoice_id.id] = True
return result.keys()
_columns = {
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('sefaz_export','Enviar para Receita'),
('sefaz_exception','Erro de autorização da Receita'),
('paid','Paid'),
('cancel','Cancelled')
],'State', select=True, readonly=True,
help=' * The \'Draft\' state is used when a user is encoding a new and unconfirmed Invoice. \
\n* The \'Pro-forma\' when invoice is in Pro-forma state,invoice does not have an invoice number. \
\n* The \'Open\' state is used when user create invoice,a invoice number is generated.Its in open state till user does not pay invoice. \
\n* The \'Paid\' state is set automatically when invoice is paid.\
\n* The \'sefaz_out\' Gerado aquivo de exportação para sistema daReceita.\
\n* The \'sefaz_aut\' Recebido arquivo de autolização da Receita.\
\n* The \'Cancelled\' state is used when user cancel invoice.'),
'nfe_access_key': fields.char('Chave de Acesso NFE', size=44, readonly=True, states={'draft':[('readonly',False)]}),
'nfe_status': fields.char('Status na Sefaz', size=44, readonly=True),
'nfe_date': fields.datetime('Data do Status NFE', readonly=True, states={'draft':[('readonly',False)]}),
'nfe_export_date': fields.datetime('Exportação NFE', readonly=True),
'fiscal_document_id': fields.many2one('l10n_br_account.fiscal.document', 'Documento', readonly=True, states={'draft':[('readonly',False)]}),
'fiscal_document_nfe': fields.related('fiscal_document_id', 'nfe', type='boolean', readonly=True, size=64, relation='l10n_br_account.fiscal.document', store=True, string='NFE'),
'document_serie_id': fields.many2one('l10n_br_account.document.serie', 'Serie', domain="[('fiscal_document_id','=',fiscal_document_id)]", readonly=True, states={'draft':[('readonly',False)]}),
'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft':[('readonly',False)]}),
'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id','=',fiscal_operation_category_id)]", readonly=True, states={'draft':[('readonly',False)]}),
'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP', readonly=True, states={'draft':[('readonly',False)]}),
'vendor_number': fields.char('NF Entrada', size=12, readonly=True, states={'draft':[('readonly',False)]}, help="Número da Nota Fiscal do Fornecedor"),
'vendor_serie': fields.char('Série NF Entrada', size=12, readonly=True, states={'draft':[('readonly',False)]}, help="Série do número da Nota Fiscal do Fornecedor"),
'amount_untaxed': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Untaxed',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
#'account.invoice.tax': (_get_invoice_tax, None, 20),
#'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_st_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS ST',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
#'account.invoice.tax': (_get_invoice_tax, None, 20),
#'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'icms_st_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS ST',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'ipi_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base IPI',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'ipi_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor IPI',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'pis_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base PIS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'pis_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor PIS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'cofins_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base COFINS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
'cofins_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor COFINS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
# 'account.invoice.tax': (_get_invoice_tax, None, 20),
# 'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount'], 20),
},
multi='all'),
}
def copy(self, cr, uid, id, default={}, context=None):
default.update({
'internal_number': False,
'nfe_access_key': False,
'nfe_status': False,
'nfe_date': False,
'nfe_export_date': False,
})
return super(account_invoice, self).copy(cr, uid, id, default, context)
def action_internal_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
for obj_inv in self.browse(cr, uid, ids):
obj_sequence = self.pool.get('ir.sequence')
seq_no = obj_sequence.get_id(cr, uid, obj_inv.journal_id.internal_sequence.id, context=context)
self.write(cr, uid, obj_inv.id, {'internal_number': seq_no})
return True
    def action_number(self, cr, uid, ids, context=None):
        """Workflow action run at validation: propagate the invoice number
        as the reference of the related move, move lines and analytic lines,
        then log a "validated" message on the invoice.
        """
        if context is None:
            context = {}
        #TODO: not correct fix but required a frech values before reading it.
        # Empty write forces stored functional fields to refresh before the
        # browse below reads them.
        self.write(cr, uid, ids, {})
        for obj_inv in self.browse(cr, uid, ids):
            id = obj_inv.id
            invtype = obj_inv.type
            number = obj_inv.number
            move_id = obj_inv.move_id and obj_inv.move_id.id or False
            reference = obj_inv.reference or ''
            #self.write(cr, uid, ids, {'internal_number':number})
            # Supplier documents keep the supplier's reference when present;
            # otherwise (and for customer documents) derive it from the number.
            if invtype in ('in_invoice', 'in_refund'):
                if not reference:
                    ref = self._convert_ref(cr, uid, number)
                else:
                    ref = reference
            else:
                ref = self._convert_ref(cr, uid, number)
            # Backfill the reference on the accounting entries, but only
            # where no reference was set yet.
            cr.execute('UPDATE account_move SET ref=%s ' \
                    'WHERE id=%s AND (ref is null OR ref = \'\')',
                    (ref, move_id))
            cr.execute('UPDATE account_move_line SET ref=%s ' \
                    'WHERE move_id=%s AND (ref is null OR ref = \'\')',
                    (ref, move_id))
            cr.execute('UPDATE account_analytic_line SET ref=%s ' \
                    'FROM account_move_line ' \
                    'WHERE account_move_line.move_id = %s ' \
                        'AND account_analytic_line.move_id = account_move_line.id',
                    (ref, move_id))
            # Log a message on customer documents (with extra log context).
            for inv_id, name in self.name_get(cr, uid, [id]):
                ctx = context.copy()
                if obj_inv.type in ('out_invoice', 'out_refund'):
                    ctx = self.get_log_context(cr, uid, context=ctx)
                message = _('Invoice ') + " '" + name + "' "+ _("is validated.")
                self.log(cr, uid, inv_id, message, context=ctx)
        return True
def nfe_dv(self, key):
return '2'
def nfe_check(self, cr, uid, ids, context=None):
strErro = ''
if context is None:
context = {}
for inv in self.browse(cr, uid, ids):
#Nota fiscal
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
if not inv.document_serie_id:
strErro = 'Nota Fiscal - Série da nota fiscal\n'
if not inv.fiscal_document_id:
strErro = 'Nota Fiscal - Tipo de documento fiscal\n'
#if not inv.date_invoice:
# strErro = 'Nota Fiscal - Data da nota fiscal\n'
if not inv.journal_id.internal_sequence:
strErro = 'Nota Fiscal - Número da nota fiscal, o diário deve ter uma sequência interna\n'
if not inv.cfop_id:
strErro = 'Nota Fiscal - CFOP\n'
else:
if not inv.cfop_id.small_name:
strErro = 'Nota Fiscal - Descrição reduzida do CFOP\n'
#Emitente
if not inv.company_id.partner_id.legal_name:
strErro = 'Emitente - Razão Social\n'
if not inv.company_id.partner_id.name:
strErro = 'Emitente - Fantasia\n'
if not inv.company_id.partner_id.cnpj_cpf:
strErro = 'Emitente - CNPJ/CPF\n'
if not company_addr_default.street:
strErro = 'Emitente / Endereço - Logradouro\n'
if not company_addr_default.number:
strErro = 'Emitente / Endereço - Número\n'
if not company_addr_default.zip:
strErro = 'Emitente / Endereço - CEP\n'
if not inv.company_id.cnae_main:
strErro = 'Emitente / CNAE Principal\n'
if not inv.company_id.partner_id.inscr_est:
strErro = 'Emitente / Inscrição Estadual\n'
if not company_addr_default.state_id:
strErro = 'Emitente / Endereço - Estado\n'
else:
if not company_addr_default.state_id.ibge_code:
strErro = 'Emitente / Endereço - Código do IBGE do estado\n'
if not company_addr_default.state_id.name:
strErro = 'Emitente / Endereço - Nome do estado\n'
if not company_addr_default.city_id:
strErro = 'Emitente / Endereço - municipio\n'
else:
if not company_addr_default.city_id.name:
strErro = 'Emitente / Endereço - Nome do municipio\n'
if not company_addr_default.city_id.ibge_code:
strErro = 'Emitente / Endereço - Código do IBGE do municipio\n'
if not company_addr_default.country_id:
strErro = 'Emitente / Endereço - país\n'
else:
if not company_addr_default.country_id.name:
strErro = 'Emitente / Endereço - Nome do país\n'
if not company_addr_default.country_id.bc_code:
strErro = 'Emitente / Endereço - Código do BC do país\n'
if not company_addr_default.country_id:
strErro = 'Emitente / Regime Tributário\n'
#Destinatário
if not inv.partner_id.legal_name:
strErro = 'Destinatário - Razão Social\n'
if not inv.partner_id.cnpj_cpf:
strErro = 'Destinatário - CNPJ/CPF\n'
if not inv.address_invoice_id.street:
strErro = 'Destinatário / Endereço - Logradouro\n'
if not inv.address_invoice_id.number:
strErro = 'Destinatário / Endereço - Número\n'
if not inv.address_invoice_id.zip:
strErro = 'Destinatário / Endereço - CEP\n'
if not inv.address_invoice_id.state_id:
strErro = 'Destinatário / Endereço - Estado\n'
else:
if not inv.address_invoice_id.state_id.ibge_code:
strErro = 'Destinatário / Endereço - Código do IBGE do estado\n'
if not inv.address_invoice_id.state_id.name:
strErro = 'Destinatário / Endereço - Nome do estado\n'
if not inv.address_invoice_id.city_id:
strErro = 'Destinatário / Endereço - Municipio\n'
else:
if not inv.address_invoice_id.city_id.name:
strErro = 'Destinatário / Endereço - Nome do municipio\n'
if not inv.address_invoice_id.city_id.ibge_code:
strErro = 'Destinatário / Endereço - Código do IBGE do municipio\n'
if not inv.address_invoice_id.country_id:
strErro = 'Destinatário / Endereço - País\n'
else:
if not inv.address_invoice_id.country_id.name:
strErro = 'Destinatário / Endereço - Nome do país\n'
if not inv.address_invoice_id.country_id.bc_code:
strErro = 'Destinatário / Endereço - Código do BC do país\n'
#endereco de entrega
if inv.partner_shipping_id:
if inv.address_invoice_id != inv.partner_shipping_id:
if not inv.partner_shipping_id.street:
strErro = 'Destinatário / Endereço de Entrega - Logradouro\n'
if not inv.partner_shipping_id.number:
strErro = 'Destinatário / Endereço de Entrega - Número\n'
if not inv.address_invoice_id.zip:
strErro = 'Destinatário / Endereço de Entrega - CEP\n'
if not inv.partner_shipping_id.state_id:
strErro = 'Destinatário / Endereço de Entrega - Estado\n'
else:
if not inv.partner_shipping_id.state_id.ibge_code:
strErro = 'Destinatário / Endereço de Entrega - Código do IBGE do estado\n'
if not inv.partner_shipping_id.state_id.name:
strErro = 'Destinatário / Endereço de Entrega - Nome do estado\n'
if not inv.partner_shipping_id.city_id:
strErro = 'Destinatário / Endereço - Municipio\n'
else:
if not inv.partner_shipping_id.city_id.name:
strErro = 'Destinatário / Endereço de Entrega - Nome do municipio\n'
if not inv.partner_shipping_id.city_id.ibge_code:
strErro = 'Destinatário / Endereço de Entrega - Código do IBGE do municipio\n'
if not inv.partner_shipping_id.country_id:
strErro = 'Destinatário / Endereço de Entrega - País\n'
else:
if not inv.partner_shipping_id.country_id.name:
strErro = 'Destinatário / Endereço de Entrega - Nome do país\n'
if not inv.partner_shipping_id.country_id.bc_code:
strErro = 'Destinatário / Endereço de Entrega - Código do BC do país\n'
#produtos
for inv_line in inv.invoice_line:
if inv_line.product_id:
if not inv_line.product_id.code:
strErro = 'Produtos e Servicos: %s, Qtde: %s - Código do produto\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.product_id.name:
strErro = 'Produtos e Servicos: %s, Qtde: %s - Nome do produto\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.cfop_id:
strErro = 'Produtos e Servicos: %s, Qtde: %s - CFOP\n' % (inv_line.product_id.name,inv_line.quantity)
else:
if not inv_line.cfop_id.code:
strErro = 'Produtos e Servicos: %s, Qtde: %s - Código do CFOP\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.uos_id:
strErro = 'Produtos e Servicos: %s, Qtde: %s - Unidade de medida\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.quantity:
strErro = 'Produtos e Servicos: %s, Qtde: %s - Quantidade\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.price_unit:
strErro = 'Produtos e Servicos: %s, Qtde: %s - Preço unitário\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.icms_cst:
strErro = 'Produtos e Servicos: %s, Qtde: %s - CST do ICMS\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.ipi_cst:
strErro = 'Produtos e Servicos: %s, Qtde: %s - CST do IPI\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.pis_cst:
strErro = 'Produtos e Servicos: %s, Qtde: %s - CST do PIS\n' % (inv_line.product_id.name,inv_line.quantity)
if not inv_line.cofins_cst:
strErro = 'Produtos e Servicos: %s, Qtde: %s - CST do COFINS\n' % (inv_line.product_id.name,inv_line.quantity)
#Transportadora
if inv.carrier_id:
if not inv.carrier_id.partner_id.legal_name:
strErro = 'Transportadora - Razão Social\n'
if not inv.carrier_id.partner_id.cnpj_cpf:
strErro = 'Transportadora - CNPJ/CPF\n'
#Dados do Veiculo
if inv.vehicle_id:
if not inv.vehicle_id.plate:
strErro = 'Transportadora / Veículo - Placa\n'
if not inv.vehicle_id.plate.state_id.code:
strErro = 'Transportadora / Veículo - UF da Placa\n'
if not inv.vehicle_id.rntc_code:
strErro = 'Transportadora / Veículo - RNTC\n'
if inv.number_of_packages:
if not inv.weight_net:
strErro = 'Totais - Peso Liquido\n'
if not inv.weight:
strErro = 'Totais - Peso Bruto\n'
if strErro:
raise osv.except_osv(_('Error !'),_("Validação da Nota fiscal:\n '%s'") % (strErro,))
return True
def nfe_export_txt(self, cr, uid, ids, context=False):
StrFile = ''
StrNF = 'NOTA FISCAL|%s|\n' % len(ids)
StrFile = StrNF
for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}):
#Endereço do company
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
#nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0')
#nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0')
#nfe_key += re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or '')
#nfe_key += inv.fiscal_document_id.code
#nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0')
#nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0')
#fe_key += unicode('1').strip().rjust(1, u'0') # Homologação
#nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0')
#nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0')
StrA = 'A|%s|%s|\n' % ('2.00', '')
StrFile += StrA
StrRegB = {
'cUF': company_addr_default.state_id.ibge_code,
'cNF': '',
'NatOp': normalize('NFKD',unicode(inv.cfop_id.small_name or '')).encode('ASCII','ignore'),
'intPag': '2',
'mod': inv.fiscal_document_id.code,
'serie': inv.document_serie_id.code,
'nNF': inv.internal_number or '',
'dEmi': inv.date_invoice or '',
'dSaiEnt': inv.date_invoice or '',
'hSaiEnt': '',
'tpNF': '',
'cMunFG': ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code),
'TpImp': '1',
'TpEmis': '1',
'cDV': '',
'tpAmb': '2',
'finNFe': '1',
'procEmi': '0',
'VerProc': '2.0.4',
'dhCont': '',
'xJust': '',
}
if inv.cfop_id.type in ("input"):
StrRegB['tpNF'] = '0'
else:
StrRegB['tpNF'] = '1'
StrB = 'B|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegB['cUF'], StrRegB['cNF'], StrRegB['NatOp'], StrRegB['intPag'],
StrRegB['mod'], StrRegB['serie'], StrRegB['nNF'], StrRegB['dEmi'], StrRegB['dSaiEnt'],
StrRegB['hSaiEnt'], StrRegB['tpNF'], StrRegB['cMunFG'], StrRegB['TpImp'], StrRegB['TpEmis'],
StrRegB['cDV'], StrRegB['tpAmb'], StrRegB['finNFe'], StrRegB['procEmi'], StrRegB['VerProc'],
StrRegB['dhCont'], StrRegB['xJust'])
StrFile += StrB
StrRegC = {
'XNome': normalize('NFKD',unicode(inv.company_id.partner_id.legal_name or '')).encode('ASCII','ignore'),
'XFant': normalize('NFKD',unicode(inv.company_id.partner_id.name or '')).encode('ASCII','ignore'),
'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_est or ''),
'IEST': '',
'IM': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_mun or ''),
'CNAE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.cnae_main or ''),
'CRT': inv.company_id.fiscal_type or '',
}
StrC = 'C|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC['XNome'], StrRegC['XFant'], StrRegC['IE'], StrRegC['IEST'],
StrRegC['IM'],StrRegC['CNAE'],StrRegC['CRT'])
StrFile += StrC
if inv.company_id.partner_id.tipo_pessoa == 'J':
StrC02 = 'C02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or ''))
else:
StrC02 = 'C02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or ''))
StrFile += StrC02
StrRegC05 = {
'XLgr': normalize('NFKD',unicode(company_addr_default.street or '')).encode('ASCII','ignore'),
'Nro': company_addr_default.number or '',
'Cpl': normalize('NFKD',unicode(company_addr_default.street2 or '')).encode('ASCII','ignore'),
'Bairro': normalize('NFKD',unicode(company_addr_default.district or 'Sem Bairro')).encode('ASCII','ignore'),
'CMun': '%s%s' % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code),
'XMun': normalize('NFKD',unicode(company_addr_default.city_id.name or '')).encode('ASCII','ignore'),
'UF': company_addr_default.state_id.code or '',
'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.zip or '').replace(' ','')),
'cPais': company_addr_default.country_id.bc_code or '',
'xPais': normalize('NFKD',unicode(company_addr_default.country_id.name or '')).encode('ASCII','ignore'),
'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.phone or '').replace(' ','')),
}
StrC05 = 'C05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC05['XLgr'], StrRegC05['Nro'], StrRegC05['Cpl'], StrRegC05['Bairro'],
StrRegC05['CMun'], StrRegC05['XMun'], StrRegC05['UF'], StrRegC05['CEP'],
StrRegC05['cPais'], StrRegC05['xPais'], StrRegC05['fone'])
StrFile += StrC05
StrRegE = {
'xNome': normalize('NFKD',unicode(inv.partner_id.legal_name or '')).encode('ASCII','ignore'),
'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.inscr_est or ''),
'ISUF': '',
'email': inv.partner_id.email or '',
}
StrE = 'E|%s|%s|%s|%s|\n' % (StrRegE['xNome'], StrRegE['IE'], StrRegE['ISUF'], StrRegE['email'])
StrFile += StrE
if inv.partner_id.tipo_pessoa == 'J':
StrE0 = 'E02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
else:
StrE0 = 'E03|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrFile += StrE0
StrRegE05 = {
'xLgr': normalize('NFKD',unicode(inv.address_invoice_id.street or '')).encode('ASCII','ignore'),
'nro': normalize('NFKD',unicode(inv.address_invoice_id.number or '')).encode('ASCII','ignore'),
'xCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD',unicode(inv.address_invoice_id.street2 or '' )).encode('ASCII','ignore')),
'xBairro': normalize('NFKD',unicode(inv.address_invoice_id.district or 'Sem Bairro')).encode('ASCII','ignore'),
'cMun': ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.city_id.ibge_code),
'xMun': normalize('NFKD',unicode(inv.address_invoice_id.city_id.name or '')).encode('ASCII','ignore'),
'UF': inv.address_invoice_id.state_id.code,
'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.zip or '').replace(' ','')),
'cPais': inv.address_invoice_id.country_id.bc_code,
'xPais': normalize('NFKD',unicode(inv.address_invoice_id.country_id.name or '')).encode('ASCII','ignore'),
'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.phone or '').replace(' ','')),
}
StrE05 = 'E05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegE05['xLgr'], StrRegE05['nro'], StrRegE05['xCpl'], StrRegE05['xBairro'],
StrRegE05['cMun'], StrRegE05['xMun'], StrRegE05['UF'], StrRegE05['CEP'],
StrRegE05['cPais'],StrRegE05['xPais'], StrRegE05['fone'],)
StrFile += StrE05
if inv.partner_shipping_id:
if inv.address_invoice_id != inv.partner_shipping_id:
StrRegG = {
'XLgr': normalize('NFKD',unicode(inv.partner_shipping_id.street or '',)).encode('ASCII','ignore'),
'Nro': normalize('NFKD',unicode(inv.partner_shipping_id.number or '')).encode('ASCII','ignore'),
'XCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD',unicode(inv.partner_shipping_id.street2 or '' )).encode('ASCII','ignore')),
'XBairro': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD',unicode(inv.partner_shipping_id.district or 'Sem Bairro' )).encode('ASCII','ignore')),
'CMun': ('%s%s') % (inv.partner_shipping_id.state_id.ibge_code, inv.partner_shipping_id.city_id.ibge_code),
'XMun': normalize('NFKD',unicode(inv.partner_shipping_id.city_id.name or '')).encode('ASCII','ignore'),
'UF': inv.address_invoice_id.state_id.code,
}
StrG = 'G|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegG['XLgr'],StrRegG['Nro'],StrRegG['XCpl'],StrRegG['XBairro'],StrRegG['CMun'],StrRegG['XMun'],StrRegG['UF'])
StrFile += StrG
if inv.partner_id.tipo_pessoa == 'J':
StrG0 = 'G02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
else:
StrG0 = 'G02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrFile += StrG0
i = 0
for inv_line in inv.invoice_line:
i += 1
StrH = 'H|%s||\n' % (i)
StrFile += StrH
StrRegI = {
'CProd': normalize('NFKD',unicode(inv_line.product_id.code or '',)).encode('ASCII','ignore'),
'CEAN': inv_line.product_id.ean13 or '',
'XProd': normalize('NFKD',unicode(inv_line.product_id.name or '')).encode('ASCII','ignore'),
'NCM': re.sub('[%s]' % re.escape(string.punctuation), '', inv_line.product_id.property_fiscal_classification.name or ''),
'EXTIPI': '',
'CFOP': inv_line.cfop_id.code,
'UCom': normalize('NFKD',unicode(inv_line.uos_id.name or '',)).encode('ASCII','ignore'),
'QCom': str("%.4f" % inv_line.quantity),
'VUnCom': str("%.2f" % (inv_line.price_unit * (1-(inv_line.discount or 0.0)/100.0))),
'VProd': str("%.2f" % inv_line.price_total),
'CEANTrib': '',
'UTrib': inv_line.uos_id.name,
'QTrib': str("%.4f" % inv_line.quantity),
'VUnTrib': str("%.2f" % inv_line.price_unit),
'VFrete': '',
'VSeg': '',
'VDesc': '',
'vOutro': '',
'indTot': '1',
'xPed': '',
'nItemPed': '',
}
if inv_line.product_id.code:
StrRegI['CProd'] = inv_line.product_id.code
else:
StrRegI['CProd'] = unicode(i).strip().rjust(4, u'0')
#No OpenERP já traz o valor unitário como desconto
#if inv_line.discount > 0:
# StrRegI['VDesc'] = str("%.2f" % (inv_line.quantity * (inv_line.price_unit * (1-(inv_line.discount or 0.0)/100.0))))
StrI = 'I|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegI['CProd'], StrRegI['CEAN'], StrRegI['XProd'], StrRegI['NCM'],
StrRegI['EXTIPI'], StrRegI['CFOP'], StrRegI['UCom'], StrRegI['QCom'],
StrRegI['VUnCom'], StrRegI['VProd'], StrRegI['CEANTrib'], StrRegI['UTrib'],
StrRegI['QTrib'], StrRegI['VUnTrib'], StrRegI['VFrete'], StrRegI['VSeg'],
StrRegI['VDesc'], StrRegI['vOutro'], StrRegI['indTot'], StrRegI['xPed'],
StrRegI['nItemPed'])
StrFile += StrI
StrM = 'M|\n'
StrFile += StrM
StrN = 'N|\n'
#TODO - Fazer alteração para cada tipo de cst
StrFile += StrN
StrRegN02 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN02 = 'N02|%s|%s|%s|%s|%s|%s|\n' % (StrRegN02['Orig'], StrRegN02['CST'], StrRegN02['ModBC'], StrRegN02['VBC'], StrRegN02['PICMS'],
StrRegN02['VICMS'])
StrRegN03 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': '4', #TODO
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN03 = 'N03|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN03['Orig'], StrRegN03['CST'], StrRegN03['ModBC'], StrRegN03['VBC'], StrRegN03['PICMS'],
StrRegN03['VICMS'], StrRegN03['ModBCST'], StrRegN03['PMVAST'], StrRegN03['PRedBCST'], StrRegN03['VBCST'],
StrRegN03['PICMSST'], StrRegN03['VICMSST'])
StrRegN04 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN04 = 'N04|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN04['Orig'], StrRegN04['CST'], StrRegN04['ModBC'], StrRegN04['PRedBC'], StrRegN04['VBC'], StrRegN04['PICMS'],
StrRegN04['VICMS'])
StrRegN06 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'vICMS': str("%.2f" % inv_line.icms_value),
'motDesICMS': '9', #FIXME
}
StrN06 = 'N06|%s|%s|%s|%s|\n' % (StrRegN06['Orig'], StrRegN06['CST'], StrRegN06['vICMS'], StrRegN06['motDesICMS'])
StrRegN09 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': '4', #TODO
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN09 = 'N09|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN09['Orig'], StrRegN09['CST'], StrRegN09['ModBC'], StrRegN09['PRedBC'], StrRegN09['VBC'], StrRegN09['PICMS'], StrRegN09['VICMS'], StrRegN09['ModBCST'], StrRegN09['PMVAST'], StrRegN09['PRedBCST'], StrRegN09['VBCST'], StrRegN09['PICMSST'], StrRegN09['VICMSST'])
StrRegN08 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'VBCST': str("%.2f" % 0.00),
'VICMSST': str("%.2f" % 0.00),
}
StrN08 = 'N08|%s|%s|%s|%s|\n' % (StrRegN08['Orig'], StrRegN08['CST'], StrRegN08['VBCST'], StrRegN08['VICMSST'])
#TODO - Fazer alteração para cada tipo de cst
if inv_line.icms_cst in ('00'):
StrFile += StrN02
if inv_line.icms_cst in ('20'):
StrFile += StrN04
if inv_line.icms_cst in ('10'):
StrFile += StrN03
if inv_line.icms_cst in ('40', '41', '50', '51'):
StrFile += StrN06
if inv_line.icms_cst in ('60'):
StrFile += StrN08
if inv_line.icms_cst in ('70'):
StrFile += StrN09
StrRegO = {
'ClEnq': '',
'CNPJProd': '',
'CSelo': '',
'QSelo': '',
'CEnq': '999',
}
StrO = 'O|%s|%s|%s|%s|%s|\n' % (StrRegO['ClEnq'], StrRegO['CNPJProd'], StrRegO['CSelo'], StrRegO['QSelo'], StrRegO['CEnq'])
StrFile += StrO
if inv_line.ipi_percent > 0:
StrRegO07 = {
'CST': inv_line.ipi_cst,
'VIPI': str("%.2f" % inv_line.ipi_value),
}
StrO07 = 'O07|%s|%s|\n' % (StrRegO07['CST'], StrRegO07['VIPI'])
StrFile += StrO07
if inv_line.ipi_type == 'percent':
StrRegO10 = {
'VBC': str("%.2f" % inv_line.ipi_base),
'PIPI': str("%.2f" % inv_line.ipi_percent),
}
StrO1 = 'O10|%s|%s|\n' % (StrRegO10['VBC'], StrRegO10['PIPI'])
if inv_line.ipi_type == 'quantity':
pesol = 0
if inv_line.product_id:
pesol = inv_line.product_id.weight_net
StrRegO11 = {
'QUnid': str("%.4f" % (inv_line.quantity * pesol)),
'VUnid': str("%.4f" % inv_line.ipi_percent),
}
StrO1 = 'O11|%s|%s|\n' % (StrRegO11['QUnid'], StrRegO11['VUnid'])
StrFile += StrO1
else:
StrO1 = 'O08|%s|\n' % inv_line.ipi_cst
StrFile += StrO1
StrQ = 'Q|\n'
StrFile += StrQ
if inv_line.pis_percent > 0:
StrRegQ02 = {
'CST': inv_line.pis_cst,
'VBC': str("%.2f" % inv_line.pis_base),
'PPIS': str("%.2f" % inv_line.pis_percent),
'VPIS': str("%.2f" % inv_line.pis_value),
}
StrQ02 = ('Q02|%s|%s|%s|%s|\n') % (StrRegQ02['CST'], StrRegQ02['VBC'], StrRegQ02['PPIS'], StrRegQ02['VPIS'])
else:
StrQ02 = 'Q04|%s|\n' % inv_line.pis_cst
StrFile += StrQ02
StrQ = 'S|\n'
StrFile += StrQ
if inv_line.cofins_percent > 0:
StrRegS02 = {
'CST': inv_line.cofins_cst,
'VBC': str("%.2f" % inv_line.cofins_base),
'PCOFINS': str("%.2f" % inv_line.cofins_percent),
'VCOFINS': str("%.2f" % inv_line.cofins_value),
}
StrS02 = ('S02|%s|%s|%s|%s|\n') % (StrRegS02['CST'], StrRegS02['VBC'], StrRegS02['PCOFINS'], StrRegS02['VCOFINS'])
else:
StrS02 = 'S04|%s|\n' % inv_line.cofins_cst
StrFile += StrS02
StrW = 'W|\n'
StrFile += StrW
StrRegW02 = {
'vBC': str("%.2f" % inv.icms_base),
'vICMS': str("%.2f" % inv.icms_value),
'vBCST': str("%.2f" % inv.icms_st_base),
'vST': str("%.2f" % inv.icms_st_value),
'vProd': str("%.2f" % inv.amount_untaxed),
'vFrete': str("%.2f" % inv.amount_freight),
'vSeg': str("%.2f" % inv.amount_insurance),
'vDesc': '0.00',
'vII': '0.00',
'vIPI': str("%.2f" % inv.ipi_value),
'vPIS': str("%.2f" % inv.pis_value),
'vCOFINS': str("%.2f" % inv.cofins_value),
'vOutro': str("%.2f" % inv.amount_costs),
'vNF': str("%.2f" % inv.amount_total),
}
StrW02 = 'W02|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegW02['vBC'], StrRegW02['vICMS'], StrRegW02['vBCST'], StrRegW02['vST'], StrRegW02['vProd'],
StrRegW02['vFrete'], StrRegW02['vSeg'], StrRegW02['vDesc'], StrRegW02['vII'], StrRegW02['vIPI'],
StrRegW02['vPIS'], StrRegW02['vCOFINS'], StrRegW02['vOutro'], StrRegW02['vNF'])
StrFile += StrW02
# Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0)
StrRegX0 = '0'
if inv.incoterm.code == 'FOB':
StrRegX0 = '0'
if inv.incoterm.code == 'CIF':
StrRegX0 = '1'
StrX = 'X|%s|\n' % (StrRegX0)
StrFile += StrX
StrRegX03 = {
'XNome': '',
'IE': '',
'XEnder': '',
'UF': '',
'XMun': '',
}
StrX0 = ''
if inv.carrier_id:
#Endereço da transportadora
carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0]
if inv.carrier_id.partner_id.legal_name:
StrRegX03['XNome'] = normalize('NFKD',unicode(inv.carrier_id.partner_id.legal_name or '')).encode('ASCII','ignore')
else:
StrRegX03['XNome'] = normalize('NFKD',unicode(inv.carrier_id.partner_id.name or '')).encode('ASCII','ignore')
StrRegX03['IE'] = inv.carrier_id.partner_id.inscr_est or ''
StrRegX03['xEnder'] = normalize('NFKD',unicode(carrier_addr_default.street or '')).encode('ASCII','ignore')
StrRegX03['UF'] = carrier_addr_default.state_id.code or ''
if carrier_addr_default.city_id:
StrRegX03['xMun'] = normalize('NFKD',unicode(carrier_addr_default.city_id.name or '')).encode('ASCII','ignore')
if inv.carrier_id.partner_id.tipo_pessoa == 'J':
StrX0 = 'X04|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or ''))
else:
StrX0 = 'X05|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or ''))
StrX03 = 'X03|%s|%s|%s|%s|%s|\n' % (StrRegX03['XNome'], StrRegX03['IE'], StrRegX03['XEnder'], StrRegX03['UF'], StrRegX03['XMun'])
StrFile += StrX03
StrFile += StrX0
StrRegX18 = {
'Placa': '',
'UF': '',
'RNTC': '',
}
if inv.vehicle_id:
StrRegX18['Placa'] = inv.vehicle_id.plate or ''
StrRegX18['UF'] = inv.vehicle_id.plate.state_id.code or ''
StrRegX18['RNTC'] = inv.vehicle_id.rntc_code or ''
StrX18 = 'X18|%s|%s|%s|\n' % (StrRegX18['Placa'], StrRegX18['UF'], StrRegX18['RNTC'])
StrFile += StrX18
StrRegX26 = {
'QVol': '',
'Esp': '',
'Marca': '',
'NVol': '',
'PesoL': '',
'PesoB': '',
}
if inv.number_of_packages:
StrRegX26['QVol'] = inv.number_of_packages
StrRegX26['Esp'] = 'Volume' #TODO
StrRegX26['Marca']
StrRegX26['NVol']
StrRegX26['PesoL'] = str("%.3f" % inv.weight_net)
StrRegX26['PesoB'] = str("%.3f" % inv.weight)
StrX26 = 'X26|%s|%s|%s|%s|%s|%s|\n' % (StrRegX26['QVol'], StrRegX26['Esp'], StrRegX26['Marca'], StrRegX26['NVol'], StrRegX26['PesoL'], StrRegX26['PesoB'])
StrFile += StrX26
StrRegZ = {
'InfAdFisco': '',
'InfCpl': normalize('NFKD',unicode(inv.comment or '')).encode('ASCII','ignore'),
}
StrZ = 'Z|%s|%s|\n' % (StrRegZ['InfAdFisco'], StrRegZ['InfCpl'])
StrFile += StrZ
self.write(cr, uid, [inv.id], {'nfe_export_date': datetime.now()})
return unicode(StrFile.encode('utf-8'))
def nfe_export_xml(self, cr, uid, ids, context=False):
nfeProc = Element('nfeProc', {'versao': '2.00', 'xmlns': 'http://www.portalfiscal.inf.br/nfe' })
for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}):
#Endereço do company
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
#MontaChave da Nota Fiscal Eletronica
nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0')
nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0')
nfe_key += '08478495000170' # unicode(inv.company_id.partner_id.cnpj_cpf).strip().rjust(14, u'0')
nfe_key += inv.fiscal_document_id.code
nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0')
nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0')
nfe_key += unicode('1').strip().rjust(1, u'0') # Homologação
nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0')
nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0')
NFe = SubElement(nfeProc, 'NFe', { 'xmlns': 'http://www.portalfiscal.inf.br/nfe' })
infNFe = SubElement(NFe, 'infNFe', {'versao': '2.00', 'Id': nfe_key })
#Dados da identificação da nota fiscal
ide = SubElement(infNFe, 'ide')
ide_cUF = SubElement(ide, 'cUF')
ide_cUF.text = company_addr_default.state_id.ibge_code
ide_cNF = SubElement(ide, 'cNF')
ide_cNF.text = unicode(inv.internal_number).strip().rjust(8, u'0')
ide_natOp = SubElement(ide, 'natOp')
ide_natOp.text = inv.cfop_id.name
ide_indPag = SubElement(ide, 'indPag')
ide_indPag.text = "2"
ide_mod = SubElement(ide, 'mod')
ide_mod.text = inv.fiscal_document_id.code
ide_serie = SubElement(ide, 'serie')
ide_serie.text = inv.document_serie_id.code
ide_nNF = SubElement(ide, 'nNF')
ide_nNF.text = inv.internal_number
ide_dEmi = SubElement(ide, 'dEmi')
ide_dEmi.text = inv.date_invoice
ide_dSaiEnt = SubElement(ide, 'dSaiEnt')
ide_dSaiEnt.text = inv.date_invoice
ide_tpNF = SubElement(ide, 'tpNF')
if inv.type in ("out_invoice", "in_refuld"):
ide_tpNF.text = '0'
else:
ide_tpNF.text = '1'
ide_cMunFG = SubElement(ide, 'cMunFG')
ide_cMunFG.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code)
ide_tpImp = SubElement(ide, 'tpImp')
ide_tpImp.text = "1"
ide_tpEmis = SubElement(ide, 'tpEmis')
ide_tpEmis.text = "1"
ide_cDV = SubElement(ide, 'cDV')
ide_cDV.text = self.nfe_dv(nfe_key)
#Tipo de ambiente: 1 - Produção; 2 - Homologação
ide_tpAmb = SubElement(ide, 'tpAmb')
ide_tpAmb.text = "2"
#Finalidade da emissão da NF-e: 1 - NFe normal 2 - NFe complementar 3 - NFe de ajuste
ide_finNFe = SubElement(ide, 'finNFe')
ide_finNFe.text = "1"
ide_procEmi = SubElement(ide, 'procEmi')
ide_procEmi.text = "0"
ide_verProc = SubElement(ide, 'verProc')
ide_verProc.text = "2.0.4"
emit = SubElement(infNFe, 'emit')
emit_CNPJ = SubElement(emit, 'CNPJ')
emit_CNPJ.text = inv.company_id.partner_id.cnpj_cpf
emit_xNome = SubElement(emit, 'xNome')
emit_xNome.text = inv.company_id.partner_id.legal_name
emit_xFant = SubElement(emit, 'xFant')
emit_xFant.text = inv.company_id.partner_id.name
enderEmit = SubElement(emit, 'enderEmit')
enderEmit_xLgr = SubElement(enderEmit, 'xLgr')
enderEmit_xLgr.text = company_addr_default.street
enderEmit_nro = SubElement(enderEmit, 'nro')
enderEmit_nro.text = company_addr_default.number
enderEmit_xBairro = SubElement(enderEmit, 'xBairro')
enderEmit_xBairro.text = company_addr_default.district
enderEmit_cMun = SubElement(enderEmit, 'cMun')
enderEmit_cMun.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.city_id.ibge_code)
enderEmit_xMun = SubElement(enderEmit, 'xMun')
enderEmit_xMun.text = company_addr_default.city_id.name
enderEmit_UF = SubElement(enderEmit, 'UF')
enderEmit_UF.text = company_addr_default.state_id.code
enderEmit_CEP = SubElement(enderEmit, 'CEP')
enderEmit_CEP.text = company_addr_default.zip
enderEmit_cPais = SubElement(enderEmit, 'cPais')
enderEmit_cPais.text = company_addr_default.country_id.bc_code
enderEmit_xPais = SubElement(enderEmit, 'xPais')
enderEmit_xPais.text = company_addr_default.country_id.name
enderEmit_fone = SubElement(enderEmit, 'fone')
enderEmit_fone.text = company_addr_default.phone
emit_IE = SubElement(emit, 'IE')
emit_IE.text = inv.company_id.partner_id.inscr_est
emit_IEST = SubElement(emit, 'IEST')
emit_IEST.text = '0000000000' #FIXME
emit_IM = SubElement(emit, 'IM')
emit_IM.text = '0000000000' #FIXME
emit_CNAE = SubElement(emit, 'CNAE')
emit_CNAE.text = '0111301' #FIXME
emit_CRT = SubElement(emit, 'CRT')
emit_CRT.text = '3' #FIXME
dest = SubElement(infNFe, 'dest')
dest_CNPJ = SubElement(dest, 'CNPJ')
dest_CNPJ.text = inv.partner_id.cnpj_cpf
dest_xNome = SubElement(dest, 'xNome')
dest_xNome.text = inv.partner_id.legal_name
enderDest = SubElement(dest, 'enderDest')
enderDest_xLgr = SubElement(enderDest, 'xLgr')
enderDest_xLgr.text = inv.address_invoice_id.street
enderDest_nro = SubElement(enderDest, 'nro')
enderDest_nro.text = inv.address_invoice_id.number
enderDest_xBairro = SubElement(enderDest, 'xBairro')
enderDest_xBairro.text = inv.address_invoice_id.district
enderDest_cMun = SubElement(enderDest, 'cMun')
enderDest_cMun.text = ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.city_id.ibge_code)
enderDest_xMun = SubElement(enderDest, 'xMun')
enderDest_xMun.text = inv.address_invoice_id.city_id.name
enderDest_UF = SubElement(enderDest, 'UF')
enderDest_UF.text = inv.address_invoice_id.state_id.code
enderDest_CEP = SubElement(enderDest, 'CEP')
enderDest_CEP.text = inv.address_invoice_id.zip
enderDest_cPais = SubElement(enderDest, 'cPais')
enderDest_cPais.text = inv.address_invoice_id.country_id.bc_code
enderDest_xPais = SubElement(enderDest, 'xPais')
enderDest_xPais.text = inv.address_invoice_id.country_id.name
enderDest_fone = SubElement(enderDest, 'fone')
enderDest_fone.text = inv.address_invoice_id.phone
dest_IE = SubElement(dest, 'IE')
dest_IE.text = inv.partner_id.inscr_est
for inv_line in inv.invoice_line:
i =+ 1
det = SubElement(infNFe, 'det', {'nItem': str(i)})
det_prod = SubElement(det, 'prod')
prod_cProd = SubElement(det_prod, 'cProd')
if inv_line.product_id.code:
prod_cProd.text = inv_line.product_id.code
else:
prod_cProd.text = unicode(i).strip().rjust(4, u'0')
prod_cEAN = SubElement(det_prod, 'cEAN')
prod_cEAN.text = inv_line.product_id.ean13
prod_xProd = SubElement(det_prod, 'xProd')
prod_xProd.text = inv_line.product_id.name
prod_NCM = SubElement(det_prod, 'NCM')
prod_NCM.text = inv_line.product_id.property_fiscal_classification.name
prod_CFOP = SubElement(det_prod, 'CFOP')
prod_CFOP.text = inv_line.cfop_id.code
prod_uCom = SubElement(det_prod, 'uCom')
prod_uCom.text = inv_line.uos_id.name
prod_qCom = SubElement(det_prod, 'qCom')
prod_qCom.text = str("%.4f" % inv_line.quantity)
prod_vUnCom = SubElement(det_prod, 'vUnCom')
prod_vUnCom.text = str("%.4f" % inv_line.price_unit)
prod_vProd = SubElement(det_prod, 'vProd')
prod_vProd.text = str("%.2f" % inv_line.price_subtotal)
prod_cEANTrib = SubElement(det_prod, 'cEANTrib')
#prod_vProd.text(inv_line.total)
prod_uTrib = SubElement(det_prod, 'uTrib')
prod_uTrib.text = inv_line.uos_id.name
prod_qTrib = SubElement(det_prod, 'qTrib')
prod_qTrib.text = '0.0000' #TODO
prod_vUnTrib = SubElement(det_prod, 'vUnTrib')
prod_vUnTrib.text = '0.00' #TODO
prod_vFrete = SubElement(det_prod, 'vFrete')
prod_vFrete.text = '0.00' #TODO - Valor do Frete
prod_vSeg = SubElement(det_prod, 'vSeg')
prod_vSeg.text = '0.00' #TODO - Valor do seguro
prod_vDesc = SubElement(det_prod, 'vDesc')
prod_vDesc.text = str("%.2f" % inv_line.discount) #TODO
prod_vOutro = SubElement(det_prod, 'vOutro')
prod_vOutro.text = '0.0000' #TODO
prod_indTot = SubElement(det_prod, 'indTot')
prod_indTot.text = '1' #TODO
prod_imposto = SubElement(det, 'imposto')
imposto_icms = SubElement(prod_imposto, 'ICMS' ) # + inv_line.icms_cst)
imposto_icms_cst = SubElement(imposto_icms, 'ICMS%s' % (inv_line.icms_cst))
icms_orig = SubElement(imposto_icms_cst, 'orig')
icms_orig.text = inv_line.product_id.origin
icms_CST = SubElement(imposto_icms_cst, 'CST')
icms_CST.text = inv_line.icms_cst
icms_modBC = SubElement(imposto_icms_cst, 'modBC')
icms_modBC.text = '0' # TODO
icms_vBC = SubElement(imposto_icms_cst, 'vBC')
icms_vBC.text = str("%.2f" % inv_line.icms_base)
icms_pICMS = SubElement(imposto_icms_cst, 'pICMS')
icms_pICMS.text = str("%.2f" % inv_line.icms_percent)
icms_vICMS = SubElement(imposto_icms_cst, 'vICMS')
icms_vICMS.text = str("%.2f" % inv_line.icms_value)
imposto_ipi = SubElement(prod_imposto, 'IPI')
icms_cEnq = SubElement(imposto_ipi, 'cEnq')
icms_cEnq.text = '999'
#Imposto Não Tributado
ipi_IPINT = SubElement(imposto_ipi, 'IPINT')
ipi_CST = SubElement(ipi_IPINT, 'CST')
ipi_CST.text = inv_line.ipi_cst
imposto_pis = SubElement(prod_imposto, 'PIS')
pis_PISAliq = SubElement(imposto_pis, 'PISAliq')
pis_CST = SubElement(pis_PISAliq, 'CST')
pis_CST.text = inv_line.pis_cst
pis_vBC = SubElement(pis_PISAliq, 'vBC')
pis_vBC.text = str("%.2f" % inv_line.pis_base)
pis_pPIS = SubElement(pis_PISAliq, 'pPIS')
pis_pPIS.text = str("%.2f" % inv_line.pis_percent)
pis_vPIS = SubElement(pis_PISAliq, 'vPIS')
pis_vPIS.text = str("%.2f" % inv_line.pis_value)
imposto_cofins = SubElement(prod_imposto, 'COFINS')
cofins_COFINSAliq = SubElement(imposto_cofins, 'COFINSAliq')
cofins_CST = SubElement(cofins_COFINSAliq, 'CST')
cofins_CST.text = inv_line.pis_cst
cofins_vBC = SubElement(cofins_COFINSAliq, 'vBC')
cofins_vBC.text = str("%.2f" % inv_line.cofins_base)
cofins_pCOFINS = SubElement(cofins_COFINSAliq, 'pCOFINS')
cofins_pCOFINS.text = str("%.2f" % inv_line.cofins_percent)
cofins_vCOFINS = SubElement(cofins_COFINSAliq, 'vCOFINS')
cofins_vCOFINS.text = str("%.2f" % inv_line.cofins_value)
total = SubElement(infNFe, 'total')
total_ICMSTot = SubElement(total, 'ICMSTot')
ICMSTot_vBC = SubElement(total_ICMSTot, 'vBC')
ICMSTot_vBC.text = str("%.2f" % inv.icms_base)
ICMSTot_vICMS = SubElement(total_ICMSTot, 'vICMS')
ICMSTot_vICMS.text = str("%.2f" % inv.icms_value)
ICMSTot_vBCST = SubElement(total_ICMSTot, 'vBCST')
ICMSTot_vBCST.text = '0.00' # TODO
ICMSTot_vST = SubElement(total_ICMSTot, 'vST')
ICMSTot_vST.text = '0.00' # TODO
ICMSTot_vProd = SubElement(total_ICMSTot, 'vProd')
ICMSTot_vProd.text = str("%.2f" % inv.amount_untaxed)
ICMSTot_vFrete = SubElement(total_ICMSTot, 'vFrete')
ICMSTot_vFrete.text = '0.00' # TODO
ICMSTot_vSeg = SubElement(total_ICMSTot, 'vSeg')
ICMSTot_vSeg.text = str("%.2f" % inv.amount_insurance)
ICMSTot_vDesc = SubElement(total_ICMSTot, 'vDesc')
ICMSTot_vDesc.text = '0.00' # TODO
ICMSTot_II = SubElement(total_ICMSTot, 'vII')
ICMSTot_II.text = '0.00' # TODO
ICMSTot_vIPI = SubElement(total_ICMSTot, 'vIPI')
ICMSTot_vIPI.text = str("%.2f" % inv.ipi_value)
ICMSTot_vPIS = SubElement(total_ICMSTot, 'vPIS')
ICMSTot_vPIS.text = str("%.2f" % inv.pis_value)
ICMSTot_vCOFINS = SubElement(total_ICMSTot, 'vCOFINS')
ICMSTot_vCOFINS.text = str("%.2f" % inv.cofins_value)
ICMSTot_vOutro = SubElement(total_ICMSTot, 'vOutro')
ICMSTot_vOutro.text = str("%.2f" % inv.amount_costs)
ICMSTot_vNF = SubElement(total_ICMSTot, 'vNF')
ICMSTot_vNF.text = str("%.2f" % inv.amount_total)
transp = SubElement(infNFe, 'transp')
# Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0)
transp_modFrete = SubElement(transp, 'modFrete')
transp_modFrete.text = '0' #TODO
if inv.carrier_id:
#Endereço do company
carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0]
transp_transporta = SubElement(transp, 'transporta')
if inv.carrier_id.partner_id.tipo_pessoa == 'J':
transporta_CNPJ = SubElement(transp_transporta, 'CNPJ')
transporta_CNPJ.text = inv.carrier_id.partner_id.cnpj_cpf
else:
transporta_CPF = SubElement(transp_transporta, 'CPF')
transporta_CPF.text = inv.carrier_id.partner_id.cnpj_cpf
transporta_xNome = SubElement(transp_transporta, 'xNome')
if inv.carrier_id.partner_id.legal_name:
transporta_xNome.text = inv.carrier_id.partner_id.legal_name
else:
transporta_xNome.text = inv.carrier_id.partner_id.name
transporta_IE = SubElement(transp_transporta, 'IE')
transporta_IE.text = inv.carrier_id.partner_id.inscr_est
transporta_xEnder = SubElement(transp_transporta, 'xEnder')
transporta_xEnder.text = carrier_addr_default.street
transporta_xMun = SubElement(transp_transporta, 'xMun')
transporta_xMun.text = ('%s%s') % (carrier_addr_default.state_id.ibge_code, carrier_addr_default.city_id.ibge_code)
transporta_UF = SubElement(transp_transporta, 'UF')
transporta_UF.text = carrier_addr_default.state_id.code
if inv.number_of_packages:
transp_vol = SubElement(transp, 'vol')
vol_qVol = SubElement(transp_vol, 'qVol')
vol_qVol.text = inv.number_of_packages
vol_esp = SubElement(transp_vol, 'esp')
vol_esp.text = 'volume' #TODO
vol_pesoL = SubElement(transp_vol, 'pesoL')
vol_pesoL.text = inv.weight_net
vol_pesoB = SubElement(transp_vol, 'pesoB')
vol_pesoB.text = inv.weight
xml_string = ElementTree.tostring(nfeProc, 'utf-8')
return xml_string
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False, fiscal_operation_category_id=False):
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id, date_invoice, payment_term, partner_bank_id, company_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
if not partner_id or not company_id or not result['value']['address_invoice_id']:
return result
obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
from_country = company_addr_default.country_id.id
from_state = company_addr_default.state_id.id
obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
partner_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [result['value']['address_invoice_id']])[0]
to_country = partner_addr_default.country_id.id
to_state = partner_addr_default.state_id.id
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id','=',company_id), ('from_country','=',from_country),('from_state','=',from_state),('to_country','=',to_country),('to_state','=',to_state),('use_invoice','=',True),('partner_fiscal_type_id','=',partner_fiscal_type),('fiscal_operation_category_id','=',fiscal_operation_category_id)])
if not fsc_pos_id:
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id','=',company_id), ('from_country','=',from_country),('from_state','=',from_state),('to_country','=',to_country),('to_state','=',to_state),('use_invoice','=',True),('fiscal_operation_category_id','=',fiscal_operation_category_id)])
if fsc_pos_id:
obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
#for inv in self.browse(cr,uid,ids):
# for line in inv.invoice_line:
# line.cfop_id = obj_foperation.cfop_id.id
#line.write(cr, uid, line.id, {'cfop_id': obj_foperation.cfop_id.id})
return result
def onchange_company_id(self, cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id, fiscal_operation_category_id=False):
result = super(account_invoice, self).onchange_company_id(cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
if not partner_id or not company_id or not address_invoice_id:
return result
obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
from_country = company_addr_default.country_id.id
from_state = company_addr_default.state_id.id
obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
if obj_partner.property_account_position.id:
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
return result
partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [address_invoice_id])[0]
to_country = partner_addr_invoice.country_id.id
to_state = partner_addr_invoice.state_id.id
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id','=',company_id), ('from_country','=',from_country),('from_state','=',from_state),('to_country','=',to_country),('to_state','=',to_state),('use_invoice','=',True),('partner_fiscal_type_id','=',partner_fiscal_type),('fiscal_operation_category_id','=',fiscal_operation_category_id)])
if not fsc_pos_id:
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id','=',company_id), ('from_country','=',from_country),('from_state','=',from_state),('to_country','=',to_country),('to_state','=',to_state),('use_invoice','=',True),('fiscal_operation_category_id','=',fiscal_operation_category_id)])
if fsc_pos_id:
obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
for inv in self.browse(cr,uid,ids):
for line in inv.invoice_line:
line.cfop_id = obj_foperation.cfop_id.id
return result
def onchange_address_invoice_id(self, cr, uid, ids, cpy_id, ptn_id, ptn_invoice_id, fiscal_operation_category_id=False):
result = super(account_invoice, self).onchange_address_invoice_id(cr,uid,ids,cpy_id,ptn_id,ptn_invoice_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
if not ptn_id or not cpy_id or not ptn_invoice_id:
return result
obj_company = self.pool.get('res.company').browse(cr, uid, [cpy_id])[0]
company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
from_country = company_addr_default.country_id.id
from_state = company_addr_default.state_id.id
obj_partner = self.pool.get('res.partner').browse(cr, uid, [ptn_id])[0]
partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
if obj_partner.property_account_position.id:
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
return result
partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [ptn_invoice_id])[0]
to_country = partner_addr_invoice.country_id.id
to_state = partner_addr_invoice.state_id.id
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id','=', cpy_id), ('from_country','=',from_country),('from_state','=',from_state),('to_country','=',to_country),('to_state','=',to_state),('use_invoice','=',True),('partner_fiscal_type_id','=',partner_fiscal_type),('fiscal_operation_category_id','=',fiscal_operation_category_id)])
if not fsc_pos_id:
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id','=', cpy_id), ('from_country','=',from_country),('from_state','=',from_state),('to_country','=',to_country),('to_state','=',to_state),('use_invoice','=',True),('fiscal_operation_category_id','=',fiscal_operation_category_id)])
if fsc_pos_id:
obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
for inv in self.browse(cr,uid,ids):
for line in inv.invoice_line:
line.cfop_id = obj_foperation.cfop_id.id
return result
def onchange_cfop_id(self, cr, uid, ids, cfop_id):
if not cfop_id:
return False
for inv in self.browse(cr, uid, ids):
for inv_line in inv.invoice_line:
self.pool.get('account.invoice.line').write(cr, uid, inv_line.id, {'cfop_id': inv.fiscal_operation_id.cfop_id.id})
return {'value': {'cfop_id': cfop_id}}
# Instantiate to register the model with the ORM (pre-v7 OpenERP convention).
account_invoice()
class account_invoice_line(osv.osv):
    """Invoice line extended with Brazilian tax data (ICMS/IPI/PIS/COFINS)."""
    _inherit = 'account.invoice.line'

    def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Functional-field computer for the per-line totals and every
        Brazilian tax base/value/percentage, plus the CST codes taken from
        the line's fiscal operation (NCM-specific rows override generic ones).

        Returns a dict ``{line_id: {field_name: value}}`` covering all the
        ``multi='all'`` columns declared below.
        """
        res = {}
        tax_obj = self.pool.get('account.tax')
        fsc_op_line_obj = self.pool.get('l10n_br_account.fiscal.operation.line')
        cur_obj = self.pool.get('res.currency')
        for line in self.browse(cr, uid, ids):
            # Defaults: zero amounts and "exempt" CST codes per tax domain.
            res[line.id] = {
                'price_subtotal': 0.0,
                'price_total': 0.0,
                'icms_base': 0.0,
                'icms_base_other': 0.0,
                'icms_value': 0.0,
                'icms_percent': 0.0,
                'icms_percent_reduction': 0.0,
                'icms_st_value': 0.0,
                'icms_st_base': 0.0,
                'icms_st_percent': 0.0,
                'icms_st_mva': 0.0,
                'icms_st_base_other': 0.0,
                'icms_cst': '40',  # exempt when no ICMS tax applies
                'ipi_type': 'percent',
                'ipi_base': 0.0,
                'ipi_base_other': 0.0,
                'ipi_value': 0.0,
                'ipi_percent': 0.0,
                'ipi_cst': '53',  # exempt when no IPI tax applies
                'pis_base': 0.0,
                'pis_base_other': 0.0,
                'pis_value': 0.0,
                'pis_percent': 0.0,
                'pis_cst': '99',  # exempt when no PIS tax applies
                'cofins_base': 0.0,
                'cofins_base_other': 0.0,
                'cofins_value': 0.0,
                'cofins_percent': 0.0,
                'cofins_cst': '99',  # exempt when no COFINS tax applies
            }
            price = line.price_unit * (1-(line.discount or 0.0)/100.0)
            taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, address_id=line.invoice_id.address_invoice_id, partner=line.invoice_id.partner_id)
            icms_base = 0.0
            icms_base_other = 0.0
            icms_value = 0.0
            icms_percent = 0.0
            icms_percent_reduction = 0.0
            icms_st_value = 0.0
            icms_st_base = 0.0
            icms_st_percent = 0.0
            icms_st_mva = 0.0
            icms_st_base_other = 0.0
            icms_cst = ''
            ipi_type = ''
            ipi_base = 0.0
            ipi_base_other = 0.0
            ipi_value = 0.0
            ipi_percent = 0.0
            ipi_cst = ''
            pis_base = 0.0
            pis_base_other = 0.0
            pis_value = 0.0
            pis_percent = 0.0
            pis_cst = ''
            cofins_base = 0.0
            cofins_base_other = 0.0
            cofins_value = 0.0
            cofins_percent = 0.0
            cofins_cst = ''
            if line.fiscal_operation_id:
                # Generic CSTs first: operation rows with no fiscal classification.
                fiscal_operation_ids = fsc_op_line_obj.search(cr, uid, [('company_id','=',line.company_id.id),('fiscal_operation_id','=',line.fiscal_operation_id.id),('fiscal_classification_id','=',False)])
                for fo_line in fsc_op_line_obj.browse(cr, uid, fiscal_operation_ids):
                    if fo_line.tax_code_id.domain == 'icms':
                        icms_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'ipi':
                        ipi_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'pis':
                        pis_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'cofins':
                        cofins_cst = fo_line.cst_id.code
                if line.product_id:
                    # Rows matching the product's NCM classification override
                    # the generic CSTs found above.
                    fo_ids_ncm = fsc_op_line_obj.search(cr, uid, [('company_id','=',line.company_id.id),('fiscal_operation_id','=',line.fiscal_operation_id.id),('fiscal_classification_id','=',line.product_id.property_fiscal_classification.id)])
                    for fo_line_ncm in fsc_op_line_obj.browse(cr, uid, fo_ids_ncm):
                        if fo_line_ncm.tax_code_id.domain == 'icms':
                            icms_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'ipi':
                            ipi_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'pis':
                            pis_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'cofins':
                            cofins_cst = fo_line_ncm.cst_id.code
            # Accumulate per tax domain; percentages are stored as 0-100 values.
            for tax in taxes['taxes']:
                tax_brw = tax_obj.browse(cr, uid, tax['id'])
                if tax_brw.domain == 'icms':
                    icms_base += tax['total_base']
                    icms_base_other += taxes['total'] - tax['total_base']
                    icms_value += tax['amount']
                    icms_percent += tax_brw.amount * 100
                    icms_percent_reduction += tax_brw.base_reduction * 100
                if tax_brw.domain == 'ipi':
                    ipi_type = tax_brw.type
                    ipi_base += tax['total_base']
                    ipi_value += tax['amount']
                    ipi_percent += tax_brw.amount * 100
                if tax_brw.domain == 'pis':
                    pis_base += tax['total_base']
                    pis_base_other += taxes['total'] - tax['total_base']
                    pis_value += tax['amount']
                    pis_percent += tax_brw.amount * 100
                if tax_brw.domain == 'cofins':
                    cofins_base += tax['total_base']
                    cofins_base_other += taxes['total'] - tax['total_base']
                    cofins_value += tax['amount']
                    cofins_percent += tax_brw.amount * 100
                if tax_brw.domain == 'icmsst':
                    icms_st_value += tax['amount']
                    icms_st_base += tax['total_base']
                    # NOTE(review): adds the accumulated ICMS *value* into the
                    # ST percentage -- looks wrong, but kept to preserve the
                    # existing figures; confirm against the ST formula.
                    icms_st_percent += icms_value
                    icms_st_mva += tax_brw.amount_mva * 100
                    icms_st_base_other += 0
            res[line.id] = {
                'price_subtotal': taxes['total'] - taxes['total_tax_discount'],
                'price_total': taxes['total'],
                'icms_base': icms_base,
                'icms_base_other': icms_base_other,
                'icms_value': icms_value,
                'icms_percent': icms_percent,
                'icms_percent_reduction': icms_percent_reduction,
                'icms_st_value': icms_st_value,
                'icms_st_base': icms_st_base,
                'icms_st_percent' : icms_st_percent,
                'icms_st_mva' : icms_st_mva,
                'icms_st_base_other': icms_st_base_other,
                'icms_cst': icms_cst,
                'ipi_type': ipi_type,
                'ipi_base': ipi_base,
                'ipi_base_other': ipi_base_other,
                'ipi_value': ipi_value,
                'ipi_percent': ipi_percent,
                'ipi_cst': ipi_cst,
                'pis_base': pis_base,
                'pis_base_other': pis_base_other,
                'pis_value': pis_value,
                'pis_percent': pis_percent,
                'pis_cst': pis_cst,
                'cofins_base': cofins_base,
                'cofins_base_other': cofins_base_other,
                'cofins_value': cofins_value,
                'cofins_percent': cofins_percent,
                'cofins_cst': cofins_cst,
            }
            # Round the monetary amounts in the invoice currency when available.
            if line.invoice_id:
                cur = line.invoice_id.currency_id
                res[line.id] = {
                    'price_subtotal': cur_obj.round(cr, uid, cur, res[line.id]['price_subtotal']),
                    'price_total': cur_obj.round(cr, uid, cur, res[line.id]['price_total']),
                    'icms_base': cur_obj.round(cr, uid, cur, icms_base),
                    'icms_base_other': cur_obj.round(cr, uid, cur, icms_base_other),
                    'icms_value': cur_obj.round(cr, uid, cur, icms_value),
                    'icms_percent': icms_percent,
                    'icms_percent_reduction': icms_percent_reduction,
                    'icms_st_value': cur_obj.round(cr, uid, cur, icms_st_value),
                    'icms_st_base': cur_obj.round(cr, uid, cur, icms_st_base),
                    'icms_st_percent' : icms_st_percent,
                    'icms_st_mva' : icms_st_mva,
                    'icms_st_base_other': cur_obj.round(cr, uid, cur, icms_st_base_other),
                    'icms_cst': icms_cst,
                    'ipi_type': ipi_type,
                    'ipi_base': cur_obj.round(cr, uid, cur, ipi_base),
                    'ipi_base_other': cur_obj.round(cr, uid, cur, ipi_base_other),
                    'ipi_value': cur_obj.round(cr, uid, cur, ipi_value),
                    'ipi_percent': ipi_percent,
                    'ipi_cst': ipi_cst,
                    'pis_base': cur_obj.round(cr, uid, cur, pis_base),
                    'pis_base_other': cur_obj.round(cr, uid, cur, pis_base_other),
                    'pis_value': cur_obj.round(cr, uid, cur, pis_value),
                    'pis_percent': pis_percent,
                    'pis_cst': pis_cst,
                    'cofins_base': cur_obj.round(cr, uid, cur, cofins_base),
                    'cofins_base_other': cur_obj.round(cr, uid, cur, cofins_base_other),
                    'cofins_value': cur_obj.round(cr, uid, cur, cofins_value),
                    'cofins_percent': cofins_percent,
                    'cofins_cst': cofins_cst,
                }
        return res

    _columns = {
        'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft':[('readonly',False)]}),
        'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id','=',fiscal_operation_category_id)]", readonly=True, states={'draft':[('readonly',False)]}),
        'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP'),
        'price_subtotal': fields.function(_amount_line, method=True, string='Subtotal', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'price_total': fields.function(_amount_line, method=True, string='Total', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_base': fields.function(_amount_line, method=True, string='Base ICMS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_base_other': fields.function(_amount_line, method=True, string='Base ICMS Outras', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_value': fields.function(_amount_line, method=True, string='Valor ICMS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_percent': fields.function(_amount_line, method=True, string='Perc ICMS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_percent_reduction': fields.function(_amount_line, method=True, string='Perc Redução de Base ICMS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_value': fields.function(_amount_line, method=True, string='Valor ICMS ST', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_base': fields.function(_amount_line, method=True, string='Base ICMS ST', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_percent': fields.function(_amount_line, method=True, string='Percentual ICMS ST', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_mva': fields.function(_amount_line, method=True, string='MVA ICMS ST', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_base_other': fields.function(_amount_line, method=True, string='Base ICMS ST Outras', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'icms_cst': fields.function(_amount_line, method=True, string='CST ICMS', type="char", size=2,
            store=True, multi='all'),
        'ipi_type': fields.function(_amount_line, method=True, string='Tipo do IPI', type="char", size=64,
            store=True, multi='all'),
        'ipi_base': fields.function(_amount_line, method=True, string='Base IPI', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'ipi_base_other': fields.function(_amount_line, method=True, string='Base IPI Outras', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'ipi_value': fields.function(_amount_line, method=True, string='Valor IPI', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'ipi_percent': fields.function(_amount_line, method=True, string='Perc IPI', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'ipi_cst': fields.function(_amount_line, method=True, string='CST IPI', type="char", size=2,
            store=True, multi='all'),
        'pis_base': fields.function(_amount_line, method=True, string='Base PIS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'pis_base_other': fields.function(_amount_line, method=True, string='Base PIS Outras', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'pis_value': fields.function(_amount_line, method=True, string='Valor PIS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'pis_percent': fields.function(_amount_line, method=True, string='Perc PIS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'pis_cst': fields.function(_amount_line, method=True, string='CST PIS', type="char", size=2,
            store=True, multi='all'),
        'cofins_base': fields.function(_amount_line, method=True, string='Base COFINS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'cofins_base_other': fields.function(_amount_line, method=True, string='Base COFINS Outras', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'cofins_value': fields.function(_amount_line, method=True, string='Valor COFINS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        'cofins_percent': fields.function(_amount_line, method=True, string='Perc COFINS', type="float",
            digits_compute= dp.get_precision('Account'), store=True, multi='all'),
        # BUG FIX: label was 'Valor COFINS' (copy-pasted from the value field);
        # this is the CST code column.
        'cofins_cst': fields.function(_amount_line, method=True, string='CST COFINS', type="char", size=2,
            store=True, multi='all'),
    }

    def product_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, address_invoice_id=False, currency_id=False, context=None, cfop_id=False):
        """Extend the product onchange to push the CFOP into the line values."""
        result = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, address_invoice_id, currency_id, context)
        if not cfop_id:
            return result
        result['value']['cfop_id'] = cfop_id
        # NOTE(review): the same cfop_id is also pushed into the category and
        # operation many2one fields below -- ids of three different models.
        # Looks like a copy-paste slip, but kept to preserve the existing
        # client behaviour; confirm against the views/callers.
        result['value']['fiscal_operation_category_id'] = cfop_id
        result['value']['fiscal_operation_id'] = cfop_id
        return result

account_invoice_line()
class account_invoice_tax(osv.osv):
    """Invoice tax lines: recompute the grouped tax values for one invoice."""
    _inherit = "account.invoice.tax"
    _description = "Invoice Tax"

    # NOTE(review): mutable default ``context={}`` is shared across calls;
    # left untouched in this documentation pass, but should become None.
    def compute(self, cr, uid, invoice_id, context={}):
        """Return the tax lines for *invoice_id*, grouped for storage.

        For every invoice line, runs account.tax.compute_all on the
        discounted unit price, then groups the resulting taxes by
        (tax_code_id, base_code_id, account_id).  Base/tax amounts are
        converted into the company currency with the sign that matches the
        invoice type (collected codes for out/in invoices, "ref" codes for
        refunds) and finally rounded in the invoice currency.
        """
        tax_grouped = {}
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context)
        cur = inv.currency_id
        company_currency = inv.company_id.currency_id.id
        for line in inv.invoice_line:
            taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit* (1-(line.discount or 0.0)/100.0)), line.quantity, inv.address_invoice_id.id, line.product_id, inv.partner_id)
            for tax in taxes['taxes']:
                val={}
                val['invoice_id'] = inv.id
                val['name'] = tax['name']
                val['amount'] = tax['amount']
                val['manual'] = False
                val['sequence'] = tax['sequence']
                val['base'] = tax['total_base']
                # Sales/purchase invoices use the collected codes and signs;
                # refunds use the "ref" codes and the paid account.
                if inv.type in ('out_invoice','in_invoice'):
                    val['base_code_id'] = tax['base_code_id']
                    val['tax_code_id'] = tax['tax_code_id']
                    val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['account_id'] = tax['account_collected_id'] or line.account_id.id
                else:
                    val['base_code_id'] = tax['ref_base_code_id']
                    val['tax_code_id'] = tax['ref_tax_code_id']
                    val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
                    val['account_id'] = tax['account_paid_id'] or line.account_id.id
                # Accumulate amounts for taxes sharing the same grouping key.
                key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if not key in tax_grouped:
                    tax_grouped[key] = val
                else:
                    tax_grouped[key]['amount'] += val['amount']
                    tax_grouped[key]['base'] += val['base']
                    tax_grouped[key]['base_amount'] += val['base_amount']
                    tax_grouped[key]['tax_amount'] += val['tax_amount']
        # Round only once, after all lines have been accumulated.
        for t in tax_grouped.values():
            t['base'] = cur_obj.round(cr, uid, cur, t['base'])
            t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
            t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
            t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
        return tax_grouped

# Instantiate to register the model with the ORM (pre-v7 OpenERP convention).
account_invoice_tax()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
import gc
# howdoi is a short-lived CLI process: skipping GC cycles shaves startup and
# runtime, and the process exits before memory growth matters.
gc.disable()
import argparse
import json
import os
import re
import sys
import textwrap
from urllib.request import getproxies
from urllib.parse import quote as url_quote, urlparse, parse_qs
import appdirs
import requests
from cachelib import FileSystemCache, NullCache
from keep import utils as keep_utils
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import SSLError
from howdoi import __version__
# rudimentary standardized 3-level log output
def _print_err(err):
print("[ERROR] " + err)
_print_ok = print # noqa: E305
def _print_dbg(err):
print("[DEBUG] " + err)
# Scheme selection: HOWDOI_DISABLE_SSL switches to plain HTTP and disables
# certificate verification (useful behind intercepting proxies).
if os.getenv('HOWDOI_DISABLE_SSL'):  # Set http instead of https
    SCHEME = 'http://'
    VERIFY_SSL_CERTIFICATE = False
else:
    SCHEME = 'https://'
    VERIFY_SSL_CERTIFICATE = True
SUPPORTED_SEARCH_ENGINES = ('google', 'bing', 'duckduckgo')
# Site whose questions/answers are scraped; overridable via HOWDOI_URL.
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'
# Desktop user agents rotated per request to look like a normal browser.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
                'Safari/536.5'), )
# Per-engine query templates: {0} is the site to restrict to, {1} the query.
SEARCH_URLS = {
    'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}&hl=en',
    'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}&hl=en',
    'duckduckgo': SCHEME + 'duckduckgo.com/?q=site:{0}%20{1}&t=hj&ia=web'
}
# Markers in a response body that indicate a captcha / abuse block page.
BLOCK_INDICATORS = (
    'form id="captcha-form"',
    'This page appears when Google automatically detects requests coming from your computer '
    'network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)
BLOCKED_QUESTION_FRAGMENTS = (
    'webcache.googleusercontent.com',
)
STAR_HEADER = '\u2605'
ANSWER_HEADER = '{2} Answer from {0} {2}\n{1}'
NO_ANSWER_MSG = '< no answer given >'
# Sentinel stored in the cache to distinguish "cached empty" from "missing".
CACHE_EMPTY_VAL = "NULL"
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128
HTML_CACHE_PATH = 'page_cache'
SUPPORTED_HELP_QUERIES = ['use howdoi', 'howdoi', 'run howdoi',
                          'do howdoi', 'howdoi howdoi', 'howdoi use howdoi']
# variables for text formatting, prepend to string to begin text formatting.
BOLD = '\033[1m'
GREEN = '\033[92m'
RED = '\033[91m'
UNDERLINE = '\033[4m'
END_FORMAT = '\033[0m'  # append to string to end text formatting.
# stash options
STASH_SAVE = 'save'
STASH_VIEW = 'view'
STASH_REMOVE = 'remove'
STASH_EMPTY = 'empty'
# HOWDOI_DISABLE_CACHE swaps in a cache that never stores anything.
if os.getenv('HOWDOI_DISABLE_CACHE'):
    cache = NullCache()  # works like an always empty cache
else:
    cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)
# One shared session so connections are reused across requests.
howdoi_session = requests.session()
class BlockError(RuntimeError):
    # Raised when the search engine serves a captcha/abuse page instead of results.
    pass
class IntRange:
    """argparse ``type=`` validator for an integer constrained to [imin, imax]."""

    def __init__(self, imin=None, imax=None):
        # Either bound may be None, meaning unbounded on that side.
        self.imin = imin
        self.imax = imax

    def __call__(self, arg):
        """Parse *arg* as int; raise ArgumentTypeError when invalid or out of range."""
        try:
            value = int(arg)
        except ValueError as value_error:
            raise self.exception() from value_error
        below = self.imin is not None and value < self.imin
        above = self.imax is not None and value > self.imax
        if below or above:
            raise self.exception()
        return value

    def exception(self):
        """Build the ArgumentTypeError describing the accepted range."""
        if self.imin is not None and self.imax is not None:
            message = 'Must be an integer in the range [{imin}, {imax}]'.format(
                imin=self.imin, imax=self.imax)
        elif self.imin is not None:
            message = 'Must be an integer >= {imin}'.format(imin=self.imin)
        elif self.imax is not None:
            message = 'Must be an integer <= {imax}'.format(imax=self.imax)
        else:
            message = 'Must be an integer'
        return argparse.ArgumentTypeError(message)
def _random_int(width):
bres = os.urandom(width)
if sys.version < '3':
ires = int(bres.encode('hex'), 16)
else:
ires = int.from_bytes(bres, 'little')
return ires
def _random_choice(seq):
    """Return a pseudo-random element of *seq* using one byte of OS entropy."""
    position = _random_int(1) % len(seq)
    return seq[position]
def get_proxies():
    """Return the system's HTTP(S) proxies, each value forced to an http:// scheme."""
    filtered = {}
    for name, proxy in getproxies().items():
        if not name.startswith('http'):
            continue
        if proxy.startswith('http'):
            filtered[name] = proxy
        else:
            filtered[name] = 'http://%s' % proxy
    return filtered
def _format_url_to_filename(url, file_ext='html'):
filename = ''.join(ch for ch in url if ch.isalnum())
return filename + '.' + file_ext
def _get_result(url):
    """GET *url* with a random User-Agent and return the response body text.

    Honors system proxy settings and the HOWDOI_DISABLE_SSL toggle; SSL
    failures print a hint before the error is re-raised.
    """
    try:
        return howdoi_session.get(url, headers={'User-Agent': _random_choice(USER_AGENTS)},
                                  proxies=get_proxies(),
                                  verify=VERIFY_SSL_CERTIFICATE).text
    except requests.exceptions.SSLError as error:
        _print_err('Encountered an SSL Error. Try using HTTP instead of '
                   'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n')
        raise error
def _add_links_to_text(element):
    """Rewrite each <a> inside the pyquery *element* as markdown '[text](href)' in place.

    When the link text already equals the href, the bare URL is kept.
    """
    hyperlinks = element.find('a')
    for hyperlink in hyperlinks:
        pquery_object = pq(hyperlink)
        href = hyperlink.attrib['href']
        copy = pquery_object.text()
        if copy == href:
            replacement = copy
        else:
            replacement = "[{0}]({1})".format(copy, href)
        pquery_object.replace_with(replacement)
def get_text(element):
    """Return the inner text of a pyquery *element*, with links rendered as markdown.

    squash_space=False preserves whitespace inside code blocks; pyquery
    versions without that keyword raise TypeError, so fall back to text().
    """
    _add_links_to_text(element)
    try:
        return element.text(squash_space=False)
    except TypeError:
        return element.text()
def _extract_links_from_bing(html):
    """Collect result hrefs from a Bing results page (pyquery document)."""
    html.remove_namespaces()
    anchors = html('.b_algo')('h2')('a')
    return [anchor.attrib['href'] for anchor in anchors]
def _clean_google_link(link):
if '/url?' in link:
parsed_link = urlparse(link)
query_params = parse_qs(parsed_link.query)
url_params = query_params.get('q', []) or query_params.get('url', [])
if url_params:
return url_params[0]
return link
def _extract_links_from_google(query_object):
    """Regex-scrape every anchor href out of a Google results page and clean redirects."""
    page_html = query_object.html()
    raw_links = re.findall('<a href="([^"]*)"[^>]*>', page_html)
    return [_clean_google_link(raw) for raw in raw_links]
def _extract_links_from_duckduckgo(html):
    """Decode DuckDuckGo redirect anchors (the 'uddg' query param) into real URLs."""
    html.remove_namespaces()
    extracted = []
    for anchor in html.find('a.result__a'):
        redirect = anchor.attrib['href']
        target = parse_qs(urlparse(redirect).query).get('uddg', '')
        if target:
            extracted.append(target[0])
    return extracted
def _extract_links(html, search_engine):
    """Dispatch result-link extraction to the engine-specific scraper (Google default)."""
    extractors = {
        'bing': _extract_links_from_bing,
        'duckduckgo': _extract_links_from_duckduckgo,
    }
    extractor = extractors.get(search_engine, _extract_links_from_google)
    return extractor(html)
def _get_search_url(search_engine):
    """Return the query URL template for *search_engine*, defaulting to Google."""
    try:
        return SEARCH_URLS[search_engine]
    except KeyError:
        return SEARCH_URLS['google']
def _is_blocked(page):
    """True when *page* contains any known captcha/abuse-block marker."""
    return any(indicator in page for indicator in BLOCK_INDICATORS)
def _get_links(query):
    """Search the configured engine for *query* restricted to URL; return result links.

    Raises BlockError when the engine serves a captcha/abuse page.
    """
    search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    search_url = _get_search_url(search_engine)
    result = _get_result(search_url.format(URL, url_quote(query)))
    if _is_blocked(result):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    html = pq(result)
    return _extract_links(html, search_engine)
def get_link_at_pos(links, position):
    """Return the 1-based *position*-th link, clamped to the last link when
    *position* runs past the end; False for an empty list."""
    if not links:
        return False
    index = min(position, len(links)) - 1
    return links[index]
def _format_output(args, code):
    """Syntax-highlight *code* for the terminal when --color was requested.

    Lexer choice: first query word or StackOverflow tag that names a
    pygments lexer, else guess from the code itself; on failure the code
    is returned unchanged.
    """
    if not args['color']:
        return code
    lexer = None
    # try to find a lexer using the StackOverflow tags
    # or the query arguments
    for keyword in args['query'].split() + args['tags']:
        try:
            lexer = get_lexer_by_name(keyword)
            break
        except ClassNotFound:
            pass
    # no lexer found above, use the guesser
    if not lexer:
        try:
            lexer = guess_lexer(code)
        except ClassNotFound:
            return code
    return highlight(code,
                     lexer,
                     TerminalFormatter(bg='dark'))
def _is_question(link):
    """Truthy when *link* looks like a question URL and is not a blocked fragment."""
    if any(fragment in link for fragment in BLOCKED_QUESTION_FRAGMENTS):
        return False
    return re.search(r'questions/\d+/', link)
def _get_questions(links):
    """Filter *links* down to those recognized as question pages."""
    return list(filter(_is_question, links))
def _get_answer(args, links):
    """Fetch and extract the top answer text for the question at args['pos'].

    Returns False when no link is available; NO_ANSWER_MSG when the answer
    body is empty. Fetched pages are memoized in the module cache.
    """
    link = get_link_at_pos(links, args['pos'])
    if not link:
        return False
    cache_key = link
    page = cache.get(link)  # pylint: disable=assignment-from-none
    if not page:
        # answertab=votes pins the answer sort so the first cell is top-voted
        page = _get_result(link + '?answertab=votes')
        cache.set(cache_key, page)
    html = pq(page)
    first_answer = html('.answercell').eq(0) or html('.answer').eq(0)
    instructions = first_answer.find('pre') or first_answer.find('code')
    # side effect: tags feed the lexer choice in _format_output
    args['tags'] = [t.text for t in html('.post-tag')]
    # make decision on answer body class.
    if first_answer.find(".js-post-body"):
        answer_body_cls = ".js-post-body"
    else:
        # rollback to post-text class
        answer_body_cls = ".post-text"
    if not instructions and not args['all']:
        # no code blocks present: return the whole answer body as plain text
        text = get_text(first_answer.find(answer_body_cls).eq(0))
    elif args['all']:
        # --all: walk every child element, highlighting only pre/code fragments
        texts = []
        for html_tag in first_answer.items('{} > *'.format(answer_body_cls)):
            current_text = get_text(html_tag)
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(_format_output(args, current_text))
                else:
                    texts.append(current_text)
        text = '\n'.join(texts)
    else:
        # default: just the first code/pre block, highlighted
        text = _format_output(args, get_text(instructions.eq(0)))
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def _get_links_with_cache(query):
    """Return question links for *query*, memoized in the module cache.

    A miss stores either the filtered question links or CACHE_EMPTY_VAL, so
    repeated failing queries do not hit the network again; a cached
    CACHE_EMPTY_VAL is reported as False.
    """
    cache_key = query + "-links"
    res = cache.get(cache_key)  # pylint: disable=assignment-from-none
    if res:
        if res == CACHE_EMPTY_VAL:
            res = False
        return res
    links = _get_links(query)
    question_links = _get_questions(links)
    # Single cache write covers both the empty-links and no-question-links
    # cases; the original wrote CACHE_EMPTY_VAL twice when links was empty.
    cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
    return question_links
def build_splitter(splitter_character='=', splitter_length=80):
    """Return a newline-padded horizontal rule of the given character and length."""
    rule = splitter_character * splitter_length
    return '\n%s\n\n' % rule
def _get_answers(args):
    """
    @args: command-line arguments
    returns: array of answers and their respective metadata
             False if unable to get answers
    """
    question_links = _get_links_with_cache(args['query'])
    if not question_links:
        return False
    answers = []
    initial_position = args['pos']
    multiple_answers = (args['num_answers'] > 1 or args['all'])
    for answer_number in range(args['num_answers']):
        # advance args['pos'] so _get_answer picks the next question link
        current_position = answer_number + initial_position
        args['pos'] = current_position
        link = get_link_at_pos(question_links, current_position)
        answer = _get_answer(args, question_links)
        if not answer:
            continue
        if not args['link'] and not args['json_output'] and multiple_answers:
            # decorate each answer with its source link when showing several
            answer = ANSWER_HEADER.format(link, answer, STAR_HEADER)
        answer += '\n'
        answers.append({
            'answer': answer,
            'link': link,
            'position': current_position
        })
    return answers
def _clear_cache():
    """Clear the module-level cache; returns the backend's success flag."""
    global cache  # pylint: disable=global-statement,invalid-name
    # presumably guards against cache being unset/None -- both NullCache and
    # FileSystemCache instances are truthy, so confirm this branch is reachable
    if not cache:
        cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
    return cache.clear()
def _is_help_query(query):
    """True when *query* (case-insensitively) asks how to use howdoi itself."""
    # Direct membership test: the original built a throwaway comparison list
    # inside any(); `in` against SUPPORTED_HELP_QUERIES is equivalent and simpler.
    return query.lower() in SUPPORTED_HELP_QUERIES
def _format_answers(args, res):
if "error" in res:
return res["error"]
if args["json_output"]:
return json.dumps(res)
formatted_answers = []
for answer in res:
next_ans = answer["answer"]
if args["link"]: # if we only want links
next_ans = answer["link"]
formatted_answers.append(next_ans)
return build_splitter().join(formatted_answers)
def _get_help_instructions():
    """Return the usage examples shown when the user asks how to use howdoi."""
    sample_query = 'print hello world in python'
    templates = (
        'Here are a few popular howdoi commands ',
        '>>> howdoi {} (default query)',
        '>>> howdoi {} -a (read entire answer)',
        '>>> howdoi {} -n [number] (retrieve n number of answers)',
        '>>> howdoi {} -l (display only a link to where the answer is from',
        '>>> howdoi {} -c (Add colors to the output)',
        '>>> howdoi {} -e (Specify the search engine you want to use e.g google,bing)',
    )
    filled = [template.format(sample_query) for template in templates]
    return build_splitter(' ', 60).join(filled)
def _get_cache_key(args):
    """Cache key: stringified args plus the howdoi version (busts stale caches)."""
    return '{0}{1}'.format(args, __version__)
def format_stash_item(fields, index=-1):
    """Render one stash entry; a non-negative *index* adds a '[n]' ordinal."""
    title = fields['alias']
    description = fields['desc']
    if index == -1:
        header = '{underline}{bold}$ {title}{end_format}'.format(
            underline=UNDERLINE, bold=BOLD, title=title, end_format=END_FORMAT)
    else:
        header = '{underline}{bold}$ [{item_num}] {title}{end_format}'.format(
            underline=UNDERLINE, bold=BOLD, item_num=index + 1,
            title=title, end_format=END_FORMAT)
    return '{header}\n\n{description}\n'.format(header=header, description=description)
def print_stash(stash_list=None):
    """Print stash entries; with no *stash_list*, load and print every saved command."""
    if not stash_list or len(stash_list) == 0:
        stash_list = ['\nSTASH LIST:']
        commands = keep_utils.read_commands()
        if commands is None or len(commands.items()) == 0:
            print('No commands found in stash. Add a command with "howdoi --{stash_save} <query>".'.format(
                stash_save=STASH_SAVE))
            return
        for _, fields in commands.items():
            stash_list.append(format_stash_item(fields))
    else:
        # explicit list: number each entry for interactive selection
        stash_list = [format_stash_item(x['fields'], i) for i, x in enumerate(stash_list)]
    print(build_splitter('#').join(stash_list))
def _get_stash_key(args):
    """Build a stable stash key from *args*, excluding stash-action flags and tags."""
    # set membership + dict comprehension instead of a manual filter loop
    ignore_keys = {STASH_SAVE, STASH_VIEW, STASH_REMOVE, STASH_EMPTY, 'tags'}
    stash_args = {key: value for key, value in args.items() if key not in ignore_keys}
    return str(stash_args)
def _stash_remove(cmd_key, title):
    """Remove the stash entry for *cmd_key*, printing a colored status message."""
    commands = keep_utils.read_commands()
    if commands is not None and cmd_key in commands:
        keep_utils.remove_command(cmd_key)
        print('\n{bold}{green}"{title}" removed from stash.{end_format}\n'.format(
            bold=BOLD,
            green=GREEN,
            title=title,
            end_format=END_FORMAT))
    else:
        print('\n{bold}{red}"{title}" not found in stash.{end_format}\n'.format(
            bold=BOLD,
            red=RED,
            title=title,
            end_format=END_FORMAT))
def _stash_save(cmd_key, title, answer):
    """Save *answer* to the keep stash, initializing the stash store on first use."""
    try:
        keep_utils.save_command(cmd_key, answer, title)
    except FileNotFoundError:
        # first run: the keep store does not exist yet, create it then retry
        os.system('keep init')
        keep_utils.save_command(cmd_key, answer, title)
    finally:
        # always show the (possibly updated) stash afterwards
        print_stash()
def _parse_cmd(args, res):
    """Format *res* and run any requested stash action; return the output text."""
    answer = _format_answers(args, res)
    if args[STASH_SAVE]:
        _stash_save(_get_stash_key(args), ''.join(args['query']), answer)
        return ''
    if args[STASH_REMOVE]:
        _stash_remove(_get_stash_key(args), ''.join(args['query']))
        return ''
    return answer
def howdoi(raw_query):
    """Answer *raw_query* (a raw string or a pre-parsed args dict); return the text.

    Results are cached keyed on the full argument set plus the howdoi version.
    """
    args = raw_query
    if isinstance(raw_query, str):  # you can pass either a raw or a parsed query
        parser = get_parser()
        args = vars(parser.parse_args(raw_query.split(' ')))
    args['query'] = ' '.join(args['query']).replace('?', '')
    cache_key = _get_cache_key(args)
    if _is_help_query(args['query']):
        return _get_help_instructions() + '\n'
    res = cache.get(cache_key)  # pylint: disable=assignment-from-none
    if res:
        return _parse_cmd(args, res)
    try:
        res = _get_answers(args)
        if not res:
            res = {'error': 'Sorry, couldn\'t find any help with that topic\n'}
        cache.set(cache_key, res)
    except (RequestsConnectionError, SSLError):
        # network failure is reported as an error payload, not an exception
        res = {'error': 'Unable to reach {search_engine}. Do you need to use a proxy?\n'.format(
            search_engine=args['search_engine'])}
    return _parse_cmd(args, res)
def get_parser():
    """Build the argparse parser defining howdoi's full command-line surface."""
    parser = argparse.ArgumentParser(description='instant coding answers via the command line',
                                     epilog=textwrap.dedent('''\
                                     environment variable examples:
                                       HOWDOI_COLORIZE=1
                                       HOWDOI_DISABLE_CACHE=1
                                       HOWDOI_DISABLE_SSL=1
                                       HOWDOI_SEARCH_ENGINE=google
                                       HOWDOI_URL=serverfault.com
                                     '''),
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*', help='the question to answer')
    parser.add_argument('-p', '--pos', help='select answer in specified position (default: 1)',
                        default=1, type=IntRange(1, 20), metavar='POS')
    parser.add_argument('-n', '--num', help='number of answers to return (default: 1)',
                        dest='num_answers', default=1, type=IntRange(1, 20), metavar='NUM')
    parser.add_argument('--num-answers', help=argparse.SUPPRESS)
    parser.add_argument('-a', '--all', help='display the full text of the answer', action='store_true')
    parser.add_argument('-l', '--link', help='display only the answer link', action='store_true')
    parser.add_argument('-c', '--color', help='enable colorized output', action='store_true')
    parser.add_argument('-C', '--clear-cache', help='clear the cache',
                        action='store_true')
    parser.add_argument('-j', '--json', help='return answers in raw json format', dest='json_output',
                        action='store_true')
    parser.add_argument('--json-output', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('-v', '--version', help='displays the current version of howdoi',
                        action='store_true')
    parser.add_argument('-e', '--engine', help='search engine for this query (google, bing, duckduckgo)',
                        dest='search_engine', nargs="?", default='google', metavar='ENGINE')
    parser.add_argument('--save', '--stash', help='stash a howdoi answer',
                        action='store_true')
    parser.add_argument('--view', help='view your stash',
                        action='store_true')
    parser.add_argument('--remove', help='remove an entry in your stash',
                        action='store_true')
    parser.add_argument('--empty', help='empty your stash',
                        action='store_true')
    return parser
def prompt_stash_remove(args, stash_list, view_stash=True):
    """Interactively prompt for a stash entry index and remove it.

    0 cancels; invalid input re-prompts recursively without reprinting
    the stash (view_stash=False on the retry).
    """
    if view_stash:
        print_stash(stash_list)
    last_index = len(stash_list)
    prompt = "{bold}> Select a stash command to remove [1-{last_index}] (0 to cancel): {end_format}".format(
        bold=BOLD,
        last_index=last_index,
        end_format=END_FORMAT)
    user_input = input(prompt)
    try:
        user_input = int(user_input)
        if user_input == 0:
            return
        if user_input < 1 or user_input > last_index:
            print("\n{red}Input index is invalid.{end_format}".format(red=RED, end_format=END_FORMAT))
            prompt_stash_remove(args, stash_list, False)
            return
        cmd = stash_list[user_input - 1]
        cmd_key = cmd['command']
        cmd_name = cmd['fields']['alias']
        _stash_remove(cmd_key, cmd_name)
        return
    except ValueError:
        print("\n{red}Invalid input. Must specify index of command.{end_format}".format(
            red=RED, end_format=END_FORMAT))
        prompt_stash_remove(args, stash_list, False)
        return
def command_line_runner():  # pylint: disable=too-many-return-statements,too-many-branches
    """CLI entry point: parse argv, dispatch stash/utility flags, then run the query."""
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['version']:
        _print_ok(__version__)
        return
    if args['clear_cache']:
        if _clear_cache():
            _print_ok('Cache cleared successfully')
        else:
            _print_err('Clearing cache failed')
        # NOTE(review): no return here, so `-C` without a query falls through
        # and prints the help text -- confirm this is intended.
    if args[STASH_VIEW]:
        print_stash()
        return
    if args[STASH_EMPTY]:
        # 'keep init' recreates an empty stash, discarding saved commands
        os.system('keep init')
        return
    if args[STASH_REMOVE] and len(args['query']) == 0:
        # --remove without a query: interactive selection from the stash
        commands = keep_utils.read_commands()
        if commands is None or len(commands.items()) == 0:
            print('No commands found in stash. Add a command with "howdoi --{stash_save} <query>".'.format(
                stash_save=STASH_SAVE))
            return
        stash_list = [{'command': cmd, 'fields': field} for cmd, field in commands.items()]
        prompt_stash_remove(args, stash_list)
        return
    if not args['query']:
        parser.print_help()
        return
    if os.getenv('HOWDOI_COLORIZE'):
        args['color'] = True
    if not args['search_engine'] in SUPPORTED_SEARCH_ENGINES:
        _print_err('Unsupported engine.\nThe supported engines are: %s' % ', '.join(SUPPORTED_SEARCH_ENGINES))
        return
    if args['search_engine'] != 'google':
        # propagate the engine choice to _get_links via the environment
        os.environ['HOWDOI_SEARCH_ENGINE'] = args['search_engine']
    utf8_result = howdoi(args).encode('utf-8', 'ignore')
    if sys.version < '3':
        print(utf8_result)
    else:
        # Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
        sys.stdout.buffer.write(utf8_result)
    # close the session to release connection
    howdoi_session.close()
# Script entry point.
if __name__ == '__main__':
    command_line_runner()
fix: make the Google query extract only Stack Overflow question links from the results page
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
# Startup micro-optimization: howdoi is a short-lived CLI process, so
# garbage collection is disabled for the whole run.
import gc
gc.disable()
import argparse
import json
import os
import re
import sys
import textwrap
from urllib.request import getproxies
from urllib.parse import quote as url_quote, urlparse, parse_qs
import appdirs
import requests
from cachelib import FileSystemCache, NullCache
from keep import utils as keep_utils
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import SSLError
from howdoi import __version__
# rudimentary standardized 3-level log output
def _print_err(err):
    """Print *err* to stdout with an [ERROR] prefix."""
    print("[ERROR] " + err)
_print_ok = print # noqa: E305
def _print_dbg(err):
    """Print *err* to stdout with a [DEBUG] prefix."""
    print("[DEBUG] " + err)
# HOWDOI_DISABLE_SSL switches every request to plain HTTP and turns off
# certificate verification (useful behind intercepting proxies).
if os.getenv('HOWDOI_DISABLE_SSL'):  # Set http instead of https
    SCHEME = 'http://'
    VERIFY_SSL_CERTIFICATE = False
else:
    SCHEME = 'https://'
    VERIFY_SSL_CERTIFICATE = True
# Engines accepted by the -e/--engine flag and HOWDOI_SEARCH_ENGINE.
SUPPORTED_SEARCH_ENGINES = ('google', 'bing', 'duckduckgo')
# Site queried for answers; override with HOWDOI_URL (e.g. serverfault.com).
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'
# Rotated per request so repeated queries look less bot-like to the engines.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
                'Safari/536.5'), )
# Per-engine search URL templates: {0} = target site, {1} = URL-quoted query.
SEARCH_URLS = {
    'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}&hl=en',
    'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}&hl=en',
    'duckduckgo': SCHEME + 'duckduckgo.com/?q=site:{0}%20{1}&t=hj&ia=web'
}
# Markers of a captcha/abuse page served instead of real search results.
BLOCK_INDICATORS = (
    'form id="captcha-form"',
    'This page appears when Google automatically detects requests coming from your computer '
    'network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)
# Links containing these fragments are never treated as question pages.
BLOCKED_QUESTION_FRAGMENTS = (
    'webcache.googleusercontent.com',
)
STAR_HEADER = '\u2605'  # decoration for multi-answer headers
ANSWER_HEADER = '{2} Answer from {0} {2}\n{1}'
NO_ANSWER_MSG = '< no answer given >'
CACHE_EMPTY_VAL = "NULL"  # sentinel cached for queries that found nothing
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128
HTML_CACHE_PATH = 'page_cache'
# Queries that trigger the built-in usage help instead of a web search.
SUPPORTED_HELP_QUERIES = ['use howdoi', 'howdoi', 'run howdoi',
                          'do howdoi', 'howdoi howdoi', 'howdoi use howdoi']
# variables for text formatting, prepend to string to begin text formatting.
BOLD = '\033[1m'
GREEN = '\033[92m'
RED = '\033[91m'
UNDERLINE = '\033[4m'
END_FORMAT = '\033[0m'  # append to string to end text formatting.
# stash options
STASH_SAVE = 'save'
STASH_VIEW = 'view'
STASH_REMOVE = 'remove'
STASH_EMPTY = 'empty'
if os.getenv('HOWDOI_DISABLE_CACHE'):
    cache = NullCache()  # works like an always empty cache
else:
    cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)
# Shared session so keep-alive connections are reused across requests.
howdoi_session = requests.session()
class BlockError(RuntimeError):
    # Raised when the search engine serves a captcha/abuse page instead of results.
    pass
class IntRange:
    """argparse ``type=`` validator for an integer constrained to [imin, imax]."""
    def __init__(self, imin=None, imax=None):
        # Either bound may be None, meaning unbounded on that side.
        self.imin = imin
        self.imax = imax
    def __call__(self, arg):
        """Parse *arg* as int; raise ArgumentTypeError when invalid or out of range."""
        try:
            value = int(arg)
        except ValueError as value_error:
            raise self.exception() from value_error
        if (self.imin is not None and value < self.imin) or (self.imax is not None and value > self.imax):
            raise self.exception()
        return value
    def exception(self):
        """Build the ArgumentTypeError describing the accepted range."""
        if self.imin is not None and self.imax is not None:
            return argparse.ArgumentTypeError('Must be an integer in the range [{imin}, {imax}]'.format(
                imin=self.imin, imax=self.imax))
        if self.imin is not None:
            return argparse.ArgumentTypeError('Must be an integer >= {imin}'.format(imin=self.imin))
        if self.imax is not None:
            return argparse.ArgumentTypeError('Must be an integer <= {imax}'.format(imax=self.imax))
        return argparse.ArgumentTypeError('Must be an integer')
def _random_int(width):
bres = os.urandom(width)
if sys.version < '3':
ires = int(bres.encode('hex'), 16)
else:
ires = int.from_bytes(bres, 'little')
return ires
def _random_choice(seq):
    """Return a pseudo-random element of *seq* using one byte of OS entropy."""
    position = _random_int(1) % len(seq)
    return seq[position]
def get_proxies():
    """Return the system's HTTP(S) proxies, each value forced to an http:// scheme."""
    filtered = {}
    for name, proxy in getproxies().items():
        if not name.startswith('http'):
            continue
        if proxy.startswith('http'):
            filtered[name] = proxy
        else:
            filtered[name] = 'http://%s' % proxy
    return filtered
def _format_url_to_filename(url, file_ext='html'):
filename = ''.join(ch for ch in url if ch.isalnum())
return filename + '.' + file_ext
def _get_result(url):
    """GET *url* with a random User-Agent and return the response body text.

    Honors system proxy settings and the HOWDOI_DISABLE_SSL toggle; SSL
    failures print a hint before the error is re-raised.
    """
    try:
        return howdoi_session.get(url, headers={'User-Agent': _random_choice(USER_AGENTS)},
                                  proxies=get_proxies(),
                                  verify=VERIFY_SSL_CERTIFICATE).text
    except requests.exceptions.SSLError as error:
        _print_err('Encountered an SSL Error. Try using HTTP instead of '
                   'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n')
        raise error
def _add_links_to_text(element):
    """Rewrite each <a> inside the pyquery *element* as markdown '[text](href)' in place.

    When the link text already equals the href, the bare URL is kept.
    """
    hyperlinks = element.find('a')
    for hyperlink in hyperlinks:
        pquery_object = pq(hyperlink)
        href = hyperlink.attrib['href']
        copy = pquery_object.text()
        if copy == href:
            replacement = copy
        else:
            replacement = "[{0}]({1})".format(copy, href)
        pquery_object.replace_with(replacement)
def get_text(element):
    """Return the inner text of a pyquery *element*, with links rendered as markdown.

    squash_space=False preserves whitespace inside code blocks; pyquery
    versions without that keyword raise TypeError, so fall back to text().
    """
    _add_links_to_text(element)
    try:
        return element.text(squash_space=False)
    except TypeError:
        return element.text()
def _extract_links_from_bing(html):
    """Collect result hrefs from a Bing results page (pyquery document)."""
    html.remove_namespaces()
    anchors = html('.b_algo')('h2')('a')
    return [anchor.attrib['href'] for anchor in anchors]
def _clean_google_link(link):
if '/url?' in link:
parsed_link = urlparse(link)
query_params = parse_qs(parsed_link.query)
url_params = query_params.get('q', []) or query_params.get('url', [])
if url_params:
return url_params[0]
return link
def _extract_links_from_google(query_object):
    """Scrape Stack Overflow question links out of a Google results page.

    Only anchors whose href contains ``stackoverflow.com/questions`` are
    kept, so ads and unrelated results are dropped before redirect cleanup.
    """
    html = query_object.html()
    # Raw string: '\.' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    # NOTE(review): this hardcodes stackoverflow.com and therefore ignores
    # HOWDOI_URL overrides such as serverfault.com -- confirm intent.
    link_pattern = re.compile(r'<a href="([^"]*stackoverflow\.com/questions[^"]*)"[^>]*>')
    links = link_pattern.findall(html)
    links = [_clean_google_link(link) for link in links]
    return links
def _extract_links_from_duckduckgo(html):
    """Decode DuckDuckGo redirect anchors (the 'uddg' query param) into real URLs."""
    html.remove_namespaces()
    extracted = []
    for anchor in html.find('a.result__a'):
        redirect = anchor.attrib['href']
        target = parse_qs(urlparse(redirect).query).get('uddg', '')
        if target:
            extracted.append(target[0])
    return extracted
def _extract_links(html, search_engine):
    """Dispatch result-link extraction to the engine-specific scraper (Google default)."""
    extractors = {
        'bing': _extract_links_from_bing,
        'duckduckgo': _extract_links_from_duckduckgo,
    }
    extractor = extractors.get(search_engine, _extract_links_from_google)
    return extractor(html)
def _get_search_url(search_engine):
    """Return the query URL template for *search_engine*, defaulting to Google."""
    try:
        return SEARCH_URLS[search_engine]
    except KeyError:
        return SEARCH_URLS['google']
def _is_blocked(page):
    """True when *page* contains any known captcha/abuse-block marker."""
    return any(indicator in page for indicator in BLOCK_INDICATORS)
def _get_links(query):
    """Search the configured engine for *query* restricted to URL; return result links.

    Raises BlockError when the engine serves a captcha/abuse page.
    """
    search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    search_url = _get_search_url(search_engine)
    result = _get_result(search_url.format(URL, url_quote(query)))
    if _is_blocked(result):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    html = pq(result)
    return _extract_links(html, search_engine)
def get_link_at_pos(links, position):
    """Return the 1-based *position*-th link, clamped to the last link when
    *position* runs past the end; False for an empty list."""
    if not links:
        return False
    index = min(position, len(links)) - 1
    return links[index]
def _format_output(args, code):
    """Syntax-highlight *code* for the terminal when --color was requested.

    Lexer choice: first query word or StackOverflow tag that names a
    pygments lexer, else guess from the code itself; on failure the code
    is returned unchanged.
    """
    if not args['color']:
        return code
    lexer = None
    # try to find a lexer using the StackOverflow tags
    # or the query arguments
    for keyword in args['query'].split() + args['tags']:
        try:
            lexer = get_lexer_by_name(keyword)
            break
        except ClassNotFound:
            pass
    # no lexer found above, use the guesser
    if not lexer:
        try:
            lexer = guess_lexer(code)
        except ClassNotFound:
            return code
    return highlight(code,
                     lexer,
                     TerminalFormatter(bg='dark'))
def _is_question(link):
    """Truthy when *link* looks like a question URL and is not a blocked fragment."""
    if any(fragment in link for fragment in BLOCKED_QUESTION_FRAGMENTS):
        return False
    return re.search(r'questions/\d+/', link)
def _get_questions(links):
    """Filter *links* down to those recognized as question pages."""
    return list(filter(_is_question, links))
def _get_answer(args, links):
    """Fetch and extract the top answer text for the question at args['pos'].

    Returns False when no link is available; NO_ANSWER_MSG when the answer
    body is empty. Fetched pages are memoized in the module cache.
    """
    link = get_link_at_pos(links, args['pos'])
    if not link:
        return False
    cache_key = link
    page = cache.get(link)  # pylint: disable=assignment-from-none
    if not page:
        # answertab=votes pins the answer sort so the first cell is top-voted
        page = _get_result(link + '?answertab=votes')
        cache.set(cache_key, page)
    html = pq(page)
    first_answer = html('.answercell').eq(0) or html('.answer').eq(0)
    instructions = first_answer.find('pre') or first_answer.find('code')
    # side effect: tags feed the lexer choice in _format_output
    args['tags'] = [t.text for t in html('.post-tag')]
    # make decision on answer body class.
    if first_answer.find(".js-post-body"):
        answer_body_cls = ".js-post-body"
    else:
        # rollback to post-text class
        answer_body_cls = ".post-text"
    if not instructions and not args['all']:
        # no code blocks present: return the whole answer body as plain text
        text = get_text(first_answer.find(answer_body_cls).eq(0))
    elif args['all']:
        # --all: walk every child element, highlighting only pre/code fragments
        texts = []
        for html_tag in first_answer.items('{} > *'.format(answer_body_cls)):
            current_text = get_text(html_tag)
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(_format_output(args, current_text))
                else:
                    texts.append(current_text)
        text = '\n'.join(texts)
    else:
        # default: just the first code/pre block, highlighted
        text = _format_output(args, get_text(instructions.eq(0)))
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def _get_links_with_cache(query):
    """Return question links for *query*, memoized in the module cache.

    A miss stores either the filtered question links or CACHE_EMPTY_VAL, so
    repeated failing queries do not hit the network again; a cached
    CACHE_EMPTY_VAL is reported as False.
    """
    cache_key = query + "-links"
    res = cache.get(cache_key)  # pylint: disable=assignment-from-none
    if res:
        if res == CACHE_EMPTY_VAL:
            res = False
        return res
    links = _get_links(query)
    question_links = _get_questions(links)
    # Single cache write covers both the empty-links and no-question-links
    # cases; the original wrote CACHE_EMPTY_VAL twice when links was empty.
    cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
    return question_links
def build_splitter(splitter_character='=', splitter_length=80):
    """Return a newline-padded horizontal rule of the given character and length."""
    rule = splitter_character * splitter_length
    return '\n%s\n\n' % rule
def _get_answers(args):
    """
    @args: command-line arguments
    returns: array of answers and their respective metadata
             False if unable to get answers
    """
    question_links = _get_links_with_cache(args['query'])
    if not question_links:
        return False
    answers = []
    initial_position = args['pos']
    multiple_answers = (args['num_answers'] > 1 or args['all'])
    for answer_number in range(args['num_answers']):
        # advance args['pos'] so _get_answer picks the next question link
        current_position = answer_number + initial_position
        args['pos'] = current_position
        link = get_link_at_pos(question_links, current_position)
        answer = _get_answer(args, question_links)
        if not answer:
            continue
        if not args['link'] and not args['json_output'] and multiple_answers:
            # decorate each answer with its source link when showing several
            answer = ANSWER_HEADER.format(link, answer, STAR_HEADER)
        answer += '\n'
        answers.append({
            'answer': answer,
            'link': link,
            'position': current_position
        })
    return answers
def _clear_cache():
    """Clear the module-level cache; returns the backend's success flag."""
    global cache  # pylint: disable=global-statement,invalid-name
    # presumably guards against cache being unset/None -- both NullCache and
    # FileSystemCache instances are truthy, so confirm this branch is reachable
    if not cache:
        cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
    return cache.clear()
def _is_help_query(query):
    """True when *query* (case-insensitively) asks how to use howdoi itself."""
    # Direct membership test: the original built a throwaway comparison list
    # inside any(); `in` against SUPPORTED_HELP_QUERIES is equivalent and simpler.
    return query.lower() in SUPPORTED_HELP_QUERIES
def _format_answers(args, res):
if "error" in res:
return res["error"]
if args["json_output"]:
return json.dumps(res)
formatted_answers = []
for answer in res:
next_ans = answer["answer"]
if args["link"]: # if we only want links
next_ans = answer["link"]
formatted_answers.append(next_ans)
return build_splitter().join(formatted_answers)
def _get_help_instructions():
    """Return the usage examples shown when the user asks how to use howdoi."""
    sample_query = 'print hello world in python'
    templates = (
        'Here are a few popular howdoi commands ',
        '>>> howdoi {} (default query)',
        '>>> howdoi {} -a (read entire answer)',
        '>>> howdoi {} -n [number] (retrieve n number of answers)',
        '>>> howdoi {} -l (display only a link to where the answer is from',
        '>>> howdoi {} -c (Add colors to the output)',
        '>>> howdoi {} -e (Specify the search engine you want to use e.g google,bing)',
    )
    filled = [template.format(sample_query) for template in templates]
    return build_splitter(' ', 60).join(filled)
def _get_cache_key(args):
    """Cache key: stringified args plus the howdoi version (busts stale caches)."""
    return '{0}{1}'.format(args, __version__)
def format_stash_item(fields, index=-1):
    """Render one stash entry; a non-negative *index* adds a '[n]' ordinal."""
    title = fields['alias']
    description = fields['desc']
    if index == -1:
        header = '{underline}{bold}$ {title}{end_format}'.format(
            underline=UNDERLINE, bold=BOLD, title=title, end_format=END_FORMAT)
    else:
        header = '{underline}{bold}$ [{item_num}] {title}{end_format}'.format(
            underline=UNDERLINE, bold=BOLD, item_num=index + 1,
            title=title, end_format=END_FORMAT)
    return '{header}\n\n{description}\n'.format(header=header, description=description)
def print_stash(stash_list=None):
    """Print stash entries; with no *stash_list*, load and print every saved command."""
    if not stash_list or len(stash_list) == 0:
        stash_list = ['\nSTASH LIST:']
        commands = keep_utils.read_commands()
        if commands is None or len(commands.items()) == 0:
            print('No commands found in stash. Add a command with "howdoi --{stash_save} <query>".'.format(
                stash_save=STASH_SAVE))
            return
        for _, fields in commands.items():
            stash_list.append(format_stash_item(fields))
    else:
        # explicit list: number each entry for interactive selection
        stash_list = [format_stash_item(x['fields'], i) for i, x in enumerate(stash_list)]
    print(build_splitter('#').join(stash_list))
def _get_stash_key(args):
    """Build a stable stash key from *args*, excluding stash-action flags and tags."""
    # set membership + dict comprehension instead of a manual filter loop
    ignore_keys = {STASH_SAVE, STASH_VIEW, STASH_REMOVE, STASH_EMPTY, 'tags'}
    stash_args = {key: value for key, value in args.items() if key not in ignore_keys}
    return str(stash_args)
def _stash_remove(cmd_key, title):
    """Remove the stash entry for *cmd_key*, printing a colored status message."""
    commands = keep_utils.read_commands()
    if commands is not None and cmd_key in commands:
        keep_utils.remove_command(cmd_key)
        print('\n{bold}{green}"{title}" removed from stash.{end_format}\n'.format(
            bold=BOLD,
            green=GREEN,
            title=title,
            end_format=END_FORMAT))
    else:
        print('\n{bold}{red}"{title}" not found in stash.{end_format}\n'.format(
            bold=BOLD,
            red=RED,
            title=title,
            end_format=END_FORMAT))
def _stash_save(cmd_key, title, answer):
    """Save *answer* to the keep stash, initializing the stash store on first use."""
    try:
        keep_utils.save_command(cmd_key, answer, title)
    except FileNotFoundError:
        # first run: the keep store does not exist yet, create it then retry
        os.system('keep init')
        keep_utils.save_command(cmd_key, answer, title)
    finally:
        # always show the (possibly updated) stash afterwards
        print_stash()
def _parse_cmd(args, res):
    """Format *res* and run any requested stash action; return the output text."""
    answer = _format_answers(args, res)
    if args[STASH_SAVE]:
        _stash_save(_get_stash_key(args), ''.join(args['query']), answer)
        return ''
    if args[STASH_REMOVE]:
        _stash_remove(_get_stash_key(args), ''.join(args['query']))
        return ''
    return answer
def howdoi(raw_query):
    """Answer *raw_query* (a raw string or a pre-parsed args dict); return the text.

    Results are cached keyed on the full argument set plus the howdoi version.
    """
    args = raw_query
    if isinstance(raw_query, str):  # you can pass either a raw or a parsed query
        parser = get_parser()
        args = vars(parser.parse_args(raw_query.split(' ')))
    args['query'] = ' '.join(args['query']).replace('?', '')
    cache_key = _get_cache_key(args)
    if _is_help_query(args['query']):
        return _get_help_instructions() + '\n'
    res = cache.get(cache_key)  # pylint: disable=assignment-from-none
    if res:
        return _parse_cmd(args, res)
    try:
        res = _get_answers(args)
        if not res:
            res = {'error': 'Sorry, couldn\'t find any help with that topic\n'}
        cache.set(cache_key, res)
    except (RequestsConnectionError, SSLError):
        # network failure is reported as an error payload, not an exception
        res = {'error': 'Unable to reach {search_engine}. Do you need to use a proxy?\n'.format(
            search_engine=args['search_engine'])}
    return _parse_cmd(args, res)
def get_parser():
    """Build the howdoi argument parser (query, answer-selection, stash flags)."""
    parser = argparse.ArgumentParser(description='instant coding answers via the command line',
                                     epilog=textwrap.dedent('''\
                                     environment variable examples:
                                       HOWDOI_COLORIZE=1
                                       HOWDOI_DISABLE_CACHE=1
                                       HOWDOI_DISABLE_SSL=1
                                       HOWDOI_SEARCH_ENGINE=google
                                       HOWDOI_URL=serverfault.com
                                     '''),
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*', help='the question to answer')
    parser.add_argument('-p', '--pos', help='select answer in specified position (default: 1)',
                        default=1, type=IntRange(1, 20), metavar='POS')
    parser.add_argument('-n', '--num', help='number of answers to return (default: 1)',
                        dest='num_answers', default=1, type=IntRange(1, 20), metavar='NUM')
    # Hidden alias kept for backward compatibility with the long spelling.
    parser.add_argument('--num-answers', help=argparse.SUPPRESS)
    parser.add_argument('-a', '--all', help='display the full text of the answer', action='store_true')
    parser.add_argument('-l', '--link', help='display only the answer link', action='store_true')
    parser.add_argument('-c', '--color', help='enable colorized output', action='store_true')
    parser.add_argument('-C', '--clear-cache', help='clear the cache',
                        action='store_true')
    parser.add_argument('-j', '--json', help='return answers in raw json format', dest='json_output',
                        action='store_true')
    # Hidden alias for -j/--json.
    parser.add_argument('--json-output', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('-v', '--version', help='displays the current version of howdoi',
                        action='store_true')
    parser.add_argument('-e', '--engine', help='search engine for this query (google, bing, duckduckgo)',
                        dest='search_engine', nargs="?", default='google', metavar='ENGINE')
    parser.add_argument('--save', '--stash', help='stash a howdoi answer',
                        action='store_true')
    parser.add_argument('--view', help='view your stash',
                        action='store_true')
    parser.add_argument('--remove', help='remove an entry in your stash',
                        action='store_true')
    parser.add_argument('--empty', help='empty your stash',
                        action='store_true')
    return parser
def prompt_stash_remove(args, stash_list, view_stash=True):
    """Interactively pick a stash entry to remove.

    Shows the stash (unless *view_stash* is False), then repeatedly prompts
    for an index: 0 cancels, a valid index removes that command, anything
    else prints an error and re-prompts. Iterative rather than recursive so
    repeated invalid input cannot exhaust the recursion limit.
    """
    if view_stash:
        print_stash(stash_list)
    last_index = len(stash_list)
    prompt = "{bold}> Select a stash command to remove [1-{last_index}] (0 to cancel): {end_format}".format(
        bold=BOLD,
        last_index=last_index,
        end_format=END_FORMAT)
    while True:
        user_input = input(prompt)
        try:
            selection = int(user_input)
        except ValueError:
            print("\n{red}Invalid input. Must specify index of command.{end_format}".format(
                red=RED, end_format=END_FORMAT))
            continue
        if selection == 0:
            # User cancelled.
            return
        if selection < 1 or selection > last_index:
            print("\n{red}Input index is invalid.{end_format}".format(red=RED, end_format=END_FORMAT))
            continue
        cmd = stash_list[selection - 1]
        _stash_remove(cmd['command'], cmd['fields']['alias'])
        return
def command_line_runner():  # pylint: disable=too-many-return-statements,too-many-branches
    """CLI entry point: parse arguments, handle meta flags, then run the query.

    Meta flags (version, cache clearing, stash view/empty/remove) short-circuit
    before any search happens; otherwise the query is answered via howdoi()
    and written to stdout as UTF-8.
    """
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['version']:
        _print_ok(__version__)
        return
    if args['clear_cache']:
        if _clear_cache():
            _print_ok('Cache cleared successfully')
        else:
            _print_err('Clearing cache failed')
    if args[STASH_VIEW]:
        print_stash()
        return
    if args[STASH_EMPTY]:
        # 'keep init' recreates an empty store, discarding all stashed entries.
        os.system('keep init')
        return
    if args[STASH_REMOVE] and len(args['query']) == 0:
        commands = keep_utils.read_commands()
        if commands is None or len(commands.items()) == 0:
            print('No commands found in stash. Add a command with "howdoi --{stash_save} <query>".'.format(
                stash_save=STASH_SAVE))
            return
        stash_list = [{'command': cmd, 'fields': field} for cmd, field in commands.items()]
        prompt_stash_remove(args, stash_list)
        return
    if not args['query']:
        parser.print_help()
        return
    if os.getenv('HOWDOI_COLORIZE'):
        args['color'] = True
    if args['search_engine'] not in SUPPORTED_SEARCH_ENGINES:
        _print_err('Unsupported engine.\nThe supported engines are: %s' % ', '.join(SUPPORTED_SEARCH_ENGINES))
        return
    if args['search_engine'] != 'google':
        os.environ['HOWDOI_SEARCH_ENGINE'] = args['search_engine']
    utf8_result = howdoi(args).encode('utf-8', 'ignore')
    # Compare interpreter versions numerically: the old string comparison
    # (sys.version < '3') is fragile lexicographic ordering.
    if sys.version_info[0] < 3:
        print(utf8_result)
    else:
        # Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
        sys.stdout.buffer.write(utf8_result)
    # close the session to release connection
    howdoi_session.close()
# Allow running this module directly as a script.
if __name__ == '__main__':
    command_line_runner()
|
import json
from flask import session
from PyPaste import app
from PyPaste.models.pastes import Paste
class TestBase(object):
    """Shared fixture: Flask test client plus a fresh pastes table per test."""
    def setUp(self):
        # Disable CSRF so tests can POST forms without a token.
        app.config['CSRF_ENABLED'] = False
        self.app = app.test_client()
        Paste.init_table()
    def tearDown(self):
        # drop the table so we can
        # recreate it for other tests
        cur = Paste._cursor()
        cur.execute('DROP TABLE pastes')
        cur.connection.commit()
        cur.close()
class test_legacy_api_compat(TestBase):
    """Tests for the legacy /api/add endpoint's backward compatibility."""
    def legacy_api_post(self, **kw):
        # Helper: POST arbitrary form fields to the legacy endpoint.
        return self.app.post('/api/add', data=kw)
    def legacy_api_assertions_success(self, response):
        # Common assertions for a successful legacy API paste creation.
        assert response.status_code == 200
        assert response.mimetype == 'application/json'
        data = json.loads(response.data)
        assert data['success']
        # Check we're not returning relative URLs
        assert data['url'].startswith('http')
    def test_legacy_api_compat_1(self):
        # 'contents' alone is sufficient to create a paste.
        r = self.legacy_api_post(
            contents='This is the only required field'
        )
        self.legacy_api_assertions_success(r)
    def test_legacy_api_compat_2(self):
        # All optional fields accepted; the plaintext password is echoed back.
        r = self.legacy_api_post(
            contents='example data',
            title='testing',
            password='hunter2',
            language='text',
            unlisted=0
        )
        self.legacy_api_assertions_success(r)
        data = json.loads(r.data)
        assert data['password'] == 'hunter2'
    def test_legacy_api_compat_3(self):
        # Missing 'contents' must fail with a JSON error list.
        r = self.legacy_api_post(title='failure')
        assert r.mimetype == 'application/json'
        data = json.loads(r.data)
        assert not data['success']
        assert isinstance(data['error'], list)
class test_core_functionality(TestBase):
    """Tests for paste creation, unlisted pastes, and password protection."""
    def test_paste_creation(self):
        p = Paste.new("Look, we're testing!", password='hunter2')
        # Pasting succeeded
        assert p is not None
        assert p['id'] == 1
        # Check passwords are being hashed
        # bcrypt outputs 60 bytes
        assert p['password'] != 'hunter2'
        assert len(p['password']) == 60
        # Now check paste creation using the web
        r = self.app.post('/', data=dict(
            text='test',
            title='',
            password='',
            language='text',
            unlisted=None
        ))
        # Grab the newly made paste
        p = Paste.by_id(2)
        assert p['text'] == 'test'
        # Blank form password should be stored as no password at all.
        assert p['password'] is None
        assert r.status_code == 302
    def test_unlisted_paste(self):
        p = Paste.new('Test', unlisted=True)
        id = p['id']
        hash = p['hash']
        # Unlisted pastes should only be
        # accessed via /u/:hash
        r = self.app.get('/p/{0}/'.format(id))
        assert r.status_code == 404
        r = self.app.get('/u/{0}/'.format(hash))
        assert r.status_code == 200
    def test_password_protection(self):
        Paste.new('Test', password='hunter2')
        r = self.app.get('/p/1/')
        # 401 = unauthorised
        assert r.status_code == 401
        assert r.mimetype == 'text/html'
    def test_password_authentication(self):
        p = Paste.new('Test', password='hunter2')
        with app.test_client() as c:
            r = c.post('/p/authorise', data=dict(
                paste_hash=p['hash'],
                password='hunter2',
                redirect='http://localhost/p/1/',
            ))
            # Check we've got the correct cookie
            # and are being redirected
            assert p['hash'] in session.get('authorised_pastes')
            assert r.status_code == 302
rewrite: add test for 'view raw' feature
import json
from flask import session
from PyPaste import app
from PyPaste.models.pastes import Paste
class TestBase(object):
    """Shared fixture: Flask test client plus a fresh pastes table per test."""
    def setUp(self):
        # Disable CSRF so tests can POST forms without a token.
        app.config['CSRF_ENABLED'] = False
        self.app = app.test_client()
        Paste.init_table()
    def tearDown(self):
        # drop the table so we can
        # recreate it for other tests
        cur = Paste._cursor()
        cur.execute('DROP TABLE pastes')
        cur.connection.commit()
        cur.close()
class test_legacy_api_compat(TestBase):
    """Tests for the legacy /api/add endpoint's backward compatibility."""
    def legacy_api_post(self, **kw):
        # Helper: POST arbitrary form fields to the legacy endpoint.
        return self.app.post('/api/add', data=kw)
    def legacy_api_assertions_success(self, response):
        # Common assertions for a successful legacy API paste creation.
        assert response.status_code == 200
        assert response.mimetype == 'application/json'
        data = json.loads(response.data)
        assert data['success']
        # Check we're not returning relative URLs
        assert data['url'].startswith('http')
    def test_legacy_api_compat_1(self):
        # 'contents' alone is sufficient to create a paste.
        r = self.legacy_api_post(
            contents='This is the only required field'
        )
        self.legacy_api_assertions_success(r)
    def test_legacy_api_compat_2(self):
        # All optional fields accepted; the plaintext password is echoed back.
        r = self.legacy_api_post(
            contents='example data',
            title='testing',
            password='hunter2',
            language='text',
            unlisted=0
        )
        self.legacy_api_assertions_success(r)
        data = json.loads(r.data)
        assert data['password'] == 'hunter2'
    def test_legacy_api_compat_3(self):
        # Missing 'contents' must fail with a JSON error list.
        r = self.legacy_api_post(title='failure')
        assert r.mimetype == 'application/json'
        data = json.loads(r.data)
        assert not data['success']
        assert isinstance(data['error'], list)
class test_core_functionality(TestBase):
    """Tests for paste creation, unlisted/raw views, and password protection."""
    def test_paste_creation(self):
        p = Paste.new("Look, we're testing!", password='hunter2')
        # Pasting succeeded
        assert p is not None
        assert p['id'] == 1
        # Check passwords are being hashed
        # bcrypt outputs 60 bytes
        assert p['password'] != 'hunter2'
        assert len(p['password']) == 60
        # Now check paste creation using the web
        r = self.app.post('/', data=dict(
            text='test',
            title='',
            password='',
            language='text',
            unlisted=None
        ))
        # Grab the newly made paste
        p = Paste.by_id(2)
        assert p['text'] == 'test'
        # Blank form password should be stored as no password at all.
        assert p['password'] is None
        assert r.status_code == 302
    def test_unlisted_paste(self):
        p = Paste.new('Test', unlisted=True)
        id = p['id']
        hash = p['hash']
        # Unlisted pastes should only be
        # accessed via /u/:hash
        r = self.app.get('/p/{0}/'.format(id))
        assert r.status_code == 404
        r = self.app.get('/u/{0}/'.format(hash))
        assert r.status_code == 200
    def test_password_protection(self):
        Paste.new('Test', password='hunter2')
        r = self.app.get('/p/1/')
        # 401 = unauthorised
        assert r.status_code == 401
        assert r.mimetype == 'text/html'
    def test_password_authentication(self):
        p = Paste.new('Test', password='hunter2')
        with app.test_client() as c:
            r = c.post('/p/authorise', data=dict(
                paste_hash=p['hash'],
                password='hunter2',
                redirect='http://localhost/p/1/',
            ))
            # Check we've got the correct cookie
            # and are being redirected
            assert p['hash'] in session.get('authorised_pastes')
            assert r.status_code == 302
    def test_raw_paste(self):
        # The raw view serves the paste body as plain text.
        Paste.new('Hello World!')
        r = self.app.get('/p/1/raw/')
        assert r.status_code == 200
        assert r.mimetype == 'text/plain'
|
Disabled condor email notifications
|
#!/usr/bin/env python3
import argparse
import json
import numpy
import os
import random
import re
import subprocess
import sys
import time
args = None
logFile = None
unlockTimeout = 99999999999
fastUnstakeSystem = './fast.refund/eosio.system/eosio.system.wasm'
systemAccounts = [
'eosio.bpay',
'eosio.msig',
'eosio.names',
'eosio.ram',
'eosio.ramfee',
'eosio.saving',
'eosio.stake',
'eosio.token',
'eosio.vpay',
]
def jsonArg(a):
    """Serialize *a* as JSON wrapped in single quotes, padded for shell embedding."""
    encoded = json.dumps(a)
    return " '{0}' ".format(encoded)
def run(args):
    """Run a shell command string, echoing and logging it; exit on failure.

    Any nonzero exit status aborts the whole script via sys.exit(1).
    Requires the module-level logFile to be an open file.
    """
    print('bios-boot-tutorial.py:', args)
    logFile.write(args + '\n')
    if subprocess.call(args, shell=True):
        print('bios-boot-tutorial.py: exiting because of error')
        sys.exit(1)
def retry(args):
    """Run a shell command string repeatedly until it exits with status 0.

    NOTE(review): loops forever if the command can never succeed.
    """
    while True:
        print('bios-boot-tutorial.py:', args)
        logFile.write(args + '\n')
        if subprocess.call(args, shell=True):
            print('*** Retry')
        else:
            break
def background(args):
    """Launch a shell command in the background; return the Popen handle."""
    print('bios-boot-tutorial.py:', args)
    logFile.write(args + '\n')
    return subprocess.Popen(args, shell=True)
def getOutput(args):
    """Run a shell command and return its stdout decoded as UTF-8."""
    print('bios-boot-tutorial.py:', args)
    logFile.write(args + '\n')
    proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
    return proc.communicate()[0].decode('utf-8')
def getJsonOutput(args):
    """Run a shell command and parse its stdout as JSON."""
    print('bios-boot-tutorial.py:', args)
    logFile.write(args + '\n')
    proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
    return json.loads(proc.communicate()[0])
def sleep(t):
    """Sleep for *t* seconds, announcing the pause and resume on stdout."""
    print('sleep', t, '...')
    time.sleep(t)
    print('resume')
def startWallet():
    """Start keosd on a fresh wallet directory and create the default wallet."""
    run('rm -rf ' + os.path.abspath(args.wallet_dir))
    run('mkdir -p ' + os.path.abspath(args.wallet_dir))
    background(args.keosd + ' --unlock-timeout %d --http-server-address 127.0.0.1:6666 --wallet-dir %s' % (unlockTimeout, os.path.abspath(args.wallet_dir)))
    # Give keosd a moment to start listening before using cleos.
    sleep(.4)
    run(args.cleos + 'wallet create')
def importKeys():
    """Import keys into the wallet: the boot key, user keys (capped), producer keys."""
    run(args.cleos + 'wallet import ' + args.private_key)
    keys = {}
    # Import each distinct user key until the configured cap is reached.
    for a in accounts:
        key = a['pvt']
        if not key in keys:
            if len(keys) >= args.max_user_keys:
                break
            keys[key] = True
            run(args.cleos + 'wallet import ' + key)
    # Producer keys are always imported (the cap does not apply to them).
    for i in range(firstProducer, firstProducer + numProducers):
        a = accounts[i]
        key = a['pvt']
        if not key in keys:
            keys[key] = True
            run(args.cleos + 'wallet import ' + key)
def startNode(nodeIndex, account):
    """Launch a nodeos instance for *account* in its own numbered data dir.

    Each node listens on http 8000+nodeIndex / p2p 9000+nodeIndex and peers
    with every lower-numbered node. Node 0 (the boot node) additionally
    loads the history plugins.
    """
    dir = args.nodes_dir + ('%02d-' % nodeIndex) + account['name'] + '/'
    run('rm -rf ' + dir)
    run('mkdir -p ' + dir)
    # One --p2p-peer-address per previously started node.
    otherOpts = ''.join(list(map(lambda i: '    --p2p-peer-address localhost:' + str(9000 + i), range(nodeIndex))))
    if not nodeIndex: otherOpts += (
        '    --plugin eosio::history_plugin'
        '    --plugin eosio::history_api_plugin'
    )
    cmd = (
        args.nodeos +
        '    --max-irreversible-block-age 9999999'
        '    --contracts-console'
        '    --genesis-json ' + os.path.abspath(args.genesis) +
        '    --blocks-dir ' + os.path.abspath(dir) + '/blocks'
        '    --config-dir ' + os.path.abspath(dir) +
        '    --data-dir ' + os.path.abspath(dir) +
        '    --chain-state-db-size-mb 1024'
        '    --http-server-address 127.0.0.1:' + str(8000 + nodeIndex) +
        '    --p2p-listen-endpoint 127.0.0.1:' + str(9000 + nodeIndex) +
        '    --max-clients ' + str(maxClients) +
        '    --enable-stale-production'
        '    --producer-name ' + account['name'] +
        '    --private-key \'["' + account['pub'] + '","' + account['pvt'] + '"]\''
        '    --plugin eosio::http_plugin'
        '    --plugin eosio::chain_api_plugin'
        '    --plugin eosio::producer_plugin' +
        otherOpts)
    # Record the full command at the top of the node's stderr log.
    with open(dir + 'stderr', mode='w') as f:
        f.write(cmd + '\n\n')
    background(cmd + ' 2>>' + dir + 'stderr')
def startProducers(b, e):
    """Start one node per producer account in accounts[b:e], numbered from 1."""
    for i in range(b, e):
        startNode(i - b + 1, accounts[i])
def createSystemAccounts():
    """Create all eosio.* system accounts, owned by the boot public key."""
    for a in systemAccounts:
        run(args.cleos + 'create account eosio ' + a + ' ' + args.public_key)
def intToCurrency(i):
    """Format an integer amount (4 implied decimal places) as e.g. '1.2345 SYS'."""
    return '%d.%04d %s' % (i // 10000, i % 10000, args.symbol)
def allocateFunds(b, e):
    """Assign Pareto-distributed funds to accounts[b:e]; return the total.

    Amounts are in the integer 4-decimal representation used by
    intToCurrency. Producers get at least args.min_producer_funds.
    """
    dist = numpy.random.pareto(1.161, e - b).tolist() # 1.161 = 80/20 rule
    dist.sort()
    dist.reverse()
    # Scale so the distribution sums to one billion tokens.
    factor = 1_000_000_000 / sum(dist)
    total = 0
    for i in range(b, e):
        funds = round(factor * dist[i - b] * 10000)
        if i >= firstProducer and i < firstProducer + numProducers:
            funds = max(funds, round(args.min_producer_funds * 10000))
        total += funds
        accounts[i]['funds'] = funds
    return total
def createStakedAccounts(b, e):
    """Create staked on-chain accounts for accounts[b:e] from their 'funds'.

    Each account's funds are split into RAM purchase, net/cpu stake, and an
    unstaked remainder (capped by --max-unstaked); accounts that cannot
    cover the RAM cost are skipped.
    """
    ramFunds = round(args.ram_funds * 10000)
    configuredMinStake = round(args.min_stake * 10000)
    maxUnstaked = round(args.max_unstaked * 10000)
    for i in range(b, e):
        a = accounts[i]
        funds = a['funds']
        if funds < ramFunds:
            print('skipping %s: not enough funds to cover ram' % a['name'])
            continue
        minStake = min(funds - ramFunds, configuredMinStake)
        unstaked = min(funds - ramFunds - minStake, maxUnstaked)
        stake = funds - ramFunds - unstaked
        # Stake is split evenly between net and cpu (cpu takes the odd unit).
        stakeNet = round(stake / 2)
        stakeCpu = stake - stakeNet
        print('%s: total funds=%s, ram=%s, net=%s, cpu=%s, unstaked=%s' % (a['name'], intToCurrency(a['funds']), intToCurrency(ramFunds), intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(unstaked)))
        assert(funds == ramFunds + stakeNet + stakeCpu + unstaked)
        run(args.cleos + 'system newaccount --transfer eosio %s %s --stake-net "%s" --stake-cpu "%s" --buy-ram-EOS "%s"   ' %
            (a['name'], a['pub'], intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(ramFunds)))
        run(args.cleos + 'transfer eosio %s "%s"' % (a['name'], intToCurrency(unstaked)))
def regProducers(b, e):
    """Register accounts[b:e] as block producers with placeholder URLs."""
    for i in range(b, e):
        a = accounts[i]
        retry(args.cleos + 'system regproducer ' + a['name'] + ' ' + a['pub'] + ' https://' + a['name'] + '.com' + '/' + a['pub'])
def listProducers():
    """Print the chain's current producer table via cleos."""
    run(args.cleos + 'system listproducers')
def vote(b, e):
    """Have voters accounts[b:e] each vote for a random sample of producers.

    NOTE(review): an identical duplicate definition of this function appears
    again later in the file (after claimRewards); it shadows this one.
    """
    for i in range(b, e):
        voter = accounts[i]['name']
        prods = random.sample(range(firstProducer, firstProducer + numProducers), args.num_producers_vote)
        prods = ' '.join(map(lambda x: accounts[x]['name'], prods))
        retry(args.cleos + 'system voteproducer prods ' + voter + ' ' + prods)
def claimRewards():
    """Claim rewards for every producer with unpaid blocks and no prior claim.

    Prints the elapsed transaction times reported by cleos.
    """
    table = getJsonOutput(args.cleos + 'get table eosio eosio producers -l 100')
    times = []
    for row in table['rows']:
        if row['unpaid_blocks'] and not row['last_claim_time']:
            times.append(getJsonOutput(args.cleos + 'system claimrewards -j ' + row['owner'])['processed']['elapsed'])
    print('Elapsed time for claimrewards:', times)
# NOTE: removed a byte-identical duplicate definition of vote() that was
# redefining (and shadowing) the implementation declared before claimRewards.
def proxyVotes(b, e):
    """Make the first producer a voting proxy and point voters b:e at it."""
    # The proxy account must itself vote before registering as a proxy.
    vote(firstProducer, firstProducer + 1)
    proxy = accounts[firstProducer]['name']
    retry(args.cleos + 'system regproxy ' + proxy)
    sleep(1.0)
    for i in range(b, e):
        voter = accounts[i]['name']
        retry(args.cleos + 'system voteproducer proxy ' + voter + ' ' + proxy)
def updateAuth(account, permission, parent, controller):
    """Replace *account*'s *permission* so only *controller*@active satisfies it."""
    run(args.cleos + 'push action eosio updateauth' + jsonArg({
        'account': account,
        'permission': permission,
        'parent': parent,
        'auth': {
            # Single-weight threshold: controller@active alone is sufficient.
            'threshold': 1, 'keys': [], 'waits': [],
            'accounts': [{
                'weight': 1,
                'permission': {'actor': controller, 'permission': 'active'}
            }]
        }
    }) + '-p ' + account + '@' + permission)
def resign(account, controller):
    """Hand *account*'s owner and active permissions over to *controller*."""
    updateAuth(account, 'owner', '', controller)
    updateAuth(account, 'active', 'owner', controller)
    sleep(1)
    # Show the resulting permission structure for verification.
    run(args.cleos + 'get account ' + account)
def randomTransfer(b, e):
    """Perform 20 tiny transfers between random distinct accounts in [b, e).

    '|| true' keeps the batch going even if an individual transfer fails.
    """
    for j in range(20):
        src = accounts[random.randint(b, e - 1)]['name']
        dest = src
        while dest == src:
            dest = accounts[random.randint(b, e - 1)]['name']
        run(args.cleos + 'transfer -f ' + src + ' ' + dest + ' "0.0001 ' + args.symbol + '"' + ' || true')
def msigProposeReplaceSystem(proposer, proposalName):
    """Propose (via eosio.msig) replacing the system contract with the fast-unstake build."""
    requestedPermissions = []
    for i in range(firstProducer, firstProducer + numProducers):
        requestedPermissions.append({'actor': accounts[i]['name'], 'permission': 'active'})
    trxPermissions = [{'actor': 'eosio', 'permission': 'active'}]
    # Embed the replacement wasm as hex in the setcode action payload.
    with open(fastUnstakeSystem, mode='rb') as f:
        setcode = {'account': 'eosio', 'vmtype': 0, 'vmversion': 0, 'code': f.read().hex()}
    run(args.cleos + 'multisig propose ' + proposalName + jsonArg(requestedPermissions) +
        jsonArg(trxPermissions) + 'eosio setcode' + jsonArg(setcode) + ' -p ' + proposer)
def msigApproveReplaceSystem(proposer, proposalName):
    """Approve the system-contract replacement proposal as every producer."""
    for i in range(firstProducer, firstProducer + numProducers):
        run(args.cleos + 'multisig approve ' + proposer + ' ' + proposalName +
            jsonArg({'actor': accounts[i]['name'], 'permission': 'active'}) +
            '-p ' + accounts[i]['name'])
def msigExecReplaceSystem(proposer, proposalName):
    """Execute the approved msig proposal, retrying until it succeeds."""
    retry(args.cleos + 'multisig exec ' + proposer + ' ' + proposalName + ' -p ' + proposer)
def msigReplaceSystem():
    """Full msig flow: buy RAM for the proposer, then propose/approve/exec the swap."""
    run(args.cleos + 'push action eosio buyrambytes' + jsonArg(['eosio', accounts[0]['name'], 200000]) + '-p eosio')
    sleep(1)
    msigProposeReplaceSystem(accounts[0]['name'], 'fast.unstake')
    sleep(1)
    msigApproveReplaceSystem(accounts[0]['name'], 'fast.unstake')
    msigExecReplaceSystem(accounts[0]['name'], 'fast.unstake')
def produceNewAccounts():
    """Generate 27000 keypairs and write name/key JSON fragments to 'newusers'.

    Account names encode the index i as 8 base-16 characters 'a'..'p'
    appended to the 'user' prefix.
    """
    with open('newusers', 'w') as f:
        for i in range(3000, 30000):
            x = getOutput(args.cleos + 'create key')
            # Parse the private/public key pair out of cleos's output.
            r = re.match('Private key: *([^ \n]*)\nPublic key: *([^ \n]*)', x, re.DOTALL | re.MULTILINE)
            name = 'user'
            for j in range(7, -1, -1):
                name += chr(ord('a') + ((i >> (j * 4)) & 15))
            print(i, name)
            f.write('    {"name":"%s", "pvt":"%s", "pub":"%s"},\n' % (name, r[1], r[2]))
def stepKillAll():
run('killall keosd nodeos || true')
sleep(1.5)
def stepStartWallet():
startWallet()
importKeys()
def stepStartBoot():
startNode(0, {'name': 'eosio', 'pvt': args.private_key, 'pub': args.public_key})
sleep(1.5)
def stepInstallSystemContracts():
run(args.cleos + 'set contract eosio.token ' + args.contracts_dir + 'eosio.token/')
run(args.cleos + 'set contract eosio.msig ' + args.contracts_dir + 'eosio.msig/')
def stepCreateTokens():
run(args.cleos + 'push action eosio.token create \'["eosio", "10000000000.0000 %s"]\' -p eosio.token' % (args.symbol))
totalAllocation = allocateFunds(0, len(accounts))
run(args.cleos + 'push action eosio.token issue \'["eosio", "%s", "memo"]\' -p eosio' % intToCurrency(totalAllocation))
sleep(1)
def stepSetSystemContract():
retry(args.cleos + 'set contract eosio ' + args.contracts_dir + 'eosio.system/')
sleep(1)
run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p eosio@active')
def stepCreateStakedAccounts():
createStakedAccounts(0, len(accounts))
def stepRegProducers():
regProducers(firstProducer, firstProducer + numProducers)
sleep(1)
listProducers()
def stepStartProducers():
startProducers(firstProducer, firstProducer + numProducers)
sleep(args.producer_sync_delay)
def stepVote():
vote(0, 0 + args.num_voters)
sleep(1)
listProducers()
sleep(5)
def stepProxyVotes():
proxyVotes(0, 0 + args.num_voters)
def stepResign():
resign('eosio', 'eosio.prods')
for a in systemAccounts:
resign(a, 'eosio')
def stepTransfer():
while True:
randomTransfer(0, args.num_senders)
def stepLog():
run('tail -n 60 ' + args.nodes_dir + '00-eosio/stderr')
# Command Line Arguments
parser = argparse.ArgumentParser()
commands = [
('k', 'kill', stepKillAll, True, "Kill all nodeos and keosd processes"),
('w', 'wallet', stepStartWallet, True, "Start keosd, create wallet, fill with keys"),
('b', 'boot', stepStartBoot, True, "Start boot node"),
('s', 'sys', createSystemAccounts, True, "Create system accounts (eosio.*)"),
('c', 'contracts', stepInstallSystemContracts, True, "Install system contracts (token, msig)"),
('t', 'tokens', stepCreateTokens, True, "Create tokens"),
('S', 'sys-contract', stepSetSystemContract, True, "Set system contract"),
('T', 'stake', stepCreateStakedAccounts, True, "Create staked accounts"),
('p', 'reg-prod', stepRegProducers, True, "Register producers"),
('P', 'start-prod', stepStartProducers, True, "Start producers"),
('v', 'vote', stepVote, True, "Vote for producers"),
('R', 'claim', claimRewards, True, "Claim rewards"),
('x', 'proxy', stepProxyVotes, True, "Proxy votes"),
('q', 'resign', stepResign, True, "Resign eosio"),
('m', 'msg-replace', msigReplaceSystem, False, "Replace system contract using msig"),
('X', 'xfer', stepTransfer, False, "Random transfer tokens (infinite loop)"),
('l', 'log', stepLog, True, "Show tail of node's log"),
]
parser.add_argument('--public-key', metavar='', help="EOSIO Public Key", default='EOS8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr', dest="public_key")
parser.add_argument('--private-Key', metavar='', help="EOSIO Private Key", default='5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p', dest="private_key")
parser.add_argument('--cleos', metavar='', help="Cleos command", default='../../build/programs/cleos/cleos --wallet-url http://localhost:6666 ')
parser.add_argument('--nodeos', metavar='', help="Path to nodeos binary", default='../../build/programs/nodeos/nodeos')
parser.add_argument('--keosd', metavar='', help="Path to keosd binary", default='../../build/programs/keosd/keosd')
parser.add_argument('--contracts-dir', metavar='', help="Path to contracts directory", default='../../build/contracts/')
parser.add_argument('--nodes-dir', metavar='', help="Path to nodes directory", default='./nodes/')
parser.add_argument('--genesis', metavar='', help="Path to genesis.json", default="./genesis.json")
parser.add_argument('--wallet-dir', metavar='', help="Path to wallet directory", default='./wallet/')
parser.add_argument('--log-path', metavar='', help="Path to log file", default='./output.log')
parser.add_argument('--symbol', metavar='', help="The eosio.system symbol", default='SYS')
parser.add_argument('--user-limit', metavar='', help="Max number of users. (0 = no limit)", type=int, default=3000)
parser.add_argument('--max-user-keys', metavar='', help="Maximum user keys to import into wallet", type=int, default=10)
parser.add_argument('--ram-funds', metavar='', help="How much funds for each user to spend on ram", type=float, default=0.1)
parser.add_argument('--min-stake', metavar='', help="Minimum stake before allocating unstaked funds", type=float, default=0.9)
parser.add_argument('--max-unstaked', metavar='', help="Maximum unstaked funds", type=float, default=10)
parser.add_argument('--producer-limit', metavar='', help="Maximum number of producers. (0 = no limit)", type=int, default=0)
parser.add_argument('--min-producer-funds', metavar='', help="Minimum producer funds", type=float, default=1000.0000)
parser.add_argument('--num-producers-vote', metavar='', help="Number of producers for which each user votes", type=int, default=20)
parser.add_argument('--num-voters', metavar='', help="Number of voters", type=int, default=10)
parser.add_argument('--num-senders', metavar='', help="Number of users to transfer funds randomly", type=int, default=10)
parser.add_argument('--producer-sync-delay', metavar='', help="Time (s) to sleep to allow producers to sync", type=int, default=80)
parser.add_argument('-a', '--all', action='store_true', help="Do everything marked with (*)")
parser.add_argument('-H', '--http-port', type=int, default=8000, metavar='', help='HTTP port for cleos')
for (flag, command, function, inAll, help) in commands:
prefix = ''
if inAll: prefix += '*'
if prefix: help = '(' + prefix + ') ' + help
if flag:
parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command)
else:
parser.add_argument('--' + command, action='store_true', help=help, dest=command)
args = parser.parse_args()
args.cleos += '--url http://localhost:%d ' % args.http_port
logFile = open(args.log_path, 'a')
logFile.write('\n\n' + '*' * 80 + '\n\n\n')
with open('accounts.json') as f:
a = json.load(f)
if args.user_limit:
del a['users'][args.user_limit:]
if args.producer_limit:
del a['producers'][args.producer_limit:]
firstProducer = len(a['users'])
numProducers = len(a['producers'])
accounts = a['users'] + a['producers']
maxClients = numProducers + 10
haveCommand = False
for (flag, command, function, inAll, help) in commands:
if getattr(args, command) or inAll and args.all:
if function:
haveCommand = True
function()
if not haveCommand:
print('bios-boot-tutorial.py: Tell me what to do. -a does almost everything. -h shows options.')
bios-boot-tutorial: --p2p-max-nodes-per-host
#!/usr/bin/env python3
import argparse
import json
import numpy
import os
import random
import re
import subprocess
import sys
import time
args = None
logFile = None
unlockTimeout = 99999999999
fastUnstakeSystem = './fast.refund/eosio.system/eosio.system.wasm'
systemAccounts = [
'eosio.bpay',
'eosio.msig',
'eosio.names',
'eosio.ram',
'eosio.ramfee',
'eosio.saving',
'eosio.stake',
'eosio.token',
'eosio.vpay',
]
def jsonArg(a):
return " '" + json.dumps(a) + "' "
def run(args):
print('bios-boot-tutorial.py:', args)
logFile.write(args + '\n')
if subprocess.call(args, shell=True):
print('bios-boot-tutorial.py: exiting because of error')
sys.exit(1)
def retry(args):
while True:
print('bios-boot-tutorial.py:', args)
logFile.write(args + '\n')
if subprocess.call(args, shell=True):
print('*** Retry')
else:
break
def background(args):
print('bios-boot-tutorial.py:', args)
logFile.write(args + '\n')
return subprocess.Popen(args, shell=True)
def getOutput(args):
print('bios-boot-tutorial.py:', args)
logFile.write(args + '\n')
proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
return proc.communicate()[0].decode('utf-8')
def getJsonOutput(args):
print('bios-boot-tutorial.py:', args)
logFile.write(args + '\n')
proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
return json.loads(proc.communicate()[0])
def sleep(t):
print('sleep', t, '...')
time.sleep(t)
print('resume')
def startWallet():
run('rm -rf ' + os.path.abspath(args.wallet_dir))
run('mkdir -p ' + os.path.abspath(args.wallet_dir))
background(args.keosd + ' --unlock-timeout %d --http-server-address 127.0.0.1:6666 --wallet-dir %s' % (unlockTimeout, os.path.abspath(args.wallet_dir)))
sleep(.4)
run(args.cleos + 'wallet create')
def importKeys():
run(args.cleos + 'wallet import ' + args.private_key)
keys = {}
for a in accounts:
key = a['pvt']
if not key in keys:
if len(keys) >= args.max_user_keys:
break
keys[key] = True
run(args.cleos + 'wallet import ' + key)
for i in range(firstProducer, firstProducer + numProducers):
a = accounts[i]
key = a['pvt']
if not key in keys:
keys[key] = True
run(args.cleos + 'wallet import ' + key)
def startNode(nodeIndex, account):
dir = args.nodes_dir + ('%02d-' % nodeIndex) + account['name'] + '/'
run('rm -rf ' + dir)
run('mkdir -p ' + dir)
otherOpts = ''.join(list(map(lambda i: ' --p2p-peer-address localhost:' + str(9000 + i), range(nodeIndex))))
if not nodeIndex: otherOpts += (
' --plugin eosio::history_plugin'
' --plugin eosio::history_api_plugin'
)
cmd = (
args.nodeos +
' --max-irreversible-block-age 9999999'
' --contracts-console'
' --genesis-json ' + os.path.abspath(args.genesis) +
' --blocks-dir ' + os.path.abspath(dir) + '/blocks'
' --config-dir ' + os.path.abspath(dir) +
' --data-dir ' + os.path.abspath(dir) +
' --chain-state-db-size-mb 1024'
' --http-server-address 127.0.0.1:' + str(8000 + nodeIndex) +
' --p2p-listen-endpoint 127.0.0.1:' + str(9000 + nodeIndex) +
' --max-clients ' + str(maxClients) +
' --p2p-max-nodes-per-host ' + str(maxClients) +
' --enable-stale-production'
' --producer-name ' + account['name'] +
' --private-key \'["' + account['pub'] + '","' + account['pvt'] + '"]\''
' --plugin eosio::http_plugin'
' --plugin eosio::chain_api_plugin'
' --plugin eosio::producer_plugin' +
otherOpts)
with open(dir + 'stderr', mode='w') as f:
f.write(cmd + '\n\n')
background(cmd + ' 2>>' + dir + 'stderr')
def startProducers(b, e):
for i in range(b, e):
startNode(i - b + 1, accounts[i])
def createSystemAccounts():
for a in systemAccounts:
run(args.cleos + 'create account eosio ' + a + ' ' + args.public_key)
def intToCurrency(i):
return '%d.%04d %s' % (i // 10000, i % 10000, args.symbol)
def allocateFunds(b, e):
dist = numpy.random.pareto(1.161, e - b).tolist() # 1.161 = 80/20 rule
dist.sort()
dist.reverse()
factor = 1_000_000_000 / sum(dist)
total = 0
for i in range(b, e):
funds = round(factor * dist[i - b] * 10000)
if i >= firstProducer and i < firstProducer + numProducers:
funds = max(funds, round(args.min_producer_funds * 10000))
total += funds
accounts[i]['funds'] = funds
return total
def createStakedAccounts(b, e):
ramFunds = round(args.ram_funds * 10000)
configuredMinStake = round(args.min_stake * 10000)
maxUnstaked = round(args.max_unstaked * 10000)
for i in range(b, e):
a = accounts[i]
funds = a['funds']
if funds < ramFunds:
print('skipping %s: not enough funds to cover ram' % a['name'])
continue
minStake = min(funds - ramFunds, configuredMinStake)
unstaked = min(funds - ramFunds - minStake, maxUnstaked)
stake = funds - ramFunds - unstaked
stakeNet = round(stake / 2)
stakeCpu = stake - stakeNet
print('%s: total funds=%s, ram=%s, net=%s, cpu=%s, unstaked=%s' % (a['name'], intToCurrency(a['funds']), intToCurrency(ramFunds), intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(unstaked)))
assert(funds == ramFunds + stakeNet + stakeCpu + unstaked)
run(args.cleos + 'system newaccount --transfer eosio %s %s --stake-net "%s" --stake-cpu "%s" --buy-ram-EOS "%s" ' %
(a['name'], a['pub'], intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(ramFunds)))
run(args.cleos + 'transfer eosio %s "%s"' % (a['name'], intToCurrency(unstaked)))
def regProducers(b, e):
for i in range(b, e):
a = accounts[i]
retry(args.cleos + 'system regproducer ' + a['name'] + ' ' + a['pub'] + ' https://' + a['name'] + '.com' + '/' + a['pub'])
def listProducers():
run(args.cleos + 'system listproducers')
def vote(b, e):
for i in range(b, e):
voter = accounts[i]['name']
prods = random.sample(range(firstProducer, firstProducer + numProducers), args.num_producers_vote)
prods = ' '.join(map(lambda x: accounts[x]['name'], prods))
retry(args.cleos + 'system voteproducer prods ' + voter + ' ' + prods)
def claimRewards():
table = getJsonOutput(args.cleos + 'get table eosio eosio producers -l 100')
times = []
for row in table['rows']:
if row['unpaid_blocks'] and not row['last_claim_time']:
times.append(getJsonOutput(args.cleos + 'system claimrewards -j ' + row['owner'])['processed']['elapsed'])
print('Elapsed time for claimrewards:', times)
# NOTE(review): this is an exact duplicate of vote() defined earlier in this file;
# being later, this second definition silently shadows the first. One copy should
# be removed.
def vote(b, e):
    # For each voter in accounts[b:e], vote for a random sample of producers.
    for i in range(b, e):
        voter = accounts[i]['name']
        prods = random.sample(range(firstProducer, firstProducer + numProducers), args.num_producers_vote)
        prods = ' '.join(map(lambda x: accounts[x]['name'], prods))
        retry(args.cleos + 'system voteproducer prods ' + voter + ' ' + prods)
def proxyVotes(b, e):
    """Register the first producer account as a voting proxy and have every
    voter in accounts[b:e] delegate its vote through it."""
    # The proxy account itself must vote directly first.
    vote(firstProducer, firstProducer + 1)
    proxy = accounts[firstProducer]['name']
    retry(args.cleos + 'system regproxy ' + proxy)
    sleep(1.0)
    for idx in range(b, e):
        voter = accounts[idx]['name']
        retry(args.cleos + 'system voteproducer proxy ' + voter + ' ' + proxy)
def updateAuth(account, permission, parent, controller):
    """Repoint account@permission so it is controlled solely by controller@active
    (threshold 1, no keys or waits)."""
    auth = {
        'threshold': 1, 'keys': [], 'waits': [],
        'accounts': [{
            'weight': 1,
            'permission': {'actor': controller, 'permission': 'active'}
        }]
    }
    payload = {'account': account, 'permission': permission, 'parent': parent, 'auth': auth}
    run(args.cleos + 'push action eosio updateauth' + jsonArg(payload) + '-p ' + account + '@' + permission)
def resign(account, controller):
    """Hand both the owner and active permissions of account over to controller,
    then dump the account so the change is visible in the log."""
    for permission, parent in (('owner', ''), ('active', 'owner')):
        updateAuth(account, permission, parent, controller)
    sleep(1)
    run(args.cleos + 'get account ' + account)
def randomTransfer(b, e):
    """Perform 20 tiny transfers between random distinct accounts in [b, e).
    '|| true' keeps a failed transfer from aborting the run."""
    for _ in range(20):
        src = accounts[random.randint(b, e - 1)]['name']
        while True:
            dest = accounts[random.randint(b, e - 1)]['name']
            if dest != src:
                break
        run(args.cleos + f'transfer -f {src} {dest} "0.0001 {args.symbol}" || true')
def msigProposeReplaceSystem(proposer, proposalName):
    """Propose a multisig transaction (approved by all producers) that installs
    the fast-unstake system contract on the eosio account."""
    requested = [
        {'actor': accounts[i]['name'], 'permission': 'active'}
        for i in range(firstProducer, firstProducer + numProducers)
    ]
    trx_permissions = [{'actor': 'eosio', 'permission': 'active'}]
    with open(fastUnstakeSystem, mode='rb') as f:
        setcode = {'account': 'eosio', 'vmtype': 0, 'vmversion': 0, 'code': f.read().hex()}
    run(args.cleos + 'multisig propose ' + proposalName + jsonArg(requested) +
        jsonArg(trx_permissions) + 'eosio setcode' + jsonArg(setcode) + ' -p ' + proposer)
def msigApproveReplaceSystem(proposer, proposalName):
    """Approve the named multisig proposal once as each producer account."""
    for i in range(firstProducer, firstProducer + numProducers):
        approver = accounts[i]['name']
        run(args.cleos + 'multisig approve ' + proposer + ' ' + proposalName +
            jsonArg({'actor': approver, 'permission': 'active'}) +
            '-p ' + approver)
def msigExecReplaceSystem(proposer, proposalName):
    # Execute the fully-approved multisig proposal, retrying until the chain accepts it.
    retry(args.cleos + 'multisig exec ' + proposer + ' ' + proposalName + ' -p ' + proposer)
def msigReplaceSystem():
    """Full msig flow: buy ram for the proposer account, then propose, approve,
    and execute the system-contract replacement under proposal 'fast.unstake'."""
    proposer = accounts[0]['name']
    run(args.cleos + 'push action eosio buyrambytes' + jsonArg(['eosio', proposer, 200000]) + '-p eosio')
    sleep(1)
    msigProposeReplaceSystem(proposer, 'fast.unstake')
    sleep(1)
    msigApproveReplaceSystem(proposer, 'fast.unstake')
    msigExecReplaceSystem(proposer, 'fast.unstake')
def produceNewAccounts():
    """Generate 27000 key pairs and append them as user entries to the
    'newusers' file (JSON-fragment lines)."""
    with open('newusers', 'w') as f:
        for i in range(3000, 30000):
            key_text = getOutput(args.cleos + 'create key')
            match = re.match('Private key: *([^ \n]*)\nPublic key: *([^ \n]*)', key_text, re.DOTALL | re.MULTILINE)
            # Encode the index as 8 characters drawn from 'a'..'p' (one per
            # 4-bit nibble) so the result is a valid EOSIO account-name suffix.
            suffix = ''.join(chr(ord('a') + ((i >> (j * 4)) & 15)) for j in range(7, -1, -1))
            name = 'user' + suffix
            print(i, name)
            f.write(' {"name":"%s", "pvt":"%s", "pub":"%s"},\n' % (name, match[1], match[2]))
def stepKillAll():
    # Kill any running keosd/nodeos; '|| true' keeps this from failing when none exist.
    run('killall keosd nodeos || true')
    sleep(1.5)
def stepStartWallet():
    # Launch keosd and load all known keys into the wallet.
    startWallet()
    importKeys()
def stepStartBoot():
    # Start node 0 (the boot node) producing as the eosio account.
    startNode(0, {'name': 'eosio', 'pvt': args.private_key, 'pub': args.public_key})
    sleep(1.5)
def stepInstallSystemContracts():
    # Deploy the token and msig contracts onto their dedicated system accounts.
    run(args.cleos + 'set contract eosio.token ' + args.contracts_dir + 'eosio.token/')
    run(args.cleos + 'set contract eosio.msig ' + args.contracts_dir + 'eosio.msig/')
def stepCreateTokens():
    # Create the core token with a 10B max supply, then issue exactly enough to
    # cover the per-account allocations computed by allocateFunds().
    run(args.cleos + 'push action eosio.token create \'["eosio", "10000000000.0000 %s"]\' -p eosio.token' % (args.symbol))
    totalAllocation = allocateFunds(0, len(accounts))
    run(args.cleos + 'push action eosio.token issue \'["eosio", "%s", "memo"]\' -p eosio' % intToCurrency(totalAllocation))
    sleep(1)
def stepSetSystemContract():
    # Install eosio.system on the eosio account (retried: it can fail until the
    # chain is ready), then mark eosio.msig as a privileged account.
    retry(args.cleos + 'set contract eosio ' + args.contracts_dir + 'eosio.system/')
    sleep(1)
    run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p eosio@active')
def stepCreateStakedAccounts():
    # Create every account (users and producers) with its staked allocation.
    createStakedAccounts(0, len(accounts))
def stepRegProducers():
    # Register the producer slice of the accounts list, then show the table.
    regProducers(firstProducer, firstProducer + numProducers)
    sleep(1)
    listProducers()
def stepStartProducers():
    # Start a node per producer and give them time to sync with the boot node.
    startProducers(firstProducer, firstProducer + numProducers)
    sleep(args.producer_sync_delay)
def stepVote():
    # Have the first num_voters users vote, then display the resulting ranking.
    vote(0, 0 + args.num_voters)
    sleep(1)
    listProducers()
    sleep(5)
def stepProxyVotes():
    # Route the first num_voters users' votes through a producer proxy.
    proxyVotes(0, 0 + args.num_voters)
def stepResign():
    # Hand eosio over to eosio.prods, and every other system account to eosio.
    resign('eosio', 'eosio.prods')
    for a in systemAccounts:
        resign(a, 'eosio')
def stepTransfer():
    # Infinite loop of random transfers among the first num_senders accounts
    # (intended as a background load generator; never returns).
    while True:
        randomTransfer(0, args.num_senders)
def stepLog():
    # Show the tail of the boot node's stderr log.
    run('tail -n 60 ' + args.nodes_dir + '00-eosio/stderr')
# Command Line Arguments
parser = argparse.ArgumentParser()
# Each command: (short flag, long flag, handler, included-in--a, help text).
commands = [
    ('k', 'kill', stepKillAll, True, "Kill all nodeos and keosd processes"),
    ('w', 'wallet', stepStartWallet, True, "Start keosd, create wallet, fill with keys"),
    ('b', 'boot', stepStartBoot, True, "Start boot node"),
    ('s', 'sys', createSystemAccounts, True, "Create system accounts (eosio.*)"),
    ('c', 'contracts', stepInstallSystemContracts, True, "Install system contracts (token, msig)"),
    ('t', 'tokens', stepCreateTokens, True, "Create tokens"),
    ('S', 'sys-contract', stepSetSystemContract, True, "Set system contract"),
    ('T', 'stake', stepCreateStakedAccounts, True, "Create staked accounts"),
    ('p', 'reg-prod', stepRegProducers, True, "Register producers"),
    ('P', 'start-prod', stepStartProducers, True, "Start producers"),
    ('v', 'vote', stepVote, True, "Vote for producers"),
    ('R', 'claim', claimRewards, True, "Claim rewards"),
    ('x', 'proxy', stepProxyVotes, True, "Proxy votes"),
    ('q', 'resign', stepResign, True, "Resign eosio"),
    ('m', 'msg-replace', msigReplaceSystem, False, "Replace system contract using msig"),
    ('X', 'xfer', stepTransfer, False, "Random transfer tokens (infinite loop)"),
    ('l', 'log', stepLog, True, "Show tail of node's log"),
]
# NOTE(review): '--private-Key' has a capital K (callers must type it that way);
# dest='private_key' keeps the attribute name conventional.
parser.add_argument('--public-key', metavar='', help="EOSIO Public Key", default='EOS8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr', dest="public_key")
parser.add_argument('--private-Key', metavar='', help="EOSIO Private Key", default='5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p', dest="private_key")
parser.add_argument('--cleos', metavar='', help="Cleos command", default='../../build/programs/cleos/cleos --wallet-url http://localhost:6666 ')
parser.add_argument('--nodeos', metavar='', help="Path to nodeos binary", default='../../build/programs/nodeos/nodeos')
parser.add_argument('--keosd', metavar='', help="Path to keosd binary", default='../../build/programs/keosd/keosd')
parser.add_argument('--contracts-dir', metavar='', help="Path to contracts directory", default='../../build/contracts/')
parser.add_argument('--nodes-dir', metavar='', help="Path to nodes directory", default='./nodes/')
parser.add_argument('--genesis', metavar='', help="Path to genesis.json", default="./genesis.json")
parser.add_argument('--wallet-dir', metavar='', help="Path to wallet directory", default='./wallet/')
parser.add_argument('--log-path', metavar='', help="Path to log file", default='./output.log')
parser.add_argument('--symbol', metavar='', help="The eosio.system symbol", default='SYS')
parser.add_argument('--user-limit', metavar='', help="Max number of users. (0 = no limit)", type=int, default=3000)
parser.add_argument('--max-user-keys', metavar='', help="Maximum user keys to import into wallet", type=int, default=10)
parser.add_argument('--ram-funds', metavar='', help="How much funds for each user to spend on ram", type=float, default=0.1)
parser.add_argument('--min-stake', metavar='', help="Minimum stake before allocating unstaked funds", type=float, default=0.9)
parser.add_argument('--max-unstaked', metavar='', help="Maximum unstaked funds", type=float, default=10)
parser.add_argument('--producer-limit', metavar='', help="Maximum number of producers. (0 = no limit)", type=int, default=0)
parser.add_argument('--min-producer-funds', metavar='', help="Minimum producer funds", type=float, default=1000.0000)
parser.add_argument('--num-producers-vote', metavar='', help="Number of producers for which each user votes", type=int, default=20)
parser.add_argument('--num-voters', metavar='', help="Number of voters", type=int, default=10)
parser.add_argument('--num-senders', metavar='', help="Number of users to transfer funds randomly", type=int, default=10)
parser.add_argument('--producer-sync-delay', metavar='', help="Time (s) to sleep to allow producers to sync", type=int, default=80)
parser.add_argument('-a', '--all', action='store_true', help="Do everything marked with (*)")
parser.add_argument('-H', '--http-port', type=int, default=8000, metavar='', help='HTTP port for cleos')
# Register one boolean option per command; '(*)' marks those included in --all.
# (NOTE(review): the loop variable 'help' shadows the builtin within this loop.)
for (flag, command, function, inAll, help) in commands:
    prefix = ''
    if inAll: prefix += '*'
    if prefix: help = '(' + prefix + ') ' + help
    if flag:
        parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command)
    else:
        parser.add_argument('--' + command, action='store_true', help=help, dest=command)
args = parser.parse_args()
# Point every cleos invocation at the selected HTTP port.
args.cleos += '--url http://localhost:%d ' % args.http_port
logFile = open(args.log_path, 'a')
logFile.write('\n\n' + '*' * 80 + '\n\n\n')
# Load accounts.json and trim the user/producer lists to the configured limits.
with open('accounts.json') as f:
    a = json.load(f)
if args.user_limit:
    del a['users'][args.user_limit:]
if args.producer_limit:
    del a['producers'][args.producer_limit:]
# Producers occupy the tail of the combined accounts list.
firstProducer = len(a['users'])
numProducers = len(a['producers'])
accounts = a['users'] + a['producers']
maxClients = numProducers + 10
# Run every requested command in declaration order.
haveCommand = False
for (flag, command, function, inAll, help) in commands:
    if getattr(args, command) or inAll and args.all:
        if function:
            haveCommand = True
            function()
if not haveCommand:
    print('bios-boot-tutorial.py: Tell me what to do. -a does almost everything. -h shows options.')
|
#!/usr/bin/python3
import tkinter
import turtle
from functools import partial
from pdb import set_trace as debug
class CalculatorTurtle(turtle.RawTurtle):
    """A turtle that draws long-form arithmetic (digits, operators, and the
    written add/subtract/multiply/divide procedures) on a character grid.

    Grid convention: each character cell is self.width x self.height pixels;
    drawing methods take the cell's (x, y) grid coordinates and waypoints are
    fractions (a, b) within that cell (see to_waypoint).
    """
    def __init__(self, canvas):
        turtle.RawTurtle.__init__(self, canvas)
        self.penup()
        # Character-cell size in pixels.
        # NOTE(review): these assignments shadow RawTurtle's width() (pen width)
        # method on instances — confirm nothing calls self.width() as a method.
        self.width = 48
        self.height = 80
        self.speed(0) # max speed for testing purposes; comment out (or use speed menu) for demos
        # Dispatch table: glyph character -> drawing method.
        self.symbols = {'0':self.zero, '1':self.one, '2':self.two,
            '3':self.three, '4':self.four, '5':self.five, '6':self.six,
            '7':self.seven, '8':self.eight, '9':self.nine, '+':self.plus,
            '-':self.minus}
        class HalfTurtle(object):
            """Context manager that halves the cell size on entry and restores
            it on exit; used for the small borrow/carry digits in subtract."""
            def __init__(self, turtle):
                self.turtle = turtle
            def __enter__(self):
                self.turtle.width /= 2
                self.turtle.height /= 2
            def __exit__(self, _type, _value, _traceback):
                self.turtle.width *= 2
                self.turtle.height *= 2
        self.do_half = HalfTurtle(self)
    def to_waypoint(self, a, b, x, y):
        # Move (pen state unchanged) to fraction (a, b) inside grid cell (x, y).
        wp = ((x+a)*self.width, (y+b)*self.height)
        self.setheading(self.towards(wp[0], wp[1]))
        self.forward(self.distance(wp[0], wp[1]))
    # Note the many TODOs below; I should redesign all the digits,
    # but I made these sloppy digits first because I'm inspired by the
    # vision of having a minimal viable program as soon as possible
    def zero(self, x, y):
        # TODO: make this a proper elliptical zero, rather than a small circle
        self.penup()
        self.to_waypoint(0.8, 0.5, x, y)
        self.pendown()
        self.setheading(90)
        self.circle(0.3*self.width)
        self.penup()
    def one(self, x, y):
        # TODO?-- a better numeral "1" than just a straight line?
        self.penup()
        self.to_waypoint(0.5, 0.8, x, y)
        self.pendown()
        self.setheading(270)
        self.forward(0.6*self.height)
        self.penup()
    def two(self, x, y):
        # TODO: This isn't such a terrible "2" but it could probably be better
        self.penup()
        self.to_waypoint(0.2, 0.6, x, y)
        self.pendown()
        self.setheading(90)
        self.circle(-0.3*self.width, 180)
        self.to_waypoint(0.2, 0.2, x, y)
        self.to_waypoint(0.8, 0.2, x, y)
        self.penup()
    def three(self, x, y):
        # TODO: Not terrible "3"; could be more natural
        self.penup()
        self.to_waypoint(0.2, 0.7, x, y)
        self.pendown()
        self.setheading(45)
        self.circle(-0.15*self.height, 225)
        self.setheading(0)
        self.circle(-0.15*self.height, 225)
        self.penup()
    def four(self, x, y):
        # TODO: this is a crappy "4" in more ways than one
        self.penup()
        self.to_waypoint(0.2, 0.8, x, y)
        self.pendown()
        self.to_waypoint(0.2, 0.4, x, y)
        self.to_waypoint(0.8, 0.4, x, y)
        self.penup()
        self.to_waypoint(0.8, 0.8, x, y)
        self.pendown()
        self.to_waypoint(0.8, 0.2, x, y)
        self.penup()
    def five(self, x, y):
        # TODO: design a better "5"
        self.penup()
        self.to_waypoint(0.8, 0.8, x, y)
        self.pendown()
        self.to_waypoint(0.2, 0.8, x, y)
        self.to_waypoint(0.2, 0.5, x, y)
        self.setheading(0)
        self.forward(0.3*self.width)
        self.circle(-0.15*self.height, 240)
        self.penup()
    def six(self, x, y):
        # TODO: more natural "6"
        self.penup()
        self.to_waypoint(0.8, 0.7, x, y)
        self.pendown()
        self.setheading(90)
        self.circle(0.3*self.width, 180)
        self.forward(0.4*self.height)
        self.circle(0.3*self.width)
        self.penup()
    def seven(self, x, y):
        # TODO: more natural "7"
        self.penup()
        self.to_waypoint(0.2, 0.8, x, y)
        self.pendown()
        self.to_waypoint(0.8, 0.8, x, y)
        self.to_waypoint(0.2, 0.2, x, y)
        self.penup()
    def eight(self, x, y):
        # TODO: more natural "8"
        self.penup()
        self.to_waypoint(0.5, 0.5, x, y)
        self.pendown()
        self.setheading(0)
        self.circle(0.15*self.height)
        self.circle(-0.15*self.height)
        self.penup()
    def nine(self, x, y):
        # TODO: more natural "9"
        self.penup()
        self.to_waypoint(0.8, 0.7, x, y)
        self.pendown()
        self.setheading(270)
        self.circle(-0.3*self.width)
        self.forward(0.6*self.height)
        self.penup()
    def plus(self, x, y):
        # Horizontal stroke then vertical stroke.
        self.penup()
        self.to_waypoint(0.1, 0.5, x, y)
        self.pendown()
        self.setheading(0)
        self.forward(0.8*self.width)
        self.penup()
        self.to_waypoint(0.5, 0.74, x, y)
        self.pendown()
        self.setheading(270)
        self.forward(0.48*self.height)
        self.penup()
    def minus(self, x, y):
        self.penup()
        self.to_waypoint(0.1, 0.5, x, y)
        self.pendown()
        self.setheading(0)
        self.forward(0.8*self.width)
        self.penup()
    def times(self, x, y):
        # Two crossing diagonal strokes.
        self.penup()
        self.to_waypoint(0.1, 0.74, x, y)
        self.pendown()
        self.to_waypoint(0.9, 0.26, x, y)
        self.penup()
        self.to_waypoint(0.9, 0.74, x, y)
        self.pendown()
        self.to_waypoint(0.1, 0.26, x, y)
        self.penup()
    def bottom_line(self, x, y, length):
        # Horizontal rule spanning `length` cells, along the bottom of row y.
        self.penup()
        self.to_waypoint(0, 0, x, y)
        self.pendown()
        self.setheading(0)
        self.forward(length*self.width)
        self.penup()
    def slash(self, x, y):
        # Full-cell diagonal used to cross out a borrowed-from digit.
        self.penup()
        self.to_waypoint(0, 1, x, y)
        self.pendown()
        self.to_waypoint(1, 0, x, y)
        self.penup()
    def r(self, x, y):
        # TODO: a better 'r' is possible
        # Lowercase 'r' marking the remainder in division.
        self.penup()
        self.to_waypoint(0.2, 0.5, x, y)
        self.setheading(-90)
        self.pendown()
        self.forward(0.4*self.height)
        self.penup()
        self.setheading(90)
        self.forward(0.35*self.height)
        self.pendown()
        self.setheading(15)
        self.circle(-0.5*self.width, 60)
        self.penup()
    def number(self, digits, x, y):
        # Draw a digit string left-to-right starting at cell (x, y).
        for i, d in enumerate(digits):
            self.symbols[d](x+i, y)
    def statement(self, arg1, arg2, op, x, y):
        # Draw the stacked two-operand statement (operands right-aligned,
        # operator beside the second row, rule underneath) and return the
        # zero-padded operand strings.
        args_length = max([len(a) for a in (arg1, arg2)])
        args = [s.zfill(args_length) for s in (arg1, arg2)]
        for i, s in enumerate(args):
            # Skip the padding zeros (but always draw the last column).
            leading_zeros = True
            for j, figure in enumerate(s):
                if not leading_zeros or figure!='0' or j == args_length-1:
                    leading_zeros = False
                    draw_digit = self.symbols[figure]
                    draw_digit(x+j+1, y-i)
            if i == len(args)-2:
                if op == '+':
                    self.plus(x, y-1)
                elif op == '-':
                    self.minus(x, y-1)
                elif op == 'x':
                    self.times(x, y-1)
            if i == len(args)-1:
                self.bottom_line(x, y-1, args_length+1)
        return args
    def add(self, summand1, summand2, x, y):
        # Column addition: draw the statement, then each column's result digit
        # (row y-2) and carry digit (row y+1), right to left.
        summands = ['0'+s for s in self.statement(summand1, summand2, '+', x, y)]
        summands_length = len(summands[0])
        carry = 0
        for i in range(1, summands_length+1):
            place_sum = sum([int(s[-i]) for s in summands])
            place_sum += carry
            place_sum = str(place_sum).zfill(2)
            # Suppress a leading zero in the final (leftmost) result column.
            if not (i==summands_length and place_sum[-1]=='0'):
                draw_result_digit = self.symbols[place_sum[-1]]
                draw_result_digit(x+summands_length-i, y-2)
            if place_sum[-2]!='0':
                draw_carry_digit = self.symbols[place_sum[-2]]
                draw_carry_digit(x+summands_length-1-i, y+1)
            carry = int(place_sum[-2])
        self.forward(45)
    def subtract(self, minuhend, subtrahend, x, y):
        # TODO: add support for multiple borrowings, strip leading
        # zeros, refrain from retracing already drawn slashes and
        # digits
        self.statement(minuhend, subtrahend, '-', x, y)
        subtrahend = subtrahend.zfill(len(minuhend))
        for i in range(1, len(minuhend)+1):
            place_difference = int(minuhend[-i]) - int(subtrahend[-i])
            if place_difference >= 0:
                draw_result_digit = self.symbols[str(place_difference)]
                draw_result_digit(x+len(minuhend)+1-i, y-2)
            else:
                # Borrow: slash the neighbor, write the decremented digit and
                # the borrowed '1x' digits at half size, then the difference.
                # I worry that temporarily adjusting the length and
                # width properties like this is a kludge; how _should_
                # it be done?
                self.slash(x+len(minuhend)-i, y)
                with self.do_half:
                    draw_creditor_digit = self.symbols[str(int(minuhend[-(i+1)]) - 1)]
                    draw_creditor_digit(2*(x+len(minuhend)-i)+1, 2*(y+1))
                minuhend = minuhend[:-(i+1)] + str(int(minuhend[-(i+1)]) - 1) + minuhend[-i:]
                self.slash(x+len(minuhend)-i+1, y)
                with self.do_half:
                    self.one(2*(x+len(minuhend)+1-i), 2*(y+1))
                    draw_debtor_digit = self.symbols[minuhend[-i]]
                    draw_debtor_digit(2*(x+len(minuhend)+1-i)+1, 2*(y+1))
                place_difference = int('1' + minuhend[-i]) - int(subtrahend[-i])
                draw_result_digit = self.symbols[str(place_difference)]
                draw_result_digit(x+len(minuhend)+1-i, y-2)
        self.forward(45)
    def multiply(self, factor1, factor2, x, y):
        # Long multiplication: one partial-product row per digit of factor2,
        # then a final column sum under the rule.
        self.statement(factor1, factor2, 'x', x, y)
        factors_length = max(len(factor1), len(factor2))
        carry = 0
        # summands[i] holds partial-product digits, least significant first,
        # pre-padded with i zeros for the place shift.
        summands = [[0]*n for n in range(len(factor2))]
        for i in range(1, len(factor2)+1):
            for j in range(1, len(factor1)+1):
                place_product = (int(factor1[-j])*int(factor2[-i]))
                place_product += carry
                place_product = str(place_product).zfill(2)
                draw_result_digit = self.symbols[place_product[-1]]
                draw_result_digit(x+factors_length-i-j+2, y-1-i)
                summands[i-1].append(int(place_product[-1]))
                carry = int(place_product[-2])
                if j == len(factor1):
                    # Flush the final carry of this row as an extra digit.
                    draw_last_digit = self.symbols[place_product[-2]]
                    draw_last_digit(x+factors_length-i-j+1, y-1-i)
                    summands[i-1].append(int(place_product[-2]))
                    carry = 0
        summands_length = max(len(s) for s in summands)
        self.bottom_line(x-(summands_length-factors_length)+1, y-len(factor2)-1, summands_length)
        # Pad all rows to equal length before the column sum.
        for s in summands:
            for p in range(summands_length - len(s)):
                s.append(0)
        summands.append([0]*summands_length) # carry digits
        for p in range(1, summands_length+1):
            place_sum = str(sum(s[p-1] for s in summands))
            draw_final_digit = self.symbols[place_sum[-1]]
            draw_final_digit(x+factors_length-p+1, y-len(factor2)-2)
            for i, d in enumerate(map(int, reversed(list(place_sum[:-1])))):
                summands[-1][p+i] += d
        # but also need to support final carry into result
        # wait, perhaps not
    def division_tableau(self, x, y, length):
        # Draw the long-division bracket: left bar plus overbar of `length` cells.
        self.penup()
        # maybe tweak first arg to -0.1ish for curved bar---
        self.to_waypoint(0, 0, x, y-1)
        self.pendown()
        # straight bar, not as cool
        self.to_waypoint(0, 0, x, y)
        # curved bar #TODO calculate parameters of right-paren shape
        # self.setheading(??)
        # self.circle(self.height, ??)
        self.bottom_line(x, y, length)
    def division_statement(self, dividend, divisor, x, y):
        # Divisor to the left of the tableau, dividend under the overbar.
        self.penup()
        self.number(divisor, x, y-1)
        self.division_tableau(x+len(divisor), y, len(dividend))
        self.number(dividend, x+len(divisor), y-1)
    def divide(self, dividend, divisor, x, y):
        # STILL WRONG
        self.division_statement(dividend, divisor, x, y)
        # Find the shortest dividend prefix that is >= the divisor.
        i = 0
        while int(dividend[:i+1]) < int(divisor):
            i += 1
        place_dividend = dividend[:i]
        for j, d in enumerate(dividend[i:]):
            place_dividend += d
            place_quotient = str(int(place_dividend)//int(divisor))
            self.number(place_quotient, x+len(divisor)+i+j, y)
            # NOTE(review): divisor is a str, so int(place_quotient)*divisor
            # REPEATS the divisor string rather than multiplying numbers —
            # part of why this method is marked STILL WRONG above.
            place_subtrahend = int(place_quotient)*divisor
            self.number(place_subtrahend, x+len(divisor)+i+j-(len(place_subtrahend)-1), (-2)*(j+1))
            self.bottom_line(x+len(divisor)+i+j-(len(place_subtrahend)-1), (-2)*(j+1), len(place_subtrahend))
            place_dividend = str(int(place_dividend)-int(place_subtrahend))
            self.number(place_dividend, x+len(divisor)+i+j-(len(place_dividend)-1), (-2)*(j+1)-1)
            if j != len(dividend[i:])-1:
                # Bring down the next dividend digit.
                self.symbols[dividend[i+j+1]](x+len(divisor)+i+j-(len(place_dividend)), (-2)*(j+1)-1)
        remainder = place_dividend
        self.r(x+len(divisor)+len(dividend), 0)
        self.number(remainder, x+len(divisor)+len(dividend)+1, 0)
class TurtleArithmetic(tkinter.Tk):
    """Main application window: a turtle canvas plus two number entries and one
    button per operation. Constructing an instance enters the tkinter mainloop."""
    def __init__(self):
        tkinter.Tk.__init__(self)
        self.title("Turtle Arithmetic (in development)")
        self.resizable(width='FALSE', height='FALSE')
        # Menus: File, Appearance (chalk/white board), Speed.
        self.menu_bar = tkinter.Menu(self)
        self.file_menu = tkinter.Menu(self.menu_bar, tearoff=0)
        self.file_menu.add_command(label="Quit", command=self.destroy)
        self.menu_bar.add_cascade(label="File", menu=self.file_menu)
        # TODO: submenu to choose chalk colors?
        # IDEA: optional general colorpicker popup window?
        # BUG: appearance and speed menus appear to behave as part of same radio group
        self.appearance_menu = tkinter.Menu(self.menu_bar, tearoff=0)
        self.appearance_menu.add_radiobutton(label="Chalkboard", command=self.chalkboard_appearance)
        self.appearance_menu.add_radiobutton(label="Whiteboard", command=self.whiteboard_appearance)
        self.menu_bar.add_cascade(label="Appearance", menu=self.appearance_menu)
        # TODO: test speed menu for odd behavior; it seems to work
        # inconsistently (!?!) when changed during turtle action; in
        # the worst case it could be disabled during action
        self.speed_menu = tkinter.Menu(self.menu_bar, tearoff=0)
        # s=s default binds each label at definition time (late-binding fix).
        for s in ['slowest', 'slow', 'normal', 'fast', 'fastest']:
            self.speed_menu.add_radiobutton(label=s, command=lambda s=s: self.our_heroine.speed(s))
        self.menu_bar.add_cascade(label="Speed", menu=self.speed_menu)
        self.config(menu=self.menu_bar)
        self.canvas_width = 500
        self.canvas_height = 500
        self.turtle_canvas = tkinter.Canvas(self, width=self.canvas_width, height=self.canvas_height)
        self.turtle_canvas.grid(row=0, columnspan=4)
        # Operand entry fields.
        self.first_number_label = tkinter.Label(self, text="First number:")
        self.first_number_label.grid(row=1, column=1, sticky='E')
        self.first_number_field = tkinter.Entry(self)
        self.first_number_field.configure(width=5)
        self.first_number_field.grid(row=1, column=2, sticky='W')
        self.second_number_label = tkinter.Label(self, text="Second number:")
        self.second_number_label.grid(row=2, column=1, sticky='E')
        self.second_number_field = tkinter.Entry(self)
        self.second_number_field.configure(width=5)
        self.second_number_field.grid(row=2, column=2, sticky='W')
        # NOTE(review): self.add_button is reassigned for all four buttons, so
        # only the Divide button remains referenced by that attribute; the
        # widgets themselves all stay gridded and functional.
        self.add_button = tkinter.Button(self, text="Add", command=self.operation('+'))
        self.add_button.grid(row=3, column=0)
        self.add_button = tkinter.Button(self, text="Subtract", command=self.operation('-'))
        self.add_button.grid(row=3, column=1)
        self.add_button = tkinter.Button(self, text="Multiply", command=self.operation('x'))
        self.add_button.grid(row=3, column=2)
        self.add_button = tkinter.Button(self, text="Divide", command=self.operation('/'))
        self.add_button.grid(row=3, column=3)
        # Wire the turtle to the canvas; put the origin at the bottom-left.
        self.setting = turtle.TurtleScreen(self.turtle_canvas)
        self.setting.setworldcoordinates(0, 0, 500, 500)
        self.our_heroine = CalculatorTurtle(self.setting)
        self.chalkboard_appearance()
        self.our_heroine.setheading(self.our_heroine.towards(250, 250))
        self.our_heroine.forward(self.our_heroine.distance(250, 250))
        # Blocks here until the window is closed.
        self.mainloop()
    def chalkboard_appearance(self):
        # Dark green background, white "chalk"; clears any existing drawing.
        self.our_heroine.clear()
        self.setting.bgcolor("#2B502B")
        self.our_heroine.shape("turtle")
        self.our_heroine.pencolor("#FFFFFF")
        self.our_heroine.pensize(4)
    def whiteboard_appearance(self):
        # Off-white background, blue "marker"; clears any existing drawing.
        self.our_heroine.clear()
        self.setting.bgcolor("#F5F5F5")
        self.our_heroine.shape("turtle")
        self.our_heroine.pencolor("#0000CD")
        self.our_heroine.pensize(4)
    def aspect(self, op, arg1, arg2):
        # Size the turtle's character cells so the calculation fits the canvas,
        # keeping a 3:5 width:height cell ratio. Returns (width, height).
        # BUG: while this crude first draft of a dynamic resizing
        # routine sort-of works, it is obviously not correct (e.g.,
        # sums of four-digit numbers display larger than their
        # one-digit counterparts)
        if op == '+' or op == '-':
            calculation_width = max(len(arg1), len(arg2))+1
            calculation_height = 4
            max_block_width = self.canvas_width / (calculation_width + 2)
            max_block_height = self.canvas_height / (calculation_height + 2)
            if max_block_width < max_block_height:
                true_block_width = max_block_width
                true_block_height = (5/3)*(max_block_width)
            elif max_block_width >= max_block_height:
                true_block_height = max_block_height
                true_block_width = (3/5)*(max_block_height)
            self.our_heroine.width = true_block_width
            self.our_heroine.height = true_block_height
            return (true_block_width, true_block_height)
        elif op == 'x':
            pass # TODO
        elif op == '/':
            pass # TODO
    def operation(self, op):
        # Button-command factory: binds the operator into do_operation.
        return partial(self.do_operation, op)
    def do_operation(self, op):
        # Validate the two entries, then dispatch to the turtle's arithmetic
        # method; the Appearance menu is disabled while drawing is in progress.
        # NOTE(review): tkinter.messagebox is never imported explicitly here;
        # it presumably becomes reachable via a side-effect import — confirm.
        # TODO: check for spaces---Python's int() handles them
        # intelligently, but my 'add' (&c.) method does not
        # also, leading zeros
        a, b = self.first_number_field.get().strip(), self.second_number_field.get().strip()
        nonnumbers = []
        try:
            m = int(a)
        except ValueError:
            nonnumbers.append(a)
        try:
            n = int(b)
        except ValueError:
            nonnumbers.append(b)
        if nonnumbers:
            if len(nonnumbers) == 1:
                error_tail = '"' + nonnumbers[0] + '" is not a number.'
            elif len(nonnumbers) == 2:
                error_tail = '"' + nonnumbers[0] + '" and "' + nonnumbers[1] + '" are not numbers.'
            tkinter.messagebox.showerror("Turtle Comprehension Error", "The turtle doesn't understand: " + error_tail)
            return
        elif m < 0 or n < 0:
            tkinter.messagebox.showerror("Turtle Ignorant of Negative Numbers", "The turtle doesn't understand negative numbers.")
            for i in range(2):
                self.appearance_menu.entryconfig(i, state=tkinter.NORMAL)
            return
        for i in range(2):
            self.appearance_menu.entryconfig(i, state=tkinter.DISABLED)
        self.our_heroine.clear()
        if op == '+':
            true_blocks = self.aspect('+', a, b)
            self.our_heroine.add(a, b, 1, (self.canvas_height/true_blocks[1])-2)
        elif op == '-':
            if m < n:
                tkinter.messagebox.showerror("Turtle Ignorant of Negative Numbers", "The turtle doesn't know how to subtract a bigger number from a smaller one.")
                for i in range(2):
                    self.appearance_menu.entryconfig(i, state=tkinter.NORMAL)
                return
            true_blocks = self.aspect('-', a, b)
            self.our_heroine.subtract(a, b, 1, (self.canvas_height/true_blocks[1])-2)
        elif op == 'x':
            self.our_heroine.multiply(a, b, 2, 5)
        elif op == '/':
            self.our_heroine.divide(a, b, 2, 5)
        for i in range(2):
            self.appearance_menu.entryconfig(i, state=tkinter.NORMAL)
if __name__ == "__main__":
    # Launch the GUI; the constructor itself enters the tkinter mainloop.
    TurtleArithmetic()
division
#!/usr/bin/python3
import tkinter
import turtle
from functools import partial
from pdb import set_trace as debug
class CalculatorTurtle(turtle.RawTurtle):
def __init__(self, canvas):
turtle.RawTurtle.__init__(self, canvas)
self.penup()
self.width = 48
self.height = 80
self.speed(0) # max speed for testing purposes; comment out (or use speed menu) for demos
self.symbols = {'0':self.zero, '1':self.one, '2':self.two,
'3':self.three, '4':self.four, '5':self.five, '6':self.six,
'7':self.seven, '8':self.eight, '9':self.nine, '+':self.plus,
'-':self.minus}
class HalfTurtle(object):
def __init__(self, turtle):
self.turtle = turtle
def __enter__(self):
self.turtle.width /= 2
self.turtle.height /= 2
def __exit__(self, _type, _value, _traceback):
self.turtle.width *= 2
self.turtle.height *= 2
self.do_half = HalfTurtle(self)
def to_waypoint(self, a, b, x, y):
wp = ((x+a)*self.width, (y+b)*self.height)
self.setheading(self.towards(wp[0], wp[1]))
self.forward(self.distance(wp[0], wp[1]))
# Note the many TODOs below; I should redesign all the digits,
# but I made these sloppy digits first because I'm inspired by the
# vision of having a minimal viable program as soon as possible
def zero(self, x, y):
# TODO: make this a proper elliptical zero, rather than a small circle
self.penup()
self.to_waypoint(0.8, 0.5, x, y)
self.pendown()
self.setheading(90)
self.circle(0.3*self.width)
self.penup()
def one(self, x, y):
# TODO?-- a better numeral "1" than just a straight line?
self.penup()
self.to_waypoint(0.5, 0.8, x, y)
self.pendown()
self.setheading(270)
self.forward(0.6*self.height)
self.penup()
def two(self, x, y):
# TODO: This isn't such a terrible "2" but it could probably be better
self.penup()
self.to_waypoint(0.2, 0.6, x, y)
self.pendown()
self.setheading(90)
self.circle(-0.3*self.width, 180)
self.to_waypoint(0.2, 0.2, x, y)
self.to_waypoint(0.8, 0.2, x, y)
self.penup()
def three(self, x, y):
# TODO: Not terrible "3"; could be more natural
self.penup()
self.to_waypoint(0.2, 0.7, x, y)
self.pendown()
self.setheading(45)
self.circle(-0.15*self.height, 225)
self.setheading(0)
self.circle(-0.15*self.height, 225)
self.penup()
def four(self, x, y):
# TODO: this is a crappy "4" in more ways than one
self.penup()
self.to_waypoint(0.2, 0.8, x, y)
self.pendown()
self.to_waypoint(0.2, 0.4, x, y)
self.to_waypoint(0.8, 0.4, x, y)
self.penup()
self.to_waypoint(0.8, 0.8, x, y)
self.pendown()
self.to_waypoint(0.8, 0.2, x, y)
self.penup()
def five(self, x, y):
# TODO: design a better "5"
self.penup()
self.to_waypoint(0.8, 0.8, x, y)
self.pendown()
self.to_waypoint(0.2, 0.8, x, y)
self.to_waypoint(0.2, 0.5, x, y)
self.setheading(0)
self.forward(0.3*self.width)
self.circle(-0.15*self.height, 240)
self.penup()
def six(self, x, y):
# TODO: more natural "6"
self.penup()
self.to_waypoint(0.8, 0.7, x, y)
self.pendown()
self.setheading(90)
self.circle(0.3*self.width, 180)
self.forward(0.4*self.height)
self.circle(0.3*self.width)
self.penup()
def seven(self, x, y):
# TODO: more natural "7"
self.penup()
self.to_waypoint(0.2, 0.8, x, y)
self.pendown()
self.to_waypoint(0.8, 0.8, x, y)
self.to_waypoint(0.2, 0.2, x, y)
self.penup()
def eight(self, x, y):
# TODO: more natural "8"
self.penup()
self.to_waypoint(0.5, 0.5, x, y)
self.pendown()
self.setheading(0)
self.circle(0.15*self.height)
self.circle(-0.15*self.height)
self.penup()
def nine(self, x, y):
# TODO: more natural "9"
self.penup()
self.to_waypoint(0.8, 0.7, x, y)
self.pendown()
self.setheading(270)
self.circle(-0.3*self.width)
self.forward(0.6*self.height)
self.penup()
def plus(self, x, y):
self.penup()
self.to_waypoint(0.1, 0.5, x, y)
self.pendown()
self.setheading(0)
self.forward(0.8*self.width)
self.penup()
self.to_waypoint(0.5, 0.74, x, y)
self.pendown()
self.setheading(270)
self.forward(0.48*self.height)
self.penup()
def minus(self, x, y):
self.penup()
self.to_waypoint(0.1, 0.5, x, y)
self.pendown()
self.setheading(0)
self.forward(0.8*self.width)
self.penup()
def times(self, x, y):
self.penup()
self.to_waypoint(0.1, 0.74, x, y)
self.pendown()
self.to_waypoint(0.9, 0.26, x, y)
self.penup()
self.to_waypoint(0.9, 0.74, x, y)
self.pendown()
self.to_waypoint(0.1, 0.26, x, y)
self.penup()
def bottom_line(self, x, y, length):
self.penup()
self.to_waypoint(0, 0, x, y)
self.pendown()
self.setheading(0)
self.forward(length*self.width)
self.penup()
def slash(self, x, y):
self.penup()
self.to_waypoint(0, 1, x, y)
self.pendown()
self.to_waypoint(1, 0, x, y)
self.penup()
def r(self, x, y):
# TODO: a better 'r' is possible
self.penup()
self.to_waypoint(0.2, 0.5, x, y)
self.setheading(-90)
self.pendown()
self.forward(0.4*self.height)
self.penup()
self.setheading(90)
self.forward(0.35*self.height)
self.pendown()
self.setheading(15)
self.circle(-0.5*self.width, 60)
self.penup()
def number(self, digits, x, y):
for i, d in enumerate(digits):
self.symbols[d](x+i, y)
def statement(self, arg1, arg2, op, x, y):
args_length = max([len(a) for a in (arg1, arg2)])
args = [s.zfill(args_length) for s in (arg1, arg2)]
for i, s in enumerate(args):
leading_zeros = True
for j, figure in enumerate(s):
if not leading_zeros or figure!='0' or j == args_length-1:
leading_zeros = False
draw_digit = self.symbols[figure]
draw_digit(x+j+1, y-i)
if i == len(args)-2:
if op == '+':
self.plus(x, y-1)
elif op == '-':
self.minus(x, y-1)
elif op == 'x':
self.times(x, y-1)
if i == len(args)-1:
self.bottom_line(x, y-1, args_length+1)
return args
def add(self, summand1, summand2, x, y):
summands = ['0'+s for s in self.statement(summand1, summand2, '+', x, y)]
summands_length = len(summands[0])
carry = 0
for i in range(1, summands_length+1):
place_sum = sum([int(s[-i]) for s in summands])
place_sum += carry
place_sum = str(place_sum).zfill(2)
if not (i==summands_length and place_sum[-1]=='0'):
draw_result_digit = self.symbols[place_sum[-1]]
draw_result_digit(x+summands_length-i, y-2)
if place_sum[-2]!='0':
draw_carry_digit = self.symbols[place_sum[-2]]
draw_carry_digit(x+summands_length-1-i, y+1)
carry = int(place_sum[-2])
self.forward(45)
def subtract(self, minuhend, subtrahend, x, y):
    """Draw the long-form subtraction of two digit strings at cell (x, y).

    Works column by column from the right. When a column would go
    negative it performs a borrow: the borrowed-from digit is slashed and
    redrawn decremented at half scale, the current digit is slashed and
    redrawn at half scale with a leading '1'.

    NOTE(review): parameter name keeps the original spelling 'minuhend'
    (minuend); renaming would change the keyword interface.
    """
    # TODO: add support for multiple borrowings, strip leading
    # zeros, refrain from retracing already drawn slashes and
    # digits
    self.statement(minuhend, subtrahend, '-', x, y)
    subtrahend = subtrahend.zfill(len(minuhend))
    for i in range(1, len(minuhend)+1):
        place_difference = int(minuhend[-i]) - int(subtrahend[-i])
        if place_difference >= 0:
            draw_result_digit = self.symbols[str(place_difference)]
            draw_result_digit(x+len(minuhend)+1-i, y-2)
        else:
            # I worry that temporarily adjusting the length and
            # width properties like this is a kludge; how _should_
            # it be done?
            # (do_half appears to be a context manager that halves the
            # cell dimensions — defined elsewhere in the class.)
            self.slash(x+len(minuhend)-i, y)
            with self.do_half:
                draw_creditor_digit = self.symbols[str(int(minuhend[-(i+1)]) - 1)]
                draw_creditor_digit(2*(x+len(minuhend)-i)+1, 2*(y+1))
            # Record the decremented borrowed-from digit (length unchanged).
            minuhend = minuhend[:-(i+1)] + str(int(minuhend[-(i+1)]) - 1) + minuhend[-i:]
            self.slash(x+len(minuhend)-i+1, y)
            with self.do_half:
                self.one(2*(x+len(minuhend)+1-i), 2*(y+1))
                draw_debtor_digit = self.symbols[minuhend[-i]]
                draw_debtor_digit(2*(x+len(minuhend)+1-i)+1, 2*(y+1))
            place_difference = int('1' + minuhend[-i]) - int(subtrahend[-i])
            draw_result_digit = self.symbols[str(place_difference)]
            draw_result_digit(x+len(minuhend)+1-i, y-2)
    self.forward(45)
def multiply(self, factor1, factor2, x, y):
    """Draw the long-form multiplication of two digit strings at cell (x, y).

    Draws one partial-product row per digit of factor2, then sums the
    partial products column by column and draws the final result under a
    second answer line. Partial products are accumulated in `summands` as
    least-significant-first digit lists padded by place value.
    """
    self.statement(factor1, factor2, 'x', x, y)
    factors_length = max(len(factor1), len(factor2))
    carry = 0
    # One list per partial product; the leading zeros shift it into place.
    summands = [[0]*n for n in range(len(factor2))]
    for i in range(1, len(factor2)+1):
        for j in range(1, len(factor1)+1):
            # Two-character zero-padded product: [-2] carry, [-1] digit.
            place_product = (int(factor1[-j])*int(factor2[-i]))
            place_product += carry
            place_product = str(place_product).zfill(2)
            draw_result_digit = self.symbols[place_product[-1]]
            draw_result_digit(x+factors_length-i-j+2, y-1-i)
            summands[i-1].append(int(place_product[-1]))
            carry = int(place_product[-2])
            if j == len(factor1):
                # Flush the final carry of this partial-product row.
                draw_last_digit = self.symbols[place_product[-2]]
                draw_last_digit(x+factors_length-i-j+1, y-1-i)
                summands[i-1].append(int(place_product[-2]))
                carry = 0
    summands_length = max(len(s) for s in summands)
    self.bottom_line(x-(summands_length-factors_length)+1, y-len(factor2)-1, summands_length)
    # Pad every partial product to a uniform length for column sums.
    for s in summands:
        for p in range(summands_length - len(s)):
            s.append(0)
    summands.append([0]*summands_length) # carry digits
    for p in range(1, summands_length+1):
        place_sum = str(sum(s[p-1] for s in summands))
        draw_final_digit = self.symbols[place_sum[-1]]
        draw_final_digit(x+factors_length-p+1, y-len(factor2)-2)
        # Propagate the column's carry digits into higher places.
        for i, d in enumerate(map(int, reversed(list(place_sum[:-1])))):
            summands[-1][p+i] += d
    # but also need to support final carry into result
    # wait, perhaps not
def division_tableau(self, x, y, length):
    """Draw the long-division bracket at cell (x, y): a one-cell vertical
    bar topped by a bar `length` cells long over the dividend."""
    self.penup()
    # maybe tweak first arg to -0.1ish for curved bar---
    self.to_waypoint(0, 0, x, y-1)
    self.pendown()
    # straight bar, not as cool
    self.to_waypoint(0, 0, x, y)
    # curved bar #TODO calculate parameters of right-paren shape
    # self.setheading(??)
    # self.circle(self.height, ??)
    self.bottom_line(x, y, length)
def division_statement(self, dividend, divisor, x, y):
    """Draw divisor and dividend separated by the long-division bracket,
    with the divisor to the left of the bracket at row y-1."""
    self.penup()
    self.number(divisor, x, y-1)
    self.division_tableau(x+len(divisor), y, len(dividend))
    self.number(dividend, x+len(divisor), y-1)
def divide(self, dividend, divisor, x, y):
    """Draw the long division of two digit strings at cell (x, y).

    Brings down one digit at a time, drawing the quotient digit above the
    bracket and the subtraction step two rows down per brought-down digit.
    Finishes with an 'r' marker and the remainder.

    NOTE(review): a divisor of '0' raises ZeroDivisionError here — there is
    no guard upstream in do_operation; confirm intended handling.
    """
    self.division_statement(dividend, divisor, x, y)
    # Advance past any prefix of the dividend smaller than the divisor.
    i = 0
    while int(dividend[:i+1]) < int(divisor) and i < len(dividend)-1:
        i += 1
    place_dividend = dividend[:i]
    for j, d in enumerate(dividend[i:]):
        place_dividend += d
        place_quotient = str(int(place_dividend)//int(divisor))
        self.number(place_quotient, x+len(divisor)+i+j, y)
        place_subtrahend = str(int(place_quotient)*int(divisor))
        self.number(place_subtrahend, x+len(divisor)+i+j-(len(place_subtrahend)-1), y+(-2)*(j+1))
        self.bottom_line(x+len(divisor)+i+j-(len(place_subtrahend)-1), y+(-2)*(j+1), len(place_subtrahend))
        place_dividend = str(int(place_dividend)-int(place_subtrahend))
        self.number(place_dividend, x+len(divisor)+i+j-(len(place_dividend)-1), y+(-2)*(j+1)-1)
        if j != len(dividend[i:])-1:
            # Bring down the next digit of the dividend.
            self.symbols[dividend[i+j+1]](x+len(divisor)+i+j+1, y+(-2)*(j+1)-1)
    remainder = place_dividend
    self.r(x+len(divisor)+len(dividend), y)
    self.number(remainder, x+len(divisor)+len(dividend)+1, y)
    self.forward(20)
class TurtleArithmetic(tkinter.Tk):
    """Main application window for the turtle arithmetic demonstrator.

    Hosts a turtle canvas, two number-entry fields, and one button per
    arithmetic operation; a CalculatorTurtle does the actual drawing.
    """
    def __init__(self):
        tkinter.Tk.__init__(self)
        self.title("Turtle Arithmetic (in development)")
        # Use real booleans; the original passed the strings 'FALSE',
        # which only worked because Tcl parses them as boolean values.
        self.resizable(width=False, height=False)
        self.menu_bar = tkinter.Menu(self)
        self.file_menu = tkinter.Menu(self.menu_bar, tearoff=0)
        self.file_menu.add_command(label="Quit", command=self.destroy)
        self.menu_bar.add_cascade(label="File", menu=self.file_menu)
        # TODO: submenu to choose chalk colors?
        # IDEA: optional general colorpicker popup window?
        # BUG: appearance and speed menus appear to behave as part of same radio group
        self.appearance_menu = tkinter.Menu(self.menu_bar, tearoff=0)
        self.appearance_menu.add_radiobutton(label="Chalkboard", command=self.chalkboard_appearance)
        self.appearance_menu.add_radiobutton(label="Whiteboard", command=self.whiteboard_appearance)
        self.menu_bar.add_cascade(label="Appearance", menu=self.appearance_menu)
        # TODO: test speed menu for odd behavior; it seems to work
        # inconsistently (!?!) when changed during turtle action; in
        # the worst case it could be disabled during action
        self.speed_menu = tkinter.Menu(self.menu_bar, tearoff=0)
        for s in ['slowest', 'slow', 'normal', 'fast', 'fastest']:
            # s=s binds the label now; a bare closure would late-bind to
            # the final loop value.
            self.speed_menu.add_radiobutton(label=s, command=lambda s=s: self.our_heroine.speed(s))
        self.menu_bar.add_cascade(label="Speed", menu=self.speed_menu)
        self.config(menu=self.menu_bar)
        self.canvas_width = 500
        self.canvas_height = 500
        self.turtle_canvas = tkinter.Canvas(self, width=self.canvas_width, height=self.canvas_height)
        self.turtle_canvas.grid(row=0, columnspan=4)
        self.first_number_label = tkinter.Label(self, text="First number:")
        self.first_number_label.grid(row=1, column=1, sticky='E')
        self.first_number_field = tkinter.Entry(self)
        self.first_number_field.configure(width=5)
        self.first_number_field.grid(row=1, column=2, sticky='W')
        self.second_number_label = tkinter.Label(self, text="Second number:")
        self.second_number_label.grid(row=2, column=1, sticky='E')
        self.second_number_field = tkinter.Entry(self)
        self.second_number_field.configure(width=5)
        self.second_number_field.grid(row=2, column=2, sticky='W')
        # Each button keeps its own attribute; previously all four were
        # assigned to self.add_button, so only the last one stayed reachable.
        self.add_button = tkinter.Button(self, text="Add", command=self.operation('+'))
        self.add_button.grid(row=3, column=0)
        self.subtract_button = tkinter.Button(self, text="Subtract", command=self.operation('-'))
        self.subtract_button.grid(row=3, column=1)
        self.multiply_button = tkinter.Button(self, text="Multiply", command=self.operation('x'))
        self.multiply_button.grid(row=3, column=2)
        self.divide_button = tkinter.Button(self, text="Divide", command=self.operation('/'))
        self.divide_button.grid(row=3, column=3)
        self.setting = turtle.TurtleScreen(self.turtle_canvas)
        self.setting.setworldcoordinates(0, 0, 500, 500)
        self.our_heroine = CalculatorTurtle(self.setting)
        self.chalkboard_appearance()
        # Walk the turtle to the center of the board.
        self.our_heroine.setheading(self.our_heroine.towards(250, 250))
        self.our_heroine.forward(self.our_heroine.distance(250, 250))
        self.mainloop()

    def chalkboard_appearance(self):
        """Green board with white 'chalk'; clears any current drawing."""
        self.our_heroine.clear()
        self.setting.bgcolor("#2B502B")
        self.our_heroine.shape("turtle")
        self.our_heroine.pencolor("#FFFFFF")
        self.our_heroine.pensize(4)

    def whiteboard_appearance(self):
        """Light board with blue 'marker'; clears any current drawing."""
        self.our_heroine.clear()
        self.setting.bgcolor("#F5F5F5")
        self.our_heroine.shape("turtle")
        self.our_heroine.pencolor("#0000CD")
        self.our_heroine.pensize(4)

    def aspect(self, op, arg1, arg2):
        """Size the turtle's symbol cells so the calculation fits the canvas.

        Returns (block_width, block_height) in canvas units, keeping a
        3:5 width:height cell shape. Only implemented for '+' and '-'.
        """
        # BUG: while this crude first draft of a dynamic resizing
        # routine sort-of works, it is obviously not correct (e.g.,
        # sums of four-digit numbers display larger than their
        # one-digit counterparts)
        if op == '+' or op == '-':
            calculation_width = max(len(arg1), len(arg2))+1
            calculation_height = 4
            max_block_width = self.canvas_width / (calculation_width + 2)
            max_block_height = self.canvas_height / (calculation_height + 2)
            if max_block_width < max_block_height:
                true_block_width = max_block_width
                true_block_height = (5/3)*(max_block_width)
            elif max_block_width >= max_block_height:
                true_block_height = max_block_height
                true_block_width = (3/5)*(max_block_height)
            self.our_heroine.width = true_block_width
            self.our_heroine.height = true_block_height
            return (true_block_width, true_block_height)
        elif op == 'x':
            pass # TODO
        elif op == '/':
            pass # TODO

    def operation(self, op):
        """Return a button callback that performs operation `op`."""
        return partial(self.do_operation, op)

    def do_operation(self, op):
        """Validate the entry fields, then dispatch `op` to the turtle."""
        # TODO: check for spaces---Python's int() handles them
        # intelligently, but my 'add' (&c.) method does not
        # also, leading zeros
        a, b = self.first_number_field.get().strip(), self.second_number_field.get().strip()
        nonnumbers = []
        try:
            m = int(a)
        except ValueError:
            nonnumbers.append(a)
        try:
            n = int(b)
        except ValueError:
            nonnumbers.append(b)
        if nonnumbers:
            if len(nonnumbers) == 1:
                error_tail = '"' + nonnumbers[0] + '" is not a number.'
            elif len(nonnumbers) == 2:
                error_tail = '"' + nonnumbers[0] + '" and "' + nonnumbers[1] + '" are not numbers.'
            tkinter.messagebox.showerror("Turtle Comprehension Error", "The turtle doesn't understand: " + error_tail)
            return
        elif m < 0 or n < 0:
            tkinter.messagebox.showerror("Turtle Ignorant of Negative Numbers", "The turtle doesn't understand negative numbers.")
            for i in range(2):
                self.appearance_menu.entryconfig(i, state=tkinter.NORMAL)
            return
        # Lock the appearance menu while the turtle is drawing.
        for i in range(2):
            self.appearance_menu.entryconfig(i, state=tkinter.DISABLED)
        self.our_heroine.clear()
        if op == '+':
            true_blocks = self.aspect('+', a, b)
            self.our_heroine.add(a, b, 1, (self.canvas_height/true_blocks[1])-2)
        elif op == '-':
            if m < n:
                tkinter.messagebox.showerror("Turtle Ignorant of Negative Numbers", "The turtle doesn't know how to subtract a bigger number from a smaller one.")
                for i in range(2):
                    self.appearance_menu.entryconfig(i, state=tkinter.NORMAL)
                return
            true_blocks = self.aspect('-', a, b)
            self.our_heroine.subtract(a, b, 1, (self.canvas_height/true_blocks[1])-2)
        elif op == 'x':
            self.our_heroine.multiply(a, b, 2, 5)
        elif op == '/':
            with self.our_heroine.do_half:
                self.our_heroine.divide(a, b, 2, 11)
        for i in range(2):
            self.appearance_menu.entryconfig(i, state=tkinter.NORMAL)
# Launch the GUI when run as a script.
if __name__ == "__main__":
    TurtleArithmetic()
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Exception classes - Subclassing allows you to check for specific errors
#
class BotoClientError(Exception):
    """Base class for errors that originate on the client side."""

    def __init__(self, reason):
        self.reason = reason

    def __str__(self):
        # NOTE(review): the 'S3Error' prefix predates this being a general
        # client error; the text is kept byte-for-byte for compatibility.
        return 'S3Error: %s' % self.reason

    # repr and str intentionally render identically.
    __repr__ = __str__
class S3PermissionsError(BotoClientError):
    """Client-side permissions problem (name-based; raised by S3 code)."""
    pass
class BotoServerError(Exception):
    """Base class for errors reported back by an AWS service.

    Carries the HTTP status, the reason phrase, and the raw response body.
    """

    def __init__(self, status, reason, body=''):
        self.status = status
        self.reason = reason
        self.body = body

    def __str__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    # repr and str intentionally render identically.
    __repr__ = __str__
# Thin marker subclasses so callers can catch service-specific errors.
class S3CreateError(BotoServerError):
    """Server error while creating an S3 resource."""
    pass

class SQSError(BotoServerError):
    """Server error reported by SQS."""
    pass

class S3ResponseError(BotoServerError):
    """Error response from S3."""
    pass

class EC2ResponseError(BotoServerError):
    """Error response from EC2."""
    pass

class SDBResponseError(BotoServerError):
    """Error response from SimpleDB."""
    pass

class AWSConnectionError(BotoClientError):
    """Client-side failure to connect to AWS."""
    pass

class S3DataError(BotoClientError):
    """Client-side problem with S3 data."""
    pass
Adding an exception for the SDB persistence stuff
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Exception classes - Subclassing allows you to check for specific errors
#
class BotoClientError(Exception):
    """Base class for errors that originate on the client side."""

    def __init__(self, reason):
        self.reason = reason

    def __str__(self):
        # NOTE(review): the 'S3Error' prefix predates this being a general
        # client error; the text is kept byte-for-byte for compatibility.
        return 'S3Error: %s' % self.reason

    # repr and str intentionally render identically.
    __repr__ = __str__
class SDBPersistanceError(Exception):
    """Error in the SDB persistence layer.

    NOTE: the class name keeps the historical misspelling ('Persistance');
    renaming it would break existing callers.
    """
    pass
class S3PermissionsError(BotoClientError):
    """Client-side permissions problem (name-based; raised by S3 code)."""
    pass
class BotoServerError(Exception):
    """Base class for errors reported back by an AWS service.

    Carries the HTTP status, the reason phrase, and the raw response body.
    """

    def __init__(self, status, reason, body=''):
        self.status = status
        self.reason = reason
        self.body = body

    def __str__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    # repr and str intentionally render identically.
    __repr__ = __str__
# Thin marker subclasses so callers can catch service-specific errors.
class S3CreateError(BotoServerError):
    """Server error while creating an S3 resource."""
    pass

class SQSError(BotoServerError):
    """Server error reported by SQS."""
    pass

class S3ResponseError(BotoServerError):
    """Error response from S3."""
    pass

class EC2ResponseError(BotoServerError):
    """Error response from EC2."""
    pass

class SDBResponseError(BotoServerError):
    """Error response from SimpleDB."""
    pass

class AWSConnectionError(BotoClientError):
    """Client-side failure to connect to AWS."""
    pass

class S3DataError(BotoClientError):
    """Client-side problem with S3 data."""
    pass
|
#
# handler.py
# Lamba functions for scheduling check-ins via the Southwest API
#
import json
import logging
import os
import sys
import uuid
import boto3
# Add vendored dependencies to path
sys.path.append('./vendor')
from lib import swa, email, exceptions # NOQA
# Set up logging
# Module-level logger at DEBUG so CloudWatch captures everything.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def schedule_check_in(event, context):
    """
    This function serves two purposes:

    1) For new executions, it looks up the reservation via the Southwest
       API and returns the check-in times (described below).

    2) In the event there are multiple check-ins, this function is called
       again by the AWS Step state machine to schedule the next available
       check-in time. It does this by popping a value from
       `check_in_times.remaining` into `check_in_times.next`.

    Returns a dictionary of the next and remaining check-in times in RFC 3339
    format. Ex:

        {
            "check_in_times": {
                "next": "2017-05-06T20:40:00-04:00",
                "remaining": [
                    "2017-05-12T20:40:00-04:00",
                    "2017-05-09T20:40:00-04:00"
                ]
            }
        }
    """
    # We already have the check-in times, just schedule the next one.
    if 'check_in_times' in event:
        event['check_in_times']['next'] = \
            event['check_in_times']['remaining'].pop()
        return event

    # New check-in, fetch reservation
    first_name = event['first_name']
    last_name = event['last_name']
    confirmation_number = event['confirmation_number']
    # Renamed from `email` so the imported `lib.email` module isn't shadowed;
    # .get() because receive_email only sets the key for non-southwest senders.
    email_address = event.get('email')

    log.info("Looking up reservation {} for {} {}".format(confirmation_number,
                                                          first_name, last_name))
    reservation = swa.Reservation.from_passenger_info(
        first_name, last_name, confirmation_number
    )
    log.debug("Reservation: {}".format(reservation))

    result = {
        'check_in_times': {
            'remaining': reservation.check_in_times,
        },
        'passengers': reservation.passengers,
        'confirmation_number': confirmation_number,
        'email': email_address
    }

    # Call ourself now that we have some check-in times.
    return schedule_check_in(result, None)
def check_in(event, context):
    """
    This function is triggered at check-in time and completes the check-in via
    the Southwest API and emails the reservation, if requested.
    """
    first_name = event['first_name']
    last_name = event['last_name']
    confirmation_number = event['confirmation_number']
    # Renamed from `email` so the imported `lib.email` module isn't shadowed.
    email_address = event.get('email')

    log.info("Checking in {} {} ({})".format(first_name, last_name,
                                             confirmation_number))
    try:
        resp = swa.check_in(first_name, last_name, confirmation_number)
        log.info("Checked in {} {}!".format(first_name, last_name))
        log.debug("Check-in response: {}".format(resp))
    except Exception as e:
        log.error("Error checking in: {}".format(e))
        raise

    if email_address:
        log.info("Emailing boarding pass to {}".format(email_address))
        try:
            # Best-effort: a failed email should not fail the check-in.
            swa.email_boarding_pass(
                first_name, last_name, confirmation_number, email_address
            )
        except Exception as e:
            log.error("Error emailing boarding pass: {}".format(e))

    # Raise exception to schedule the next check-in
    # This is caught by AWS Step and then schedule_check_in is called again
    if len(event['check_in_times']['remaining']) > 0:
        raise exceptions.NotLastCheckIn()
def _get_sfn_execution_name(reservation):
"""
Generate a human-readable execution named composed of the passenger's
first and last name follwed by a UUID
"""
name = "{}-{}-{}".format(
reservation['last_name'].lower(),
reservation['first_name'].lower(),
uuid.uuid4()
)
return name
def receive_email(event, context):
    """
    Triggered as an SES Action when a new e-mail is received. Scrapes the
    email to find the name and confirmation number of the passenger to
    check in, then executes the AWS Step state machine provided in the
    `STATE_MACHINE_ARN` environment variable.
    """
    sfn = boto3.client('stepfunctions')
    ses_notification = event['Records'][0]['ses']

    # ARN of the AWS Step State Machine to execute when an email
    # is successfully parsed and a new check-in should run.
    state_machine_arn = os.getenv('STATE_MACHINE_ARN')

    log.debug("State Machine ARN: {}".format(state_machine_arn))
    log.debug("SES Notification: {}".format(ses_notification))

    ses_msg = email.SesMailNotification(ses_notification['mail'])

    try:
        reservation = email.find_name_and_confirmation_number(ses_msg)
        log.info("Found reservation: {}".format(reservation))
    except Exception as e:
        log.error("Error scraping email {}: {}".format(ses_msg.message_id, e))
        return

    # Don't add the email if it's straight from southwest.com
    if not ses_msg.source.endswith('southwest.com'):
        reservation['email'] = ses_msg.source

    execution = sfn.start_execution(
        stateMachineArn=state_machine_arn,
        name=_get_sfn_execution_name(reservation),
        input=json.dumps(reservation)
    )

    log.debug("State machine started at: {}".format(execution['startDate']))
    log.debug("Execution ARN: {}".format(execution['executionArn']))

    # Remove the startDate from the return value because datetime objects
    # don't easily serialize to JSON. (del is a statement, not a function.)
    del execution['startDate']

    return execution
Add check-in support for multiple passengers
#
# handler.py
# Lamba functions for scheduling check-ins via the Southwest API
#
import json
import logging
import os
import sys
import uuid
import boto3
# Add vendored dependencies to path
sys.path.append('./vendor')
from lib import swa, email, exceptions # NOQA
# Set up logging
# Module-level logger at DEBUG so CloudWatch captures everything.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def schedule_check_in(event, context):
    """
    This function serves two purposes:

    1) For new executions, it looks up the reservation via the Southwest
       API and returns the check-in times (described below).

    2) In the event there are multiple check-ins, this function is called
       again by the AWS Step state machine to schedule the next available
       check-in time. It does this by popping a value from
       `check_in_times.remaining` into `check_in_times.next`.

    Returns a dictionary of the next and remaining check-in times in RFC 3339
    format. Ex:

        {
            "check_in_times": {
                "next": "2017-05-06T20:40:00-04:00",
                "remaining": [
                    "2017-05-12T20:40:00-04:00",
                    "2017-05-09T20:40:00-04:00"
                ]
            }
        }
    """
    # We already have the check-in times, just schedule the next one.
    if 'check_in_times' in event:
        event['check_in_times']['next'] = \
            event['check_in_times']['remaining'].pop()
        return event

    # New check-in, fetch reservation
    first_name = event['first_name']
    last_name = event['last_name']
    confirmation_number = event['confirmation_number']
    # Renamed from `email` so the imported `lib.email` module isn't shadowed;
    # .get() because receive_email only sets the key for non-southwest senders.
    email_address = event.get('email')

    log.info("Looking up reservation {} for {} {}".format(confirmation_number,
                                                          first_name, last_name))
    reservation = swa.Reservation.from_passenger_info(
        first_name, last_name, confirmation_number
    )
    log.debug("Reservation: {}".format(reservation))

    result = {
        'check_in_times': {
            'remaining': reservation.check_in_times,
        },
        'passengers': reservation.passengers,
        'confirmation_number': confirmation_number,
        'email': email_address
    }

    # Call ourself now that we have some check-in times.
    return schedule_check_in(result, None)
def check_in(event, context):
    """
    This function is triggered at check-in time and completes the check-in via
    the Southwest API and emails the reservation, if requested.
    """
    confirmation_number = event['confirmation_number']
    # The 'email' key may be absent (receive_email only records it when the
    # sender is not southwest.com), so use .get() instead of indexing, and
    # only email the boarding pass when an address exists — this restores
    # the pre-multi-passenger behavior. Renamed from `email` so the imported
    # `lib.email` module isn't shadowed.
    email_address = event.get('email')

    # Support older check-ins which did not support multiple passengers
    if 'passengers' not in event:
        event['passengers'] = [
            (event['first_name'], event['last_name'])
        ]

    for first_name, last_name in event['passengers']:
        log.info("Checking in {} {} ({})".format(
            first_name, last_name, confirmation_number
        ))
        try:
            resp = swa.check_in(first_name, last_name, confirmation_number)
            log.info("Checked in {} {}!".format(first_name, last_name))
            log.debug("Check-in response: {}".format(resp))
        except Exception as e:
            log.error("Error checking in: {}".format(e))
            raise

        if email_address:
            log.info("Emailing boarding pass to {}".format(email_address))
            try:
                # Best-effort: a failed email should not fail the check-in.
                swa.email_boarding_pass(
                    first_name, last_name, confirmation_number, email_address
                )
            except Exception as e:
                log.error("Error emailing boarding pass: {}".format(e))

    # Raise exception to schedule the next check-in
    # This is caught by AWS Step and then schedule_check_in is called again
    if len(event['check_in_times']['remaining']) > 0:
        raise exceptions.NotLastCheckIn()
def _get_sfn_execution_name(reservation):
"""
Generate a human-readable execution named composed of the passenger's
first and last name follwed by a UUID
"""
name = "{}-{}-{}".format(
reservation['last_name'].lower(),
reservation['first_name'].lower(),
uuid.uuid4()
)
return name
def receive_email(event, context):
    """
    Triggered as an SES Action when a new e-mail is received. Scrapes the
    email to find the name and confirmation number of the passenger to
    check in, then executes the AWS Step state machine provided in the
    `STATE_MACHINE_ARN` environment variable.
    """
    sfn = boto3.client('stepfunctions')
    ses_notification = event['Records'][0]['ses']

    # ARN of the AWS Step State Machine to execute when an email
    # is successfully parsed and a new check-in should run.
    state_machine_arn = os.getenv('STATE_MACHINE_ARN')

    log.debug("State Machine ARN: {}".format(state_machine_arn))
    log.debug("SES Notification: {}".format(ses_notification))

    ses_msg = email.SesMailNotification(ses_notification['mail'])

    try:
        reservation = email.find_name_and_confirmation_number(ses_msg)
        log.info("Found reservation: {}".format(reservation))
    except Exception as e:
        log.error("Error scraping email {}: {}".format(ses_msg.message_id, e))
        return

    # Don't add the email if it's straight from southwest.com
    if not ses_msg.source.endswith('southwest.com'):
        reservation['email'] = ses_msg.source

    execution = sfn.start_execution(
        stateMachineArn=state_machine_arn,
        name=_get_sfn_execution_name(reservation),
        input=json.dumps(reservation)
    )

    log.debug("State machine started at: {}".format(execution['startDate']))
    log.debug("Execution ARN: {}".format(execution['executionArn']))

    # Remove the startDate from the return value because datetime objects
    # don't easily serialize to JSON. (del is a statement, not a function.)
    del execution['startDate']

    return execution
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.