# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Azure resource. This resource is tracked in Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar location: Required. Resource Location.
:vartype location: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword location: Required. Resource Location.
:paramtype location: str
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.kind = kind
self.location = location
self.type = None
self.tags = tags
class Certificate(Resource):
"""SSL certificate for an app.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar location: Required. Resource Location.
:vartype location: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar friendly_name: Friendly name of the certificate.
:vartype friendly_name: str
:ivar subject_name: Subject name of the certificate.
:vartype subject_name: str
:ivar host_names: Host names the certificate applies to.
:vartype host_names: list[str]
:ivar pfx_blob: Pfx blob.
:vartype pfx_blob: bytearray
:ivar site_name: App name.
:vartype site_name: str
:ivar self_link: Self link.
:vartype self_link: str
:ivar issuer: Certificate issuer.
:vartype issuer: str
:ivar issue_date: Certificate issue Date.
:vartype issue_date: ~datetime.datetime
:ivar expiration_date: Certificate expiration date.
:vartype expiration_date: ~datetime.datetime
:ivar password: Certificate password.
:vartype password: str
:ivar thumbprint: Certificate thumbprint.
:vartype thumbprint: str
    :ivar valid: Is the certificate valid?
:vartype valid: bool
:ivar cer_blob: Raw bytes of .cer file.
:vartype cer_blob: bytearray
:ivar public_key_hash: Public key hash.
:vartype public_key_hash: str
:ivar hosting_environment_profile: Specification for the App Service Environment to use for the
certificate.
:vartype hosting_environment_profile:
~azure.mgmt.web.v2018_11_01.models.HostingEnvironmentProfile
:ivar key_vault_id: Key Vault Csm resource Id.
:vartype key_vault_id: str
:ivar key_vault_secret_name: Key Vault secret name.
:vartype key_vault_secret_name: str
:ivar key_vault_secret_status: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype key_vault_secret_status: str or
~azure.mgmt.web.v2018_11_01.models.KeyVaultSecretStatus
:ivar server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:vartype server_farm_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'friendly_name': {'readonly': True},
'subject_name': {'readonly': True},
'site_name': {'readonly': True},
'self_link': {'readonly': True},
'issuer': {'readonly': True},
'issue_date': {'readonly': True},
'expiration_date': {'readonly': True},
'thumbprint': {'readonly': True},
'valid': {'readonly': True},
'cer_blob': {'readonly': True},
'public_key_hash': {'readonly': True},
'hosting_environment_profile': {'readonly': True},
'key_vault_secret_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'subject_name': {'key': 'properties.subjectName', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'bytearray'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'self_link': {'key': 'properties.selfLink', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'issue_date': {'key': 'properties.issueDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'properties.expirationDate', 'type': 'iso-8601'},
'password': {'key': 'properties.password', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'valid': {'key': 'properties.valid', 'type': 'bool'},
'cer_blob': {'key': 'properties.cerBlob', 'type': 'bytearray'},
'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'key_vault_secret_status': {'key': 'properties.keyVaultSecretStatus', 'type': 'str'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
host_names: Optional[List[str]] = None,
pfx_blob: Optional[bytearray] = None,
password: Optional[str] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
server_farm_id: Optional[str] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword location: Required. Resource Location.
:paramtype location: str
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword host_names: Host names the certificate applies to.
:paramtype host_names: list[str]
:keyword pfx_blob: Pfx blob.
:paramtype pfx_blob: bytearray
:keyword password: Certificate password.
:paramtype password: str
:keyword key_vault_id: Key Vault Csm resource Id.
:paramtype key_vault_id: str
:keyword key_vault_secret_name: Key Vault secret name.
:paramtype key_vault_secret_name: str
:keyword server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:paramtype server_farm_id: str
"""
super(Certificate, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.friendly_name = None
self.subject_name = None
self.host_names = host_names
self.pfx_blob = pfx_blob
self.site_name = None
self.self_link = None
self.issuer = None
self.issue_date = None
self.expiration_date = None
self.password = password
self.thumbprint = None
self.valid = None
self.cer_blob = None
self.public_key_hash = None
self.hosting_environment_profile = None
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.key_vault_secret_status = None
self.server_farm_id = server_farm_id
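# Illustrative usage (not part of the generated module; all values below are
# made up): constructing a Certificate and round-tripping it through msrest
# serialization.
#
#     cert = Certificate(
#         location="westus",
#         host_names=["www.contoso.com"],
#         password="<pfx-password>",
#     )
#     payload = cert.serialize()   # nested dict shaped like the ARM request body
#     restored = Certificate.deserialize(payload)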
class CertificateCollection(msrest.serialization.Model):
"""Collection of certificates.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar value: Required. Collection of resources.
:vartype value: list[~azure.mgmt.web.v2018_11_01.models.Certificate]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Certificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Certificate"],
**kwargs
):
"""
:keyword value: Required. Collection of resources.
:paramtype value: list[~azure.mgmt.web.v2018_11_01.models.Certificate]
"""
super(CertificateCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ProxyOnlyResource(msrest.serialization.Model):
"""Azure proxy only resource. This resource is not tracked by Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
"""
super(ProxyOnlyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.kind = kind
self.type = None
class CertificatePatchResource(ProxyOnlyResource):
"""ARM resource for a certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar type: Resource type.
:vartype type: str
:ivar friendly_name: Friendly name of the certificate.
:vartype friendly_name: str
:ivar subject_name: Subject name of the certificate.
:vartype subject_name: str
:ivar host_names: Host names the certificate applies to.
:vartype host_names: list[str]
:ivar pfx_blob: Pfx blob.
:vartype pfx_blob: bytearray
:ivar site_name: App name.
:vartype site_name: str
:ivar self_link: Self link.
:vartype self_link: str
:ivar issuer: Certificate issuer.
:vartype issuer: str
:ivar issue_date: Certificate issue Date.
:vartype issue_date: ~datetime.datetime
:ivar expiration_date: Certificate expiration date.
:vartype expiration_date: ~datetime.datetime
:ivar password: Certificate password.
:vartype password: str
:ivar thumbprint: Certificate thumbprint.
:vartype thumbprint: str
    :ivar valid: Is the certificate valid?
:vartype valid: bool
:ivar cer_blob: Raw bytes of .cer file.
:vartype cer_blob: bytearray
:ivar public_key_hash: Public key hash.
:vartype public_key_hash: str
:ivar hosting_environment_profile: Specification for the App Service Environment to use for the
certificate.
:vartype hosting_environment_profile:
~azure.mgmt.web.v2018_11_01.models.HostingEnvironmentProfile
:ivar key_vault_id: Key Vault Csm resource Id.
:vartype key_vault_id: str
:ivar key_vault_secret_name: Key Vault secret name.
:vartype key_vault_secret_name: str
:ivar key_vault_secret_status: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype key_vault_secret_status: str or
~azure.mgmt.web.v2018_11_01.models.KeyVaultSecretStatus
:ivar server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:vartype server_farm_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'friendly_name': {'readonly': True},
'subject_name': {'readonly': True},
'site_name': {'readonly': True},
'self_link': {'readonly': True},
'issuer': {'readonly': True},
'issue_date': {'readonly': True},
'expiration_date': {'readonly': True},
'thumbprint': {'readonly': True},
'valid': {'readonly': True},
'cer_blob': {'readonly': True},
'public_key_hash': {'readonly': True},
'hosting_environment_profile': {'readonly': True},
'key_vault_secret_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'subject_name': {'key': 'properties.subjectName', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'bytearray'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'self_link': {'key': 'properties.selfLink', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'issue_date': {'key': 'properties.issueDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'properties.expirationDate', 'type': 'iso-8601'},
'password': {'key': 'properties.password', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'valid': {'key': 'properties.valid', 'type': 'bool'},
'cer_blob': {'key': 'properties.cerBlob', 'type': 'bytearray'},
'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'key_vault_secret_status': {'key': 'properties.keyVaultSecretStatus', 'type': 'str'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
host_names: Optional[List[str]] = None,
pfx_blob: Optional[bytearray] = None,
password: Optional[str] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
server_farm_id: Optional[str] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword host_names: Host names the certificate applies to.
:paramtype host_names: list[str]
:keyword pfx_blob: Pfx blob.
:paramtype pfx_blob: bytearray
:keyword password: Certificate password.
:paramtype password: str
:keyword key_vault_id: Key Vault Csm resource Id.
:paramtype key_vault_id: str
:keyword key_vault_secret_name: Key Vault secret name.
:paramtype key_vault_secret_name: str
:keyword server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:paramtype server_farm_id: str
"""
super(CertificatePatchResource, self).__init__(kind=kind, **kwargs)
self.friendly_name = None
self.subject_name = None
self.host_names = host_names
self.pfx_blob = pfx_blob
self.site_name = None
self.self_link = None
self.issuer = None
self.issue_date = None
self.expiration_date = None
self.password = password
self.thumbprint = None
self.valid = None
self.cer_blob = None
self.public_key_hash = None
self.hosting_environment_profile = None
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.key_vault_secret_status = None
self.server_farm_id = server_farm_id
class DefaultErrorResponse(msrest.serialization.Model):
"""App Service error response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: Error model.
:vartype error: ~azure.mgmt.web.v2018_11_01.models.DefaultErrorResponseError
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'DefaultErrorResponseError'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(DefaultErrorResponse, self).__init__(**kwargs)
self.error = None
class DefaultErrorResponseError(msrest.serialization.Model):
"""Error model.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Standardized string to programmatically identify the error.
:vartype code: str
:ivar message: Detailed error description and debugging information.
:vartype message: str
:ivar target: Detailed error description and debugging information.
:vartype target: str
:ivar details:
:vartype details: list[~azure.mgmt.web.v2018_11_01.models.DefaultErrorResponseErrorDetailsItem]
:ivar innererror: More information to debug error.
:vartype innererror: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'innererror': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[DefaultErrorResponseErrorDetailsItem]'},
'innererror': {'key': 'innererror', 'type': 'str'},
}
def __init__(
self,
*,
details: Optional[List["DefaultErrorResponseErrorDetailsItem"]] = None,
**kwargs
):
"""
:keyword details:
:paramtype details:
list[~azure.mgmt.web.v2018_11_01.models.DefaultErrorResponseErrorDetailsItem]
"""
super(DefaultErrorResponseError, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = details
self.innererror = None
class DefaultErrorResponseErrorDetailsItem(msrest.serialization.Model):
"""Detailed errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Standardized string to programmatically identify the error.
:vartype code: str
:ivar message: Detailed error description and debugging information.
:vartype message: str
:ivar target: Detailed error description and debugging information.
:vartype target: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(DefaultErrorResponseErrorDetailsItem, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
class HostingEnvironmentProfile(msrest.serialization.Model):
"""Specification for an App Service Environment to use for this resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID of the App Service Environment.
:vartype id: str
:ivar name: Name of the App Service Environment.
:vartype name: str
:ivar type: Resource type of the App Service Environment.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
"""
:keyword id: Resource ID of the App Service Environment.
:paramtype id: str
"""
super(HostingEnvironmentProfile, self).__init__(**kwargs)
self.id = id
self.name = None
self.type = None
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for management of individual students' peer review assignments."""
__author__ = 'Sean Lip (sll@google.com)'
import os
import urllib
import jinja2
import messages
from controllers.lessons import create_readonly_assessment_params
from controllers.utils import ApplicationHandler
from models import courses
from models import models
from models import review
from models import roles
from models import student_work
from models import transforms
from modules.review import domain
class AssignmentsRights(object):
"""Manages view/edit rights for assignments and reviews."""
@classmethod
def can_view(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class AssignmentManager(ApplicationHandler):
"""A view for managing human-reviewed assignments."""
def get_assignment_html(
self, peer_reviewed_units, unit_id=None, reviewee_id=None,
error_msg=None, readonly_assessment=None, review_steps=None,
reviewers=None, reviews_params=None, model_version=None):
"""Renders a template allowing an admin to select an assignment."""
edit_url = self.canonicalize_url('/dashboard')
return jinja2.utils.Markup(self.get_template(
'assignments_menu.html', [os.path.dirname(__file__)]
).render({
'REVIEW_STATE_COMPLETED': domain.REVIEW_STATE_COMPLETED,
'add_reviewer_action': self.get_action_url('add_reviewer'),
'add_reviewer_xsrf_token': self.create_xsrf_token('add_reviewer'),
'delete_reviewer_action': self.get_action_url('delete_reviewer'),
'delete_reviewer_xsrf_token': self.create_xsrf_token(
'delete_reviewer'),
'edit_assignment_action': 'edit_assignment',
'edit_url': edit_url,
'error_msg': error_msg,
'peer_reviewed_units': peer_reviewed_units,
'readonly_student_assessment': readonly_assessment,
'reviewee_id': reviewee_id or '',
'reviewers': reviewers,
'reviews_params': reviews_params,
'review_steps': review_steps,
'unit_id': unit_id,
'model_version': model_version
}, autoescape=True))
def parse_request(self, course, unit_id, reviewee_id, reviewer_id=None):
"""Parses request parameters in a GET or POST request.
Args:
course: Course. A course object.
unit_id: str. The id of the unit.
reviewee_id: str. The email address of the reviewee.
reviewer_id: str. The email address of the reviewer.
Returns:
- a dict containing some subset of the following keys: unit,
reviewee, reviewer.
- if necessary, an error message to be passed to the frontend.
"""
request_params = {}
# Check unit validity.
if not unit_id:
return request_params, ''
unit = course.find_unit_by_id(unit_id)
if not unit:
return request_params, '404: Unit not found.'
if (unit.workflow.get_grader() != courses.HUMAN_GRADER or
unit.workflow.get_matcher() != review.PEER_MATCHER):
return request_params, '412: This unit is not peer-graded.'
request_params['unit'] = unit
# Check reviewee validity.
if not reviewee_id:
return request_params, '412: No student email supplied.'
reviewee = models.Student.get_enrolled_student_by_email(reviewee_id)
if not reviewee:
return (request_params,
'412: No student with this email address exists.')
request_params['reviewee'] = reviewee
# Check reviewer validity, if applicable.
if reviewer_id is not None:
if not reviewer_id:
return request_params, '412: No reviewer email supplied.'
reviewer = models.Student.get_enrolled_student_by_email(reviewer_id)
if not reviewer:
return (request_params,
'412: No reviewer with this email address exists.')
request_params['reviewer'] = reviewer
return request_params, ''
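    # Illustrative call (hypothetical ids), showing the (params, error) shape
    # that parse_request returns:
    #
    #     params, err = self.parse_request(course, '12', 'student@example.com')
    #     # success: params == {'unit': <Unit>, 'reviewee': <Student>}, err == ''
    #     # failure: params holds whatever was validated so far and err is a
    #     # '<status>: <message>' string destined for the frontend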
def get_edit_assignment(self):
"""Shows interface for selecting and viewing a student assignment."""
if not AssignmentsRights.can_view(self):
self.error(401)
return
course = courses.Course(self)
peer_reviewed_units = course.get_peer_reviewed_units()
template_values = {}
template_values['page_title'] = self.format_title('Peer Review')
template_values['page_description'] = (
messages.ASSIGNMENTS_MENU_DESCRIPTION)
unit_id = self.request.get('unit_id')
if not unit_id:
# No unit has been set yet, so display an empty form.
template_values['main_content'] = self.get_assignment_html(
peer_reviewed_units)
self.render_page(template_values)
return
reviewee_id = self.request.get('reviewee_id')
# This field may be populated due to a redirect from a POST method.
post_error_msg = self.request.get('post_error_msg')
request_params, error_msg = self.parse_request(
course, unit_id, reviewee_id)
unit = request_params.get('unit')
reviewee = request_params.get('reviewee')
if error_msg:
template_values['main_content'] = self.get_assignment_html(
peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
error_msg=error_msg)
self.render_page(template_values)
return
model_version = course.get_assessment_model_version(unit)
assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
get_readonly_assessment = self.get_readonly_assessment_1_4
get_readonly_review = self.get_readonly_review_1_4
elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
get_readonly_assessment = self.get_readonly_assessment_1_5
get_readonly_review = self.get_readonly_review_1_5
else:
raise ValueError('Bad assessment model version: %s' % model_version)
# Render content.
rp = course.get_reviews_processor()
submission_and_review_steps = rp.get_submission_and_review_steps(
unit.unit_id, reviewee.get_key())
if not submission_and_review_steps:
template_values['main_content'] = self.get_assignment_html(
peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
error_msg='412: This student hasn\'t submitted the assignment.'
)
self.render_page(template_values)
return
readonly_assessment = get_readonly_assessment(
unit, submission_and_review_steps[0])
review_steps = submission_and_review_steps[1]
reviews = rp.get_reviews_by_keys(
unit.unit_id,
[review_step.review_key for review_step in review_steps],
handle_empty_keys=True)
reviews_params = []
reviewers = []
for idx, review_step in enumerate(review_steps):
params = get_readonly_review(unit, reviews[idx])
reviews_params.append(params)
reviewer = models.Student.get_student_by_user_id(
review_step.reviewer_key.name()).key().name()
reviewers.append(reviewer)
assert len(reviewers) == len(review_steps)
assert len(reviews_params) == len(review_steps)
template_values['main_content'] = self.get_assignment_html(
peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
readonly_assessment=readonly_assessment, review_steps=review_steps,
error_msg=post_error_msg, reviewers=reviewers,
reviews_params=reviews_params,
model_version=model_version)
self.render_page(template_values)
def get_readonly_assessment_1_4(self, unit, submission_content):
return create_readonly_assessment_params(
courses.Course(self).get_assessment_content(unit),
student_work.StudentWorkUtils.get_answer_list(submission_content))
def get_readonly_assessment_1_5(self, unit, submission_content):
return {
'content': unit.html_content,
'saved_answers': transforms.dumps(submission_content)
}
def get_readonly_review_1_4(self, unit, review_content):
return create_readonly_assessment_params(
courses.Course(self).get_review_form_content(unit),
student_work.StudentWorkUtils.get_answer_list(review_content))
def get_readonly_review_1_5(self, unit, review_content):
return {
'content': unit.html_review_form,
'saved_answers': transforms.dumps(review_content)
}
def post_add_reviewer(self):
"""Adds a new reviewer to a human-reviewed assignment."""
if not AssignmentsRights.can_edit(self):
self.error(401)
return
course = courses.Course(self)
unit_id = self.request.get('unit_id')
reviewee_id = self.request.get('reviewee_id')
reviewer_id = self.request.get('reviewer_id')
request_params, post_error_msg = self.parse_request(
course, unit_id, reviewee_id, reviewer_id=reviewer_id)
redirect_params = {
'action': 'edit_assignment',
'reviewee_id': reviewee_id,
'reviewer_id': reviewer_id,
'unit_id': unit_id,
}
if post_error_msg:
redirect_params['post_error_msg'] = post_error_msg
self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
return
unit = request_params.get('unit')
reviewee = request_params.get('reviewee')
reviewer = request_params.get('reviewer')
rp = course.get_reviews_processor()
reviewee_key = reviewee.get_key()
reviewer_key = reviewer.get_key()
try:
rp.add_reviewer(unit.unit_id, reviewee_key, reviewer_key)
except domain.TransitionError:
redirect_params['post_error_msg'] = (
'412: The reviewer is already assigned to this submission.')
self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
def post_delete_reviewer(self):
"""Deletes a reviewer from a human-reviewed assignment."""
if not AssignmentsRights.can_edit(self):
self.error(401)
return
course = courses.Course(self)
unit_id = self.request.get('unit_id')
reviewee_id = self.request.get('reviewee_id')
review_step_key = self.request.get('key')
request_params, post_error_msg = self.parse_request(
course, unit_id, reviewee_id)
redirect_params = {
'action': 'edit_assignment',
'reviewee_id': reviewee_id,
'unit_id': unit_id,
}
if post_error_msg:
redirect_params['post_error_msg'] = post_error_msg
self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
return
rp = course.get_reviews_processor()
unit = request_params.get('unit')
rp.delete_reviewer(unit.unit_id, review_step_key)
self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import base64
import binascii
import os
import shutil
import tarfile
import tempfile
import boto.s3.connection
import eventlet
from lxml import etree
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
from nova import utils
LOG = logging.getLogger(__name__)
s3_opts = [
cfg.StrOpt('image_decryption_dir',
default='/tmp',
help='parent dir for tempdir used for image decryption'),
cfg.StrOpt('s3_access_key',
default='notchecked',
help='access key to use for s3 server for images'),
cfg.StrOpt('s3_secret_key',
default='notchecked',
help='secret key to use for s3 server for images'),
cfg.BoolOpt('s3_use_ssl',
default=False,
help='whether to use ssl when talking to s3'),
cfg.BoolOpt('s3_affix_tenant',
default=False,
help='whether to affix the tenant id to the access key '
'when downloading from s3'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(s3_opts)
class S3ImageService(object):
"""Wraps an existing image service to support s3 based register."""
def __init__(self, service=None, *args, **kwargs):
self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
self.service = service or image.get_default_image_service()
self.service.__init__(*args, **kwargs)
def _translate_uuids_to_ids(self, context, images):
return [self._translate_uuid_to_id(context, img) for img in images]
def _translate_uuid_to_id(self, context, image):
image_copy = image.copy()
try:
image_uuid = image_copy['id']
except KeyError:
pass
else:
image_copy['id'] = ec2utils.glance_id_to_id(context, image_uuid)
for prop in ['kernel_id', 'ramdisk_id']:
try:
image_uuid = image_copy['properties'][prop]
except (KeyError, ValueError):
pass
else:
image_id = ec2utils.glance_id_to_id(context, image_uuid)
image_copy['properties'][prop] = image_id
return image_copy
def _translate_id_to_uuid(self, context, image):
image_copy = image.copy()
try:
image_id = image_copy['id']
except KeyError:
pass
else:
image_copy['id'] = ec2utils.id_to_glance_id(context, image_id)
for prop in ['kernel_id', 'ramdisk_id']:
try:
image_id = image_copy['properties'][prop]
except (KeyError, ValueError):
pass
else:
image_uuid = ec2utils.id_to_glance_id(context, image_id)
image_copy['properties'][prop] = image_uuid
return image_copy
def create(self, context, metadata, data=None):
"""Create an image.
metadata['properties'] should contain image_location.
"""
image = self._s3_create(context, metadata)
return image
def delete(self, context, image_id):
image_uuid = ec2utils.id_to_glance_id(context, image_id)
self.service.delete(context, image_uuid)
def update(self, context, image_id, metadata, data=None):
image_uuid = ec2utils.id_to_glance_id(context, image_id)
metadata = self._translate_id_to_uuid(context, metadata)
image = self.service.update(context, image_uuid, metadata, data)
return self._translate_uuid_to_id(context, image)
def index(self, context):
#NOTE(bcwaldon): sort asc to make sure we assign lower ids
# to older images
images = self.service.index(context, sort_dir='asc')
return self._translate_uuids_to_ids(context, images)
def detail(self, context):
#NOTE(bcwaldon): sort asc to make sure we assign lower ids
# to older images
images = self.service.detail(context, sort_dir='asc')
return self._translate_uuids_to_ids(context, images)
def show(self, context, image_id):
image_uuid = ec2utils.id_to_glance_id(context, image_id)
image = self.service.show(context, image_uuid)
return self._translate_uuid_to_id(context, image)
def show_by_name(self, context, name):
image = self.service.show_by_name(context, name)
return self._translate_uuid_to_id(context, image)
@staticmethod
def _conn(context):
# NOTE(vish): access and secret keys for s3 server are not
# checked in nova-objectstore
access = FLAGS.s3_access_key
if FLAGS.s3_affix_tenant:
access = '%s:%s' % (access, context.project_id)
secret = FLAGS.s3_secret_key
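        # Note: s3_host and s3_port are registered among nova's global flags,
        # not in the s3_opts list at the top of this module.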
calling = boto.s3.connection.OrdinaryCallingFormat()
return boto.s3.connection.S3Connection(aws_access_key_id=access,
aws_secret_access_key=secret,
is_secure=FLAGS.s3_use_ssl,
calling_format=calling,
port=FLAGS.s3_port,
host=FLAGS.s3_host)
@staticmethod
def _download_file(bucket, filename, local_dir):
key = bucket.get_key(filename)
local_filename = os.path.join(local_dir, os.path.basename(filename))
key.get_contents_to_filename(local_filename)
return local_filename
def _s3_parse_manifest(self, context, metadata, manifest):
manifest = etree.fromstring(manifest)
image_format = 'ami'
image_type = 'machine'
try:
kernel_id = manifest.find('machine_configuration/kernel_id').text
if kernel_id == 'true':
image_format = 'aki'
image_type = 'kernel'
kernel_id = None
except Exception:
kernel_id = None
try:
ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
if ramdisk_id == 'true':
image_format = 'ari'
image_type = 'ramdisk'
ramdisk_id = None
except Exception:
ramdisk_id = None
try:
arch = manifest.find('machine_configuration/architecture').text
except Exception:
arch = 'x86_64'
        # NOTE(yamahata):
        # EC2 ec2-bundle-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        #   virtual name = {ami, root, swap, ephemeral<N>}
        #     where N is a non-negative integer
        #   device name = the device name seen by the guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
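        # Illustrative manifest fragment (element values are examples only):
        #
        #   <machine_configuration>
        #     <block_device_mapping>
        #       <mapping><virtual>ephemeral0</virtual><device>sda1</device></mapping>
        #     </block_device_mapping>
        #   </machine_configuration>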
mappings = []
try:
block_device_mapping = manifest.findall('machine_configuration/'
'block_device_mapping/'
'mapping')
for bdm in block_device_mapping:
mappings.append({'virtual': bdm.find('virtual').text,
'device': bdm.find('device').text})
except Exception:
mappings = []
properties = metadata['properties']
properties['architecture'] = arch
def _translate_dependent_image_id(image_key, image_id):
image_uuid = ec2utils.ec2_id_to_glance_id(context, image_id)
properties[image_key] = image_uuid
if kernel_id:
_translate_dependent_image_id('kernel_id', kernel_id)
if ramdisk_id:
_translate_dependent_image_id('ramdisk_id', ramdisk_id)
if mappings:
properties['mappings'] = mappings
metadata.update({'disk_format': image_format,
'container_format': image_format,
'status': 'queued',
'is_public': False,
'properties': properties})
metadata['properties']['image_state'] = 'pending'
#TODO(bcwaldon): right now, this removes user-defined ids.
# We need to re-enable this.
image_id = metadata.pop('id', None)
image = self.service.create(context, metadata)
# extract the new uuid and generate an int id to present back to user
image_uuid = image['id']
image['id'] = ec2utils.glance_id_to_id(context, image_uuid)
# return image_uuid so the caller can still make use of image_service
return manifest, image, image_uuid
def _s3_create(self, context, metadata):
"""Gets a manifest from s3 and makes an image."""
image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
image_location = metadata['properties']['image_location']
bucket_name = image_location.split('/')[0]
manifest_path = image_location[len(bucket_name) + 1:]
bucket = self._conn(context).get_bucket(bucket_name)
key = bucket.get_key(manifest_path)
manifest = key.get_contents_as_string()
manifest, image, image_uuid = self._s3_parse_manifest(context,
metadata,
manifest)
def delayed_create():
"""This handles the fetching and decrypting of the part files."""
context.update_store()
log_vars = {'image_location': image_location,
'image_path': image_path}
metadata['properties']['image_state'] = 'downloading'
self.service.update(context, image_uuid, metadata)
try:
parts = []
elements = manifest.find('image').getiterator('filename')
for fn_element in elements:
part = self._download_file(bucket,
fn_element.text,
image_path)
parts.append(part)
# NOTE(vish): this may be suboptimal, should we use cat?
enc_filename = os.path.join(image_path, 'image.encrypted')
with open(enc_filename, 'w') as combined:
for filename in parts:
with open(filename) as part:
shutil.copyfileobj(part, combined)
except Exception:
LOG.exception(_("Failed to download %(image_location)s "
"to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_download'
self.service.update(context, image_uuid, metadata)
return
metadata['properties']['image_state'] = 'decrypting'
self.service.update(context, image_uuid, metadata)
try:
hex_key = manifest.find('image/ec2_encrypted_key').text
encrypted_key = binascii.a2b_hex(hex_key)
hex_iv = manifest.find('image/ec2_encrypted_iv').text
encrypted_iv = binascii.a2b_hex(hex_iv)
dec_filename = os.path.join(image_path, 'image.tar.gz')
self._decrypt_image(context, enc_filename, encrypted_key,
encrypted_iv, dec_filename)
except Exception:
LOG.exception(_("Failed to decrypt %(image_location)s "
"to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_decrypt'
self.service.update(context, image_uuid, metadata)
return
metadata['properties']['image_state'] = 'untarring'
self.service.update(context, image_uuid, metadata)
try:
unz_filename = self._untarzip_image(image_path, dec_filename)
except Exception:
LOG.exception(_("Failed to untar %(image_location)s "
"to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_untar'
self.service.update(context, image_uuid, metadata)
return
metadata['properties']['image_state'] = 'uploading'
self.service.update(context, image_uuid, metadata)
try:
with open(unz_filename) as image_file:
self.service.update(context, image_uuid,
metadata, image_file)
except Exception:
LOG.exception(_("Failed to upload %(image_location)s "
"to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_upload'
self.service.update(context, image_uuid, metadata)
return
metadata['properties']['image_state'] = 'available'
metadata['status'] = 'active'
self.service.update(context, image_uuid, metadata)
shutil.rmtree(image_path)
eventlet.spawn_n(delayed_create)
return image
def _decrypt_image(self, context, encrypted_filename, encrypted_key,
encrypted_iv, decrypted_filename):
elevated = context.elevated()
try:
key = self.cert_rpcapi.decrypt_text(elevated,
project_id=context.project_id,
text=base64.b64encode(encrypted_key))
except Exception, exc:
msg = _('Failed to decrypt private key: %s') % exc
raise exception.NovaException(msg)
try:
iv = self.cert_rpcapi.decrypt_text(elevated,
project_id=context.project_id,
text=base64.b64encode(encrypted_iv))
except Exception, exc:
raise exception.NovaException(_('Failed to decrypt initialization '
'vector: %s') % exc)
try:
utils.execute('openssl', 'enc',
'-d', '-aes-128-cbc',
'-in', '%s' % (encrypted_filename,),
'-K', '%s' % (key,),
'-iv', '%s' % (iv,),
'-out', '%s' % (decrypted_filename,))
except exception.ProcessExecutionError, exc:
raise exception.NovaException(_('Failed to decrypt image file '
'%(image_file)s: %(err)s') %
{'image_file': encrypted_filename,
'err': exc.stdout})
@staticmethod
def _test_for_malicious_tarball(path, filename):
"""Raises exception if extracting tarball would escape extract path"""
tar_file = tarfile.open(filename, 'r|gz')
for n in tar_file.getnames():
if not os.path.abspath(os.path.join(path, n)).startswith(path):
tar_file.close()
raise exception.NovaException(_('Unsafe filenames in image'))
tar_file.close()
@staticmethod
def _untarzip_image(path, filename):
S3ImageService._test_for_malicious_tarball(path, filename)
tar_file = tarfile.open(filename, 'r|gz')
tar_file.extractall(path)
image_file = tar_file.getnames()[0]
tar_file.close()
return os.path.join(path, image_file)
#!/usr/bin/env python
from argparse import ArgumentParser
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django import contrib
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TransactionTestCase, TestCase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils._os import upath
from django.utils import six
warnings.simplefilter("default", RemovedInDjango19Warning)
warnings.simplefilter("default", RemovedInDjango20Warning)
CONTRIB_MODULE_PATH = 'django.contrib'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
SUBDIRS_TO_SKIP = [
'data',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
def get_test_modules():
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
(CONTRIB_MODULE_PATH, CONTRIB_DIR)
]
if HAS_SPATIAL_DB:
discovery_paths.append(
('django.contrib.gis.tests', os.path.join(CONTRIB_DIR, 'gis', 'tests'))
)
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
f.startswith('sql') or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
            if connection.vendor != 'postgresql' and f == 'postgres_tests':
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
    # Ensure the middleware classes are seen as overridden, otherwise we get a compatibility warning.
settings._explicit_settings.add('MIDDLEWARE_CLASSES')
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'django.contrib.contenttypes.tests.migrations',
}
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 2.0.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')
if bits[:2] == ['django', 'contrib']:
bits = bits[:3]
else:
bits = bits[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TEMP_DIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TEMP_DIR))
except OSError:
print('Failed to remove temp directory: %s' % TEMP_DIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, test_labels):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
)
# Catch warnings thrown in test DB setup -- remove in Django 1.9
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"Custom SQL location '<app_label>/models/sql' is deprecated, "
"use '<app_label>/sql' instead.",
RemovedInDjango19Warning
)
warnings.filterwarnings(
'ignore',
'IPAddressField has been deprecated. Use GenericIPAddressField instead.',
RemovedInDjango19Warning
)
failures = test_runner.run_tests(
test_labels or get_installed(), extra_tests=extra_tests)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
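    # Repeatedly halves the suite, re-running each half plus `bisection_label`
    # in a subprocess, to isolate the test that interferes with it.
    # Example invocation (hypothetical label): ./runtests.py --bisect=basic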
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
options = parser.parse_args()
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.modules)
if failures:
sys.exit(bool(failures))
# graphicsUtils.py
# ----------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import sys
import math
import random
import string
import time
import types
import tkinter as tk
_Windows = sys.platform == 'win32' # True if on Win95/98/NT
_root_window = None      # The root window for graphics output (created by begin_graphics)
_canvas = None # The canvas which holds graphics
_canvas_xs = None # Size of canvas object
_canvas_ys = None
_canvas_x = None # Current position on canvas
_canvas_y = None
_canvas_col = None # Current colour (set to black below)
_canvas_tsize = 12
_canvas_tserifs = 0
def formatColor(r, g, b):
return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
def colorToVector(color):
return [int(x, 16) / 256.0 for x in [color[1:3], color[3:5], color[5:7]]]
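# Examples: formatColor(1, 0, 0) returns '#ff0000', and
# colorToVector('#ff0000') returns [0.99609375, 0.0, 0.0] (note the 256.0 divisor).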
if _Windows:
_canvas_tfonts = ['times new roman', 'lucida console']
else:
_canvas_tfonts = ['times', 'lucidasans-24']
pass # XXX need defaults here
def sleep(secs):
global _root_window
    if _root_window is None:
time.sleep(secs)
else:
_root_window.update_idletasks()
_root_window.after(int(1000 * secs), _root_window.quit)
_root_window.mainloop()
def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None):
global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color
# Check for duplicate call
if _root_window is not None:
# Lose the window.
_root_window.destroy()
# Save the canvas size parameters
_canvas_xs, _canvas_ys = width - 1, height - 1
_canvas_x, _canvas_y = 0, _canvas_ys
_bg_color = color
# Create the root window
_root_window = tk.Tk()
_root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
_root_window.title(title or 'Graphics Window')
_root_window.resizable(0, 0)
# Create the canvas object
try:
_canvas = tk.Canvas(_root_window, width=width, height=height)
_canvas.pack()
draw_background()
_canvas.update()
except:
_root_window = None
raise
# Bind to key-down and key-up events
_root_window.bind( "<KeyPress>", _keypress )
_root_window.bind( "<KeyRelease>", _keyrelease )
_root_window.bind( "<FocusIn>", _clear_keys )
_root_window.bind( "<FocusOut>", _clear_keys )
_root_window.bind( "<Button-1>", _leftclick )
_root_window.bind( "<Button-2>", _rightclick )
_root_window.bind( "<Button-3>", _rightclick )
_root_window.bind( "<Control-Button-1>", _ctrl_leftclick)
_clear_keys()
_leftclick_loc = None
_rightclick_loc = None
_ctrl_leftclick_loc = None
def _leftclick(event):
global _leftclick_loc
_leftclick_loc = (event.x, event.y)
def _rightclick(event):
global _rightclick_loc
_rightclick_loc = (event.x, event.y)
def _ctrl_leftclick(event):
global _ctrl_leftclick_loc
_ctrl_leftclick_loc = (event.x, event.y)
def wait_for_click():
while True:
global _leftclick_loc
global _rightclick_loc
global _ctrl_leftclick_loc
        if _leftclick_loc is not None:
            val = _leftclick_loc
            _leftclick_loc = None
            return val, 'left'
        if _rightclick_loc is not None:
            val = _rightclick_loc
            _rightclick_loc = None
            return val, 'right'
        if _ctrl_leftclick_loc is not None:
            val = _ctrl_leftclick_loc
            _ctrl_leftclick_loc = None
            return val, 'ctrl_left'
sleep(0.05)
def draw_background():
corners = [(0,0), (0, _canvas_ys), (_canvas_xs, _canvas_ys), (_canvas_xs, 0)]
polygon(corners, _bg_color, fillColor=_bg_color, filled=True, smoothed=False)
def _destroy_window(event=None):
sys.exit(0)
# global _root_window
# _root_window.destroy()
# _root_window = None
#print "DESTROY"
def end_graphics():
global _root_window, _canvas, _mouse_enabled
try:
try:
sleep(1)
            if _root_window is not None:
_root_window.destroy()
        except SystemExit as e:
            print('Ending graphics raised an exception:', e)
finally:
_root_window = None
_canvas = None
_mouse_enabled = 0
_clear_keys()
def clear_screen(background=None):
global _canvas_x, _canvas_y
_canvas.delete('all')
draw_background()
_canvas_x, _canvas_y = 0, _canvas_ys
def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
c = []
for coord in coords:
c.append(coord[0])
c.append(coord[1])
    if fillColor is None: fillColor = outlineColor
if filled == 0: fillColor = ""
poly = _canvas.create_polygon(c, outline=outlineColor, fill=fillColor, smooth=smoothed, width=width)
if behind > 0:
_canvas.tag_lower(poly, behind) # Higher should be more visible
return poly
def square(pos, r, color, filled=1, behind=0):
x, y = pos
coords = [(x - r, y - r), (x + r, y - r), (x + r, y + r), (x - r, y + r)]
return polygon(coords, color, color, filled, 0, behind=behind)
def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
x, y = pos
x0, x1 = x - r - 1, x + r
y0, y1 = y - r - 1, y + r
    if endpoints is None:
e = [0, 359]
else:
e = list(endpoints)
while e[0] > e[1]: e[1] = e[1] + 360
return _canvas.create_arc(x0, y0, x1, y1, outline=outlineColor, fill=fillColor,
extent=e[1] - e[0], start=e[0], style=style, width=width)
_images = []  # Keep references to PhotoImages; Tk drops images that are garbage-collected
def image(pos, file="../../blueghost.gif"):
    x, y = pos
    img = tk.PhotoImage(file=file)
    _images.append(img)  # prevent the image from being garbage-collected while displayed
    return _canvas.create_image(x, y, image=img, anchor=tk.NW)
def refresh():
_canvas.update_idletasks()
def moveCircle(id, pos, r, endpoints=None):
global _canvas_x, _canvas_y
x, y = pos
# x0, x1 = x - r, x + r + 1
# y0, y1 = y - r, y + r + 1
x0, x1 = x - r - 1, x + r
y0, y1 = y - r - 1, y + r
    if endpoints is None:
e = [0, 359]
else:
e = list(endpoints)
while e[0] > e[1]: e[1] = e[1] + 360
edit(id, ('start', e[0]), ('extent', e[1] - e[0]))
move_to(id, x0, y0)
def edit(id, *args):
_canvas.itemconfigure(id, **dict(args))
def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"):
global _canvas_x, _canvas_y
x, y = pos
font = (font, str(size), style)
return _canvas.create_text(x, y, fill=color, text=contents, font=font, anchor=anchor)
def changeText(id, newText, font=None, size=12, style='normal'):
_canvas.itemconfigure(id, text=newText)
if font != None:
_canvas.itemconfigure(id, font=(font, '-%d' % size, style))
def changeColor(id, newColor):
_canvas.itemconfigure(id, fill=newColor)
def line(here, there, color=formatColor(0, 0, 0), width=2):
x0, y0 = here[0], here[1]
x1, y1 = there[0], there[1]
return _canvas.create_line(x0, y0, x1, y1, fill=color, width=width)
##############################################################################
### Keypress handling ########################################################
##############################################################################
# We bind to key-down and key-up events.
_keysdown = {}
_keyswaiting = {}
# This holds an unprocessed key release. We delay key releases by up to
# one call to keys_pressed() to get round a problem with auto repeat.
_got_release = None
def _keypress(event):
global _got_release
#remap_arrows(event)
_keysdown[event.keysym] = 1
_keyswaiting[event.keysym] = 1
# print event.char, event.keycode
_got_release = None
def _keyrelease(event):
global _got_release
#remap_arrows(event)
    try:
        del _keysdown[event.keysym]
    except KeyError:
        pass
_got_release = 1
def remap_arrows(event):
# TURN ARROW PRESSES INTO LETTERS (SHOULD BE IN KEYBOARD AGENT)
if event.char in ['a', 's', 'd', 'w']:
return
if event.keycode in [37, 101]: # LEFT ARROW (win / x)
event.char = 'a'
if event.keycode in [38, 99]: # UP ARROW
event.char = 'w'
if event.keycode in [39, 102]: # RIGHT ARROW
event.char = 'd'
if event.keycode in [40, 104]: # DOWN ARROW
event.char = 's'
def _clear_keys(event=None):
global _keysdown, _got_release, _keyswaiting
_keysdown = {}
_keyswaiting = {}
_got_release = None
# The d_o_e defaults are wrapped in lambdas so _root_window is resolved at
# call time (begin_graphics replaces the root window after import).
def keys_pressed(d_o_e=lambda arg: _root_window.dooneevent(arg),
                 d_w=tk._tkinter.DONT_WAIT):
d_o_e(d_w)
if _got_release:
d_o_e(d_w)
return list(_keysdown.keys())
def keys_waiting():
global _keyswaiting
keys = list(_keyswaiting.keys())
_keyswaiting = {}
return keys
# Block for a list of keys...
def wait_for_keys():
keys = []
while keys == []:
keys = keys_pressed()
sleep(0.05)
return keys
def remove_from_screen(x,
                       d_o_e=lambda arg: _root_window.dooneevent(arg),
                       d_w=tk._tkinter.DONT_WAIT):
_canvas.delete(x)
d_o_e(d_w)
def _adjust_coords(coord_list, x, y):
for i in range(0, len(coord_list), 2):
coord_list[i] = coord_list[i] + x
coord_list[i + 1] = coord_list[i + 1] + y
return coord_list
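# Behavior sketch (illustrative): shifts alternating x,y coordinates in place:
#   _adjust_coords([0, 0, 10, 10], 5, 2) -> [5, 2, 15, 12]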
def move_to(object, x, y=None,
            d_o_e=lambda arg: _root_window.dooneevent(arg),
            d_w=tk._tkinter.DONT_WAIT):
if y is None:
try: x, y = x
        except (TypeError, ValueError): raise Exception('incomprehensible coordinates')
horiz = True
newCoords = []
current_x, current_y = _canvas.coords(object)[0:2] # first point
for coord in _canvas.coords(object):
if horiz:
inc = x - current_x
else:
inc = y - current_y
horiz = not horiz
newCoords.append(coord + inc)
_canvas.coords(object, *newCoords)
d_o_e(d_w)
def move_by(object, x, y=None,
            d_o_e=lambda arg: _root_window.dooneevent(arg),
            d_w=tk._tkinter.DONT_WAIT):
if y is None:
try: x, y = x
        except (TypeError, ValueError): raise Exception('incomprehensible coordinates')
horiz = True
newCoords = []
for coord in _canvas.coords(object):
if horiz:
inc = x
else:
inc = y
horiz = not horiz
newCoords.append(coord + inc)
_canvas.coords(object, *newCoords)
d_o_e(d_w)
def writePostscript(filename):
"Writes the current canvas to a postscript file."
    psfile = open(filename, 'w')  # the Python 2 file() builtin is gone in Python 3
psfile.write(_canvas.postscript(pageanchor='sw',
y='0.c',
x='0.c'))
psfile.close()
ghost_shape = [
(0, - 0.5),
(0.25, - 0.75),
(0.5, - 0.5),
(0.75, - 0.75),
(0.75, 0.5),
(0.5, 0.75),
(- 0.5, 0.75),
(- 0.75, 0.5),
(- 0.75, - 0.75),
(- 0.5, - 0.5),
(- 0.25, - 0.75)
]
if __name__ == '__main__':
begin_graphics()
clear_screen()
ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
g = polygon(ghost_shape, formatColor(1, 1, 1))
move_to(g, (50, 50))
circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
sleep(2)
|
|
#!/usr/bin/env python
"""Tests reparenting is picked up in topology."""
import json
import threading
import time
import numpy
import logging
import base_end2end_test
import utils
import vtctl_helper
from vtproto import topodata_pb2
from vttest import sharding_utils
def setUpModule():
pass
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
utils.kill_sub_processes()
utils.remove_tmp_files()
class ReparentTest(base_end2end_test.BaseEnd2EndTest):
@classmethod
def setUpClass(cls):
super(ReparentTest, cls).setUpClass()
# number of reparent iterations
cls.num_reparents = int(cls.test_params.get('num_reparents', '1'))
# max allowable median master downtime in seconds
cls.master_downtime_threshold = int(cls.test_params.get(
'master_downtime_threshold', '20'))
# seconds to wait for reparent to result in a new master
cls.reparent_timeout_threshold = int(cls.test_params.get(
'reparent_timeout_threshold', '300'))
@classmethod
def tearDownClass(cls):
logging.info('Tearing down ReparentTest, verifying tablets are healthy.')
cls.env.wait_for_healthy_tablets()
super(ReparentTest, cls).tearDownClass()
@classmethod
def create_default_local_environment(cls):
pass
def explicit_reparent(self, keyspace, num_shards, external=False,
cross_cell=False):
"""Performs an explicit reparent.
This function will explicitly select a new master and verify that the
topology is updated.
Args:
keyspace: Name of the keyspace to reparent (string)
num_shards: Total number of shards (int)
external: Whether the reparent should be external or through vtctl (bool)
cross_cell: Whether to reparent to a different cell (bool)
    Returns:
      A list of floats, one per shard: how long each reparent took, measured
      from just before the explicit reparent call. Cross-cell reparents may
      return [].
"""
next_masters = []
durations = []
    for shard in range(num_shards):
shard_name = sharding_utils.get_shard_name(shard, num_shards)
original_master = self.env.get_current_master_name(keyspace, shard_name)
next_master = self.env.get_next_master(keyspace, shard_name, cross_cell)
next_masters.append(next_master)
self.env.wait_for_good_failover_status(keyspace, shard_name)
# Call Reparent in a separate thread.
def reparent_shard(shard, shard_name, original_master, next_master):
logging.info('Reparenting %s/%s from %s to %s', keyspace, shard_name,
original_master, next_master[2])
if external:
return_code, return_output = self.env.external_reparent(
keyspace, next_master[0], shard, new_task_num=next_master[1])
else:
return_code, return_output = self.env.internal_reparent(
keyspace, shard_name, next_master[2])
logging.info('Reparent returned %d for %s/%s: %s',
return_code, keyspace, shard_name, return_output)
thread = threading.Thread(target=reparent_shard,
args=[shard, shard_name, original_master,
next_master])
start_time = time.time()
thread.start()
# Wait for the reparent.
while time.time() - start_time < self.reparent_timeout_threshold:
try:
tablet_health = json.loads(
self.env.vtctl_helper.execute_vtctl_command(
['VtTabletStreamHealth', next_master[2]]))
if tablet_health['target']['tablet_type'] == topodata_pb2.MASTER:
duration = time.time() - start_time
durations.append(duration)
logging.info('Reparent took %f seconds', duration)
break
except (IndexError, KeyError, vtctl_helper.VtctlClientError):
pass
else:
self.fail('Timed out waiting for reparent on %s/%s' % (
keyspace, shard_name))
thread.join()
return durations
def implicit_reparent(self, keyspace, shard, num_shards):
"""Performs an implicit reparent.
This function will call borg restart on the current master task and
verify that a new task was selected to be the master.
Args:
keyspace: Name of the keyspace to reparent (string)
shard: Numeric ID of the shard to reparent (zero based int)
num_shards: Total number of shards (int)
"""
shard_name = sharding_utils.get_shard_name(shard, num_shards)
original_master_name = (
self.env.get_current_master_name(keyspace, shard_name))
original_master_cell = self.env.get_tablet_cell(original_master_name)
master_task_num = self.env.get_tablet_task_number(original_master_name)
logging.info('Restarting %s/%s, current master: %s, task: %d',
keyspace, shard_name, original_master_name, master_task_num)
ret_val = self.env.restart_mysql_task(
original_master_cell, keyspace, shard, master_task_num, 'replica',
'mysql-alloc', True)
    self.assertEqual(ret_val, 0,
                     msg='restartalloc failed (returned %d)' % ret_val)
start_time = time.time()
while time.time() - start_time < self.reparent_timeout_threshold:
new_master_name = self.env.get_current_master_name(keyspace, shard_name)
new_master_task_num = self.env.get_tablet_task_number(new_master_name)
if new_master_name != original_master_name:
break
time.sleep(1)
    self.assertNotEqual(
        new_master_name, original_master_name,
        msg='Expected master tablet to change, but it remained as %s' % (
            new_master_name))
logging.info('restartalloc on %s/%s resulted in new master: %s, task: %d',
keyspace, shard_name, new_master_name, new_master_task_num)
# TODO(thompsonja): re-enable this test after Orchestrator integration
def _test_implicit_reparent(self):
logging.info('Performing %s implicit reparents', self.num_reparents)
    for attempt in range(1, self.num_reparents + 1):
logging.info('Implicit reparent iteration number %d of %d', attempt,
self.num_reparents)
for keyspace, num_shards in zip(self.env.keyspaces, self.env.num_shards):
        for shard in range(num_shards):
self.implicit_reparent(keyspace, shard, num_shards)
self.env.wait_for_healthy_tablets()
def test_explicit_reparent(self):
logging.info('Performing %s explicit reparents', self.num_reparents)
durations = []
    for attempt in range(1, self.num_reparents + 1):
logging.info('Explicit reparent iteration number %d of %d', attempt,
self.num_reparents)
for keyspace, num_shards in zip(self.env.keyspaces, self.env.num_shards):
durations.extend(self.explicit_reparent(keyspace, num_shards))
durations = numpy.array(durations)
median_duration = numpy.median(durations)
logging.info('%d total reparents, median duration %f seconds',
len(durations), median_duration)
self.assertLessEqual(median_duration, self.master_downtime_threshold,
'master downtime too high (performance regression)')
# TODO(thompsonja): re-enable this test after Orchestrator integration
def _test_explicit_external_reparent(self):
logging.info('Performing %s explicit external reparents',
self.num_reparents)
durations = []
    for attempt in range(1, self.num_reparents + 1):
logging.info('Explicit external reparent iteration number %d of %d',
attempt, self.num_reparents)
for keyspace, num_shards in zip(self.env.keyspaces, self.env.num_shards):
durations.extend(
self.explicit_reparent(keyspace, num_shards, external=True))
durations = numpy.array(durations)
median_duration = numpy.median(durations)
logging.info('%d total reparents, median duration %f seconds',
len(durations), median_duration)
self.assertLessEqual(median_duration, self.master_downtime_threshold,
'master downtime too high (performance regression)')
def test_explicit_reparent_cross_cell(self):
if len(self.env.cells) < 2:
logging.info('Not enough cells to test cross_cell reparents!')
return
logging.info('Performing %s cross-cell explicit reparents',
self.num_reparents)
    for attempt in range(1, self.num_reparents + 1):
logging.info('Cross-cell explicit reparent iteration number %d of %d',
attempt, self.num_reparents)
for keyspace, num_shards in zip(self.env.keyspaces, self.env.num_shards):
self.explicit_reparent(keyspace, num_shards, cross_cell=True)
# TODO(thompsonja): re-enable this test after Orchestrator integration
def _test_explicit_external_reparent_cross_cell(self):
if len(self.env.cells) < 2:
logging.info('Not enough cells to test cross_cell reparents!')
return
logging.info('Performing %s cross-cell explicit external reparents',
self.num_reparents)
    for attempt in range(1, self.num_reparents + 1):
logging.info('Cross-cell explicit external reparent iteration number %d '
'of %d', attempt, self.num_reparents)
for keyspace, num_shards in zip(self.env.keyspaces, self.env.num_shards):
self.explicit_reparent(
keyspace, num_shards, external=True, cross_cell=True)
if __name__ == '__main__':
base_end2end_test.main()
|
|
from datetime import datetime
from datetime import timedelta
import json
import time
import unittest
from bson.objectid import ObjectId
from flask import url_for
import flask_testing as testing  # the flask.ext.* namespace was removed; import the packages directly
import flask_pymongo as pymongo
import logcas.bootstrap
import db
class IndexTestCase(testing.TestCase):
def create_app(self):
app = logcas.bootstrap.app
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
return app
def test_index(self):
response = self.client.get('/')
self.assertRedirects(response, url_for('_request_index'))
class RequestIndexTestCase(testing.TestCase):
col = db.logs
controller = '_request_index'
template = 'request_index.html'
def create_app(self):
app = logcas.bootstrap.app
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
return app
@classmethod
def setUpClass(cls):
now = datetime.today()
cls.now = now
onesecond = timedelta(0, 1)
for i in range(0, 20):
for level in logcas.bootstrap.LEVELMAP.keys():
                cls.col.insert_one({  # pymongo's save() is deprecated; insert_one is equivalent here
                    "time": now,
                    "created": int(time.mktime(now.timetuple())),  # strftime("%s") is glibc-only
"message": "This is a message",
"hostname": "localhost",
"levelno": level,
"levelname": logcas.bootstrap.LEVELMAP[level],
"binary": "nova-compute",
"extra": {
"request_id": str(i),
"remote_address": "127.0.0.1",
"project_name": "testproject",
"user_name": "testuser",
"user_id": "xxxxxxxx",
}
})
now = now + onesecond
@classmethod
def tearDownClass(cls):
cls.col.drop()
# no param
def test_without_params(self):
response = self.client.get(url_for(self.controller))
self.assert200(response)
self.assertTemplateUsed(self.template)
# page
def test_with_page_(self):
response = self.client.get(url_for(self.controller, page=""))
self.assert400(response)
def test_with_page_abc(self):
response = self.client.get(url_for(self.controller, page="abc"))
self.assert400(response)
def test_with_page_0(self):
response = self.client.get(url_for(self.controller, page="0"))
self.assert400(response)
def test_with_page_1(self):
response = self.client.get(url_for(self.controller, page="1"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_page_100(self):
response = self.client.get(url_for(self.controller, page="100"))
self.assert200(response)
self.assertTemplateUsed(self.template)
# limit
def test_with_limit_(self):
response = self.client.get(url_for(self.controller, limit=""))
self.assert400(response)
def test_with_limit_abc(self):
response = self.client.get(url_for(self.controller, limit="abc"))
self.assert400(response)
def test_with_limit_9(self):
response = self.client.get(url_for(self.controller, limit="9"))
self.assert400(response)
def test_with_limit_10(self):
response = self.client.get(url_for(self.controller, limit="10"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_limit_200(self):
response = self.client.get(url_for(self.controller, limit="200"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_limit_201(self):
response = self.client.get(url_for(self.controller, limit="201"))
self.assert400(response)
# levelno
def test_with_levelno_(self):
response = self.client.get(url_for(self.controller, levelno=""))
self.assert400(response)
def test_with_levelno_abc(self):
response = self.client.get(url_for(self.controller, levelno="abc"))
self.assert400(response)
def test_with_levelno_0(self):
response = self.client.get(url_for(self.controller, levelno="0"))
self.assert400(response)
def test_with_levelno_10(self):
response = self.client.get(url_for(self.controller, levelno="10"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_levelno_20(self):
response = self.client.get(url_for(self.controller, levelno="20"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_levelno_21(self):
response = self.client.get(url_for(self.controller, levelno="21"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_levelno_30(self):
response = self.client.get(url_for(self.controller, levelno="30"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_levelno_40(self):
response = self.client.get(url_for(self.controller, levelno="40"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_levelno_50(self):
response = self.client.get(url_for(self.controller, levelno="50"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_levelno_60(self):
response = self.client.get(url_for(self.controller, levelno="60"))
self.assert400(response)
# style
def test_with_style_(self):
response = self.client.get(url_for(self.controller, style=""))
self.assert400(response)
def test_with_style_abc(self):
response = self.client.get(url_for(self.controller, style="abc"))
self.assert400(response)
def test_with_style_default(self):
response = self.client.get(url_for(self.controller, style="default"))
self.assert200(response)
self.assertTemplateUsed(self.template)
def test_with_style_dark(self):
response = self.client.get(url_for(self.controller, style="dark"))
self.assert200(response)
self.assertTemplateUsed(self.template)
class ArchivedRequestIndexTestCase(RequestIndexTestCase):
col = db.archived_logs
controller = '_archived_request_index'
template = 'archived_request_index.html'
if __name__ == '__main__':
unittest.main()
|
|
#
# handlers.py
# Handling for actual web server bits
#
import socket, sys, json, os, time
from BaseHTTPServer import BaseHTTPRequestHandler
from sendfile import sendfile
# When moving data ourselves, how much to move at a time
Bufsize = 64*1024
# Tell if this pathname looks questionable
def sketchy(fn):
# No chance of up-relative
if ".." not in fn:
return False
# See if any are actual up-refs
tup = fn.split("/")
if any( (s == "..") for s in tup ):
return True
# Ok
return False
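# Behavior sketch for sketchy() (illustrative):
#   sketchy("imgs/icon.png")   -> False  (no up-references)
#   sketchy("a/../etc/passwd") -> True   (real ".." path component)
#   sketchy("..weird/name")    -> False  (".." substring, but no ".." component)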
# Return value for a digit (hex)
def digval(c):
if c.isdigit():
return ord(c) - ord('0')
c = c.lower()
if (c < 'a') or (c > 'f'):
return None
return (ord(c) - ord('a')) + 10
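# Behavior sketch for digval() (illustrative):
#   digval('7') -> 7,  digval('B') -> 11,  digval('z') -> None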
# Remove %XX hex char values from form input string
# I just cannot effin believe this isn't somewhere already...
def unescape(s):
res = ""
c1 = c2 = idx = None
for c in s:
if idx is None:
if c != '%':
res += c
else:
idx = 1
continue
if idx == 1:
c1 = digval(c)
idx = 2
continue
assert idx == 2
idx = None
if c1 is None:
res += "?"
else:
c2 = digval(c)
if c2 is None:
res += "?"
else:
res += chr((c1 << 4) + c2)
c1 = c2 = None
return res
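# Behavior sketch for unescape() (illustrative):
#   unescape("foo%20bar") -> "foo bar"
#   unescape("100%25")    -> "100%"
#   unescape("bad%zz")    -> "bad?"   (invalid hex escapes fold to '?')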
# An instance of this runs to dispatch a given HTTP request
#
# An actual app will inherit this and add app-specific dispatchers.
#
# self.server gets set to our web server's state
class Chore_Handler(BaseHTTPRequestHandler):
# Default config
def __init__(self, conn, addr, webserver, inits):
# Our common code will also set this, but we do it
# here to make it available to our init's
self.server = webserver
# /js/..., /imgs/..., and such just serve files
self.lits = ("js", "imgs", "css", "html")
# Code which, in turn, tries to dispatch ops
# (Each is (op, fn), e.g., ("GET", self.do_get1))
self.dispatchers = [ ("GET", self.base_get), ]
# Add on others (mixins)
for i in inits:
i(self)
# Default title, use the mandatory service name
self.title = webserver.approot.config["service"]
# These get dicts if there are options
self.vals = self.rvals = None
# Hook for custom headers
self.extra_headers = []
# If authentication is active
self.user = None
# This both init's, and runs the web service
# (BTW, this sucks. Break out instance creation and
# service start--always.)
BaseHTTPRequestHandler.__init__(self, conn, addr, webserver)
# Hook to set up SSL
def old_OpenSSL_setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
# Find & dispatch handler
    def dispatch(self, op, *args):
        # Always decode <path>?<opts>; options() returns False when
        # parseKV() has already sent an error response
        if not self.options():
            return None
# Find handler
for tup in self.dispatchers:
if tup[0] == op:
res = tup[1](*args)
if res is None:
sys.stderr.write("Null handler ret: %s\n" % (tup[1],))
return None
else:
dispatched,res = res
if dispatched:
return res
# Nobody could make heads or tails of it
self.send_error(404)
return None
# HTTP GET operation
def do_GET(self):
sys.stderr.write("GET: %s\n" % (self.path,))
self.base_op = "GET"
buf = self.dispatch("GET")
sys.stderr.write("GET back\n")
if buf:
self.wfile.write(buf)
# HTTP HEAD; header of a GET result, no body
def do_HEAD(self):
sys.stderr.write("HEAD: %s\n" % (self.path,))
self.base_op = "HEAD"
buf = self.dispatch("GET")
sys.stderr.write("HEAD back\n")
# Default POST/PUT size cap
def check_post_len(self, val):
# Default, 1 meg
if val > 1024*1024:
return True
return False
# POST and PUT; very similar
def postput(self, typ):
sys.stderr.write("%s: %s\n" % (typ, self.path))
self.base_op = typ
        # Decode options in path; bail out if an error was already sent
        if not self.options():
            return
# How much data?
content_len = int(self.headers.getheader('content-length', 0))
# Try to hand off to a raw resource handler
rawtyp = typ+"_raw"
dispatched = False
for tup in self.dispatchers:
if tup[0] == rawtyp:
dispatched,buf = tup[1](content_len)
if dispatched:
break
# Hook; cap transfer size
# We let raw handlers go first, since the whole point of raw
# transfers is to avoid buffering to memory, accomodating
# large sizes.
if self.check_post_len(content_len):
self.send_error(400, "Content Length too big")
return
# Then get a local buffer of the POST/PUT content and
# call simple string-based handler
buf = None
if not dispatched:
dbuf = self.rfile.read(content_len)
for tup in self.dispatchers:
if tup[0] == typ:
dispatched,buf = tup[1](dbuf)
if dispatched:
break
# No match
if not dispatched:
self.send_error(404)
return
# Push any result
if buf:
self.wfile.write(buf)
sys.stderr.write("%s back\n" % (typ,))
def do_POST(self):
self.postput("POST")
def do_PUT(self):
self.postput("PUT")
# Standard header for web pages
def build_header(self, title=None):
if title is None:
title = self.title
buf = '<html><head><title>%s</title>' % (title,)
buf += ' <meta name="viewport"' \
' content="width=device-width, initial-scale=1">\n'
buf += '</head><body>\n'
return buf
# Tack on end of HTML
def build_tailer(self, buf):
buf += "</body></html>\n"
return buf
# Map file extension to MIME type
# Returns (isbin?, mime-type) or None
def get_mtype(self, fn):
if fn.endswith(".js"):
return False, "application/javascript"
if fn.endswith(".jpg"):
return True,"image/jpeg"
if fn.endswith(".png"):
return True,"image/png"
if fn.endswith(".svg"):
return True,"image/svg+xml"
if fn.endswith(".gif"):
return True,"image/gif"
if fn.endswith(".ico"):
return True,"image/vnd.microsoft.icon"
if fn.endswith(".wav"):
return True,"audio/x-wav"
if fn.endswith(".mp3"):
return True,"audio/mpeg"
if fn.endswith(".flac"):
return True,"audio/flac"
if fn.endswith(".ogg"):
return True,"audio/ogg"
if fn.endswith(".html"):
return False,"text/html"
if fn.endswith(".htm"):
return False,"text/html"
if fn.endswith(".txt"):
return False,"text/plain"
if fn.endswith(".json"):
return False,"text/plain"
if fn.endswith(".css"):
return False,"text/css"
# Unknown file type
return None
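    # Behavior sketch for get_mtype (illustrative):
    #   get_mtype("app.js")   -> (False, "application/javascript")
    #   get_mtype("a.mp3")    -> (True, "audio/mpeg")
    #   get_mtype("a.tar.gz") -> None   (unknown extension)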
# Decode a "range:" header option, return
# (offset,length) or None if we don't like
# the region (TBD, multiple ranges and
# multipart)
# We're passed the file's os.stat as well as
# the range: field value.
def decode_range(self, st, range):
# Byte units, please
if not range.startswith("bytes="):
return None
range = range[6:]
# Single range
if ',' in range:
return None
        # Empty range spec ("bytes=") is malformed
        if not range:
            return None
        # Suffix range "-N": the last N bytes of the file
        if range[0] == '-':
            range = range[1:]
            if not range.isdigit():
                return None
            val1 = int(range)
            if (val1 == 0) or (val1 > st.st_size):
                return None
            return (st.st_size - val1, val1)
        # Open-ended range "N-": from offset N through end of file
        elif range[-1] == '-':
            range = range[:-1]
            if not range.isdigit():
                return None
            val2 = int(range)
            if val2 >= st.st_size:
                return None
            return (val2, st.st_size - val2)
        # Explicit range "N-M"; both endpoints are inclusive in HTTP
        else:
            parts = range.split('-')
            if len(parts) != 2:
                return None
            if not all(p.isdigit() for p in parts):
                return None
            val1 = int(parts[0])
            # Clamp the inclusive end to the last byte of the file
            val2 = min(int(parts[1]), st.st_size - 1)
            if val1 > val2:
                return None
            return (val1, (val2 - val1) + 1)
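    # Behavior sketch for decode_range (illustrative, assuming a 1000-byte
    # file and the inclusive HTTP range semantics implemented above):
    #   "bytes=0-499" -> (0, 500)    first 500 bytes
    #   "bytes=500-"  -> (500, 500)  offset 500 through end
    #   "bytes=-200"  -> (800, 200)  final 200 bytes
    #   "bytes=5-2"   -> None        inverted range rejected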
# Try to serve the named file
def send_files(self, fn, pacer=None, cacheable=True,
replacer=None, streaming=False):
global Bufsize
# Hanky-panky?
if sketchy(fn):
self.send_error(404)
return None
# Make sure we know its MIME type
tup = self.get_mtype(fn)
if tup is None:
# Unknown file type
self.send_error(404)
return None
isbin,mtyp = tup
try:
f = open(fn, "rb" if isbin else "r")
        except IOError:
self.send_error(404)
return None
# Get dope on file overall
st = os.fstat(f.fileno())
startoff = 0
nbyte = st.st_size
ranged = False
# Sub-ranged output
if (not streaming) and ('range' in self.headers):
tup = self.decode_range(st, self.headers['range'])
if tup is None:
# Bad range
self.send_error(416)
return None
ranged = True
startoff,nbyte = tup
else:
startoff = 0
nbyte = st.st_size
# For media files, use sendfile() rather than passing
# it all through this process.
# We also support ranges here.
if isbin:
# Ranged or normal response
if ranged:
self.send_response(206)
else:
self.send_response(200)
self.send_header("Content-type", mtyp)
if not streaming:
# iOS will see this, and try to gulp the whole thing down
self.send_header("Content-Length", nbyte)
if ranged:
self.send_header("Content-Range",
"bytes %d-%d/%d" % (startoff, startoff+nbyte-1, st.st_size))
if cacheable:
self.send_header("Last-Modified",
time.asctime(time.localtime(st.st_mtime)))
else:
self.send_header("Cache-Control", "no-cache")
self.end_headers()
# Don't push out body if they're just asking us about
# the file's size via HEAD
if self.base_op == "GET":
# SSL encap apparently isn't compatible with
# the nicely efficient sendfile(), so move
# it "by hand"
if self.server.ssl or (pacer is not None) or \
(replacer is not None):
f.seek(startoff)
nleft = nbyte
while nleft > 0:
req = min(nleft, Bufsize)
buf = f.read(req)
if replacer is not None:
buf = replacer(buf)
self.wfile.write(buf)
nleft -= len(buf)
if pacer is not None:
if pacer.done:
break
pacer.sent(len(buf))
self.wfile.flush()
else:
sendfile(self.wfile.fileno(),
f.fileno(), startoff, nbyte)
# We've pushed it, tell the upper layers
buf = None
# Text, just shuffle bytes around as a whole
# TBD are gigabyte text files... an encyclopedia, anybody?
# But don't forget the DOS-style line endings; can't just
# use sendfile() if you honor that.
else:
buf = f.read()
if replacer is not None:
buf = replacer(buf)
self.changed = st.st_mtime
buf = self.send_result(buf, mtyp, cacheable=cacheable)
# Done with the file
f.close()
# Return contents (or None if we already pushed it out)
return buf
    # Common code to strip and decode options
    # Also bursts the path to self.paths[]
    # Returns True on success, False if parseKV() rejected the options
    # (in which case an error response has already been sent)
    def options(self):
        p = self.path
        # Options?
        if "?" in p:
            idx = p.rindex("?")
            if self.parseKV(p[idx+1:]) is None:
                return False
            self.path = p = p[:idx]
        # Burst path
        self.paths = p.strip("/").split("/")
        return True
# Helper; match path to given one
def path_match(self, *elems):
pp = self.paths
if len(elems) != len(pp):
return False
if any( (p1 != p2) for p1,p2 in zip(elems, pp) ):
return False
return True
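    # Behavior sketch (illustrative): after options() runs on a request for
    # "/api/status?x=1", self.paths == ["api", "status"], so
    # path_match("api", "status") is True and path_match("api") is False.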
# Basic GET functions
def base_get(self):
# Top level
pp = self.paths
if (not pp) or ((len(pp) == 1) and not pp[0]):
return True,self.send_top()
# Service file content
if pp[0] in self.lits:
fname = os.path.join(*pp)
return True,self.send_files(fname)
# Special case, bleh
if len(pp) == 1:
if pp[0] == "favicon.ico":
return True,self.send_files("imgs/favicon.ico")
if pp[0] == "apple-touch-icon.png":
return True,self.send_files("imgs/apple-touch-icon.png")
# Couldn't help
return False,None
# Intercept after request is parsed; apply HTTP authentication
def parse_request(self):
# Make sure it's well-formed
res = BaseHTTPRequestHandler.parse_request(self)
if not res:
return False
# Check whether this request can proceed
if not hasattr(self, "authenticate"):
return True
# Authentication is present, go ask
res = self.authenticate()
# Supplied body is the completion of the request
if isinstance(res, str):
res = self.send_result(res, "text/html")
# self.base_op hasn't been decoded yet, so
# just snoop the operation type directly
if not self.requestline.startswith("HEAD"):
self.wfile.write(res)
# We did the whole request, so don't have
# handle_one_request() try also
return False
# HTML error; code and message
if isinstance(res, (tuple,list)):
self.send_error(res[0], res[1])
return False
# Only other valid return format is True; keep
# going
if res is not True:
            raise Exception("TBD, check this code path")
return True
# Send header
# Also canonicalize to DOS-style line endings (ew)
# This code only handles textual responses; binary/large
# media is handled inline.
def send_result(self, buf, mtyp, cacheable=False):
# Send response
self.send_response(200)
self.send_header("Content-type", mtyp)
self.send_header("Content-Length", len(buf))
if (not cacheable) or (not hasattr(self, "changed")):
self.send_header("Cache-Control", "no-cache")
else:
self.send_header("Last-Modified",
time.asctime(time.localtime(self.changed)))
self.end_headers()
return buf
# Hook to add on any specified extra headers
def end_headers(self):
# Add on any extras we've calculated
if self.extra_headers:
for tag,val in self.extra_headers:
self.send_header(tag, val)
del self.extra_headers[:]
# Now do the basic action
BaseHTTPRequestHandler.end_headers(self)
# Generate a meta REFRESH body
def gen_redir(self, url, msg=None):
if msg is None:
timeout = 0
msg = "Refreshing..."
else:
# If we have something to say, give'em 5 seconds
# to admire its wisdom.
timeout = 5
buf = \
"""
<html>
<head>
<title>%s</title>
<meta http-equiv="REFRESH"
content="%d;url=%s" />
</head>
<body>
%s
</body>
</html>
""" % (self.title, timeout, url, msg)
return buf
# Generate meta REFRESH body, send it
def send_redir(self, url, msg=None, style=None):
# 303 is an alternate HTTP response code, often needed
# for smaller HTTP devices which won't parse a full
# meta REFRESH
if style == "303":
self.extra_headers.append( ("Location", url) )
return self.send_error(303)
# Default, Meta REFRESH style
buf = self.gen_redir(url, msg)
buf = self.send_result(buf, "text/html")
return buf
# In <url>[?key[=val][&key[=val...]]], parse key[/val] and
# put into self.vals{} and self.rvals{}
# Returns the input @buf, or None if there was a problem
def parseKV(self, buf):
# Walk each key/val, in the format "k[=v]"
# vals{} assembles vals[k] = v, and
# rvals{} assembles rvals[v] = k
self.vals = vals = {}
self.rvals = rvals = {}
for kv in buf.split("&"):
# Split to k and v
tup2 = kv.split("=")
if len(tup2) == 1:
k = tup2[0]
v = True
elif len(tup2) == 2:
k,v = tup2
else:
# x=y=z, something like that?
self.send_error(404)
return None
# Field name should be folded to lower case, as case
# sensitivity varies by browser.
k = k.lower()
if isinstance(v, bool):
vals[k] = v
else:
# The show/artist/track can have spaces, which have
# turned into plus signs.
v = v.replace("+", " ")
vals[k] = unescape(v)
rvals[v] = k
# Success; @buf untouched
return buf
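    # Behavior sketch (illustrative): self.parseKV("artist=Pink+Floyd&shuffle")
    # leaves self.vals == {"artist": "Pink Floyd", "shuffle": True} and
    # self.rvals == {"Pink Floyd": "artist"} (bare keys are not reverse-mapped).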
# Take the JSON-ish dict @d, and send its JSON
# encoding back to our client
def send_json(self, d):
buf = json.dumps(d)
buf = self.send_result(buf, "application/json")
return buf
# Wrap the very common action of returning HTML
def send_html(self, h, cacheable=True):
buf = self.send_result(h, "text/html", cacheable=cacheable)
return buf
# Common code for failing to parse a request
def bad_request(self):
self.send_error(400)
return True,None
# Utility to pull file contents
def readf(self, fn):
f = open(fn, "r")
res = f.read()
f.close()
return res
# Decode Cookie: header option
def get_cookie(self, target):
# Header present?
cookies = self.headers.get("cookie")
if not cookies:
return None
# Parse into cookies; each is "key=value", with
# semicolon separating them (if more than one)
# TBD, use Python's Cookie module?
cookies = [c.strip().split('=') for c in cookies.split(';')]
for tup in cookies:
if len(tup) != 2:
continue
# Here's a match
if tup[0] == target:
return tup[1]
# Don't have that cookie
return None
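    # Behavior sketch (illustrative): with the request header
    # "Cookie: session=abc123; theme=dark", get_cookie("theme") returns
    # "dark" and get_cookie("missing") returns None.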
# Tell if our peer is presenting an iOS User-Agent
def peer_ios(self):
ua = self.headers.get("user-agent")
if not ua:
return False
# iOS-ish?
if not any( (s in ua) for s in ("iPad", "iPhone", "iPod") ):
return False
# Laggard MS, claiming to be an iPhone
if "IEMobile" in ua:
return False
return True
|
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding layers."""
import math
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import hyperparams
from lingvo.core import py_utils
from lingvo.core import schedule
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.tpu import tpu_embedding as tpu_embedding_lib
# pylint:enable=g-direct-tensorflow-import
def _IsTpuTraining(p):
"""Whether we should create embedding tables and run lookup on tpu."""
return not p.is_inference and py_utils.use_tpu()
def _RemovePrivateVar(layer, var_name):
"""Remove a variable by name from `layer`.
  This is usually used to avoid the variable being copied to TPU, e.g., by the
  tf.cast that runs when accessing layer.theta.
Args:
layer: The layer to remove the variable from.
var_name: The name of the variable to remove.
"""
# pylint: disable=protected-access
del layer._private_vars[var_name]
del layer._private_theta[var_name]
# pylint: enable=protected-access
class TpuEmbeddingCollection:
"""Manage various TPU embedding related ops and tensors."""
@classmethod
def Get(cls):
"""Returns the TpuEmbeddingCollection associated with the current graph."""
emb_collection = py_utils.GetTpuEmbeddingGraphCollection()
assert len(emb_collection) <= 1
if len(emb_collection) == 1:
tf.logging.info(
'TpuEmbeddingCollection singleton already exists, reusing')
return emb_collection[0]
else:
singleton = cls()
emb_collection.append(singleton)
return singleton
def __init__(self):
# Maps table name to a tuple (var_list, is_inference_with_bfloat16), where
# var_list is the list of variables for the corresponding table, and
# is_inference_with_bfloat16 is a boolean telling whether this table is
# using bfloat16 for inference.
self._table_vars = py_utils.NestedMap()
# The TPUEmbedding configuration.
self._tpu_embedding = None
# Maps table name to the list of ops that loads/retrieves embedding tables
# to/from TPU.
self._load_ops_map = py_utils.NestedMap()
self._retrieve_ops_map = py_utils.NestedMap()
# Maps task name to the (feature_name -> activation_tensor) dict for the
# corresponding task.
self._activations_by_task = {}
# List of (name, value, weight) tuples for summary.
self._summary_tensors = []
# Set of embedding feature names.
self._feature_names = None
# Schedule for the value that is used as TPU embedding gradient multiplier.
self._gradient_multiplier_schedule = None
# Maps task name to the mode used by that task.
self._mode_by_task = {}
# Maps task name to the send gradient op for that task. Mainly used to
# ensure that send gradient op is created only once for each task.
self._send_gradient_op_by_task = {}
def AddTableVariables(self, table_name, var_list, is_inference_with_bfloat16):
"""Add TPU embedding table variable list to the collection."""
if table_name in self._table_vars:
raise ValueError(f'Variables for table {table_name} already exist.')
self._table_vars[table_name] = (var_list, is_inference_with_bfloat16)
@property
def table_variables(self):
"""Returns a list of table variables."""
return self._table_vars.Transform(lambda val: val[0])
@property
def inference_with_bfloat16_var_names(self):
"""Returns a list of names of table variables that do bfloat16 inference."""
result = []
for var_list, is_inference_with_bfloat16 in self._table_vars.values():
if is_inference_with_bfloat16:
result += [v.op.name for v in var_list]
return result
@property
def tpu_embedding(self):
return self._tpu_embedding
@tpu_embedding.setter
def tpu_embedding(self, tpu_embedding):
if self._tpu_embedding is not None:
      raise ValueError('TPUEmbedding was already set.')
self._tpu_embedding = tpu_embedding
def AddLoadRetrieveOps(self, table_name, load_ops, retrieve_ops):
if table_name in self._load_ops_map:
raise ValueError(f'Load ops for table {table_name} already exist.')
assert table_name not in self._retrieve_ops_map
self._load_ops_map[table_name] = load_ops
self._retrieve_ops_map[table_name] = retrieve_ops
@property
def load_ops(self):
return self._load_ops_map
@property
def retrieve_ops(self):
return self._retrieve_ops_map
def _ValidateTaskScope(self, task_call_scope):
if not task_call_scope:
      raise ValueError(
          'Expected a non-empty task call scope name, but got '
          f'{task_call_scope}. This usually means the current code is not run '
          'under a py_utils.TaskCallScope() context.')
def AddActivations(self, task_call_scope):
self._ValidateTaskScope(task_call_scope)
tf.logging.info(
f'Adding TPU embedding activations for task {task_call_scope}.')
if task_call_scope not in self._activations_by_task:
activations = self._tpu_embedding.get_activations()
self._activations_by_task[task_call_scope] = activations
return self._activations_by_task[task_call_scope]
def GetActivations(self, task_call_scope):
tf.logging.info(
f'Getting TPU embedding activations for task {task_call_scope}.')
if task_call_scope in self._activations_by_task:
self._ValidateTaskScope(task_call_scope)
return self._activations_by_task[task_call_scope]
return None
def AddSummaryTensor(self, name, value, weight=1.0):
self._summary_tensors.append((name, value, tf.convert_to_tensor(weight)))
@property
def summary_tensors(self):
return self._summary_tensors
@property
def feature_names(self):
return self._feature_names
@feature_names.setter
def feature_names(self, feature_names):
if self._feature_names and self._feature_names != feature_names:
raise ValueError('feature_names already exists. '
f'Existing feature names: {self._feature_names}, '
f'feature names being added: {feature_names}')
self._feature_names = feature_names
def SetGradientMultiplierSchedule(self, multiplier_schedule):
if self._gradient_multiplier_schedule is not None:
      raise ValueError('gradient_multiplier_schedule was already set.')
self._gradient_multiplier_schedule = multiplier_schedule
def SetTaskMode(self, task_call_scope, mode):
self._ValidateTaskScope(task_call_scope)
tf.logging.info(
f'Setting TPU embedding mode for task {task_call_scope} as {mode}.')
self._mode_by_task[task_call_scope] = mode
def ShouldStopGradient(self, task_call_scope):
self._ValidateTaskScope(task_call_scope)
if task_call_scope not in self._mode_by_task:
raise ValueError(
f'TPU embedding mode for task {task_call_scope} not found.')
should_stop_gradient = (self._mode_by_task[task_call_scope] != 'train')
tf.logging.info(('Disabled' if should_stop_gradient else 'Enabled') +
f' TPU embedding gradient for task {task_call_scope}.')
return should_stop_gradient
def ApplyGradients(self, task_call_scope, feature_to_gradient_dict):
"""Apply tpu embedding gradient updates.
Args:
task_call_scope: The current task call scope name.
feature_to_gradient_dict: A `py_utils.NestedMap` of: tpu embedding feature
name -> gradient tensor for the embedding feature.
Returns:
The gradient update op and a dict of eval metrics.
Raises:
ValueError: if gradients have been applied before for the current task.
"""
self._ValidateTaskScope(task_call_scope)
if task_call_scope in self._send_gradient_op_by_task:
raise ValueError(
          f'Send gradient op for task {task_call_scope} already exists.')
tf.logging.info(
f'Applying TPU embedding gradients for task {task_call_scope}.')
# Apply gradient multiplier schedule.
grad_multiplier = self._gradient_multiplier_schedule.Value()
feature_to_gradient_dict = feature_to_gradient_dict.Transform(
lambda g: g * grad_multiplier)
send_gradient_op = (
self._tpu_embedding.generate_send_gradients_op(
feature_to_gradient_dict, step=py_utils.GetGlobalStep()))
self._send_gradient_op_by_task[task_call_scope] = send_gradient_op
activations = self.GetActivations(task_call_scope).values()
eval_metrics = {
'tpu_embedding_activation_norm':
(tf.sqrt(py_utils.SumSquared(activations)), tf.constant(1.0)),
'tpu_embedding_grad_norm':
(tf.sqrt(py_utils.SumSquared(feature_to_gradient_dict.Flatten())),
tf.constant(1.0)),
'tpu_embedding_gradient_multiplier':
(grad_multiplier, tf.constant(1.0)),
}
return send_gradient_op, eval_metrics
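# Rough usage sketch for the collection above (illustrative only; `scope` and
# `grads` are hypothetical stand-ins for a task call scope name and a
# py_utils.NestedMap of feature name -> gradient tensor):
#   coll = TpuEmbeddingCollection.Get()
#   acts = coll.AddActivations(scope)   # feature name -> activation tensor
#   coll.SetTaskMode(scope, 'train')
#   if not coll.ShouldStopGradient(scope):
#     send_op, metrics = coll.ApplyGradients(scope, grads)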
class _TPUEmbeddingOptimizer(base_layer.BaseLayer):
"""Base class for TPUEmbeddingLayer, TPUEmbeddingTable optimizers."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('clip_weight_min', None,
'The minimum value to clip the weight by; None means -infinity.')
p.Define('clip_weight_max', None,
'The maximum value to clip the weight by; None means +infinity.')
p.Define(
'clip_gradient_min', None,
'The minimum value to clip the gradient by; None means -infinity.')
p.Define(
'clip_gradient_max', None,
'The maximum value to clip the gradient by; None means +infinity.')
p.Define(
'weight_decay_factor', None,
'Amount of weight decay to apply; None means that the weights are not '
'decayed.')
p.Define(
'multiply_weight_decay_factor_by_learning_rate', None,
'If true, weight_decay_factor is multiplied by the current learning '
'rate.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
assert p.name
# A dict of slot_variable_name -> slot_variable for checkpointing purposes.
self._slot_var_dict = {}
def CreateOptimizerParameters(self, learning_rate):
"""Create TPUEmbedding API optimzier parameters."""
return NotImplementedError()
def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):
"""Create slot variables and load/retrieve ops.
Args:
table_vars: A list of all embedding table shard variables.
tpu_embedding_table: Parent TPUEmbeddingTable layer.
    Returns:
      A pair of lists: the load ops and the retrieve ops.
    """
raise NotImplementedError()
def _GetSelfVariablesDict(self):
"""Returns a dict of variables for checkpointing purposes."""
return self._slot_var_dict
class TPUEmbeddingSGDOptimizer(_TPUEmbeddingOptimizer):
"""SGD optimizer for TPUEmbeddingLayer, TPUEmbeddingTable."""
def CreateOptimizerParameters(self, learning_rate):
p = self.params
return tpu_embedding_lib.StochasticGradientDescentParameters(
learning_rate=learning_rate,
clip_weight_min=p.clip_weight_min,
clip_weight_max=p.clip_weight_max,
weight_decay_factor=p.weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=p
.multiply_weight_decay_factor_by_learning_rate,
clip_gradient_min=p.clip_gradient_min,
clip_gradient_max=p.clip_gradient_max)
def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):
load_op_list = []
retrieve_op_list = []
num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts
table_name = tpu_embedding_table.table_name
for host_id, table_var in zip(range(num_tpu_hosts), table_vars):
# The slot vars should be on the same device as the table var.
device_name = tpu_embedding_table.GetDeviceName(host_id)
with tf.device(device_name), py_utils.outside_all_rewrites():
# Only the Trainer needs these ops.
if py_utils.use_tpu():
# TPU Embedding load/retrieve ops need to be in the outer graph scope.
with tf.init_scope():
tf.logging.info('creating load and retrieve ops.')
load_parameters_op = (
tpu_embedding_lib.tpu_ops
.load_tpu_embedding_stochastic_gradient_descent_parameters(
parameters=table_var,
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
retrieved_table = (
tpu_embedding_lib.tpu_ops
.retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(
tf.assign(table_var, retrieved_table))
retrieve_op_list.append(retrieve_parameters_op)
return load_op_list, retrieve_op_list
class TPUEmbeddingAdagradOptimizer(_TPUEmbeddingOptimizer):
"""Adagrad optimizer for TPUEmbeddingLayer, TPUEmbeddingTable."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('initial_accumulator', 0.1,
'Initial value of Adagrad accumulator.')
p.Define(
'use_gradient_accumulation', True,
'Setting this to False makes embedding gradients calculation less '
'accurate but faster. See tpu_embedding_lib for more details.')
return p
def CreateOptimizerParameters(self, learning_rate):
p = self.params
return tpu_embedding_lib.AdagradParameters(
learning_rate=learning_rate,
initial_accumulator=p.initial_accumulator,
clip_weight_min=p.clip_weight_min,
clip_weight_max=p.clip_weight_max,
weight_decay_factor=p.weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=p
.multiply_weight_decay_factor_by_learning_rate,
clip_gradient_min=p.clip_gradient_min,
clip_gradient_max=p.clip_gradient_max)
def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):
p = self.params
load_op_list = []
retrieve_op_list = []
num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts
table_name = tpu_embedding_table.table_name
slot_var_collections = [tpu_embedding_table.__class__.__name__ + '_vars']
for host_id, table_var in zip(range(num_tpu_hosts), table_vars):
# The slot vars should be on the same device as the table var.
device_name = tpu_embedding_table.GetDeviceName(host_id)
with tf.device(device_name), py_utils.outside_all_rewrites():
w_ada = py_utils.WeightParams(
shape=table_var.shape.as_list(),
init=py_utils.WeightInit.Constant(p.initial_accumulator),
dtype=p.dtype,
collections=slot_var_collections)
var_name = tpu_embedding_table.GetVariableName(host_id) + '/Adagrad'
accumulator_var = tpu_embedding_table.CreateOptimizerSlotVariable(
var_name, w_ada, self._slot_var_dict)
# Only the Trainer needs these ops.
if py_utils.use_tpu():
# Remove the slot vars from the variable list to avoid them being
# copied to TPU.
_RemovePrivateVar(tpu_embedding_table, var_name)
# TPU Embedding load/retrieve ops need to be in the outer graph scope.
with tf.init_scope():
tf.logging.info('creating load and retrieve ops.')
load_parameters_op = (
tpu_embedding_lib.tpu_ops.load_tpu_embedding_adagrad_parameters(
parameters=table_var,
accumulators=accumulator_var,
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
retrieved_table, retrieved_accumulator = (
tpu_embedding_lib.tpu_ops
.retrieve_tpu_embedding_adagrad_parameters(
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(
tf.assign(table_var, retrieved_table),
tf.assign(accumulator_var, retrieved_accumulator))
retrieve_op_list.append(retrieve_parameters_op)
return load_op_list, retrieve_op_list
class TPUEmbeddingAdamOptimizer(_TPUEmbeddingOptimizer):
"""Adam optimizer for TPUEmbeddingLayer, TPUEmbeddingTable."""
@classmethod
def Params(cls):
p = super().Params()
    p.Define(
        'sum_inside_sqrt', True, 'When this is true, the Adam update '
        'formula is changed from m / (sqrt(v) + epsilon) to m / '
        'sqrt(v + epsilon**2). This option improves the performance of '
        'TPU training and is not expected to harm model quality.')
    p.Define('lazy_adam', True, 'Use lazy Adam instead of Adam. Lazy Adam '
             'trains faster.')
    p.Define('beta1', 0.9, 'The exponential decay rate for the 1st moment '
             'estimates.')
    p.Define('beta2', 0.999, 'The exponential decay rate for the 2nd moment '
             'estimates.')
    p.Define('epsilon', 1e-08, 'A small constant for numerical stability.')
    p.Define(
        'use_gradient_accumulation', True, 'Setting this to False makes '
        'embedding gradients calculation less accurate but faster.')
return p
def CreateOptimizerParameters(self, learning_rate):
p = self.params
return tpu_embedding_lib.AdamParameters(
learning_rate=learning_rate,
beta1=p.beta1,
beta2=p.beta2,
epsilon=p.epsilon,
lazy_adam=p.lazy_adam,
sum_inside_sqrt=p.sum_inside_sqrt,
use_gradient_accumulation=p.use_gradient_accumulation,
clip_weight_min=p.clip_weight_min,
clip_weight_max=p.clip_weight_max,
weight_decay_factor=p.weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=p
.multiply_weight_decay_factor_by_learning_rate,
clip_gradient_min=p.clip_gradient_min,
clip_gradient_max=p.clip_gradient_max)
def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):
p = self.params
load_op_list = []
retrieve_op_list = []
num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts
table_name = tpu_embedding_table.table_name
slot_var_collections = [tpu_embedding_table.__class__.__name__ + '_vars']
for host_id, table_var in zip(range(num_tpu_hosts), table_vars):
# The slot vars should be on the same device as the table var.
device_name = tpu_embedding_table.GetDeviceName(host_id)
with tf.device(device_name), py_utils.outside_all_rewrites():
m_adam = py_utils.WeightParams(
shape=table_var.shape.as_list(),
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=slot_var_collections)
var_name_m = tpu_embedding_table.GetVariableName(host_id) + '/Adam/m'
m_var = tpu_embedding_table.CreateOptimizerSlotVariable(
var_name_m, m_adam, self._slot_var_dict)
v_adam = py_utils.WeightParams(
shape=table_var.shape.as_list(),
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=slot_var_collections)
var_name_v = tpu_embedding_table.GetVariableName(host_id) + '/Adam/v'
v_var = tpu_embedding_table.CreateOptimizerSlotVariable(
var_name_v, v_adam, self._slot_var_dict)
# Only the Trainer needs these ops.
if py_utils.use_tpu():
# Remove the slot vars from the variable list to avoid them being
# copied to TPU.
_RemovePrivateVar(tpu_embedding_table, var_name_m)
_RemovePrivateVar(tpu_embedding_table, var_name_v)
# TPU Embedding load/retrieve ops need to be in the outer graph scope.
with tf.init_scope():
tf.logging.info('creating load and retrieve ops.')
load_parameters_op = (
tpu_embedding_lib.tpu_ops.load_tpu_embedding_adam_parameters(
parameters=table_var,
momenta=m_var,
velocities=v_var,
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
retrieved_table, retrieved_m, retrieved_v = (
tpu_embedding_lib.tpu_ops
.retrieve_tpu_embedding_adam_parameters(
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(
tf.assign(table_var, retrieved_table),
tf.assign(m_var, retrieved_m), tf.assign(v_var, retrieved_v))
retrieve_op_list.append(retrieve_parameters_op)
return load_op_list, retrieve_op_list
class TPUEmbeddingFTRLOptimizer(_TPUEmbeddingOptimizer):
"""FTRL optimizer for TPUEmbeddingLayer, TPUEmbeddingTable."""
@classmethod
def Params(cls):
p = super().Params()
    p.Define(
        'learning_rate_power', -0.5,
        'A float value, must be less than or equal to zero. Controls how the '
        'learning rate decreases during training. Use zero for a fixed '
        'learning rate.')
    p.Define(
        'initial_accumulator_value', 0.1, 'The starting value for '
        'accumulators. Only zero or positive values are allowed.')
    p.Define(
        'l1_regularization_strength', 0.0, 'A float value, must be greater '
        'than or equal to zero. Defaults to 0.0.')
    p.Define(
        'l2_regularization_strength', 0.0, 'A float value, must be greater '
        'than or equal to zero. Defaults to 0.0.')
    p.Define('multiply_linear_by_learning_rate', False, 'Whether to multiply '
             'linear by the learning rate.')
    p.Define(
        'beta', 0.0, 'A float value, representing the beta value from the '
        'FTRL paper. Defaults to 0.0.')
    p.Define('allow_zero_accumulator', False, 'Whether to allow a zero '
             'accumulator.')
    p.Define('use_gradient_accumulation', True, 'Use gradient accumulation.')
    p.Define('initial_linear_value', 0.0, 'Initial linear value.')
return p
def CreateOptimizerParameters(self, learning_rate):
p = self.params
return tpu_embedding_lib.FtrlParameters(
learning_rate=learning_rate,
learning_rate_power=p.learning_rate_power,
initial_accumulator_value=p.initial_accumulator_value,
l1_regularization_strength=p.l1_regularization_strength,
l2_regularization_strength=p.l2_regularization_strength,
use_gradient_accumulation=p.use_gradient_accumulation,
clip_weight_min=p.clip_weight_min,
clip_weight_max=p.clip_weight_max,
weight_decay_factor=p.weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=p
.multiply_weight_decay_factor_by_learning_rate,
multiply_linear_by_learning_rate=p.multiply_linear_by_learning_rate,
beta=p.beta,
allow_zero_accumulator=p.allow_zero_accumulator,
clip_gradient_min=p.clip_gradient_min,
clip_gradient_max=p.clip_gradient_max)
def CreateSlotVariablesAndOps(self, table_vars, tpu_embedding_table):
p = self.params
load_op_list = []
retrieve_op_list = []
num_tpu_hosts = tpu_embedding_table.params.num_tpu_hosts
table_name = tpu_embedding_table.table_name
slot_var_collections = [tpu_embedding_table.__class__.__name__ + '_vars']
for host_id, table_var in zip(range(num_tpu_hosts), table_vars):
# The slot vars should be on the same device as the table var.
device_name = tpu_embedding_table.GetDeviceName(host_id)
with tf.device(device_name), py_utils.outside_all_rewrites():
accumulator = py_utils.WeightParams(
shape=table_var.shape.as_list(),
init=py_utils.WeightInit.Constant(p.initial_accumulator_value),
dtype=p.dtype,
collections=slot_var_collections)
accumulator_name = (
tpu_embedding_table.GetVariableName(host_id) + '/Ftrl')
accumulator_var = tpu_embedding_table.CreateOptimizerSlotVariable(
accumulator_name, accumulator, self._slot_var_dict)
linear = py_utils.WeightParams(
shape=table_var.shape.as_list(),
init=py_utils.WeightInit.Constant(p.initial_linear_value),
dtype=p.dtype,
collections=slot_var_collections)
linear_name = tpu_embedding_table.GetVariableName(host_id) + '/Ftrl_1'
linear_var = tpu_embedding_table.CreateOptimizerSlotVariable(
linear_name, linear, self._slot_var_dict)
# Only the Trainer needs these ops.
if py_utils.use_tpu():
# Remove the slot vars from the variable list to avoid them being
# copied to TPU.
_RemovePrivateVar(tpu_embedding_table, accumulator_name)
_RemovePrivateVar(tpu_embedding_table, linear_name)
# TPU Embedding load/retrieve ops need to be in the outer graph scope.
with tf.init_scope():
tf.logging.info('creating load and retrieve ops.')
load_parameters_op = (
tpu_embedding_lib.tpu_ops.load_tpu_embedding_ftrl_parameters(
parameters=table_var,
accumulators=accumulator_var,
linears=linear_var,
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
load_op_list.append(load_parameters_op)
retrieved_table, retrieved_accumulator, retrieved_linear = (
tpu_embedding_lib.tpu_ops
.retrieve_tpu_embedding_ftrl_parameters(
table_name=table_name,
num_shards=num_tpu_hosts,
shard_id=host_id))
retrieve_parameters_op = tpu_embedding_lib.control_flow_ops.group(
tf.assign(table_var, retrieved_table),
tf.assign(accumulator_var, retrieved_accumulator),
tf.assign(linear_var, retrieved_linear))
retrieve_op_list.append(retrieve_parameters_op)
return load_op_list, retrieve_op_list
class TPUEmbeddingTable(base_layer.BaseLayer):
"""An embedding table controlled by TPUEmbeddingLayer.
  Note that all input_keys need to be declared upfront.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('vocab_size', 0, 'Depth of the input.')
p.Define('embedding_dim', 0, 'Depth of the output.')
p.Define('input_keys', None, 'Name of inputs in InputBatch.')
    p.Define(
        'combiner', 'mean',
        'Must be "sum", "sqrtn", "mean" or None in the case of a '
        '"sequence embedding".')
p.Define(
'max_sequence_length', None,
'If not None or 0, embedding lookup will return a '
'"sequence embedding" of shape '
'`[batch, max_sequence_length, embedding_dim]` without applying a '
'sequence reducing combiner')
p.Define('num_tpu_hosts', 0, 'Total number of TPU hosts.')
p.Define(
'optimizer', None,
'Table optimizer parameters. Will override the optimizer parameters '
'defined in this table\'s TPUEmbeddingLayer.')
p.Define('learning_rate', None,
'Overrides TPUEmbeddingLayer\'s learning_rate.')
p.Define('lr_schedule', None, 'Overrides TPUEmbeddingLayer\'s lr_schedule.')
p.Define(
'inference_use_merged_variable', False,
'Whether to use merged embedding table variable during inference. '
'If set to True, only one table variable will be created, and '
'the user will need to manually merge the sharded table variables '
'in the trained checkpoint before generating the inference graph.')
p.Define(
'inference_use_bfloat16', False,
'Whether to use bfloat16 as variable dtype for embedding table during '
'inference. If set to True, the variables in the inference checkpoint '
'must be in bfloat16 format, and the conversion (float->bfloat16) '
'need to be done offline.')
return p
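  # Illustrative table configuration (hypothetical sizes and keys; `opt` is an
  # optimizer params object such as the FTRL sketch above):
  #
  #   table = TPUEmbeddingTable.Params().Set(
  #       name='token_emb', vocab_size=32000, embedding_dim=128,
  #       input_keys=['token_ids'], num_tpu_hosts=4, combiner='mean',
  #       optimizer=opt, learning_rate=0.01,
  #       lr_schedule=schedule.ContinuousSchedule.Params())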
def __init__(self, params):
super().__init__(params)
p = self.params
assert p.vocab_size > 0
assert p.embedding_dim > 0
assert p.input_keys
assert p.name
assert p.num_tpu_hosts > 0
if p.combiner is None:
assert p.max_sequence_length
if p.max_sequence_length is not None and p.max_sequence_length > 0:
assert p.combiner is None
assert p.optimizer
assert p.learning_rate
assert p.lr_schedule
self._ids_per_shard = int(math.ceil(float(p.vocab_size) / p.num_tpu_hosts))
self._padded_vocab_size = self._ids_per_shard * p.num_tpu_hosts
self._input_keys = p.input_keys
self._max_sequence_length = 0
if p.max_sequence_length:
self._max_sequence_length = p.max_sequence_length
self.CreateChild('optimizer', p.optimizer)
self.CreateChild('schedule', p.lr_schedule)
self._tpu_embedding_collection = TpuEmbeddingCollection.Get()
def LearningRateFn(step):
with py_utils.GlobalStepContext(step):
lr = self.schedule.Value() * p.learning_rate
self._tpu_embedding_collection.AddSummaryTensor(
'tpu_embedding_lr/{}'.format(p.name), lr)
return lr
self._table_name = '{}_table'.format(p.name)
self._table_config = tpu_embedding_lib.TableConfig(
self._padded_vocab_size,
p.embedding_dim,
combiner=p.combiner,
learning_rate=None,
learning_rate_fn=LearningRateFn,
# All TableConfigs passed to API will have a learning rate function,
# so the learning_rate in the optimization_parameters is not used.
optimization_parameters=self.optimizer.CreateOptimizerParameters(
p.learning_rate))
def _CreateLayerVariables(self):
p = self.params
# Reuse the singleton table variables if they were created before.
all_table_vars = self._tpu_embedding_collection.table_variables
if self.table_name in all_table_vars:
embedding_table_vars = all_table_vars[self.table_name]
else:
inference_with_merged_var = (
p.is_inference and p.inference_use_merged_variable)
is_inference_with_bfloat16 = (p.is_inference and p.inference_use_bfloat16)
dtype = tf.bfloat16 if is_inference_with_bfloat16 else p.dtype
w_pc = py_utils.WeightParams(
shape=[
p.vocab_size if inference_with_merged_var else
self._ids_per_shard, p.embedding_dim
],
init=p.params_init,
dtype=dtype,
collections=[self.__class__.__name__ + '_vars'])
embedding_table_vars = []
if inference_with_merged_var:
with py_utils.outside_all_rewrites():
var_name = 'merged_var'
self.CreateVariable(var_name, w_pc)
embedding_var = self.vars[var_name]
embedding_table_vars.append(embedding_var)
# Remove from _private_vars / _private_thetas to be added later as wm.
_RemovePrivateVar(self, var_name)
else:
for i in range(p.num_tpu_hosts):
device_name = self.GetDeviceName(i)
with tf.device(device_name), py_utils.outside_all_rewrites():
var_name = self.GetVariableName(i)
self.CreateVariable(var_name, w_pc)
embedding_var = self.vars[var_name]
embedding_table_vars.append(embedding_var)
# Remove from _private_vars / _private_thetas to be added later as
# wm.
_RemovePrivateVar(self, var_name)
# Track the table variables so they can be excluded from EMA.
self._tpu_embedding_collection.AddTableVariables(
self.table_name, embedding_table_vars, is_inference_with_bfloat16)
if not _IsTpuTraining(p):
      # We don't need this for TrainerTpu, as the vars are not directly
      # accessed except in the TPU embedding load/retrieve ops.
      # However, this is needed for CPU (eval/decode/controller).
self._private_vars['wm'] = embedding_table_vars
self._private_theta['wm'] = embedding_table_vars
# If slot variables and load/retrieve ops were created before, maybe by a
# different program or task, don't create it again.
# Note that there should be only one copy of slot variables and
# load/retrieve ops in the graph and they're shared by different
# tasks/programs.
all_load_ops = self._tpu_embedding_collection.load_ops
if self.table_name not in all_load_ops:
assert self.table_name not in self._tpu_embedding_collection.retrieve_ops
# Only trainer and controller (for checkpointing) need slot variables.
# Only trainer needs load/retrieve ops.
if not self.do_eval and not p.is_inference:
load_ops, retrieve_ops = self.optimizer.CreateSlotVariablesAndOps(
embedding_table_vars, self)
self._tpu_embedding_collection.AddLoadRetrieveOps(
self.table_name, load_ops, retrieve_ops)
def _GetSelfVariablesDict(self):
"""Returns a dict of variables for checkpointing purposes."""
all_table_vars = self._tpu_embedding_collection.table_variables
assert self.table_name in all_table_vars
return {var.name: var for var in all_table_vars[self.table_name]}
def CreateOptimizerSlotVariable(self, var_name, var_params, slot_var_dict):
"""Create optimizer slot variable and add it to the given variable dict."""
self.CreateVariable(var_name, var_params, trainable=False)
var = self.vars[var_name]
slot_var_dict[var.name] = var
return var
# Return device to place sharded variables on.
def GetDeviceName(self, host_id):
if self.params.is_inference:
# This is to place variables on the same device as other variables.
return None
if self.do_eval:
return '/cpu:0'
else:
return '{}/replica:0/task:{}/device:CPU:0'.format(
self.cluster.params.worker.name, host_id)
# Return variable name for embedding table shards.
def GetVariableName(self, host_id):
return 'var_%d' % host_id
@property
def table_config(self):
return self._table_config
@property
def table_name(self):
return self._table_name
@property
def input_keys(self):
return self._input_keys
@property
def max_sequence_length(self):
return self._max_sequence_length
def _SequenceEmbLookup(self, dense_ids: tf.Tensor,
partition_strategy: str) -> tf.Tensor:
"""Sequence embedding lookup.
Note that we do not support padding ids in sequence embeddings.
Args:
dense_ids: An int Tensor of shape [batch, sequence].
partition_strategy: See TPUEmbeddingLayer partition_strategy param.
Returns:
A float32 activations Tensor of shape
[batch, max_sequence_length, embedding_dim].
"""
p = self.params
embs = tf.nn.embedding_lookup(
self.theta.wm,
tf.reshape(dense_ids, [-1]),
partition_strategy=partition_strategy)
out_shape = tf.concat([tf.shape(dense_ids), [p.embedding_dim]], 0)
return tf.reshape(embs, out_shape)
def _CombinerEmbLookup(self, sparse_ids: tf.SparseTensor,
partition_strategy: str) -> tf.Tensor:
"""Combiner embedding lookup.
Args:
sparse_ids: An int SparseTensor of shape [batch, ...].
partition_strategy: See TPUEmbeddingLayer partition_strategy param.
Returns:
A float32 activations Tensor of shape [batch, 1, embedding_dim].
"""
p = self.params
embs = tf.nn.embedding_lookup_sparse(
self.theta.wm,
sparse_ids,
None, # sp_weights
combiner=p.combiner,
partition_strategy=partition_strategy)
batch_size = sparse_ids.dense_shape[0]
# For tf.nn.embedding_lookup_sparse, output.dim0 might be different from
# sparse_ids.dense_shape.dim0.
# Explicitly pad results to maintain dim0=batch.
dim0_padlen = tf.cast(batch_size, tf.int32) - tf.shape(embs)[0]
embs = tf.pad(embs, [[0, dim0_padlen], [0, 0]])
# [batch, 1, embedding_dim]
embs = py_utils.HasShape(embs, [batch_size], ndims=1)
return tf.expand_dims(embs, 1)
def CpuEmbLookup(self, ids_map: py_utils.NestedMap,
partition_strategy: str) -> py_utils.NestedMap:
"""CPU evaluation embedding lookup for dense tensors.
Args:
ids_map: A NestedMap of nested `input_key` string -> [batch, sequence]
int Tensor. For sequence embeddings, -1 is used as a padding id.
Non-sequence embeddings do not support padded ids.
partition_strategy: See TPUEmbeddingLayer partition_strategy param.
Returns:
An activations NestedMap of nested string -> float32 Tensor.
For non-sequence embeddings: [batch, 1, embedding_dim]
For sequence embeddings: [batch, max_sequence_length, embedding_dim]
"""
if self.max_sequence_length > 0:
# "Sequence embedding", no combiner case
return ids_map.Transform(
lambda ids: self._SequenceEmbLookup(ids, partition_strategy))
else:
# Non-"Sequence embedding", combiner case
def _Lookup(ids):
# Dense to sparse.
dense_shape = tf.shape(ids, out_type=tf.int64)
sample_indices = tf.cast(tf.where(tf.not_equal(ids, -1)), tf.int64)
embedding_indices = tf.cast(tf.gather_nd(ids, sample_indices), tf.int64)
# [?, embedding_dim]
sparse_ids = tf.SparseTensor(
indices=sample_indices,
values=embedding_indices,
dense_shape=dense_shape)
return self._CombinerEmbLookup(sparse_ids, partition_strategy)
return ids_map.Transform(_Lookup)
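  # Shape sketch for the combiner path above (hypothetical values):
  #   ids = [[3, 5, -1], [7, -1, -1]]  # [batch=2, sequence=3], -1 is padding
  # _Lookup converts this to a SparseTensor with 2 and 1 valid ids per row,
  # which is then mean-combined into activations of shape
  # [2, 1, embedding_dim].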
def CpuEmbLookupSparse(self, ids_map: py_utils.NestedMap,
partition_strategy: str) -> py_utils.NestedMap:
"""CPU evaluation embedding lookup for SparseTensors.
Args:
ids_map: A NestedMap of nested `input_key` string -> [batch, ...] int
SparseTensor.
partition_strategy: See TPUEmbeddingLayer partition_strategy param.
Returns:
An activations NestedMap of nested string -> float32 Tensor.
For non-sequence embeddings: [batch, 1, embedding_dim]
For sequence embeddings: [batch, max_sequence_length, embedding_dim]
"""
if self.max_sequence_length > 0:
# "Sequence embedding", no combiner case
def _Lookup(ids):
# Sparse to dense.
dense_ids = tf.sparse.to_dense(ids, default_value=-1)
return self._SequenceEmbLookup(dense_ids, partition_strategy)
return ids_map.Transform(_Lookup)
else:
# Non-"Sequence embedding", combiner case
return ids_map.Transform(
lambda ids: self._CombinerEmbLookup(ids, partition_strategy))
class TPUEmbeddingLayer(base_layer.BaseLayer):
"""Monolithic interface to TPU embedding.
This layer has some important caveats, due to the interface of the
TPU embedding hardware. Its behavior most closely mimics that of
tf.nn.embedding_lookup_sparse.
Supports multiple tables and multiple input_keys per table.
Requires its own optimizer parameters.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_tpu_hosts', 0, 'Total number of TPU hosts.')
p.Define('tables', None, 'TPUEmbeddingTables')
p.Define('pipeline_execution_with_tensor_core', False,
'Set to True to be faster. See tpu_embedding.py for details.')
p.Define('batch_size', 0, 'Per-core batch size.')
p.Define(
'optimizer', TPUEmbeddingAdagradOptimizer.Params(),
'Layer optimizer parameters. Will be used for any TPUEmbeddingTables '
'with None optimizer parameters.')
p.Define('learning_rate', 0.0, 'Learning rate.')
p.Define(
'lr_schedule', schedule.ContinuousSchedule.Params(),
'Lingvo learning rate schedule. Will be multiplied to learning rate.')
p.Define(
'partition_strategy', 'div', 'A string, either "mod" or "div", '
'specifying how to map the lookup id to the embedding tensor. For '
'more information see `tf.nn.embedding_lookup_sparse`.')
p.Define(
'gradient_multiplier_schedule', schedule.ConstantOne.Params(),
'Values from this schedule will be multiplied to the embedding '
'gradients. Gradients from Tensorcore will not be affected.')
return p
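  # Layer-level configuration sketch (hypothetical; `table` as sketched in
  # TPUEmbeddingTable above):
  #
  #   emb = TPUEmbeddingLayer.Params().Set(
  #       name='tpu_emb', tables=[table], batch_size=8,
  #       partition_strategy='div')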
def __init__(self, params):
super().__init__(params)
p = self.params
assert p.tables
assert p.batch_size > 0
assert p.name
assert p.gradient_multiplier_schedule
assert p.partition_strategy in ['mod', 'div']
if p.num_tpu_hosts > 0:
for table_params in p.tables:
num_tpu_hosts = table_params.num_tpu_hosts
if num_tpu_hosts > 0 and num_tpu_hosts != p.num_tpu_hosts:
raise ValueError(
f'num_tpu_hosts mismatch: {num_tpu_hosts} vs {p.num_tpu_hosts}')
table_params.num_tpu_hosts = p.num_tpu_hosts
else:
num_tpu_hosts = p.tables[0].num_tpu_hosts
assert all([t.num_tpu_hosts == num_tpu_hosts for t in p.tables])
    # Stop if a table has no optimizer-related parameters and the layer also
    # has no optimizer parameters.
for param_name in ['optimizer', 'learning_rate', 'lr_schedule']:
table_param_missing = any(
table_params.Get(param_name) is None for table_params in p.tables)
if not p.Get(param_name) and table_param_missing:
raise ValueError(
f'A table is missing {param_name} parameters, and no layer-level '
f'{param_name} parameters were given.')
elif table_param_missing:
for table_params in p.tables:
if table_params.Get(param_name) is None:
value = p.Get(param_name)
if isinstance(value, hyperparams.Params):
value = value.Copy() # Avoid mutating the original copy.
table_params.Set(**{param_name: value})
self.CreateChildren('tables', p.tables)
self.CreateChild('gradient_multiplier_schedule',
p.gradient_multiplier_schedule)
self._tpu_embedding_collection = TpuEmbeddingCollection.Get()
# Save embedding feature names in the collection.
feature_names = set()
for table in self.tables:
for feature in table.input_keys:
if feature in feature_names:
raise ValueError(f'Input key {feature} was used by multiple tables.')
feature_names.add(feature)
self._tpu_embedding_collection.feature_names = feature_names
def _child_variable_scope_override(self):
return {**super()._child_variable_scope_override(), 'tables': []}
def _CheckTPUEmbeddingConfig(self, tpu_embedding, table_to_config_dict,
feature_to_config_dict, global_batch_size):
"""Check that the existing tpu_embedding config matches the given ones."""
def _Match(d1, d2, namedtuple_attrs_to_check):
if len(d1) != len(d2):
return False
for k, v1 in d1.items():
if k not in d2:
return False
v2 = d2[k]
for attr in namedtuple_attrs_to_check:
if getattr(v1, attr) != getattr(v2, attr):
return False
return True
    # We just check numeric/string settings for simplicity; this excludes
    # things like learning_rate_fn, optimization_parameters, etc., since
    # they're hard to compare.
if not _Match(tpu_embedding.table_to_config_dict, table_to_config_dict,
['vocabulary_size', 'dimension', 'combiner']):
raise ValueError('table_to_config_dict mismatch. '
f'Expecting {tpu_embedding.table_to_config_dict}, '
f'got {table_to_config_dict}')
if not _Match(tpu_embedding.feature_to_config_dict, feature_to_config_dict,
['table_id', 'max_sequence_length']):
raise ValueError('feature_to_config_dict mismatch. '
f'Expecting {tpu_embedding.feature_to_config_dict}, '
f'got {feature_to_config_dict}')
if (tpu_embedding.batch_size_per_core * tpu_embedding.num_cores !=
global_batch_size):
raise ValueError(
'global_batch_size mismatch. '
f'batch_size_per_core: {tpu_embedding.batch_size_per_core}, '
f'num_cores: {tpu_embedding.num_cores}, '
f'global_batch_size: {global_batch_size}')
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
# At the feature level, track which are associated
# with "sequence embeddings".
self._sequence_features = {}
if _IsTpuTraining(p):
num_cores = self.cluster.params.worker.tpus_per_replica
global_batch_size = (
self.params.batch_size * self.cluster.num_splits_per_client)
table_to_config_dict = {}
feature_to_config_dict = {}
for table in self.tables:
table_to_config_dict[table.table_name] = table.table_config
for feature in table.input_keys:
if table.max_sequence_length > 0:
self._sequence_features[feature] = True
feature_to_config_dict[feature] = tpu_embedding_lib.FeatureConfig(
table.table_name, max_sequence_length=table.max_sequence_length)
tpu_embedding = self._tpu_embedding_collection.tpu_embedding
if tpu_embedding:
self._CheckTPUEmbeddingConfig(tpu_embedding, table_to_config_dict,
feature_to_config_dict, global_batch_size)
tf.logging.info('TPUEmbedding API singleton already exists, reusing')
self._tpu_embedding = tpu_embedding
else:
mode = tpu_embedding_lib.TRAINING
device_config = tpu_embedding_lib.DeviceConfig(
num_cores=num_cores,
num_hosts=self.params.tables[0].num_tpu_hosts,
job_name=self.cluster.params.worker.name)
self._tpu_embedding = tpu_embedding_lib.TPUEmbedding(
table_to_config_dict,
feature_to_config_dict,
global_batch_size,
mode,
master=None,
pipeline_execution_with_tensor_core=(
self.params.pipeline_execution_with_tensor_core),
partition_strategy=p.partition_strategy,
device_config=device_config)
self._tpu_embedding_collection.tpu_embedding = self._tpu_embedding
self._tpu_embedding_collection.SetGradientMultiplierSchedule(
self.gradient_multiplier_schedule)
def _TpuEmbLookup(self, ids_map: py_utils.NestedMap) -> py_utils.NestedMap:
"""TPU Embedding lookup."""
task_call_scope = py_utils.GetTaskCallScope()
activations = self._tpu_embedding_collection.AddActivations(task_call_scope)
ret = py_utils.NestedMap()
for k, v in activations.items():
if ids_map.Get(k) is not None:
if k in self._sequence_features:
ret.Set(k, v)
else:
          # For non-sequence embeddings, insert a "time" dimension of size 1.
with tf.name_scope(k):
ret.Set(k, tf.expand_dims(v, axis=[1]))
return ret
def EmbLookup(self, ids_map: py_utils.NestedMap) -> py_utils.NestedMap:
"""Looks up embedding vectors for each entry in dense Tensor ids_map.
    Since the TPUEmbedding is monolithic, and consulted once per
    FProp/BProp, we must centralize the lookup. Thus, for multiple
    features, we combine them into a single lookup rather than allowing
    the caller to call Lookup multiple times.
Args:
ids_map: A NestedMap of nested `input_key` string -> [batch, sequence] int
Tensor.
For sequence embeddings, -1 is used as a padding id. Non-sequence
embeddings do not support padded ids.
Returns:
Activations NestedMap of nested string ->
For non-sequence embeddings: [batch, 1, embedding_dim],
For sequence embeddings: [batch, max_sequence_length, embedding_dim]
float32 Tensor.
"""
assert isinstance(ids_map, py_utils.NestedMap)
p = self.params
def CpuEmbLookup(ids_map):
"""CPU evaluation embedding lookup."""
rets = py_utils.NestedMap()
for table in self.tables:
table_id_map = py_utils.NestedMap()
for key in table.input_keys:
if ids_map.Get(key) is not None:
table_id_map.Set(key, ids_map.GetItem(key))
table_rets = table.CpuEmbLookup(table_id_map, p.partition_strategy)
# Merge table_rets with rets
for key in table.input_keys:
if ids_map.Get(key) is not None:
rets.Set(key, table_rets.GetItem(key))
return rets
if _IsTpuTraining(p):
return self._TpuEmbLookup(ids_map)
else:
return CpuEmbLookup(ids_map)
def EmbLookupSparse(self, ids_map: py_utils.NestedMap) -> py_utils.NestedMap:
"""Looks up embedding vectors for each entry in SparseTensor ids_map.
    Since the TPUEmbedding is monolithic, and consulted once per
    FProp/BProp, we must centralize the lookup. Thus, for multiple
    features, we combine them into a single lookup rather than allowing
    the caller to call Lookup multiple times.
Args:
ids_map: A NestedMap of nested `input_key` string -> [batch, ...] int
SparseTensor.
Returns:
Activations NestedMap of nested string ->
For non-sequence embeddings: [batch, 1, embedding_dim],
For sequence embeddings: [batch, max_sequence_length, embedding_dim]
float32 Tensor.
"""
assert isinstance(ids_map, py_utils.NestedMap)
p = self.params
def CpuEmbLookupSparse(ids_map):
"""CPU evaluation embedding lookup."""
rets = py_utils.NestedMap()
for table in self.tables:
table_id_map = py_utils.NestedMap()
for key in table.input_keys:
if ids_map.Get(key) is not None:
table_id_map.Set(key, ids_map.GetItem(key))
table_rets = table.CpuEmbLookupSparse(table_id_map,
p.partition_strategy)
# Merge table_rets with rets
for key in table.input_keys:
if ids_map.Get(key) is not None:
rets.Set(key, table_rets.GetItem(key))
return rets
if _IsTpuTraining(p):
return self._TpuEmbLookup(ids_map)
else:
return CpuEmbLookupSparse(ids_map)
|
|
from datetime import datetime
from Queue import Queue, Empty
from flask import request, jsonify, Response
from flexget.options import get_parser
from flexget.api import api, APIResource
from flexget.utils import json
from json import JSONEncoder
from flexget.event import event
from flexget.utils.lazy_dict import LazyLookup
execution_api = api.namespace('execution', description='Execute tasks')
def _task_info_dict(task):
return {
'id': int(task.id),
'name': task.name,
'current_phase': task.current_phase,
'current_plugin': task.current_plugin,
}
task_info_schema = {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'name': {'type': 'string'},
'current_phase': {'type': 'string'},
'current_plugin': {'type': 'string'},
}
}
execution_results_schema = {
'type': 'object',
'properties': {
'tasks': {
'type': 'array',
'items': task_info_schema,
}
}
}
execute_task_schema = {
'type': 'object',
'properties': {
'tasks': {
'type': 'array',
'items': {'type': 'string'}
},
'opt': {'type': 'string'},
}
}
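# Illustrative request body accepted by the POST endpoints below (note that
# the handlers read 'options_string' from the body while the schema above
# declares 'opt'; both fields are optional):
# {
#     "tasks": ["my-task", "TV*"],
#     "options_string": "--learn"
# }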
execution_api_result_schema = api.schema('execution_result', execution_results_schema)
execute_api_task_schema = api.schema('execute_task', execute_task_schema)
@execution_api.route('/queue/')
class ExecutionQueueAPI(APIResource):
@api.response(200, 'Show tasks in queue for execution', execution_api_result_schema)
def get(self, session=None):
""" List task executions """
tasks = [_task_info_dict(task) for task in self.manager.task_queue.run_queue.queue]
if self.manager.task_queue.current_task:
tasks.insert(0, _task_info_dict(self.manager.task_queue.current_task))
return jsonify({'tasks': tasks})
@execution_api.route('/execute/')
@api.doc(description='Wildcards supported, e.g. TV* will execute all tasks with TV in the name')
class ExecutionAPI(APIResource):
@api.validate(execute_api_task_schema)
@api.response(400, 'invalid options specified')
@api.response(200, 'List of tasks queued for execution')
def post(self, session=None):
""" Execute task(s) """
options = request.json or {}
options_string = options.pop('options_string', '')
if options_string:
try:
options['options'] = get_parser('execute').parse_args(options_string, raise_errors=True)
except ValueError as e:
return {'error': 'invalid options_string specified: %s' % e.message}, 400
tasks = [{'id': task_id, 'name': task_name}
for task_id, task_name, task_event
in self.manager.execute(options=options)]
return {'tasks': tasks}
class ExecuteQueue(Queue):
""" Supports task log streaming by acting like a file object """
def write(self, s):
self.put(json.dumps({'log': s}))
stream_parser = api.parser()
stream_parser.add_argument('progress', type=bool, required=False, default=True, help='Include task progress updates')
stream_parser.add_argument('summary', type=bool, required=False, default=True, help='Include task summary')
stream_parser.add_argument('log', type=bool, required=False, default=False, help='Include execution log')
stream_parser.add_argument('entry_dump', type=bool, required=False, default=False, help='Include dump of entries including fields')
_streams = {}
@execution_api.route('/execute/stream/')
@api.doc(description='Wildcards supported, e.g. TV* will execute all tasks with TV in the name')
class ExecutionAPIStream(APIResource):
@api.validate(execute_api_task_schema)
@api.response(400, 'invalid options specified')
@api.response(200, 'Execution stream with task progress and/or log')
@api.doc(parser=stream_parser)
def post(self, session=None):
""" Execute task(s) and stream results """
options = request.json or {}
args = stream_parser.parse_args()
options_string = options.pop('options_string', '')
if options_string:
try:
options['options'] = get_parser('execute').parse_args(options_string, raise_errors=True)
except ValueError as e:
return {'error': 'invalid options_string specified: %s' % e.message}, 400
queue = ExecuteQueue()
output = queue if args['log'] else None
tasks_queued = []
for task_id, task_name, task_event in self.manager.execute(options=options, output=output):
tasks_queued.append({'id': task_id, 'name': task_name, 'event': task_event})
_streams[task_id] = {
'queue': queue,
'last_update': datetime.now(),
'args': args
}
def stream_response():
# First return the tasks to execute
yield '{"stream": ['
yield json.dumps({'tasks': [{'id': task['id'], 'name': task['name']} for task in tasks_queued]}) + ',\n'
while True:
try:
yield queue.get(timeout=1) + ',\n'
continue
except Empty:
pass
if queue.empty() and all([task['event'].is_set() for task in tasks_queued]):
for task in tasks_queued:
del _streams[task['id']]
break
yield '{}]}'
return Response(stream_response(), mimetype='text/event-stream')
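# Illustrative client usage (hypothetical host/port for a local daemon):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"tasks": ["my-task"]}' \
#        'http://localhost:5050/api/execution/execute/stream/?log=true'
# The response body is a single JSON object streamed incrementally:
#   {"stream": [{"tasks": [...]}, {"progress": {...}}, ..., {}]}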
class EntryDecoder(JSONEncoder):
def default(self, o):
if isinstance(o, LazyLookup):
return '<LazyField>'
try:
return JSONEncoder.default(self, o)
except TypeError:
return str(o)
_phase_percents = {
'input': 5,
'metainfo': 10,
'filter': 30,
'download': 40,
'modify': 65,
'output': 75,
'exit': 100,
}
def update_stream(task, status='pending'):
if task.current_phase in _phase_percents:
task.stream['percent'] = _phase_percents[task.current_phase]
progress = {
'status': status,
'phase': task.current_phase,
'plugin': task.current_plugin,
'percent': task.stream.get('percent', 0)
}
task.stream['queue'].put(json.dumps({'progress': {task.id: progress}}))
@event('task.execute.started')
def start_task(task):
task.stream = _streams.get(task.id)
if task.stream:
update_stream(task, status='running')
@event('task.execute.completed')
def finish_task(task):
if task.stream:
update_stream(task, status='complete')
if task.stream['args']['entry_dump']:
entries = [entry.store for entry in task.entries]
task.stream['queue'].put(EntryDecoder().encode({'entry_dump': {task.id: entries}}))
if task.stream['args']['summary']:
task.stream['queue'].put(json.dumps({
'summary': {
task.id: {
'accepted': len(task.accepted),
'rejected': len(task.rejected),
'failed': len(task.failed),
'undecided': len(task.undecided),
'aborted': task.aborted,
'abort_reason': task.abort_reason,
}
}
}))
@event('task.execute.before_plugin')
def track_progress(task, plugin_name):
if task.stream:
update_stream(task, status='running')
|
|
#!/usr/bin/env python2.7
import h5py, os, sys
import cStringIO as StringIO
from Bio import SeqIO
from fast5tools.f5class import *
from fast5tools.f5ops import *
import argparse
from glob import glob
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fast5 file(s) and/or directories of fast5s, return fasta, fastq, qual, or intqual for all fast5s found.
Corrupt or empty files are silently skipped for now.
As an alternative, fast5stats will report all skipped files (to stderr or to a specified file).
John Urban (2015, 2016, 2017)
TODO (11/17/2017): Allow customized name design with options for readtype, len, Q, channel, read num, asic, abspath, filename, etc
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fast5', metavar='fast5', nargs='+',
type= str,
help='''Paths to as many fast5 files and/or directories filled with fast5 files as you want.
Assumes all fast5 files have '.fast5' extension.
If inside dir of dirs with .fast5 files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-r', '--readtype', default="mol",
type= str,
help='''Choose type of fasta to get.
Choices: 'template', 'complement', '2d', 'molecule', 'all', 'MoleQual'.
Default: molecule.
There is no need to write the full word for options - can do: t, c, 2, m, a, M.
Molecule returns single fasta for each fast5 by following rules:
if 2d present, return 2d.
elif complement present with no 2d, return longer of template or complement.
elif only template present, return template.
'MoleQual' is similar to molecule.
It differs only in choosing between template and complement when a 2D is not present.
Instead of choosing the longer one, it chooses the one with a higher mean quality score.''')
parser.add_argument('-o', '--outtype', type=str, default="fasta",
help = '''Choices: fasta, fastq, qual, intqual, details, falcon, oldfalcon, newfalcon, fasta_readstatsname, fastq_readstatsname, qual_readstatsname.
Additional choices:
Add _with_abspath to fasta/fastq/qual options (and readstatname versions) to add absolute f5 file path to read name.
Add _with_filename to fasta/fastq/qual options (and readstatname versions) to add only the basename of each f5 file (excluding fast5 extension) to read name.
If only want abs path in name, add _only_abspath to fasta/fastq/qual options (and readstatname versions).
If only want basename of file in read name, add _only_filename to fasta/fastq/qual options (and readstatname versions).
Default: fasta.
If details, sequence not reported, but name, seqlen, and meanq are.
falcon/oldfalcon/newfalcon output fasta files that are compatible with FALCON assembler.
falcon and oldfalcon put out the same thing and might be safest choice as it should work with old and new FALCON versions.
newfalcon will only work with latest FALCON versions.
The real issue is fasta2DB, which is particular about fasta headers.
In older versions, it only allowed data from 1 SMRT cell per file.
Now it allows multiple SMRT cells per file,
only if all data from a given SMRT cell are grouped together.
NOTE: if 'all' is used, for now each will be given the same well number.
This could potentially have the side effect of using only the longest read in falcon,
if '-a' is not used in DBsplit.
To avoid this, just use '--outtype fasta', then use filterFast5DerivedFastx.py
to convert the nanopore fasta headers to falcon-compatible fasta headers.''')
parser.add_argument('--minlen', type=int, default=0, help='''Only report reads >= minlen. Default: 0 bp.''')
parser.add_argument('--maxlen', type=int, default=int(3e9), help='''Only report reads <= maxlen. Default: 3 billion bp.''')
parser.add_argument('--minq', type=float, default=0, help='''Only report reads with mean quality scores >= Q. Default: 0.''')
parser.add_argument('--maxq', type=float, default=int(10e3), help='''Only report reads with mean quality scores <= Q.
Default: 10000 (this is orders of magnitude higher than normal max which are always < 20)''')
parser.add_argument('--notarlite', action='store_true', default=False, help=''' The default method (called tarlite) extracts 1 file from a given tarchive at a time, processes it, and deletes it.
This option turns tarlite off, resulting in extracting the entire tarchive before proceeding (and finally deleting it).
It is possible that --notarlite is faster, but at the expense of exceeding file number limits or disk storage quotas.
Nonetheless, the difference in speed is a lot smaller than the difference in space needed.
For example, not using tarlite will require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
A corollary is that tarlite only needs permission to create 1 (or a few) files at a time, compared to what could be thousands to millions.
''')
parser.add_argument('--tarlite', action='store_true', default=False, help='''This legacy option is outdated.
However, it is kept here to avoid breaking pipelines that make use of it.
The tarlite approach is now default. Specifying this will not change that default behavior.
It will just prevent pipelines from breaking.
However, not specifying this will still also result in the tarlite approach.
Use --notarlite to turn it off.''')
parser.add_argument('-c', '--comments', type=str, default=False, help='''Add comments to fastx names.
Comments are separated from main name (following > or @) with a tab.
Default: no comments/False.
Leave any desired string here (you may need to enclose it in quotes if there are spaces).
Alternatively, specify one of the following options:
base_info
pore_info
read_stats
event_stats
read_event_stats
''')
parser.add_argument('-S', '--samflag', action='store_true', default=False, help='''Add sam flag to comments.
''')
args = parser.parse_args()
#################################################
## deal with some of the arguments
#################################################
legalouts = ("fasta", "fastq", "qual", "intqual", "details", "falcon", "oldfalcon", "newfalcon", "fasta_with_abspath", "fasta_only_abspath","fastq_with_abspath", "fastq_only_abspath", "qual_with_abspath", "qual_only_abspath", "fasta_with_filename", "fasta_only_filename","fastq_with_filename", "fastq_only_filename", "qual_with_filename", "qual_only_filename")
legalouts += ("fasta_readstatsname", "fasta_readstatsname_with_abspath", "fasta_readstatsname_with_filename")
legalouts += ("fastq_readstatsname", "fastq_readstatsname_with_abspath", "fastq_readstatsname_with_filename")
legalouts += ("qual_readstatsname", "qual_readstatsname_with_abspath", "qual_readstatsname_with_filename")
assert args.outtype in legalouts
## PUTTING IN f5ops as assert_readtype() AND replacing here with assert_readtype() -- 12/8/17 -- if it still works, delete this
##assert args.readtype[0] in "tc2maM"
##if args.readtype[0] == "t":
## args.readtype = "template"
##elif args.readtype[0] == "c":
## args.readtype = "complement"
##elif args.readtype[0] == "2":
## args.readtype = "2d"
##elif args.readtype[0] == "m":
## args.readtype = "molecule"
##elif args.readtype[0] == "a":
## args.readtype = "all"
##elif args.readtype[0] == "M":
## args.readtype = "MoleQual"
args.readtype = assert_readtype(args.readtype, legaloptions="tc2maM")
#################################################
### uses output functions from f5ops.py
### fasta(), fastq(), qual(), intqual()
###
#################################################
#################################################
## fast5tofastx specific "output" functions
#################################################
## PUTTING IN f5ops -- 12/8/17 -- if it still works, delete this
##def details(f5, readtype):
## readstats = []
## readstats.append( f5._get_pore_info_name(readtype) )
## readstats.append( f5.get_seq_len(readtype) )
## readstats.append( f5.get_mean_qscore(readtype) )
## readstats.append( f5.get_num_events(readtype) )
## try:
## readstats.append( f5.get_num_called_events(readtype) )
## except:
## readstats.append("-")
## try:
## readstats.append( f5.get_num_skips(readtype) )
## except:
## readstats.append("-")
## try:
## readstats.append( f5.get_num_stays(readtype) )
## except:
## readstats.append("-")
## return ("\t").join([str(e) for e in readstats])
#################################################
####### argument processing functions ###########
#################################################
## PUTTING IN f5ops -- 12/8/17 -- if it still works, delete this
##def get_fast5tofastx_fxns(args):
## ### get outtype fxn ###
## if args.outtype == "fasta":
## output = fasta
## elif args.outtype == "fastq":
## output = fastq
## elif args.outtype == "qual":
## output = qual
## elif args.outtype == "intqual":
## output = intqual
## elif args.outtype == "details":
## output = details
## elif args.outtype == "falcon" or args.outtype == "oldfalcon":
## output = oldfalcon
## elif args.outtype == "newfalcon":
## output = newfalcon
## elif args.outtype == "fasta_with_abspath":
## output = fasta_with_abspath
## elif args.outtype == "fasta_only_abspath":
## output = fasta_only_abspath
## elif args.outtype == "fastq_with_abspath":
## output = fastq_with_abspath
## elif args.outtype == "fastq_only_abspath":
## output = fastq_only_abspath
## elif args.outtype == "qual_with_abspath":
## output = qual_with_abspath
## elif args.outtype == "qual_only_abspath":
## output = qual_only_abspath
## #
## elif args.outtype == "fasta_with_filename":
## output = fasta_with_filename
## elif args.outtype == "fasta_only_filename":
## output = fasta_only_filename
## elif args.outtype == "fastq_with_filename":
## output = fastq_with_filename
## elif args.outtype == "fastq_only_filename":
## output = fastq_only_filename
## elif args.outtype == "qual_with_filename":
## output = qual_with_filename
## elif args.outtype == "qual_only_filename":
## output = qual_only_filename
## #
## elif args.outtype == "fasta_readstatsname":
## output = fasta_readstatsname
## elif args.outtype == "fasta_readstatsname_with_abspath":
## output = fasta_readstatsname_with_abspath
## elif args.outtype == "fasta_readstatsname_with_filename":
## output = fasta_readstatsname_with_filename
##
## elif args.outtype == "fastq_readstatsname":
## output = fastq_readstatsname
## elif args.outtype == "fastq_readstatsname_with_abspath":
## output = fastq_readstatsname_with_abspath
## elif args.outtype == "fastq_readstatsname_with_filename":
## output = fastq_readstatsname_with_filename
##
## elif args.outtype == "qual_readstatsname":
## output = qual_readstatsname
## elif args.outtype == "qual_readstatsname_with_abspath":
## output = qual_readstatsname_with_abspath
## elif args.outtype == "qual_readstatsname_with_filename":
## output = qual_readstatsname_with_filename
##
## ### get readtype fxn ###
## if args.readtype == "template":
## getread = get_template_read
## elif args.readtype == "complement":
## getread = get_complement_read
## elif args.readtype == "2d":
## getread = get_2d_read
## elif args.readtype == "molecule":
## getread = get_molecule_read
## elif args.readtype == "all":
## getread = get_all_reads
## elif args.readtype == "MoleQual":
## getread = get_molequal_read
## return output, getread
#################################################
#### EXECUTE @@@@@@@@@@@@
#################################################
if __name__ == "__main__":
output, getread = get_fast5tofastx_fxns(args.outtype, args.readtype)
samflag=""
if args.samflag:
samflag = "F5:Z:"
falcon_i = 0
for f5 in Fast5List(args.fast5, keep_tar_footprint_small=(not args.notarlite)):
if f5.is_not_corrupt() and f5.is_nonempty:
## counter in case using falcon options
falcon_i += 1
## Process args.comments
read = getread(f5, args.minlen, args.maxlen, args.minq, args.maxq, output, comments=args.comments, falcon_i=falcon_i, samflag=samflag)
if read:
print read
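## Example invocations (illustrative; adjust paths and options to your data):
##   fast5tofastx.py -r mol -o fasta reads_dir/ > reads.fasta
##   fast5tofastx.py -r 2 -o fastq --minlen 1000 run1.fast5 run2.fast5 > long_2d.fastq
##   fast5tofastx.py -r t -o details -c read_stats reads_dir/ > details.txt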
|
|
# Spam detection algorithm in the spirit of belief propagation (like Karger's algo).
# A user's reliability is computed using the Dirichlet distribution.
import numpy as np
ALGO_DIRICHLET_KARMA_USER_VOTE = 0.1
DEBUG = False
def compute_percentile_dirichlet(neg, pos, percentile):
""" Numerically computes percentile of Dirichlet distribution.
Percentile is between 0 and 1.
"""
# alpha is a number of "Truth"
# beta is a number of "False"
alpha, beta = pos, abs(neg)
# Sanity check for testing purposes.
if alpha > 1000000 or beta > 1000000:
raise Exception("Alpha or Beta is too big!!!")
# First, numerically compute unnormalised probability mass function.
delta = 0.0001
x = np.arange(0 + delta, 1, delta)
y = x ** (alpha) * (1 - x) ** (beta)
# Integral approximation based on trapezoidal rule.
y1 = y[:-1]
y2 = y[1:]
integral_vec = (y2 + y1) / 2 * delta
integral = np.sum(integral_vec)
cumsum = np.cumsum(integral_vec)
threshold = (1 - percentile) * integral
idx = cumsum.searchsorted(threshold)
val = idx * delta
return val
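# Worked sanity check (illustrative): with no evidence (neg=0, pos=0) the
# unnormalised pmf x**0 * (1-x)**0 is uniform on (0, 1), so for
# percentile=0.8 the call returns the point below which 20% of the mass
# lies, i.e. roughly 0.2:
#   compute_percentile_dirichlet(0, 0, 0.8)  # ~0.2
# With strong positive evidence the bound moves towards 1; for Beta(21, 1)
# the corresponding point is 0.2 ** (1 / 21.0):
#   compute_percentile_dirichlet(0, 20, 0.8)  # ~0.93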
def get_reliability(u_n, u_p):
perc = 0.8
    # todo(michael): mid_point determines what the default attitude towards
    # a user is. If it is 0 then we treat a user with no feedback positively.
    # If we compute mid_point as the percentile of the 0,0 case, then a user
    # would have any impact only if it has some agreement with other users.
    # todo(michael): Another important property, in the case when a new user
    # has zero reliability, is that the user's reliability converges to zero.
    # Understand this behaviour better.
#mid_point = compute_percentile_dirichlet(0, 0, perc)
mid_point = 0
val = compute_percentile_dirichlet(u_n, u_p, perc)
val = max(0, val - mid_point)
val = (val / (1 - mid_point)) ** 2
return val
def get_item_weight(c_n, c_p):
perc = 0.8
mid_point = compute_percentile_dirichlet(0, 0, perc)
val = compute_percentile_dirichlet(c_n, c_p, perc)
return val - mid_point
def neg_first(val1, val2):
"""A helper function which returns a tuple of original values.
It puts negative item on the first place"""
# Sanity check.
if val1 * val2 > 0:
raise Exception("Number should have opposite sign.")
if val1 < 0:
return val1, val2
if val2 < 0:
return val2, val1
    # If we reached this point then at least one of the values is zero and
    # it should go first.
if val1 > 0:
return val2, val1
return val1, val2
class Item(object):
def __init__(self, item_id):
self.id = item_id
# c_p is a sum of positive signals sent towards the item
self.c_p = 0
# c_n is a sum of negative signals sent towards the item
self.c_n = 0
self.weight = 0
# A list of messages from users.
self.msgs = []
def __repr__(self):
return '<Item %s, weight %s >' % (self.id, self.weight)
class User(object):
def __init__(self, user_id, base_u_n=0, base_u_p=0):
self.id = user_id
self.base_u_n = base_u_n
self.base_u_p = base_u_p
self.u_n = base_u_n
self.u_p = base_u_p
self.reliability = get_reliability(base_u_n, base_u_p)
# answers is a dictionary of user's flags/votes: it maps item id to
# answer A by the user.
# In terms of spam/ham: if A is positive then the item is ham and if A
# is negative it is spam.
self.answers = {}
# A list of messages from items.
self.msgs = []
def __repr__(self):
return '<User %s, reliability %s>' % (self.id, self.reliability)
class Message_to_user(object):
""" A message from item to user."""
def __init__(self, item_id, c_n, c_p):
self.item_id = item_id
self.c_n = c_n
self.c_p = c_p
def __repr__(self):
return "Message from item %s, c_n = %s, c_p = %s" % (self.item_id,
self.c_n, self.c_p)
class Message_to_item(object):
""" A message from user to item."""
def __init__(self, user_id, value):
self.user_id = user_id
self.value = value
def __repr__(self):
return "Message from user %s, value = %s " % (self.user_id, self.value)
class Graph(object):
def __init__(self):
self.users = []
self.items = []
self.item_dict = {}
self.user_dict = {}
# normalization is to normalize user's reliability.
self.normaliz = 1
def __repr__(self):
s = 'Graph \n'
for u in self.users:
s = '%s %s\n' % (s, u)
for it in self.items:
s = '%s %s\n' % (s, it)
for u in self.users:
for it_id in u.answers:
s = '%s user %s -> item %s, value %s \n' % (s, u.id, it_id,
u.answers[it_id])
return s
def add_answer(self, user_id, item_id, answer, base_u_n=0, base_u_p=0):
""" Method adds answer to dictionary user.answers. If user or item
with give ids does not exist then the method creates it.
"""
u = self.user_dict.get(user_id)
if not u:
u = User(user_id, base_u_n, base_u_p)
self.users.append(u)
self.user_dict[user_id] = u
it = self.item_dict.get(item_id)
if not it:
it = Item(item_id)
self.items.append(it)
self.item_dict[item_id] = it
# Adds answer
u.answers[it.id] = answer
def get_item(self, item_id):
return self.item_dict.get(item_id)
def get_user(self, user_id):
return self.user_dict.get(user_id)
def _propagate_from_users(self):
for it in self.items:
it.msgs = []
        # For each user, compute u_n and u_p over ALL items.
        # To get u_n and u_p for a particular item we need to subtract the
        # value related to that item from u_n and u_p.
for u in self.users:
u.u_n = u.base_u_n
u.u_p = u.base_u_p
for msg in u.msgs:
A = u.answers[msg.item_id]
val_n, val_p = neg_first(msg.c_n * np.sign(A), msg.c_p * np.sign(A))
u.u_n += val_n
u.u_p += val_p
u.reliability = get_reliability(u.u_n, u.u_p)
# Okay, now we send messages to items and compute user reliability
for u in self.users:
# Sends messages to items.
for msg in u.msgs:
A = u.answers[msg.item_id]
val_n, val_p = neg_first(msg.c_n * np.sign(A), msg.c_p * np.sign(A))
reliab = get_reliability(u.u_n - val_n, u.u_p - val_p)
# Gets item.
it = self.item_dict[msg.item_id]
it.msgs.append(Message_to_item(u.id, A * reliab))
def _propagate_from_items(self):
for u in self.users:
u.msgs = []
for it in self.items:
it.c_n, it.c_p = 0, 0
for msg in it.msgs:
u = self.user_dict[msg.user_id]
#val = u.answers[it.id] * u.reliability
#val = u.answers[it.id] * msg.value
val = msg.value
if val < 0:
it.c_n += val
else:
it.c_p += val
# Sends messages to users.
for msg in it.msgs:
u = self.user_dict[msg.user_id]
#val = u.answers[it.id] * u.reliability
#val = u.answers[it.id] * msg.value
val = msg.value
c_n = it.c_n
c_p = it.c_p
if val < 0:
c_n -= val
else:
c_p -= val
u.msgs.append(Message_to_user(it.id, c_n, c_p))
def _compute_items_weight(self):
for it in self.items:
            it.weight = get_item_weight(it.c_n, it.c_p)
def _aggregate_items(self):
""" Aggregates information for items """
for it in self.items:
it.c_n, it.c_p = 0, 0
for msg in it.msgs:
u = self.user_dict[msg.user_id]
val = u.answers[it.id] * u.reliability
if val < 0:
it.c_n += val
else:
it.c_p += val
it.weight = get_item_weight(it.c_n, it.c_p)
def compute_answers(self, k_max):
# Sends the initial messages from users to items.
for it in self.items:
it.msgs = []
for u in self.users:
for it_id in u.answers.iterkeys():
it = self.item_dict[it_id]
msg = Message_to_item(u.id, u.answers[it_id])
it.msgs.append(msg)
# Runs main iterations.
if DEBUG:
for it in self.items:
for msg in it.msgs:
print msg
for i in xrange(k_max):
if DEBUG:
print ''
print 'iteration', i
self._propagate_from_items()
if DEBUG:
for u in self.users:
for msg in u.msgs:
print msg
self._propagate_from_users()
if DEBUG:
for it in self.items:
for msg in it.msgs:
print msg
# Aggregating item information.
self._aggregate_items()
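if __name__ == '__main__':
    # Minimal usage sketch with hypothetical votes: a positive answer flags an
    # item as ham, a negative one as spam.
    g = Graph()
    g.add_answer('u1', 'item_a', 1)
    g.add_answer('u2', 'item_a', 1)
    g.add_answer('u3', 'item_a', -1)
    g.add_answer('u1', 'item_b', -1)
    g.add_answer('u3', 'item_b', -1)
    g.compute_answers(k_max=5)
    # A positive item weight suggests ham, a negative one suggests spam.
    print g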
|
|
#!/usr/bin/env python
# Copyright 2017 Balazs Nemeth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parameterizable simulated annealing procedure on the parameter space of
res_factor, lat_factor, bw_factor. To find the best parameter setting of
the algorithm on the given topology. Algorithm settings are evaluated based
on the different kind of stress tests.
"""
import sys, getopt, traceback, threading, time, math, random, logging
import StressTest as st
import multiprocessing as mp
helpmsg = """SimulatedAnnealing.py usage:
-h Print this help message.
-o Output file for logging visualizable parameters of the
procedure.
--neighbor_cnt=i Sets the number of neighboring parameter space points to examine
All of them are evaluated in separate threads.
Default value is 4.
 --stepsize=f Sets the step size in the parameter space. Parameter intervals
 --maxstep=i are [0.0, 3.0]. Maxstep sets the maximal number of these
 steps that can be taken at a time.
--seed=i Random seed is needed for generating the probability.
Determines the step sequence and step length sequence of the
annealing. Default value is system time.
--test_seed=i Needed to make the test requirement sequences deterministic.
--start_bw=f Exactly two of the three parameters should be given as the
--start_res=f starting point for the annealing. The three parameters are
 sum to 3.0.
 --start_temp=f Sets the starting temperature.
 --temp_step=f Sets the decrement of the temperature during one step.
--maxidle=i Defines how many iterations without moving to other parameter
points should cause the algorithm to exit.
--maxiters=i The number of iterations to complete at most.
 --test_types= Defines which test sequences shall be used to evaluate a
 parameter point. Each of them launches an extra thread per
 neighboring point examined in parallel. Possible comma-separated
 values (in any combination) are:
single: only disjoint SAP-SAP chains are generated one by one.
multi: more chains are generated, VNF-s can be shared among
chains of the same chain batch.
shared: single chains are generated in each step, but VNF-s
can be shared with any chain mapped earlier in the
test sequence.
 --prob_exponent_multiplier=f
 Controls the exponent of the probability generating function.
 The smaller it is, the longer the probability of accepting worse
 points stays high.
"""
log = logging.getLogger("SimulatedAnnealing")
log.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(name)s:%(message)s')
def evaluatePoint(bw, res, test_seed, error_file, queue=None,
shortest_paths=None, single=False, multi=False, shared=False):
"""
  If shortest_paths is None, then the calculated paths are sent back with the
  resulting point value as a tuple.
  NOTE: the same file can be written here simultaneously, but it is only used
  in case an exception is thrown, and that is logged there.
"""
if not single and not multi and not shared:
raise Exception("Point evaluation cannot be started because no desired test"
" sequence were selected!")
lat = 3.0 - bw - res
shortest_paths_calced = shortest_paths
log.debug("Examination of point %s %s started..."%(bw,res))
shortest_paths_sendback = shortest_paths is None
  # TODO: refactor this: save params to a tuple and dict and give them to a for
  # cycle to start the function in parallel or sequentially.
if single:
single_test = mp.Queue()
if shortest_paths is not None:
mp.Process(target=st.StressTestCore, args=(test_seed, False, 0.0, False, 0,
False, bw, res, lat, error_file),
kwargs={'queue':single_test,
'shortest_paths_precalc':shortest_paths_calced,
'filehandler':handler}).start()
else:
      # With shortest paths not given, but a queue given, it will return
      # shortest_paths so we can give them to the next two tests.
shortest_paths_calced = st.StressTestCore(test_seed, False, 0.0, False, 0,
False, bw, res, lat, error_file,
queue=single_test,
shortest_paths_precalc=None,
filehandler=handler)
if multi:
multi_test = mp.Queue()
if shortest_paths is not None:
mp.Process(target=st.StressTestCore, args=(test_seed, False, 0.3, True, 3,
False, bw, res, lat, error_file),
kwargs={'queue':multi_test,
'shortest_paths_precalc':shortest_paths_calced,
'filehandler':handler}).start()
else:
shortest_paths_calced = st.StressTestCore(test_seed, False, 0.3, True, 3,
False, bw, res, lat, error_file,
queue=multi_test,
shortest_paths_precalc=None,
filehandler=handler)
if shared:
shared_test = mp.Queue()
if shortest_paths is not None:
mp.Process(target=st.StressTestCore, args=(test_seed, False, 0.2, False, 0,
False, bw, res, lat, error_file),
kwargs={'queue':shared_test,
'shortest_paths_precalc':shortest_paths_calced,
'filehandler':handler}).start()
else:
shortest_paths_calced = st.StressTestCore(test_seed, False, 0.2, False, 0,
False, bw, res, lat, error_file,
queue=shared_test,
shortest_paths_precalc=None,
filehandler=handler)
  # wait for all three test sequences to finish
result_vector = (single_test.get() if single else 0,
multi_test.get() if multi else 0,
shared_test.get() if shared else 0)
for result, test in zip(result_vector, ("single", "multi", "shared")):
if type(result) == str:
log.warn("An exception was thrown by the \"%s\" StressTest: %s"%
(test, result))
if queue is not None:
queue.put(((bw, res), -1.0))
if shortest_paths_sendback:
return -1.0, shortest_paths_calced
else:
return -1.0
  # the Euclidean length of the result vector is the point's value
value = math.sqrt(reduce(lambda a, b: a+b*b, result_vector, 0))
log.debug("Examination of point %s %s finished, scores are: %s"%
(bw,res,result_vector))
if queue is not None:
queue.put(((bw, res), value))
if shortest_paths_sendback:
return value, shortest_paths_calced
else:
return value
def checkEvalCache(cache, point, delta=0.001):
for p in cache:
if math.fabs(p[0] - point[0]) < delta and math.fabs(p[1] - point[1]) < delta:
return p, cache[p]
return None, None
def rotateVector(v, deg):
x = v[0]
y = v[1]
rad = deg/180.0 * math.pi
return x*math.cos(rad) - y*math.sin(rad), x*math.sin(rad) + y*math.cos(rad)
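# Example: rotating the unit x vector by 90 degrees yields the unit y vector,
# up to floating point error:
#   rotateVector((1.0, 0.0), 90)  # ~(0.0, 1.0)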
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:],"ho:", ["neighbor_cnt=", "stepsize=",
"maxstep=", "test_seed=",
"seed=", "start_res=",
"start_bw=", "start_temp=",
"temp_step=", "maxidle=",
"prob_exponent_multiplier=",
"maxiters=", "test_types="])
baseoutfile = "simulannealing"
stepsize = 0.05
maxstep = 6
bw = None
res = None
maxiters = 30
neighbor_cnt = 4
seed = math.floor(time.time())
test_seed = 0
start_temp = 100
temp_step = 5
maxidle = 10
prob_exponent_multiplier = 0.3
single = False
multi = False
shared = False
for opt, arg in opts:
if opt == "-h":
print helpmsg
sys.exit()
elif opt == "-o":
baseoutfile = arg
elif opt == "--seed":
seed = int(arg)
elif opt == "--test_seed":
test_seed = int(arg)
elif opt == "--stepsize":
stepsize = float(arg)
elif opt == "--maxstep":
        maxstep = int(arg)
elif opt == "--start_bw":
bw = float(arg)
elif opt == "--start_res":
res = float(arg)
elif opt == "--neighbor_cnt":
neighbor_cnt = int(arg)
elif opt == "--start_temp":
start_temp = float(arg)
elif opt == "--temp_step":
temp_step = float(arg)
elif opt == "--maxidle":
maxidle = int(arg)
elif opt == "--maxiters":
maxiters = int(arg)
elif opt == "--test_types":
types = arg.split(",")
if "single" in types:
single = True
if "multi" in types:
multi = True
if "shared" in types:
shared = True
elif opt == "--prob_exponent_multiplier":
prob_exponent_multiplier = float(arg)
if bw is None or res is None:
raise Exception("Starting parameters must be given!")
elif bw + res > 3.0:
raise Exception("The sum of params shouldn't get above 3.0!")
except Exception as e:
print traceback.format_exc()
print helpmsg
sys.exit()
itercnt = 0
current = (bw, res)
handler = logging.FileHandler(baseoutfile, 'w',)
log.addHandler(handler)
point_eval_cache = {}
idlehistory = []
for i in range(0,maxidle):
idlehistory.append(False)
# evaluate the starting point and receive the shortest path for speeding up
currvalue, shortest_paths = evaluatePoint(current[0], current[1], test_seed,
baseoutfile, single=single,
multi=multi, shared=shared)
if shortest_paths is None:
raise Exception("The evaluation of starting point and thus shortest path "
"calculation is failed!")
else:
point_eval_cache[current] = currvalue
random.seed(seed)
temperature = start_temp
best = current
bestvalue = currvalue
x = random.random() - 0.5
y = random.random() - 0.5
length = math.sqrt(x*x + y*y)
# this defines the direction of the lattice ("net") in the parameter space
# on which we will continue searching; the annealing cannot escape this net.
netdirection = (x / length * stepsize, y / length * stepsize)
log.debug("Net direction is %s %s"%(netdirection[0], netdirection[1]))
while itercnt <= maxiters:
try:
step_number = random.randint(1, maxstep)
v0 = (step_number*netdirection[0], step_number*netdirection[1])
log.debug("%i: V0 is: %s"%(itercnt,v0))
is_cached = []
threads = []
for i in range(0, neighbor_cnt):
is_cached.append(False)
# save the state of the random module before evaluating the points
randomstate = random.getstate()
# start every point's evaluation in a separate process!
results_q = mp.Queue(maxsize = neighbor_cnt)
deg = 360 / float(neighbor_cnt)
log.info("%i:Examining the %s-step neighbors of %s %s"%
(itercnt,step_number,current[0],current[1]))
for i in range(0, neighbor_cnt):
v0_limited = list(v0)
# prevent escaping from the parameter space: clip the step at the axis
# boundaries first, then (below) rescale it onto the bw + res = 3 plane
# if the sum constraint would be violated.
if current[0]+v0_limited[0] < 0 and math.fabs(v0_limited[0]) > 0.00001:
v0_limited[1] = ((-1 * current[0]) / v0_limited[0]) * v0_limited[1]
v0_limited[0] = -1 * current[0]
if current[1]+v0_limited[1] < 0 and math.fabs(v0_limited[1]) > 0.00001:
v0_limited[0] = ((-1 * current[1]) / v0_limited[1]) * v0_limited[0]
v0_limited[1] = -1 * current[1]
if current[0]+v0[0] + current[1]+v0[1] > 3 and \
(v0_limited[0] + v0_limited[1] > 0.00001 or \
v0_limited[0] + v0_limited[1] < -0.00001):
temp = list(v0_limited)
v0_limited[0] = temp[0] * (3.0 - current[0] - current[1]) / \
(temp[0] + temp[1])
v0_limited[1] = temp[1] * (3.0 - current[0] - current[1]) / \
(temp[0] + temp[1])
point, pvalue = checkEvalCache(point_eval_cache,
(current[0]+v0_limited[0],
current[1]+v0_limited[1]))
if point is None and pvalue is None:
threads.append(
mp.Process(target=evaluatePoint, args=(current[0]+v0_limited[0],
current[1]+v0_limited[1],
test_seed, baseoutfile),
kwargs={'queue':results_q,
'shortest_paths':shortest_paths,
'single':single, 'multi':multi, 'shared':shared}))
threads[-1].start()
else:
log.debug("%i:Point %s %s was cached!"%(itercnt,point[0], point[1]))
results_q.put((point, pvalue))
is_cached[i] = True
v0 = rotateVector(v0, deg)
results_new = []
results_old = []
# wait for all evaluations to finish and decide where we should step forward
for i in range(0, neighbor_cnt):
result = results_q.get()
log.debug("%i:Result received from neighbor number %s."%(itercnt,i))
if not is_cached[i]:
point_eval_cache[result[0]] = result[1]
results_new.append(result)
else:
results_old.append(result)
# restore random module state after points are evaluated.
random.setstate(randomstate)
probability = math.e **(-(start_temp*prob_exponent_multiplier)/temperature)
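# NOTE: unlike textbook simulated annealing, where p = exp(-dE/T) depends on
# the value difference, this acceptance probability depends only on the
# temperature: at temperature == start_temp it equals
# exp(-prob_exponent_multiplier), and it shrinks towards 0 as T cools.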
log.debug("%i:Trying neighbors of %s %s..."%(itercnt, current[0],
current[1]))
log.debug("%i:Probability of accepting worse case: %s"%
(itercnt, probability))
result_l = sorted(results_new, key=lambda a: a[1], reverse=True)
result_l.extend(sorted(results_old, key=lambda a: a[1], reverse=True))
for neigh, nvalue in result_l:
# check whether a move of the move set is better than the current one
if nvalue >= currvalue:
# if we were here last iteration, then let us check other neighbors.
if idlehistory[-1]:
continue
if math.fabs(nvalue - currvalue) < 0.000001:
idlehistory.append(True)
else:
idlehistory.append(False)
current = neigh
currvalue = nvalue
log.info("%i:Accepted better point %s %s with value %s!"%
(itercnt,current[0],current[1],currvalue))
if nvalue >= bestvalue:
best = neigh
bestvalue = nvalue
log.info("Overall best point %s %s found with value %s!"%
(best[0], best[1], bestvalue))
break
elif random.random() < probability:
# if not, we can still accept it with some probability
current = neigh
currvalue = nvalue
log.debug("%i:Accepted worse point %s %s with value %s!"%
(itercnt, current[0], current[1], currvalue))
idlehistory.append(False)
break
log.debug("%i:Point %s %s with value %s wasn't accepted, trying next"
" negihbor..."%(itercnt, neigh[0], neigh[1], nvalue))
else:
# for-else: this branch runs only when the loop above finished without a
# break, i.e. none of the neighbors was accepted to step forward
log.debug("%i:No neighbors left, staying in place %s %s..."%
(itercnt, current[0], current[1]))
idlehistory.append(True)
# remove the oldest element
idlehistory = idlehistory[1:]
log.debug("%i:Idle history: %s"%(itercnt,idlehistory))
if all(idlehistory):
log.info("%i:The process stayed in place for %i consequent iterations"
"...Exiting..."%(itercnt, len(idlehistory)))
break
temperature = temperature-temp_step if temperature-temp_step > 0 else 0.0000001
log.info("%i:Temperature is %s"%(itercnt, temperature))
itercnt += 1
except Exception:
log.error(traceback.format_exc())
# wait for all processes already started in this iteration (before the
# exception) to finish, in order not to overload the machine in case of
# consecutive erroneous iterations.
for thread in threads:
if thread.is_alive():
thread.join()
# The Queue for the result of the threads is reset in the beginning of
# next iteration.
# advance the random module state so that the next iteration may avoid
# the same exception.
random.random()
log.info("Simulated Annealing finished!")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.RouteTable":
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
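# Example: consuming the paged result (a sketch, not generated code; assumes
# a configured async client whose .route_tables attribute is this class):
#   async for route_table in client.route_tables.list("my-resource-group"):
#       print(route_table.name)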
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
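# Example usage of the long-running delete (a sketch, not generated code;
# assumes the azure-identity package and a valid subscription id):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async def delete_route_table():
#       async with NetworkManagementClient(
#               DefaultAzureCredential(), "<subscription-id>") as client:
#           poller = await client.route_tables.begin_delete(
#               "my-resource-group", "my-route-table")
#           await poller.result()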
|
|
"""Generic Z-Wave Entity Class."""
from __future__ import annotations
import logging
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import NodeStatus
from zwave_js_server.model.value import Value as ZwaveValue, get_value_id
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .helpers import get_device_id, get_unique_id
from .migrate import async_add_migration_entity_value
LOGGER = logging.getLogger(__name__)
EVENT_VALUE_UPDATED = "value updated"
EVENT_DEAD = "dead"
EVENT_ALIVE = "alive"
class ZWaveBaseEntity(Entity):
"""Generic Entity Class for a Z-Wave Device."""
_attr_should_poll = False
def __init__(
self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
) -> None:
"""Initialize a generic Z-Wave device entity."""
self.config_entry = config_entry
self.client = client
self.info = info
# entities requiring additional values can add extra ids to this set
self.watched_value_ids = {self.info.primary_value.value_id}
if self.info.additional_value_ids_to_watch:
self.watched_value_ids = self.watched_value_ids.union(
self.info.additional_value_ids_to_watch
)
# Entity class attributes
self._attr_name = self.generate_name()
self._attr_unique_id = get_unique_id(
self.client.driver.controller.home_id, self.info.primary_value.value_id
)
self._attr_entity_registry_enabled_default = (
self.info.entity_registry_enabled_default
)
self._attr_assumed_state = self.info.assumed_state
# device is precreated in main handler
self._attr_device_info = DeviceInfo(
identifiers={get_device_id(self.client, self.info.node)},
)
@callback
def on_value_update(self) -> None:
"""Call when one of the watched values change.
To be overridden by platforms needing this event.
"""
async def async_poll_value(self, refresh_all_values: bool) -> None:
"""Poll a value."""
if not refresh_all_values:
self.hass.async_create_task(
self.info.node.async_poll_value(self.info.primary_value)
)
LOGGER.info(
(
"Refreshing primary value %s for %s, "
"state update may be delayed for devices on battery"
),
self.info.primary_value,
self.entity_id,
)
return
for value_id in self.watched_value_ids:
self.hass.async_create_task(self.info.node.async_poll_value(value_id))
LOGGER.info(
(
"Refreshing values %s for %s, state update may be delayed for "
"devices on battery"
),
", ".join(self.watched_value_ids),
self.entity_id,
)
async def async_added_to_hass(self) -> None:
"""Call when entity is added."""
# Add value_changed callbacks.
self.async_on_remove(
self.info.node.on(EVENT_VALUE_UPDATED, self._value_changed)
)
for status_event in (EVENT_ALIVE, EVENT_DEAD):
self.async_on_remove(
self.info.node.on(status_event, self._node_status_alive_or_dead)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{DOMAIN}_{self.unique_id}_poll_value",
self.async_poll_value,
)
)
# Add legacy Z-Wave migration data.
await async_add_migration_entity_value(
self.hass, self.config_entry, self.entity_id, self.info
)
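# For example, another part of the integration can trigger a refresh through
# the dispatcher signal connected above (a sketch; async_dispatcher_send is
# from homeassistant.helpers.dispatcher and unique_id is assumed known):
#   async_dispatcher_send(hass, f"{DOMAIN}_{unique_id}_poll_value", True)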
def generate_name(
self,
include_value_name: bool = False,
alternate_value_name: str | None = None,
additional_info: list[str] | None = None,
name_suffix: str | None = None,
) -> str:
"""Generate entity name."""
if additional_info is None:
additional_info = []
name: str = (
self.info.node.name
or self.info.node.device_config.description
or f"Node {self.info.node.node_id}"
)
if name_suffix:
name = f"{name} {name_suffix}"
if include_value_name:
value_name = (
alternate_value_name
or self.info.primary_value.metadata.label
or self.info.primary_value.property_key_name
or self.info.primary_value.property_name
)
name = f"{name}: {value_name}"
for item in additional_info:
if item:
name += f" - {item}"
# append endpoint if > 1
if self.info.primary_value.endpoint > 1:
name += f" ({self.info.primary_value.endpoint})"
return name
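# For example, a node named "Garden Sensor" whose primary value carries the
# metadata label "Temperature" on endpoint 2 yields, with
# include_value_name=True: "Garden Sensor: Temperature (2)".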
@property
def available(self) -> bool:
"""Return entity availability."""
return (
self.client.connected
and bool(self.info.node.ready)
and self.info.node.status != NodeStatus.DEAD
)
@callback
def _node_status_alive_or_dead(self, event_data: dict) -> None:
"""
Call when node status changes to alive or dead.
Should not be overridden by subclasses.
"""
self.async_write_ha_state()
@callback
def _value_changed(self, event_data: dict) -> None:
"""Call when (one of) our watched values changes.
Should not be overridden by subclasses.
"""
value_id = event_data["value"].value_id
if value_id not in self.watched_value_ids:
return
value = self.info.node.values[value_id]
LOGGER.debug(
"[%s] Value %s/%s changed to: %s",
self.entity_id,
value.property_,
value.property_key_name,
value.value,
)
self.on_value_update()
self.async_write_ha_state()
@callback
def get_zwave_value(
self,
value_property: str | int,
command_class: int | None = None,
endpoint: int | None = None,
value_property_key: int | None = None,
add_to_watched_value_ids: bool = True,
check_all_endpoints: bool = False,
) -> ZwaveValue | None:
"""Return specific ZwaveValue on this ZwaveNode."""
# use the command class and endpoint from the primary value if omitted
return_value = None
if command_class is None:
command_class = self.info.primary_value.command_class
if endpoint is None:
endpoint = self.info.primary_value.endpoint
# lookup value by value_id
value_id = get_value_id(
self.info.node,
command_class,
value_property,
endpoint=endpoint,
property_key=value_property_key,
)
return_value = self.info.node.values.get(value_id)
# If we haven't found a value and check_all_endpoints is True, we should
# return the first value we can find on any other endpoint
if return_value is None and check_all_endpoints:
for endpoint_idx in self.info.node.endpoints:
if endpoint_idx != self.info.primary_value.endpoint:
value_id = get_value_id(
self.info.node,
command_class,
value_property,
endpoint=endpoint_idx,
property_key=value_property_key,
)
return_value = self.info.node.values.get(value_id)
if return_value:
break
# add to the watched_value_ids set so we will be triggered when the value updates
if (
return_value
and return_value.value_id not in self.watched_value_ids
and add_to_watched_value_ids
):
self.watched_value_ids.add(return_value.value_id)
return return_value
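# A minimal subclass sketch (illustrative only; the class and property below
# are not part of this module):
#
#   class ExampleZWaveSensor(ZWaveBaseEntity):
#       """Expose the primary value of a discovered Z-Wave node."""
#
#       @property
#       def native_value(self):
#           return self.info.primary_value.value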
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from openstackclient.common import exceptions
from openstackclient.common import utils
from openstackclient.network.v2 import subnet as subnet_v2
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes_v3
from openstackclient.tests.network.v2 import fakes as network_fakes
from openstackclient.tests import utils as tests_utils
class TestSubnet(network_fakes.TestNetworkV2):
def setUp(self):
super(TestSubnet, self).setUp()
# Get a shortcut to the network client
self.network = self.app.client_manager.network
class TestCreateSubnet(TestSubnet):
# An IPv4 subnet to be created with mostly default values
_subnet = network_fakes.FakeSubnet.create_one_subnet(
attrs={
'tenant_id': identity_fakes_v3.project_id,
}
)
# Subnet pool to be used to create a subnet from a pool
_subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool()
# An IPv4 subnet to be created using a specific subnet pool
_subnet_from_pool = network_fakes.FakeSubnet.create_one_subnet(
attrs={
'tenant_id': identity_fakes_v3.project_id,
'subnetpool_id': _subnet_pool.id,
'dns_nameservers': ['8.8.8.8',
'8.8.4.4'],
'host_routes': [{'destination': '10.20.20.0/24',
'nexthop': '10.20.20.1'},
{'destination': '10.30.30.0/24',
'nexthop': '10.30.30.1'}],
}
)
# An IPv6 subnet to be created with most options specified
_subnet_ipv6 = network_fakes.FakeSubnet.create_one_subnet(
attrs={
'tenant_id': identity_fakes_v3.project_id,
'cidr': 'fe80:0:0:a00a::/64',
'enable_dhcp': True,
'dns_nameservers': ['fe80:27ff:a00a:f00f::ffff',
'fe80:37ff:a00a:f00f::ffff'],
'allocation_pools': [{'start': 'fe80::a00a:0:c0de:0:100',
'end': 'fe80::a00a:0:c0de:0:f000'},
{'start': 'fe80::a00a:0:c0de:1:100',
'end': 'fe80::a00a:0:c0de:1:f000'}],
'host_routes': [{'destination': 'fe80:27ff:a00a:f00f::/64',
'nexthop': 'fe80:27ff:a00a:f00f::1'},
{'destination': 'fe80:37ff:a00a:f00f::/64',
'nexthop': 'fe80:37ff:a00a:f00f::1'}],
'ip_version': 6,
'gateway_ip': 'fe80::a00a:0:c0de:0:1',
'ipv6_address_mode': 'slaac',
'ipv6_ra_mode': 'slaac',
'subnetpool_id': 'None',
}
)
# The network to be returned from find_network
_network = network_fakes.FakeNetwork.create_one_network(
attrs={
'id': _subnet.network_id,
}
)
columns = (
'allocation_pools',
'cidr',
'dns_nameservers',
'enable_dhcp',
'gateway_ip',
'host_routes',
'id',
'ip_version',
'ipv6_address_mode',
'ipv6_ra_mode',
'name',
'network_id',
'project_id',
'subnetpool_id',
)
data = (
subnet_v2._format_allocation_pools(_subnet.allocation_pools),
_subnet.cidr,
utils.format_list(_subnet.dns_nameservers),
_subnet.enable_dhcp,
_subnet.gateway_ip,
subnet_v2._format_host_routes(_subnet.host_routes),
_subnet.id,
_subnet.ip_version,
_subnet.ipv6_address_mode,
_subnet.ipv6_ra_mode,
_subnet.name,
_subnet.network_id,
_subnet.project_id,
_subnet.subnetpool_id,
)
data_subnet_pool = (
subnet_v2._format_allocation_pools(_subnet_from_pool.allocation_pools),
_subnet_from_pool.cidr,
utils.format_list(_subnet_from_pool.dns_nameservers),
_subnet_from_pool.enable_dhcp,
_subnet_from_pool.gateway_ip,
subnet_v2._format_host_routes(_subnet_from_pool.host_routes),
_subnet_from_pool.id,
_subnet_from_pool.ip_version,
_subnet_from_pool.ipv6_address_mode,
_subnet_from_pool.ipv6_ra_mode,
_subnet_from_pool.name,
_subnet_from_pool.network_id,
_subnet_from_pool.project_id,
_subnet_from_pool.subnetpool_id,
)
data_ipv6 = (
subnet_v2._format_allocation_pools(_subnet_ipv6.allocation_pools),
_subnet_ipv6.cidr,
utils.format_list(_subnet_ipv6.dns_nameservers),
_subnet_ipv6.enable_dhcp,
_subnet_ipv6.gateway_ip,
subnet_v2._format_host_routes(_subnet_ipv6.host_routes),
_subnet_ipv6.id,
_subnet_ipv6.ip_version,
_subnet_ipv6.ipv6_address_mode,
_subnet_ipv6.ipv6_ra_mode,
_subnet_ipv6.name,
_subnet_ipv6.network_id,
_subnet_ipv6.project_id,
_subnet_ipv6.subnetpool_id,
)
def setUp(self):
super(TestCreateSubnet, self).setUp()
# Get the command object to test
self.cmd = subnet_v2.CreateSubnet(self.app, self.namespace)
# Set up the identity client v3 and get a shortcut to it.
identity_client = identity_fakes_v3.FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.identity = identity_client
self.identity = self.app.client_manager.identity
# Get a shortcut to the ProjectManager Mock
self.projects_mock = self.identity.projects
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes_v3.PROJECT),
loaded=True,
)
# Get a shortcut to the DomainManager Mock
self.domains_mock = self.identity.domains
self.domains_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes_v3.DOMAIN),
loaded=True,
)
def test_create_no_options(self):
arglist = []
verifylist = []
# Testing that a call without the required argument will fail and
# throw a "ParserException"
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, verifylist)
def test_create_default_options(self):
# Mock create_subnet and find_network sdk calls to return the
# values we want for this test
self.network.create_subnet = mock.Mock(return_value=self._subnet)
self._network.id = self._subnet.network_id
self.network.find_network = mock.Mock(return_value=self._network)
arglist = [
"--subnet-range", self._subnet.cidr,
"--network", self._subnet.network_id,
self._subnet.name,
]
verifylist = [
('name', self._subnet.name),
('subnet_range', self._subnet.cidr),
('network', self._subnet.network_id),
('ip_version', self._subnet.ip_version),
('gateway', 'auto'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_subnet.assert_called_once_with(**{
'cidr': self._subnet.cidr,
'enable_dhcp': self._subnet.enable_dhcp,
'ip_version': self._subnet.ip_version,
'name': self._subnet.name,
'network_id': self._subnet.network_id,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_create_from_subnet_pool_options(self):
# Mock create_subnet, find_subnet_pool, and find_network sdk calls
# to return the values we want for this test
self.network.create_subnet = \
mock.Mock(return_value=self._subnet_from_pool)
self._network.id = self._subnet_from_pool.network_id
self.network.find_network = mock.Mock(return_value=self._network)
self.network.find_subnet_pool = \
mock.Mock(return_value=self._subnet_pool)
arglist = [
self._subnet_from_pool.name,
"--subnet-pool", self._subnet_from_pool.subnetpool_id,
"--prefix-length", '24',
"--network", self._subnet_from_pool.network_id,
"--ip-version", str(self._subnet_from_pool.ip_version),
"--gateway", self._subnet_from_pool.gateway_ip,
"--dhcp",
]
for dns_addr in self._subnet_from_pool.dns_nameservers:
arglist.append('--dns-nameserver')
arglist.append(dns_addr)
for host_route in self._subnet_from_pool.host_routes:
arglist.append('--host-route')
value = 'gateway=' + host_route.get('nexthop', '') + \
',destination=' + host_route.get('destination', '')
arglist.append(value)
verifylist = [
('name', self._subnet_from_pool.name),
('prefix_length', '24'),
('network', self._subnet_from_pool.network_id),
('ip_version', self._subnet_from_pool.ip_version),
('gateway', self._subnet_from_pool.gateway_ip),
('dns_nameservers', self._subnet_from_pool.dns_nameservers),
('dhcp', self._subnet_from_pool.enable_dhcp),
('host_routes', subnet_v2.convert_entries_to_gateway(
self._subnet_from_pool.host_routes)),
('subnet_pool', self._subnet_from_pool.subnetpool_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_subnet.assert_called_once_with(**{
'dns_nameservers': self._subnet_from_pool.dns_nameservers,
'enable_dhcp': self._subnet_from_pool.enable_dhcp,
'gateway_ip': self._subnet_from_pool.gateway_ip,
'host_routes': self._subnet_from_pool.host_routes,
'ip_version': self._subnet_from_pool.ip_version,
'name': self._subnet_from_pool.name,
'network_id': self._subnet_from_pool.network_id,
'prefixlen': '24',
'subnetpool_id': self._subnet_from_pool.subnetpool_id,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data_subnet_pool, data)
def test_create_options_subnet_range_ipv6(self):
# Mock create_subnet and find_network sdk calls to return the
# values we want for this test
self.network.create_subnet = mock.Mock(return_value=self._subnet_ipv6)
self._network.id = self._subnet_ipv6.network_id
self.network.find_network = mock.Mock(return_value=self._network)
arglist = [
self._subnet_ipv6.name,
"--subnet-range", self._subnet_ipv6.cidr,
"--network", self._subnet_ipv6.network_id,
"--ip-version", str(self._subnet_ipv6.ip_version),
"--ipv6-ra-mode", self._subnet_ipv6.ipv6_ra_mode,
"--ipv6-address-mode", self._subnet_ipv6.ipv6_address_mode,
"--gateway", self._subnet_ipv6.gateway_ip,
"--dhcp",
]
for dns_addr in self._subnet_ipv6.dns_nameservers:
arglist.append('--dns-nameserver')
arglist.append(dns_addr)
for host_route in self._subnet_ipv6.host_routes:
arglist.append('--host-route')
value = 'gateway=' + host_route.get('nexthop', '') + \
',destination=' + host_route.get('destination', '')
arglist.append(value)
for pool in self._subnet_ipv6.allocation_pools:
arglist.append('--allocation-pool')
value = 'start=' + pool.get('start', '') + \
',end=' + pool.get('end', '')
arglist.append(value)
verifylist = [
('name', self._subnet_ipv6.name),
('subnet_range', self._subnet_ipv6.cidr),
('network', self._subnet_ipv6.network_id),
('ip_version', self._subnet_ipv6.ip_version),
('ipv6_ra_mode', self._subnet_ipv6.ipv6_ra_mode),
('ipv6_address_mode', self._subnet_ipv6.ipv6_address_mode),
('gateway', self._subnet_ipv6.gateway_ip),
('dns_nameservers', self._subnet_ipv6.dns_nameservers),
('dhcp', self._subnet_ipv6.enable_dhcp),
('host_routes', subnet_v2.convert_entries_to_gateway(
self._subnet_ipv6.host_routes)),
('allocation_pools', self._subnet_ipv6.allocation_pools),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_subnet.assert_called_once_with(**{
'cidr': self._subnet_ipv6.cidr,
'dns_nameservers': self._subnet_ipv6.dns_nameservers,
'enable_dhcp': self._subnet_ipv6.enable_dhcp,
'gateway_ip': self._subnet_ipv6.gateway_ip,
'host_routes': self._subnet_ipv6.host_routes,
'ip_version': self._subnet_ipv6.ip_version,
'ipv6_address_mode': self._subnet_ipv6.ipv6_address_mode,
'ipv6_ra_mode': self._subnet_ipv6.ipv6_ra_mode,
'name': self._subnet_ipv6.name,
'network_id': self._subnet_ipv6.network_id,
'allocation_pools': self._subnet_ipv6.allocation_pools,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data_ipv6, data)
class TestDeleteSubnet(TestSubnet):
# The subnet to delete.
_subnet = network_fakes.FakeSubnet.create_one_subnet()
def setUp(self):
super(TestDeleteSubnet, self).setUp()
self.network.delete_subnet = mock.Mock(return_value=None)
self.network.find_subnet = mock.Mock(return_value=self._subnet)
# Get the command object to test
self.cmd = subnet_v2.DeleteSubnet(self.app, self.namespace)
def test_delete(self):
arglist = [
self._subnet.name,
]
verifylist = [
('subnet', self._subnet.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.network.delete_subnet.assert_called_once_with(self._subnet)
self.assertIsNone(result)
class TestListSubnet(TestSubnet):
# The subnets to be listed.
_subnet = network_fakes.FakeSubnet.create_subnets(count=3)
columns = (
'ID',
'Name',
'Network',
'Subnet',
)
columns_long = columns + (
'Project',
'DHCP',
'Name Servers',
'Allocation Pools',
'Host Routes',
'IP Version',
'Gateway',
)
data = []
for subnet in _subnet:
data.append((
subnet.id,
subnet.name,
subnet.network_id,
subnet.cidr,
))
data_long = []
for subnet in _subnet:
data_long.append((
subnet.id,
subnet.name,
subnet.network_id,
subnet.cidr,
subnet.tenant_id,
subnet.enable_dhcp,
utils.format_list(subnet.dns_nameservers),
subnet_v2._format_allocation_pools(subnet.allocation_pools),
utils.format_list(subnet.host_routes),
subnet.ip_version,
subnet.gateway_ip,
))
def setUp(self):
super(TestListSubnet, self).setUp()
# Get the command object to test
self.cmd = subnet_v2.ListSubnet(self.app, self.namespace)
self.network.subnets = mock.Mock(return_value=self._subnet)
def test_subnet_list_no_options(self):
arglist = []
verifylist = [
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.subnets.assert_called_once_with()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
def test_subnet_list_long(self):
arglist = [
'--long',
]
verifylist = [
('long', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.subnets.assert_called_once_with()
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data_long, list(data))
def test_subnet_list_ip_version(self):
arglist = [
'--ip-version', str(4),
]
verifylist = [
('ip_version', 4),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
filters = {'ip_version': 4}
self.network.subnets.assert_called_once_with(**filters)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
class TestSetSubnet(TestSubnet):
_subnet = network_fakes.FakeSubnet.create_one_subnet()
def setUp(self):
super(TestSetSubnet, self).setUp()
self.network.update_subnet = mock.Mock(return_value=None)
self.network.find_subnet = mock.Mock(return_value=self._subnet)
self.cmd = subnet_v2.SetSubnet(self.app, self.namespace)
def test_set_this(self):
arglist = [
"--name", "new_subnet",
"--dhcp",
"--gateway", self._subnet.gateway_ip,
self._subnet.name,
]
verifylist = [
('name', "new_subnet"),
('dhcp', True),
('gateway', self._subnet.gateway_ip),
('subnet', self._subnet.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'enable_dhcp': True,
'gateway_ip': self._subnet.gateway_ip,
'name': "new_subnet",
}
self.network.update_subnet.assert_called_with(self._subnet, **attrs)
self.assertIsNone(result)
def test_set_that(self):
arglist = [
"--name", "new_subnet",
"--no-dhcp",
"--gateway", "none",
self._subnet.name,
]
verifylist = [
('name', "new_subnet"),
('no_dhcp', True),
('gateway', "none"),
('subnet', self._subnet.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'enable_dhcp': False,
'gateway_ip': None,
'name': "new_subnet",
}
self.network.update_subnet.assert_called_with(self._subnet, **attrs)
self.assertIsNone(result)
def test_set_nothing(self):
arglist = [self._subnet.name, ]
verifylist = [('subnet', self._subnet.name)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError, self.cmd.take_action,
parsed_args)
def test_append_options(self):
_testsubnet = network_fakes.FakeSubnet.create_one_subnet(
{'dns_nameservers': ["10.0.0.1"]})
self.network.find_subnet = mock.Mock(return_value=_testsubnet)
arglist = [
'--dns-nameserver', '10.0.0.2',
_testsubnet.name,
]
verifylist = [
('dns_nameservers', ['10.0.0.2']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'dns_nameservers': ['10.0.0.2', '10.0.0.1'],
}
self.network.update_subnet.assert_called_once_with(
_testsubnet, **attrs)
self.assertIsNone(result)
class TestShowSubnet(TestSubnet):
# The subnet to be shown
_subnet = network_fakes.FakeSubnet.create_one_subnet()
columns = (
'allocation_pools',
'cidr',
'dns_nameservers',
'enable_dhcp',
'gateway_ip',
'host_routes',
'id',
'ip_version',
'ipv6_address_mode',
'ipv6_ra_mode',
'name',
'network_id',
'project_id',
'subnetpool_id',
)
data = (
subnet_v2._format_allocation_pools(_subnet.allocation_pools),
_subnet.cidr,
utils.format_list(_subnet.dns_nameservers),
_subnet.enable_dhcp,
_subnet.gateway_ip,
utils.format_list(_subnet.host_routes),
_subnet.id,
_subnet.ip_version,
_subnet.ipv6_address_mode,
_subnet.ipv6_ra_mode,
_subnet.name,
_subnet.network_id,
_subnet.tenant_id,
_subnet.subnetpool_id,
)
def setUp(self):
super(TestShowSubnet, self).setUp()
# Get the command object to test
self.cmd = subnet_v2.ShowSubnet(self.app, self.namespace)
self.network.find_subnet = mock.Mock(return_value=self._subnet)
def test_show_no_options(self):
arglist = []
verifylist = []
# Testing that a call without the required argument will fail and
# throw a "ParserException"
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, verifylist)
def test_show_all_options(self):
arglist = [
self._subnet.name,
]
verifylist = [
('subnet', self._subnet.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.find_subnet.assert_called_once_with(
self._subnet.name, ignore_missing=False)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib import context
from oslo_utils import uuidutils
from neutron.db import rbac_db_models
from neutron.objects import base as obj_base
from neutron.objects.db import api as obj_db_api
from neutron.objects import network as net_obj
from neutron.objects import rbac_db
from neutron.objects import subnet
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api
class IPAllocationPoolObjectIfaceTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.IPAllocationPool
class IPAllocationPoolDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.IPAllocationPool
def setUp(self):
super(IPAllocationPoolDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'subnet_id': lambda: self._create_test_subnet_id()})
class DNSNameServerObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.DNSNameServer
def setUp(self):
super(DNSNameServerObjectIfaceTestCase, self).setUp()
self.pager_map[self._test_class.obj_name()] = (
obj_base.Pager(sorts=[('order', True)]))
class DNSNameServerDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.DNSNameServer
def setUp(self):
super(DNSNameServerDbObjectTestCase, self).setUp()
self._subnet_id = self._create_test_subnet_id()
self.update_obj_fields({'subnet_id': self._subnet_id})
def _create_dnsnameservers(self):
for obj in self.obj_fields:
dns = self._make_object(obj)
dns.create()
def test_get_objects_sort_by_order_asc(self):
self._create_dnsnameservers()
objs = self._test_class.get_objects(self.context)
fields_sorted = sorted([obj['order'] for obj in self.obj_fields])
self.assertEqual(fields_sorted, [obj.order for obj in objs])
def test_get_objects_sort_by_order_desc(self):
self._create_dnsnameservers()
pager = obj_base.Pager(sorts=[('order', False)])
objs = self._test_class.get_objects(self.context, _pager=pager,
subnet_id=self._subnet_id)
fields_sorted = sorted([obj['order'] for obj in self.obj_fields],
reverse=True)
self.assertEqual(fields_sorted, [obj.order for obj in objs])
def test_get_objects_sort_by_address_asc_using_pager(self):
self._create_dnsnameservers()
pager = obj_base.Pager(sorts=[('address', True)])
objs = self._test_class.get_objects(self.context, _pager=pager)
fields_sorted = sorted([obj['address'] for obj in self.obj_fields])
self.assertEqual(fields_sorted, [obj.address for obj in objs])
class RouteObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.Route
class RouteDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.Route
def setUp(self):
super(RouteDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'subnet_id': lambda: self._create_test_subnet_id()})
class SubnetServiceTypeObjectIfaceTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.SubnetServiceType
class SubnetServiceTypeDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.SubnetServiceType
def setUp(self):
super(SubnetServiceTypeDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'subnet_id': lambda: self._create_test_subnet_id()})
class SubnetObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.Subnet
def setUp(self):
super(SubnetObjectIfaceTestCase, self).setUp()
self.pager_map[subnet.DNSNameServer.obj_name()] = (
obj_base.Pager(sorts=[('order', True)]))
# The base class will mock these out only when rbac_db_model is set on the
# object. Since subnets don't have their own RBAC models but only derive
# the shared value from networks, we need to unconditionally mock these
# entry points out here, otherwise they will trigger database access,
# which is not allowed in 'Iface' test classes.
mock.patch.object(
rbac_db.RbacNeutronDbObjectMixin,
'is_shared_with_project', return_value=False).start()
mock.patch.object(
rbac_db.RbacNeutronDbObjectMixin,
'get_shared_with_project').start()
class SubnetDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.Subnet
CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
def setUp(self):
super(SubnetDbObjectTestCase, self).setUp()
# set up plugin because some models used here require a plugin
# (specifically, rbac models and their get_valid_actions validators)
self.setup_coreplugin(self.CORE_PLUGIN)
network_id = self._create_test_network_id()
self.update_obj_fields(
{'network_id': network_id,
'segment_id': lambda: self._create_test_segment_id(network_id)})
def test_get_dns_nameservers_in_order(self):
obj = self._make_object(self.obj_fields[0])
obj.create()
dns_nameservers = [(2, '1.2.3.4'), (1, '5.6.7.8'), (4, '7.7.7.7')]
for order, address in dns_nameservers:
dns = subnet.DNSNameServer(self.context, order=order,
address=address,
subnet_id=obj.id)
dns.create()
new = self._test_class.get_object(self.context, id=obj.id)
self.assertEqual(1, new.dns_nameservers[0].order)
self.assertEqual(2, new.dns_nameservers[1].order)
self.assertEqual(4, new.dns_nameservers[-1].order)
def _create_shared_network_rbac_entry(self, network):
attrs = {
'object_id': network['id'],
'target_tenant': '*',
'action': rbac_db_models.ACCESS_SHARED
}
obj_db_api.create_object(net_obj.NetworkRBAC, self.context, attrs)
def test_get_subnet_shared_true(self):
network = self._create_test_network()
self._create_shared_network_rbac_entry(network)
subnet_data = dict(self.obj_fields[0])
subnet_data['network_id'] = network['id']
obj = self._make_object(subnet_data)
        # check that 'shared' is loaded by 'obj_load_attr', which issues an
        # extra query via RbacNeutronDbObjectMixin.get_shared_with_project
self.assertTrue(obj.shared)
obj.create()
        # here 'shared' should be loaded by is_network_shared
self.assertTrue(obj.shared)
new = self._test_class.get_object(self.context,
**obj._get_composite_keys())
        # again, 'shared' should be loaded by is_network_shared
self.assertTrue(new.shared)
def test_filter_by_shared(self):
network = self._create_test_network()
self._create_shared_network_rbac_entry(network)
subnet_data = dict(self.obj_fields[0])
subnet_data['network_id'] = network['id']
obj = self._make_object(subnet_data)
obj.create()
result = self._test_class.get_objects(self.context, shared=True)
self.assertEqual(obj, result[0])
def test_get_shared_subnet_with_another_tenant(self):
network_shared = self._create_test_network()
self._create_shared_network_rbac_entry(network_shared)
subnet_data = dict(self.obj_fields[0])
subnet_data['network_id'] = network_shared['id']
shared_subnet = self._make_object(subnet_data)
shared_subnet.create()
priv_subnet = self._make_object(self.obj_fields[1])
priv_subnet.create()
        # Situation here:
        # - one private network with its own subnet
        # - one shared network with its own subnet
        # with the admin context, both subnets are visible
all_subnets = self._test_class.get_objects(self.context)
self.assertEqual(2, len(all_subnets))
        # with a new tenant_id, only the shared subnet should be accessible
new_ctx = context.Context('', uuidutils.generate_uuid())
public_subnets = self._test_class.get_objects(new_ctx)
self.assertEqual([shared_subnet], public_subnets)
# test get_object to fetch the private and then the shared subnet
fetched_private_subnet = self._test_class.get_object(new_ctx,
id=priv_subnet.id)
self.assertIsNone(fetched_private_subnet)
fetched_public_subnet = (
self._test_class.get_object(new_ctx, id=shared_subnet.id))
self.assertEqual(shared_subnet, fetched_public_subnet)
def test_get_service_types(self):
obj = self._make_object(self.obj_fields[0])
obj.create()
service_type_obj = subnet.SubnetServiceType(
self.context, subnet_id=obj.id, service_type='dhcp-agent')
service_type_obj.create()
listed_obj = subnet.Subnet.get_object(self.context, id=obj.id)
self.assertEqual([service_type_obj.service_type],
listed_obj.service_types)
# Try to load the service_types by obj_load_attr
obj1 = self._make_object(self.obj_fields[0])
self.assertEqual([service_type_obj.service_type],
obj1.service_types)
class NetworkSubnetLockTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = subnet.NetworkSubnetLock
class NetworkSubnetLockDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = subnet.NetworkSubnetLock
def setUp(self):
super(NetworkSubnetLockDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'network_id': lambda: self._create_test_network_id()})
def test_lock_subnet_update(self):
obj = self._make_object(self.obj_fields[0])
obj.create()
subnet_id = self._create_test_subnet_id(network_id=obj.network_id)
subnet.NetworkSubnetLock.lock_subnet(self.context, obj.network_id,
subnet_id)
obj = subnet.NetworkSubnetLock.get_object(self.context,
network_id=obj.network_id)
self.assertEqual(subnet_id, obj.subnet_id)
def test_lock_subnet_create(self):
network_id = self._create_test_network_id()
subnet_id = self._create_test_subnet_id(network_id=network_id)
obj = subnet.NetworkSubnetLock.get_object(self.context,
network_id=network_id)
self.assertIsNone(obj)
subnet.NetworkSubnetLock.lock_subnet(self.context, network_id,
subnet_id)
obj = subnet.NetworkSubnetLock.get_object(self.context,
network_id=network_id)
self.assertEqual(network_id, obj.network_id)
self.assertEqual(subnet_id, obj.subnet_id)
|
|
#!/usr/bin/env python3
import unittest
import socket
import binascii
from framework import VppTestCase, VppTestRunner
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet6 import IPv6, UDP, TCP
from util import ppp
from template_classifier import TestClassifier
class TestClassifierIP6(TestClassifier):
""" Classifier IP6 Test Case """
@classmethod
def setUpClass(cls):
super(TestClassifierIP6, cls).setUpClass()
cls.af = socket.AF_INET6
@classmethod
def tearDownClass(cls):
super(TestClassifierIP6, cls).tearDownClass()
def test_iacl_src_ip(self):
""" Source IP6 iACL test
Test scenario for basic IP ACL with source IP
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with source IP address.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with source IP
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
key = 'ip6_src'
self.create_classify_table(
key,
self.build_ip6_mask(src_ip='ffffffffffffffffffffffffffffffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(src_ip=self.pg0.remote_ip6))
self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
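    # The pattern above repeats throughout these tests: build a classify table
    # whose mask selects the field of interest (an all-'f' mask matches every
    # bit), add a session whose match value must equal the masked packet
    # bytes, then bind the table to the input interface.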
def test_iacl_dst_ip(self):
""" Destination IP6 iACL test
Test scenario for basic IP ACL with destination IP
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with destination IP address.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with destination IP
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
key = 'ip6_dst'
self.create_classify_table(
key,
self.build_ip6_mask(dst_ip='ffffffffffffffffffffffffffffffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(dst_ip=self.pg1.remote_ip6))
self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_src_dst_ip(self):
""" Source and destination IP6 iACL test
Test scenario for basic IP ACL with source and destination IP
        - Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with source and destination IP addresses.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with source and destination IP
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
key = 'ip6'
self.create_classify_table(
key,
self.build_ip6_mask(src_ip='ffffffffffffffffffffffffffffffff',
dst_ip='ffffffffffffffffffffffffffffffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(src_ip=self.pg0.remote_ip6,
dst_ip=self.pg1.remote_ip6))
self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
# Tests are split into separate test case classes because of the issue
# reported in ticket VPP-1336
class TestClassifierIP6UDP(TestClassifier):
""" Classifier IP6 UDP proto Test Case """
@classmethod
def setUpClass(cls):
super(TestClassifierIP6UDP, cls).setUpClass()
cls.af = socket.AF_INET6
def test_iacl_proto_udp(self):
""" IP6 UDP protocol iACL test
Test scenario for basic protocol ACL with UDP protocol
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with UDP IP protocol.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with UDP protocol
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
key = 'nh_udp'
self.create_classify_table(key, self.build_ip6_mask(nh='ff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_UDP))
self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_proto_udp_sport(self):
""" IP6 UDP source port iACL test
Test scenario for basic protocol ACL with UDP and sport
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with UDP IP protocol and defined sport.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with UDP and sport
sport = 38
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
UDP(sport=sport, dport=5678))
self.pg0.add_stream(pkts)
key = 'nh_udp_sport'
self.create_classify_table(
key, self.build_ip6_mask(nh='ff', src_port='ffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_UDP, src_port=sport))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_proto_udp_dport(self):
""" IP6 UDP destination port iACL test
Test scenario for basic protocol ACL with UDP and dport
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with UDP IP protocol and defined dport.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with UDP and dport
dport = 427
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
UDP(sport=1234, dport=dport))
self.pg0.add_stream(pkts)
key = 'nh_udp_dport'
self.create_classify_table(
key, self.build_ip6_mask(nh='ff', dst_port='ffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_UDP, dst_port=dport))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_proto_udp_sport_dport(self):
""" IP6 UDP source and destination ports iACL test
Test scenario for basic protocol ACL with UDP and sport and dport
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with UDP IP protocol and defined sport and dport.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with UDP and sport and dport
sport = 13720
dport = 9080
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
UDP(sport=sport, dport=dport))
self.pg0.add_stream(pkts)
key = 'nh_udp_ports'
self.create_classify_table(
key,
self.build_ip6_mask(nh='ff', src_port='ffff', dst_port='ffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_UDP, src_port=sport,
dst_port=dport))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
class TestClassifierIP6TCP(TestClassifier):
""" Classifier IP6 TCP proto Test Case """
@classmethod
def setUpClass(cls):
super(TestClassifierIP6TCP, cls).setUpClass()
cls.af = socket.AF_INET6
def test_iacl_proto_tcp(self):
""" IP6 TCP protocol iACL test
Test scenario for basic protocol ACL with TCP protocol
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with TCP IP protocol.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with TCP protocol
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
TCP(sport=1234, dport=5678))
self.pg0.add_stream(pkts)
key = 'nh_tcp'
self.create_classify_table(key, self.build_ip6_mask(nh='ff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_TCP))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts, TCP)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_proto_tcp_sport(self):
""" IP6 TCP source port iACL test
Test scenario for basic protocol ACL with TCP and sport
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with TCP IP protocol and defined sport.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with TCP and sport
sport = 38
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
TCP(sport=sport, dport=5678))
self.pg0.add_stream(pkts)
key = 'nh_tcp_sport'
self.create_classify_table(
key, self.build_ip6_mask(nh='ff', src_port='ffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_TCP, src_port=sport))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts, TCP)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_proto_tcp_dport(self):
""" IP6 TCP destination port iACL test
Test scenario for basic protocol ACL with TCP and dport
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with TCP IP protocol and defined dport.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with TCP and dport
dport = 427
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
TCP(sport=1234, dport=dport))
self.pg0.add_stream(pkts)
key = 'nh_tcp_dport'
self.create_classify_table(
key, self.build_ip6_mask(nh='ff', dst_port='ffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_TCP, dst_port=dport))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts, TCP)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
def test_iacl_proto_tcp_sport_dport(self):
""" IP6 TCP source and destination ports iACL test
Test scenario for basic protocol ACL with TCP and sport and dport
- Create IPv6 stream for pg0 -> pg1 interface.
- Create iACL with TCP IP protocol and defined sport and dport.
- Send and verify received packets on pg1 interface.
"""
# Basic iACL testing with TCP and sport and dport
sport = 13720
dport = 9080
pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
TCP(sport=sport, dport=dport))
self.pg0.add_stream(pkts)
key = 'nh_tcp_ports'
self.create_classify_table(
key,
self.build_ip6_mask(nh='ff', src_port='ffff', dst_port='ffff'))
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(nh=socket.IPPROTO_TCP, src_port=sport,
dst_port=dport))
self.input_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg1.get_capture(len(pkts))
self.verify_capture(self.pg1, pkts, TCP)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
class TestClassifierIP6Out(TestClassifier):
""" Classifier output IP6 Test Case """
@classmethod
def setUpClass(cls):
super(TestClassifierIP6Out, cls).setUpClass()
cls.af = socket.AF_INET6
def test_acl_ip_out(self):
""" Output IP6 ACL test
Test scenario for basic IP ACL with source IP
- Create IPv6 stream for pg1 -> pg0 interface.
- Create ACL with source IP address.
- Send and verify received packets on pg0 interface.
"""
# Basic oACL testing with source IP
pkts = self.create_stream(self.pg1, self.pg0, self.pg_if_packet_sizes)
self.pg1.add_stream(pkts)
key = 'ip6_out'
self.create_classify_table(
key,
self.build_ip6_mask(src_ip='ffffffffffffffffffffffffffffffff'),
data_offset=0)
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip6_match(src_ip=self.pg1.remote_ip6))
self.output_acl_set_interface(
self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg0.get_capture(len(pkts))
self.verify_capture(self.pg0, pkts)
self.pg1.assert_nothing_captured(remark="packets forwarded")
self.pg2.assert_nothing_captured(remark="packets forwarded")
class TestClassifierIP6MAC(TestClassifier):
""" Classifier IP6 MAC Test Case """
@classmethod
def setUpClass(cls):
super(TestClassifierIP6MAC, cls).setUpClass()
cls.af = socket.AF_INET6
def test_acl_mac(self):
""" IP6 MAC iACL test
Test scenario for basic MAC ACL with source MAC
- Create IPv6 stream for pg0 -> pg2 interface.
- Create ACL with source MAC address.
- Send and verify received packets on pg2 interface.
"""
# Basic iACL testing with source MAC
pkts = self.create_stream(self.pg0, self.pg2, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
key = 'mac'
self.create_classify_table(
key, self.build_mac_mask(src_mac='ffffffffffff'), data_offset=-14)
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_mac_match(src_mac=self.pg0.remote_mac))
self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
self.acl_active_table = key
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg2.get_capture(len(pkts))
self.verify_capture(self.pg2, pkts)
self.pg0.assert_nothing_captured(remark="packets forwarded")
self.pg1.assert_nothing_captured(remark="packets forwarded")
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class DeductLoyaltyPointsEffectProps(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rule_title': 'str',
'program_id': 'int',
'sub_ledger_id': 'str',
'value': 'float',
'transaction_uuid': 'str'
}
attribute_map = {
'rule_title': 'ruleTitle',
'program_id': 'programId',
'sub_ledger_id': 'subLedgerId',
'value': 'value',
'transaction_uuid': 'transactionUUID'
}
def __init__(self, rule_title=None, program_id=None, sub_ledger_id=None, value=None, transaction_uuid=None, local_vars_configuration=None): # noqa: E501
"""DeductLoyaltyPointsEffectProps - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rule_title = None
self._program_id = None
self._sub_ledger_id = None
self._value = None
self._transaction_uuid = None
self.discriminator = None
self.rule_title = rule_title
self.program_id = program_id
self.sub_ledger_id = sub_ledger_id
self.value = value
self.transaction_uuid = transaction_uuid
@property
def rule_title(self):
"""Gets the rule_title of this DeductLoyaltyPointsEffectProps. # noqa: E501
        The title of the rule that triggered this points deduction  # noqa: E501
:return: The rule_title of this DeductLoyaltyPointsEffectProps. # noqa: E501
:rtype: str
"""
return self._rule_title
@rule_title.setter
def rule_title(self, rule_title):
"""Sets the rule_title of this DeductLoyaltyPointsEffectProps.
        The title of the rule that triggered this points deduction  # noqa: E501
:param rule_title: The rule_title of this DeductLoyaltyPointsEffectProps. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and rule_title is None: # noqa: E501
raise ValueError("Invalid value for `rule_title`, must not be `None`") # noqa: E501
self._rule_title = rule_title
@property
def program_id(self):
"""Gets the program_id of this DeductLoyaltyPointsEffectProps. # noqa: E501
        The ID of the loyalty program where these points were deducted  # noqa: E501
:return: The program_id of this DeductLoyaltyPointsEffectProps. # noqa: E501
:rtype: int
"""
return self._program_id
@program_id.setter
def program_id(self, program_id):
"""Sets the program_id of this DeductLoyaltyPointsEffectProps.
        The ID of the loyalty program where these points were deducted  # noqa: E501
:param program_id: The program_id of this DeductLoyaltyPointsEffectProps. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and program_id is None: # noqa: E501
raise ValueError("Invalid value for `program_id`, must not be `None`") # noqa: E501
self._program_id = program_id
@property
def sub_ledger_id(self):
"""Gets the sub_ledger_id of this DeductLoyaltyPointsEffectProps. # noqa: E501
        The ID of the subledger within the loyalty program where these points were deducted  # noqa: E501
:return: The sub_ledger_id of this DeductLoyaltyPointsEffectProps. # noqa: E501
:rtype: str
"""
return self._sub_ledger_id
@sub_ledger_id.setter
def sub_ledger_id(self, sub_ledger_id):
"""Sets the sub_ledger_id of this DeductLoyaltyPointsEffectProps.
        The ID of the subledger within the loyalty program where these points were deducted  # noqa: E501
:param sub_ledger_id: The sub_ledger_id of this DeductLoyaltyPointsEffectProps. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and sub_ledger_id is None: # noqa: E501
raise ValueError("Invalid value for `sub_ledger_id`, must not be `None`") # noqa: E501
self._sub_ledger_id = sub_ledger_id
@property
def value(self):
"""Gets the value of this DeductLoyaltyPointsEffectProps. # noqa: E501
The amount of points that were deducted # noqa: E501
:return: The value of this DeductLoyaltyPointsEffectProps. # noqa: E501
:rtype: float
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this DeductLoyaltyPointsEffectProps.
The amount of points that were deducted # noqa: E501
:param value: The value of this DeductLoyaltyPointsEffectProps. # noqa: E501
:type: float
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
@property
def transaction_uuid(self):
"""Gets the transaction_uuid of this DeductLoyaltyPointsEffectProps. # noqa: E501
The identifier of this deduction in the loyalty ledger # noqa: E501
:return: The transaction_uuid of this DeductLoyaltyPointsEffectProps. # noqa: E501
:rtype: str
"""
return self._transaction_uuid
@transaction_uuid.setter
def transaction_uuid(self, transaction_uuid):
"""Sets the transaction_uuid of this DeductLoyaltyPointsEffectProps.
The identifier of this deduction in the loyalty ledger # noqa: E501
:param transaction_uuid: The transaction_uuid of this DeductLoyaltyPointsEffectProps. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and transaction_uuid is None: # noqa: E501
raise ValueError("Invalid value for `transaction_uuid`, must not be `None`") # noqa: E501
self._transaction_uuid = transaction_uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeductLoyaltyPointsEffectProps):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DeductLoyaltyPointsEffectProps):
return True
return self.to_dict() != other.to_dict()
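# Illustrative usage sketch (not part of the generated module; all values are
# hypothetical):
#
#   props = DeductLoyaltyPointsEffectProps(
#       rule_title="10% back", program_id=1, sub_ledger_id="main",
#       value=25.0, transaction_uuid="00000000-0000-0000-0000-000000000000")
#   props.to_dict()["value"]  # -> 25.0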
|
|
import fnmatch
import importlib
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import unquote
from django.http import HttpRequest
from django.utils.translation import gettext as _
from zerver.lib.actions import (
check_send_private_message,
check_send_stream_message,
check_send_stream_message_by_id,
send_rate_limited_pm_notification_to_bot_owner,
)
from zerver.lib.exceptions import ErrorCode, JsonableError, StreamDoesNotExistError
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.send_email import FromAddress
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.validator import check_list, check_string
from zerver.models import UserProfile
MISSING_EVENT_HEADER_MESSAGE = """\
Hi there! Your bot {bot_name} just sent an HTTP request to {request_path} that
is missing the HTTP {header_name} header. Because this header is how
{integration_name} indicates the event type, this usually indicates a configuration
issue, where you either entered the URL for a different integration, or are running
an older version of the third-party service that doesn't provide that header.
Contact {support_email} if you need help debugging!
"""
INVALID_JSON_MESSAGE = """
Hi there! It looks like you tried to set up the Zulip {webhook_name} integration,
but didn't correctly configure the webhook to send data in the JSON format
that this integration expects!
"""
SETUP_MESSAGE_TEMPLATE = "{integration} webhook has been successfully configured"
SETUP_MESSAGE_USER_PART = " by {user_name}"
# Django prefixes all custom HTTP headers with `HTTP_`
DJANGO_HTTP_PREFIX = "HTTP_"
def get_setup_webhook_message(integration: str, user_name: Optional[str] = None) -> str:
content = SETUP_MESSAGE_TEMPLATE.format(integration=integration)
if user_name:
content += SETUP_MESSAGE_USER_PART.format(user_name=user_name)
content = f"{content}."
return content
def notify_bot_owner_about_invalid_json(
user_profile: UserProfile, webhook_client_name: str
) -> None:
send_rate_limited_pm_notification_to_bot_owner(
user_profile,
user_profile.realm,
INVALID_JSON_MESSAGE.format(webhook_name=webhook_client_name).strip(),
)
class MissingHTTPEventHeader(JsonableError):
code = ErrorCode.MISSING_HTTP_EVENT_HEADER
data_fields = ["header"]
def __init__(self, header: str) -> None:
self.header = header
@staticmethod
def msg_format() -> str:
return _("Missing the HTTP event header '{header}'")
@has_request_variables
def check_send_webhook_message(
request: HttpRequest,
user_profile: UserProfile,
topic: str,
body: str,
complete_event_type: Optional[str] = None,
stream: Optional[str] = REQ(default=None),
user_specified_topic: Optional[str] = REQ("topic", default=None),
only_events: Optional[List[str]] = REQ(default=None, json_validator=check_list(check_string)),
exclude_events: Optional[List[str]] = REQ(
default=None, json_validator=check_list(check_string)
),
unquote_url_parameters: bool = False,
) -> None:
if complete_event_type is not None:
# Here, we implement Zulip's generic support for filtering
# events sent by the third-party service.
#
        # If complete_event_type is passed to this function, we check the event
        # type against the user-configured only_events and exclude_events lists.
        # If the event does not satisfy the configuration, the function returns
        # without sending any messages.
#
# We match items in only_events and exclude_events using Unix
# shell-style wildcards.
if (
only_events is not None
and all([not fnmatch.fnmatch(complete_event_type, pattern) for pattern in only_events])
) or (
exclude_events is not None
and any([fnmatch.fnmatch(complete_event_type, pattern) for pattern in exclude_events])
):
return
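        # Illustrative example of the filtering above: with complete_event_type
        # 'push:main', only_events=['push:*'] matches and the message is sent
        # below, while exclude_events=['push:*'] makes the check above return
        # early instead.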
client = RequestNotes.get_notes(request).client
assert client is not None
if stream is None:
assert user_profile.bot_owner is not None
check_send_private_message(user_profile, client, user_profile.bot_owner, body)
else:
# Some third-party websites (such as Atlassian's Jira), tend to
# double escape their URLs in a manner that escaped space characters
# (%20) are never properly decoded. We work around that by making sure
# that the URL parameters are decoded on our end.
        if unquote_url_parameters:
stream = unquote(stream)
if user_specified_topic is not None:
topic = user_specified_topic
if unquote_url_parameters:
topic = unquote(topic)
try:
if stream.isdecimal():
check_send_stream_message_by_id(user_profile, client, int(stream), topic, body)
else:
check_send_stream_message(user_profile, client, stream, topic, body)
except StreamDoesNotExistError:
# A PM will be sent to the bot_owner by check_message, notifying
# that the webhook bot just tried to send a message to a non-existent
# stream, so we don't need to re-raise it since it clutters up
# webhook-errors.log
pass
def standardize_headers(input_headers: Union[None, Dict[str, Any]]) -> Dict[str, str]:
"""This method can be used to standardize a dictionary of headers with
the standard format that Django expects. For reference, refer to:
https://docs.djangoproject.com/en/2.2/ref/request-response/#django.http.HttpRequest.headers
NOTE: Historically, Django's headers were not case-insensitive. We're still
capitalizing our headers to make it easier to compare/search later if required.
"""
canonical_headers = {}
if not input_headers:
return {}
for raw_header in input_headers:
polished_header = raw_header.upper().replace("-", "_")
if polished_header not in ["CONTENT_TYPE", "CONTENT_LENGTH"]:
if not polished_header.startswith("HTTP_"):
polished_header = "HTTP_" + polished_header
canonical_headers[polished_header] = str(input_headers[raw_header])
return canonical_headers
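# A minimal illustrative sketch (not part of the original module) of what
# standardize_headers produces, derived from the logic above:
#
#   standardize_headers({"X-Event-Key": "push", "Content-Type": "application/json"})
#   # -> {"HTTP_X_EVENT_KEY": "push", "CONTENT_TYPE": "application/json"}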
def validate_extract_webhook_http_header(
request: HttpRequest, header: str, integration_name: str, fatal: bool = True
) -> Optional[str]:
assert request.user.is_authenticated
extracted_header = request.META.get(DJANGO_HTTP_PREFIX + header)
if extracted_header is None and fatal:
message_body = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=request.user.full_name,
request_path=request.path,
header_name=header,
integration_name=integration_name,
support_email=FromAddress.SUPPORT,
)
send_rate_limited_pm_notification_to_bot_owner(
request.user, request.user.realm, message_body
)
raise MissingHTTPEventHeader(header)
return extracted_header
def get_fixture_http_headers(integration_name: str, fixture_name: str) -> Dict[str, str]:
"""For integrations that require custom HTTP headers for some (or all)
of their test fixtures, this method will call a specially named
function from the target integration module to determine what set
of HTTP headers goes with the given test fixture.
"""
view_module_name = f"zerver.webhooks.{integration_name}.view"
try:
# TODO: We may want to migrate to a more explicit registration
# strategy for this behavior rather than a try/except import.
view_module = importlib.import_module(view_module_name)
fixture_to_headers = getattr(view_module, "fixture_to_headers")
except (ImportError, AttributeError):
return {}
return fixture_to_headers(fixture_name)
def get_http_headers_from_filename(http_header_key: str) -> Callable[[str], Dict[str, str]]:
"""If an integration requires an event type kind of HTTP header which can
be easily (statically) determined, then name the fixtures in the format
of "header_value__other_details" or even "header_value" and the use this
method in the headers.py file for the integration."""
def fixture_to_headers(filename: str) -> Dict[str, str]:
if "__" in filename:
event_type = filename.split("__")[0]
else:
event_type = filename
return {http_header_key: event_type}
return fixture_to_headers
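# Illustrative usage sketch (the header key is hypothetical): a headers.py
# module for an integration with fixtures named "<event>__<details>" could do:
#
#   fixture_to_headers = get_http_headers_from_filename("HTTP_X_EVENT_KEY")
#   fixture_to_headers("push__two_commits")  # -> {"HTTP_X_EVENT_KEY": "push"}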
def unix_milliseconds_to_timestamp(milliseconds: Any, webhook: str) -> datetime:
"""If an integration requires time input in unix milliseconds, this helper
checks to ensure correct type and will catch any errors related to type or
value and raise a JsonableError.
Returns a datetime representing the time."""
try:
# timestamps are in milliseconds so divide by 1000
seconds = milliseconds / 1000
return timestamp_to_datetime(seconds)
except (ValueError, TypeError):
raise JsonableError(_("The {} webhook expects time in milliseconds.").format(webhook))
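# Illustrative sketch ("example" is a hypothetical webhook name): 1136073600000
# milliseconds is 2006-01-01T00:00:00 UTC, so
#   unix_milliseconds_to_timestamp(1136073600000, "example")
# returns that instant as a datetime, while a non-numeric value raises a
# JsonableError naming the webhook.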
|
|
##
# Copyright 2016 Sagnik Ghosh, licensed under the Apache 2.0 License.
#
# GooPyCharts: an interface between Python and Google Charts API. Written to serve as a simple substitute
# for matplotlib. Syntax is similar to MATLAB figures.
##
# Python3 compatibility
import sys
python_version = sys.version_info[0]
if python_version >= 3:
try:
from past.builtins import xrange
except ImportError:
print("past module not installed. Run `pip install future` for GooPyCharts's Python3 compatibility.")
sys.exit()
# Module's meat begins
from os import path
from warnings import warn
import webbrowser
import re
try:
from IPython.core.display import display, HTML, display_html, display_javascript
except ImportError:
pass
#The webpage templates. One each for numeric, datetime, and string as the independent variable.
#The shared start and end of the template are each kept in a single string to reduce the line count.
graphPgTemplateStart = """
<html>
<head>
<script src="https://code.jquery.com/jquery-1.10.2.js"></script>
<script type="text/javascript">
$.getScript( "https://www.gstatic.com/charts/loader.js", function() {
if ((typeof google === 'undefined') || (typeof google.visualization === 'undefined'))
{
google.charts.load('current', {'packages':['corechart']});
}
google.charts.setOnLoadCallback(drawChart%(functionName)s);
});
function drawChart%(functionName)s() {
var dataArr = %(data)s;
var grTitle = '%(title)s';
var height = %(height)d;
var width = %(width)d;
var logScaleFlag = %(logScaleFlag)s;
var vAxisTitle = '%(ylabel)s';
var vAxisOpt;
if(logScaleFlag)
{
vAxisOpt = { title: vAxisTitle, logScale: true, format: 'scientific'};
}
else
{
vAxisOpt = { title: vAxisTitle };
}
"""
graphPgTemplate_numeric = """
var options = {
width: width,
height: height,
explorer: { actions: ['dragToZoom', 'rightClickToReset'], maxZoomIn: 0.01 },
curveType: 'function',
title: grTitle,
titleTextStyle: { fontSize: 18, bold: true },
hAxis: { title: dataArr[0][0] },
vAxis: vAxisOpt,
%(other)s
};
var data = new google.visualization.DataTable();
var csvOut = "data:text/csv;charset=utf-8";
// Add column headers
for (var j = 0; j < dataArr[0].length; j++)
{
data.addColumn('number',dataArr[0][j]);
csvOut += ',' + dataArr[0][j];
}
csvOut += '\\n';
// Add columns
for (var i = 1; i < dataArr.length; i++)
{
data.addRow(dataArr[i]);
csvOut += dataArr[i].join(",") + '\\n';
}
"""
graphPgTemplate_string = """
var options = {
width: width,
height: height,
explorer: { actions: ['dragToZoom', 'rightClickToReset'], maxZoomIn: 0.01 },
curveType: 'function',
title: grTitle,
titleTextStyle: { fontSize: 18, bold: true },
hAxis: { title: dataArr[0][0] },
vAxis: vAxisOpt,
%(other)s
};
var data = new google.visualization.DataTable();
var csvOut = "data:text/csv;charset=utf-8";
// Add column headers
data.addColumn('string',dataArr[0][0]);
csvOut += ',' + dataArr[0][0];
for (var j = 0; j < dataArr[0].length-1; j++)
{
data.addColumn('number',dataArr[0][j+1]);
csvOut += ',' + dataArr[0][j+1];
}
csvOut += '\\n';
// Add columns
for (var i = 1; i < dataArr.length; i++)
{
data.addRow(dataArr[i]);
csvOut += dataArr[i].join(",") + '\\n';
}
"""
graphPgTemplate_dateTime = """
var options = {
width: width,
height: height,
explorer: { actions: ['dragToZoom', 'rightClickToReset'], maxZoomIn: 0.01 },
curveType: 'function',
title: grTitle,
titleTextStyle: { fontSize: 18, bold: true },
hAxis: { title: dataArr[0][0],
"gridlines": {
"count": -1,
"units": {
"minutes": { "format": [ "HH:mm", "mm" ] },
"hours": { "format": [ "MM/dd HH:mm", "HH" ] },
"days": { "format": [ "MM/dd" ] },
}
},
"minorGridlines": {
"count": -1,
"units": {
"minutes": { "format": [ "HH:mm", "mm" ] },
"hours": { "format": [ "MM/dd HH:mm", "HH" ] },
"days": { "format": [ "MM/dd" ] },
}
},
},
vAxis: vAxisOpt,
%(other)s
};
var data = new google.visualization.DataTable();
var csvOut = "data:text/csv;charset=utf-8";
// Add column headers
data.addColumn('date',dataArr[0][0]);
csvOut += ',' + dataArr[0][0];
for (var j = 0; j < dataArr[0].length-1; j++)
{
data.addColumn('number',dataArr[0][j+1]);
csvOut += ',' + dataArr[0][j+1];
}
csvOut += '\\n';
var tmpArr;
// Add columns
for (var i = 0; i < dataArr.length-1; i++)
{
// Add time data
tempStr = dataArr[i+1][0];
year = parseInt(tempStr.substr(0,4));
month = parseInt(tempStr.substr(5,2))-1;
day = parseInt(tempStr.substr(8,2));
hour = parseInt(tempStr.substr(11,2));
minute = parseInt(tempStr.substr(14,2));
second = parseInt(tempStr.substr(17,2));
tmpArr = [new Date(year,month,day,hour,minute,second)];
data.addRow(tmpArr.concat(dataArr[i+1].slice(1,dataArr[i+1].length)));
csvOut += tempStr + ',' + dataArr[i+1].slice(1,dataArr[i+1].length).join(",") + '\\n';
}
"""
graphPgTemplate_hist = """
var options = {
width: width,
height: height,
title: grTitle,
titleTextStyle: { fontSize: 18, bold: true },
hAxis: { title: dataArr[0]},
vAxis: vAxisOpt,
%(other)s
};
var data = new google.visualization.DataTable();
var csvOut = "data:text/csv;charset=utf-8";
// Add column header
data.addColumn('number',dataArr[0]);
csvOut += ',' + dataArr[0];
csvOut += '\\n';
// Add data
for (var i = 1; i < dataArr.length; i++)
{
data.addRow([dataArr[i]]);
csvOut += dataArr[i].toString()+'\\n';
}
"""
graphPgTemplateEnd = """
var chart = new google.visualization.%(plotType)s(document.getElementById('chart_div%(functionName)s'));
chart.draw(data, options);
document.getElementById('pic_div%(functionName)s').innerHTML = '<a href="' + chart.getImageURI() + '" download="'+grTitle+'.png">Download Figure</a>'
document.getElementById('csvFileDl%(functionName)s').innerHTML = '<a href="' + encodeURI(csvOut) + '" download="'+grTitle+'.csv">Download CSV</a>'
}
</script>
</head>
<body>
<div id="chart_div%(functionName)s"></div>
<div id="pic_div%(functionName)s"></div>
<div id="csvFileDl%(functionName)s"></div>
</body>
</html>
"""
#helper function to determine template type
def templateType(xdata):
#check if x axis is numeric, string, or datetime
if type(xdata[1]) is str:
        #check whether xdata looks like a 'YYYY-MM-DD HH:MM:SS' datetime
        if len(xdata[1]) == 19 and xdata[1][:4].isdigit() and 0 < int(xdata[1][:4]) < 3000:
#the x-axis data looks like it's a datetime! use datetime template
return graphPgTemplateStart+graphPgTemplate_dateTime+graphPgTemplateEnd
else:
#the x-axis data is a string; process as such
return graphPgTemplateStart+graphPgTemplate_string+graphPgTemplateEnd
else:
#otherwise, data is simply numeric
return graphPgTemplateStart+graphPgTemplate_numeric+graphPgTemplateEnd
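#Illustrative sketch of the dispatch above (not part of the original module):
#   templateType(['x', 1, 2, 3])                       -> numeric template
#   templateType(['t', '2016-01-01 00:00:00', '...'])  -> datetime template
#   templateType(['bin', 'a', 'b'])                    -> string template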
#helper function to combine data
def combineData(xdata,ydata,xlabel):
#if ydata is a simple vector, encapsulate it into a 2D list
if type(ydata[1]) is not list:
ydata = [[val] for val in ydata]
#if xdata is time data, add HH:MM:SS if it is missing (just 00:00:00)
if type(xdata[1]) is str:
        #check whether xdata looks like a 'YYYY-MM-DD' date missing the time part
        if len(xdata[1]) == 10 and xdata[1][:4].isdigit() and 0 < int(xdata[1][:4]) < 3000:
xdata[1:] = [val+' 00:00:00' for val in xdata[1:]]
#figure out independent variable headers
# if there is a title row, use that title
if type(ydata[0][0]) is str:
data = [[xdata[0]] + ydata[0]]
for i in xrange(1,len(xdata)):
data.append([xdata[i]]+ydata[i])
# otherwise, use a default labeling
else:
header = [xlabel]
for i in xrange(len(ydata[0])):
header.append('data'+str(i+1))
data = [header]
for i in xrange(len(xdata)):
data.append([xdata[i]]+ydata[i])
return data
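#Illustrative sketch: combineData([1,2,3],[4,5,6],'x') returns
#[['x','data1'],[1,4],[2,5],[3,6]], i.e. a header row followed by
#[x,y] rows ready for the templates above.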
#helper function, returns title as a valid JS identifier, prefixed by '_'.
def slugify(title):
    return '_' + re.sub(r'[^\w\d_]', '_', title) #Make valid JS identifier (raw string avoids invalid-escape warnings)
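#Illustrative sketch: slugify('My Fig #1') -> '_My_Fig__1'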
##main class
class figure:
'''GooPyCharts: a simple plotting tool for Python/Jupyter. See https://github.com/Dfenestrator/GooPyCharts for overview and examples.'''
numFig = 1
def __init__(self,title="Fig",xlabel='',ylabel='',height=600,width=1000):
#set figure number, and increment for each instance
self.figNum = figure.numFig
figure.numFig = figure.numFig + 1
#if title has not been changed, add figure number
if title=="Fig":
self.title = title+str(self.figNum)
else:
self.title = title
self.fname = self.title+'.html'
self.xlabel = xlabel
self.ylabel = ylabel
#for sizing plot
self.height = height
self.width = width
#Set by the chart methods, can be printed out or exported to file.
self.javascript = 'No chart created yet. Use a chart method'
# Get the full HTML of the file.
def __str__(self):
return self.javascript
# Returns the drawFigure function from the JavaScript in its entirety.
def get_drawChart(self):
tabwidth = 4
start = self.javascript.find('function drawChart') - tabwidth
end = self.javascript.find('</head>') - len('</script>') - 1
raw_drawChart = self.javascript[start:end]
#Unindent 4 spaces on all lines
final_drawChart = ''
for line in raw_drawChart.split('\n'):
final_drawChart += line[tabwidth:] + '\n'
final_drawChart = final_drawChart.rstrip()
return final_drawChart
#Write the JavaScript text out to file
def write(self):
with open(self.fname,'w') as f:
f.write(self.javascript)
    #display HTML helper method. Tries nb() first, falls back on wb() if no notebook
#the nb parameter has been deprecated and does nothing.
def dispFile(self, nb=None):
if nb is not None:
            warn('dispFile() nb parameter is deprecated and does nothing.',
DeprecationWarning)
try:
self.nb()
except NameError:
self.wb()
#Alias for dispFile()
def show(self):
self.dispFile()
#Displays in a Jupyter notebook. Writes current data first.
def nb(self):
self.write()
display(HTML(self.fname))
#Displays in a web browser. Writes current data first.
def wb(self):
self.write()
webbrowser.open_new(self.fname)
#typical line chart plot
def plot(self,xdata,ydata=[],logScale=False,disp=True,**kwargs):
'''Graphs a line plot.
xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well.
logScale: set to True to set the y axis to log scale.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
#check if only 1 vector was sent, then plot against a count
if ydata:
data = combineData(xdata,ydata,self.xlabel)
else:
data = combineData(range(len(xdata)),xdata,self.xlabel)
#determine log scale parameter
if logScale:
logScaleStr = 'true'
else:
logScaleStr = 'false'
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data': str(data),
'title':self.title,
'functionName':slugify(self.title),
'height': self.height,
'width': self.width,
'logScaleFlag': logScaleStr,
'ylabel': self.ylabel,
'plotType': 'LineChart',
'numFig': self.numFig,
'other': other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile()
#scatter plot
def scatter(self,xdata,ydata=[],trendline=False,disp=True,**kwargs):
'''Graphs a scatter plot.
xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well.
        trendline: set to True to plot a linear regression trend line through the first dependent variable.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
#check if only 1 vector was sent, then plot against a count
if ydata:
data = combineData(xdata,ydata,self.xlabel)
else:
data = combineData(range(len(xdata)),xdata,self.xlabel)
#Include other options, supplied by **kwargs
other = ''
#insert trend line, if flag is set
if trendline:
other = 'trendlines: { 0: {showR2: true, visibleInLegend: true} },\n'
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data':str(data),
'title':self.title,
'functionName':slugify(self.title),
'height':self.height,
'width':self.width,
'logScaleFlag':'false',
'ylabel':self.ylabel,
'plotType':'ScatterChart',
'numFig':self.numFig,
'other':other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile()
#bar chart
def bar(self,xdata,ydata,disp=True,**kwargs):
'''Displays a bar graph.
xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
data = combineData(xdata,ydata,self.xlabel)
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data':str(data),
'title':self.title,
'functionName':slugify(self.title),
'height':self.height,
'width':self.width,
'logScaleFlag':'false',
'ylabel':self.ylabel,
'plotType':'BarChart',
'numFig':self.numFig,
'other':other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile()
#column chart
def column(self,xdata,ydata,disp=True,**kwargs):
        '''Displays a column graph (a bar chart with vertical bars).
xdata: list of column graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
data = combineData(xdata,ydata,self.xlabel)
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data':str(data),
'title':self.title,
'functionName':slugify(self.title),
'height':self.height,
'width':self.width,
'logScaleFlag':'false',
'ylabel':self.ylabel,
'plotType':'ColumnChart',
'numFig':self.numFig,
'other':other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile()
#histogram
def hist(self,xdata,disp=True,**kwargs):
'''Graphs a histogram.
xdata: List of values to bin. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
data = [self.xlabel]+xdata
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data':str(data),
'title':self.title,
'functionName':slugify(self.title),
'height':self.height,
'width':self.width,
'logScaleFlag':'false',
'ylabel':self.ylabel,
'plotType':'Histogram',
'numFig':self.numFig,
'other':other}
self.javascript = (graphPgTemplateStart+graphPgTemplate_hist+graphPgTemplateEnd) % argDict
if disp:
self.dispFile()
    #Jupyter plotting methods (deprecated; kept for now for backwards compatibility)
def plot_nb(self,xdata,ydata=[],logScale=False):
'''Graphs a line plot and embeds it in a Jupyter notebook. See 'help(figure.plot)' for more info.'''
self.plot(xdata,ydata,logScale)
def scatter_nb(self,xdata,ydata=[],trendline=False):
'''Graphs a scatter plot and embeds it in a Jupyter notebook. See 'help(figure.scatter)' for more info.'''
self.scatter(xdata,ydata,trendline)
def bar_nb(self,xdata,ydata):
'''Displays a bar graph and embeds it in a Jupyter notebook. See 'help(figure.bar)' for more info.'''
self.bar(xdata,ydata)
def column_nb(self,xdata,ydata):
        '''Displays a column graph and embeds it in a Jupyter notebook. See 'help(figure.column)' for more info.'''
self.column(xdata,ydata)
def hist_nb(self,xdata):
'''Graphs a histogram and embeds it in a Jupyter notebook. See 'help(figure.hist)' for more info.'''
self.hist(xdata)
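#Minimal usage sketch (not part of the original module): write a line chart
#to Demo.html without opening a browser or notebook.
if __name__ == '__main__':
    fig = figure(title='Demo', xlabel='x', ylabel='y')
    fig.plot([1, 2, 3, 4], [1, 4, 9, 16], disp=False)
    fig.write()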
|
|
from __future__ import absolute_import
import os
import logging
import pickle
import oboe
from datetime import datetime as dt
from seabus.common.database import db
from seabus.common.memcached import mc_client
from seabus.common.errors import InvalidBeaconError
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(sh)
def safe_get_type(some_dict, some_key, some_type):
if some_key in some_dict:
val = some_dict.get(some_key)
try:
val = some_type(val)
except Exception as e:
return
return val
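# Illustrative sketch of the contract above:
#   safe_get_type({'sog': '12.5'}, 'sog', float)  # -> 12.5
#   safe_get_type({'sog': 'n/a'}, 'sog', float)   # -> None (bad cast)
#   safe_get_type({}, 'sog', float)               # -> None (missing key)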
class ModelBase(db.Model):
""" provide some useful common functions for db models """
__abstract__ = True
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
def __eq__(self, other):
""" compare class type and (non sqlalchemy) columns for equality """
if not isinstance(other, self.__class__):
return False
for k, v in self.__dict__.iteritems():
if not k.startswith('_'):
if getattr(self, k) != getattr(other, k):
return False
return True
@classmethod
def by_id(cls, id):
return db.session.query(cls).filter_by(id=id).first()
@classmethod
def all(cls):
return db.session.query(cls).all()
@classmethod
def count(cls):
return db.session.query(cls).count()
def save(self):
db.session.add(self)
db.session.commit()
def _mc_key(self):
""" memcached key based on class name + instance id """
assert self.id is not None
return '{}_{}'.format(str(self.__class__.__name__), self.id)
def put_cache(self):
""" write this model to memcached """
mc_client.set(self._mc_key(), pickle.dumps(self))
@classmethod
def get_cache(cls, id):
key = '{}_{}'.format(cls.__name__, id)
cached = mc_client.get(key)
if cached is not None:
return pickle.loads(cached)
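# Illustrative sketch: the cache key mirrors _mc_key(), so an instance with
# id 7 of a model class Foo is stored under 'Foo_7' after put_cache() and
# read back (unpickled) via Foo.get_cache(7); a cache miss returns None.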
class Boat(ModelBase):
"""
    Everything related to the boat metadata coming in via http://catb.org/gpsd/AIVDM.html#_type_5_static_and_voyage_related_data
"""
__tablename__ = 'boats'
telemetry = db.relationship('Telemetry', backref='boat')
is_seabus = db.Column(db.Boolean, default=False)
mmsi = db.Column(db.Integer, nullable=False, unique=True)
name = db.Column(db.String(120), default=None)
dim_to_bow = db.Column(db.Integer, default=None)
dim_to_stern = db.Column(db.Integer, default=None)
dim_to_port = db.Column(db.Integer, default=None)
dim_to_star = db.Column(db.Integer, default=None)
type_and_cargo = db.Column(db.Integer, default=None)
lastseen_on = db.Column(db.DateTime, default = dt.utcnow)
    # hard-coded from observed data
seabus_mmsis = [316028554, 316014621, 316011651, 316011649]
def __init__(self, mmsi):
self.mmsi = mmsi
self.save()
@classmethod
@oboe.profile_function('all_seabuses')
def all_seabuses(cls):
boats = []
for mmsi in Boat.seabus_mmsis:
# first try to fetch from cache
boat = Boat.from_cache_by_mmsi(mmsi)
if boat is not None:
boats.append(boat)
else:
# fall back to db and cache response
boat = db.session.query(cls).filter_by(mmsi=mmsi).first()
if boat is not None:
boat.put_cache()
boats.append(boat)
if len(boats) > 0:
return boats
@classmethod
def from_beacon(cls, beacon):
""" return existing boat record if present or create and return
a new one """
if beacon.get('id') == 4:
# msg type 4 is a base station, not a boat
return
mmsi = beacon.get('mmsi')
        # can't do anything without an MMSI
if mmsi is None:
raise InvalidBeaconError
boat = db.session.query(cls).filter_by(mmsi=mmsi).first()
if boat is None:
boat = Boat(mmsi)
else:
# if we've seen this boat before update lastseen time
boat.lastseen_on = dt.utcnow()
if boat.mmsi in Boat.seabus_mmsis:
boat.is_seabus = True
boat._parse_beacon(beacon)
boat.save()
return boat
def _parse_beacon(self, beacon):
name = beacon.get('name')
if isinstance(name, basestring):
self.name = name.strip()
type_and_cargo = beacon.get('type_and_cargo')
if type_and_cargo is not None:
try:
type_and_cargo = int(type_and_cargo)
self.type_and_cargo = type_and_cargo
except Exception as e:
log.exception(e)
log.info('Bogus type/cargo in beacon {}'.format(beacon))
d2bow = beacon.get('dim_a')
d2stern = beacon.get('dim_b')
d2port = beacon.get('dim_c')
d2star = beacon.get('dim_d')
        dimensions = (d2bow, d2stern, d2port, d2star)
        if None not in dimensions:
            try:
                # coerce all four dimensions up front; the previous
                # filter(int, ...) silently dropped legitimate zero values
                # and let non-numeric input crash the int() calls below
                dimensions = [int(d) for d in dimensions]
            except (TypeError, ValueError):
                log.info('Bogus dimensions in beacon: {}'.format(beacon))
            else:
                (self.dim_to_bow, self.dim_to_stern,
                 self.dim_to_port, self.dim_to_star) = dimensions
def _mc_key(self):
""" memcached key based on class name + mmsi """
assert self.mmsi is not None
return '{}_{}'.format(str(self.__class__.__name__), self.mmsi)
@classmethod
def from_cache_by_mmsi(cls, mmsi):
key = '{}_{}'.format(cls.__name__, mmsi)
cached = mc_client.get(key)
if cached is not None:
return pickle.loads(cached)
class Telemetry(ModelBase):
"""
Everything related to position, heading, etc coming in via http://catb.org/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
__tablename__ = 'telemetry'
boat_id = db.Column(db.Integer, db.ForeignKey('boats.id'))
nav_status = db.Column(db.Integer)
pos_accuracy = db.Column(db.Integer)
lon = db.Column(db.Float)
lat = db.Column(db.Float)
speed_over_ground = db.Column(db.Float)
course_over_ground = db.Column(db.Float)
true_heading = db.Column(db.Integer)
rate_of_turn = db.Column(db.Float)
rate_of_turn_over_range = db.Column(db.Boolean)
timestamp = db.Column(db.Integer)
received = db.Column(db.DateTime, default=dt.utcnow)
def __init__(self):
pass
def __repr__(self):
return '<% {}, {} %>'.format(self.lat, self.lon)
@classmethod
def from_beacon(cls, beacon):
telemetry = Telemetry()
telemetry._parse_beacon(beacon)
if telemetry.is_valid():
return telemetry
def is_valid(self):
if None in (self.lat, self.lon):
return False
        if (self.lat < -90) or (self.lat > 90):
return False
if (self.lon < -180) or (self.lon > 180):
return False
return True
def _parse_beacon(self, beacon):
self.nav_status = safe_get_type(beacon, 'nav_status', int)
self.pos_accuracy = safe_get_type(beacon, 'position_accuracy', int)
self.lon = safe_get_type(beacon, 'x', float)
self.lat = safe_get_type(beacon, 'y', float)
self.speed_over_ground = safe_get_type(beacon, 'sog', float)
self.course_over_ground = safe_get_type(beacon, 'cog', float)
self.true_heading = safe_get_type(beacon, 'true_heading', int)
self.rate_of_turn = safe_get_type(beacon, 'rot', float)
self.rate_of_turn_over_range = safe_get_type(beacon, 'rot_over_range',bool)
self.timestamp = safe_get_type(beacon, 'timestamp', int)
@classmethod
def from_db_for_boat(cls, boat):
return db.session.query(cls).filter_by(boat_id=boat.id).order_by(cls.id.desc()).first()
def set_boat(self, boat):
self.boat_id = boat.id
def smart_save(self):
"""
Save all telemetry for Seabuses, only latest telemetry for everyone else
"""
if Boat.by_id(self.boat_id).is_seabus:
# record every piece of telemetry for each seabus
if self.is_valid():
self.save()
else:
# drop all previous telemetry for this boat
if self.is_valid():
db.session.query(Telemetry).filter_by(boat_id=self.boat_id).delete()
self.save()
def _mc_key(self):
""" key based on class name + boat id should keep memcache pretty clear of junk """
assert self.boat_id is not None
return '{}_{}'.format(str(self.__class__.__name__), self.boat_id)
@classmethod
def from_cache_for_boat(cls, boat):
key = '{}_{}'.format(cls.__name__, boat.id)
cached = mc_client.get(key)
if cached is not None:
return pickle.loads(cached)
@classmethod
@oboe.profile_function('Telemetry.get_for_boat')
def get_for_boat(cls, boat):
""" try to fetch the latest telemetry from cache first, if not grab from db and cache for next time """
telemetry = Telemetry.from_cache_for_boat(boat)
        if telemetry is None:
            telemetry = Telemetry.from_db_for_boat(boat)
            if telemetry is not None:
                telemetry.put_cache()
return telemetry
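# End-to-end ingestion sketch (beacon payload is illustrative; real fields
# come from the AIVDM decoder):
#
#   beacon = {'id': 1, 'mmsi': 316028554, 'x': -123.1, 'y': 49.3, 'sog': 5.2}
#   boat = Boat.from_beacon(beacon)
#   telemetry = Telemetry.from_beacon(beacon)
#   if boat is not None and telemetry is not None:
#       telemetry.set_boat(boat)
#       telemetry.smart_save()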
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bypass TPUEstimator for ResNet-50 Train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import threading
import time
from absl import flags
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.data.util import nest as data_nest
FLAGS = flags.FLAGS
_INITIAL_LOSS = 1e7
def device_for_tpu_core(task=0, core=0):
return "/job:tpu_worker/task:%d/device:TPU_REPLICATED_CORE:%d" % (task, core)
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=1):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
ops = op_fn()
if not isinstance(ops, list):
ops = [ops]
with tf.control_dependencies(ops):
return i + 1
return tf.while_loop(
lambda i: tf.less(i, n),
computation, [tf.constant(0)],
parallel_iterations=parallel_iterations)
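# Illustrative use: run a variable increment 10 times inside one session.run,
# one iteration at a time (the names below are for the sketch only):
#
#   v = tf.Variable(0)
#   loop = wrap_computation_in_while_loop(lambda: v.assign_add(1), n=10)
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     sess.run(loop)  # v ends up at 10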
def tpu_ordinal_fn(shard_index_in_host):
"""Return the TPU ordinal associated with a shard.
Required because the enqueue ops are placed on CPU.
Args:
shard_index_in_host: the shard index
Returns:
The ordinal of the TPU device the shard's infeed should be placed on.
"""
return shard_index_in_host % FLAGS.tpu_cores_per_host
class TrainRunner(object):
"""Remove init overheads in TPU Estimator via direct session.run calls."""
def __init__(self, iterations, train_steps):
tf.logging.info("TrainRunner: constructor")
self.feature_structure = {}
self.loss = None
self.infeed_queue = []
self.enqueue_ops = []
self.dataset_initializer = []
self.iterations = iterations
self.sess = None
self.input_sess = None
self.infeed_thread = None
if train_steps % iterations != 0:
train_steps = iterations * int(math.ceil(train_steps / iterations))
self.train_steps = train_steps
self.input_graph = tf.Graph()
tpu_init = [tpu.initialize_system()]
self.tpu_shutdown = tpu.shutdown_system()
self.cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
self.config = tf.ConfigProto(operation_timeout_in_ms=600 * 60 * 1000,
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)),
isolate_session_state=True)
cluster_spec = self.cluster_resolver.cluster_spec()
if cluster_spec:
self.config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
self.init_sess = tf.Session(self.cluster_resolver.get_master(), config=self.config)
self.init_sess.run(tpu_init)
def device_for_host(self, task=0, cpu=0):
job_name = self.cluster_resolver.get_job_name() or "tpu_worker"
return "/job:%s/task:%d/device:CPU:%d" % (job_name, task, cpu)
def build_enqueue_ops(self, input_fn, params, host_id):
"""Build enqueue operations for the input pipeline in a given host.
Args:
input_fn: dataset input graph generation function
params: input function parameters
host_id: host identifier
"""
iparams = {}
iparams["batch_size"] = params["batch_size"] // FLAGS.num_cores
iparams["dataset_num_shards"] = FLAGS.num_cores // FLAGS.tpu_cores_per_host
def get_enqueue_ops_fn():
"""Generate the enqueue ops graph function."""
iparams["dataset_index"] = host_id
dataset = input_fn(iparams)
iterator = dataset.make_initializable_iterator()
self.dataset_initializer.append(iterator.initializer)
def enqueue_ops_fn():
"""Generate the infeed enqueue ops graph."""
per_host_sharded_inputs = []
control_deps = []
with tf.device(self.device_for_host(task=host_id)):
for _ in range(FLAGS.tpu_cores_per_host):
with tf.control_dependencies(control_deps):
features, labels = iterator.get_next()
self.feature_structure["features"] = features
self.feature_structure["labels"] = labels
flattened_inputs = data_nest.flatten(self.feature_structure)
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed = tpu.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
self.infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)
return enqueue_ops_fn
with self.input_graph.as_default():
with tf.device(self.device_for_host(host_id)):
self.enqueue_ops.append(
wrap_computation_in_while_loop(
get_enqueue_ops_fn(),
n=self.train_steps,
parallel_iterations=1))
def initialize(self, input_fn, model_fn, params):
"""Build graphs for the TPU device and the input pipelines.
Args:
input_fn: Dataset input graph generation function
model_fn: Model definition function
params: Parameters to input and model functions
"""
tf.logging.info("TrainRunner: initialize method")
def infeed_thread_fn():
"""Build and infeed session.run calls in a background thread."""
i = 1
while i < FLAGS.num_cores // FLAGS.tpu_cores_per_host:
self.build_enqueue_ops(input_fn, params, i)
i += 1
      # Build infeed session
self.input_sess = tf.Session(
self.cluster_resolver.get_master(),
graph=self.input_graph,
config=self.config)
self.input_sess.run(self.dataset_initializer)
# Run infeed session.run calls
self.input_sess.run([self.enqueue_ops])
self.build_enqueue_ops(input_fn, params, 0)
def get_tpu_step(mparams):
"""Get the TPU graph generation function."""
def tpu_step(loss):
"""Generate the TPU graph."""
del loss
values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
values)
features = unflattened_inputs["features"]
labels = unflattened_inputs["labels"]
estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
mparams)
loss, train_op = estimator_spec.loss, estimator_spec.train_op
with tf.device(device_for_tpu_core()):
with tf.control_dependencies([train_op]):
return tf.identity(loss)
return tpu_step
tpu_step = get_tpu_step(params)
def tpu_loop():
return tpu.repeat(self.iterations, tpu_step, [_INITIAL_LOSS])
(self.loss,) = tpu.shard(
tpu_loop,
inputs=[],
num_shards=FLAGS.num_cores,
outputs_from_all_shards=False,
)
initializer = tf.global_variables_initializer()
self.saver = tf.train.Saver()
# Build tpu train model session and initialize graph
self.sess = tf.Session(
self.cluster_resolver.get_master(),
config=self.config)
self.sess.run(initializer)
# Complete infeed graph generation and session.run calls
self.infeed_thread = threading.Thread(target=infeed_thread_fn)
self.infeed_thread.start()
def train(self, num_threads=2):
"""Run the Train steps on the TPU device.
Args:
num_threads: number of outstanding checkpointing threads
"""
def checkpoint_thread_fn(saver, sess):
saver.save(sess, FLAGS.model_dir + "/model.ckpt-%d" % (cur_step))
cur_step = 0
thread_id = 0
checkpoint_threads = []
tf.logging.info("TrainRunner: step %d", cur_step)
for i in range(num_threads):
checkpoint_threads.append(None)
while cur_step < self.train_steps:
start = time.time()
tf.logging.info("TrainRunner: start next %d steps", self.iterations)
cur_step += self.iterations
loss = self.sess.run([self.loss])
if checkpoint_threads[thread_id] is not None:
checkpoint_threads[thread_id].join()
checkpoint_threads[thread_id] = threading.Thread(
target=checkpoint_thread_fn, args=(self.saver, self.sess))
checkpoint_threads[thread_id].start()
thread_id += 1
if thread_id >= num_threads:
thread_id = 0
end = time.time()
tf.logging.info(
"TrainRunner: step {} loss {} step time {} sec {} examples/sec"
.format(cur_step, loss, end - start,
self.iterations * FLAGS.train_batch_size / (end - start)))
self.infeed_thread.join()
for i in range(num_threads):
if checkpoint_threads[i] is not None:
checkpoint_threads[i].join()
checkpoint_threads[i] = None
def shutdown(self):
self.init_sess.run(self.tpu_shutdown)
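# A minimal driver sketch (flag values and the input/model functions are
# assumptions):
#
#   runner = TrainRunner(iterations=100, train_steps=1000)
#   runner.initialize(input_fn, model_fn, {"batch_size": 1024})
#   runner.train()
#   runner.shutdown()
#
# initialize() builds the TPU and infeed graphs and must run before train();
# shutdown() releases the TPU system.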
|
|
from Tkinter import *
import random
from basics import *
from node import *
import timeManager
# Some ugly constants to control some behavior
WINDOW_WIDTH = 1000
WINDOW_HEIGHT = 600
BORDER_MARGIN = 5
TARGET_UPDATE_INTERVAL = 500
PACKET_GENERATION_INTERVAL = 500
CLICK_RADIUS = 9
# Keeping track of an ugly global state
STATE_IDLE = "Idle"
STATE_MOVING = "Moving"
STATE_CONNECTING_START = "Connecting start"
STATE_CONNECTING_FINISH = "Connecting finish"
STATE_PACKET_START = "Packet start"
STATE_PACKET_FINISH = "Packet finish"
state = STATE_IDLE
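# State transitions (descriptive summary; the handlers below enforce these):
#   IDLE -> MOVING               click an existing node (clicking empty space adds a node)
#   MOVING -> IDLE               click again to drop the node
#   IDLE -> CONNECTING_START     press "c"; clicking a source node -> CONNECTING_FINISH
#   CONNECTING_FINISH -> IDLE    click a destination node (or press "c" to cancel)
#   IDLE -> PACKET_START         press "p"; clicking a source node -> PACKET_FINISH
#   PACKET_FINISH -> IDLE        click a destination node (or press "p" to cancel)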
# Some ugly globals
timeMgr = timeManager.TimeManager()
nodes = []
nodeBeingMoved = None
newConnectionSourceNode = None
newPacketSourceNode = None
paused = False
generatePackets = False
# Set up the window and put a canvas in it
master = Tk()
master.resizable(width=False, height=False)
canvas = Canvas(master, width=WINDOW_WIDTH, height=WINDOW_HEIGHT, highlightthickness=0)
canvas.pack()
# Updating the nodes and drawing each frame
def drawAll():
global canvas
global nodes
global timeMgr
timeMgr.startFrameTimer()
# Draw
timeMgr.startTimer("draw")
canvas.delete("all")
canvas.create_rectangle(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT, fill="black")
for node in nodes:
for connection in node.connections:
connection.draw(canvas)
for node in nodes:
node.draw(canvas)
drawInfo(canvas)
timeMgr.stopTimer("draw")
# Continue the loop by starting another timer
millisToNextFrame = timeMgr.stopFrameTimer()
master.after(millisToNextFrame, drawAll)
def drawInfo(canvas):
global state
global timeMgr
global paused
timeMgr.draw(canvas)
canvas.create_text(WINDOW_WIDTH - BORDER_MARGIN, 2, text=state, anchor=NE, fill="white")
if paused:
pausedText = "Paused"
else:
pausedText = "Running"
canvas.create_text(WINDOW_WIDTH - BORDER_MARGIN, 15, text=pausedText, anchor=NE, fill="white")
if generatePackets:
generatingText = "Making Packets"
else:
generatingText = "Not Making Packets"
canvas.create_text(WINDOW_WIDTH - BORDER_MARGIN, 28, text=generatingText, anchor=NE, fill="white")
def updateAll():
global nodes
global paused
if not paused:
timeMgr.startTimer("update")
updateNodesAndConnections()
timeMgr.stopTimer("update")
master.after(TARGET_UPDATE_INTERVAL, updateAll)
def updateNodesAndConnections():
global nodes
for node in nodes:
node.update()
for node in nodes:
node.updateConnections()
def manualUpdate(event):
global paused
if paused:
updateNodesAndConnections()
# Making random packets to send around the network
def toggleGeneratePackets(event):
global generatePackets
generatePackets = not generatePackets
if generatePackets:
generatePacket()
def generatePacket():
global nodes
global state
global paused
global generatePackets
if generatePackets:
if len(nodes) > 2 and state is STATE_IDLE and not paused:
sourceNode = random.choice(nodes)
destNode = random.choice(nodes)
while destNode is sourceNode:
destNode = random.choice(nodes)
packet = Packet(sourceNode, destNode, "Hello, world!")
sourceNode.addPacketToBuffer(packet, None)
master.after(PACKET_GENERATION_INTERVAL, generatePacket)
# Handling mouse input
def mouseClicked(event):
global state
global nodes
clickPoint = Point(event.x, event.y)
clampPointToBounds(clickPoint, WINDOW_WIDTH, WINDOW_HEIGHT, BORDER_MARGIN)
nearbyNode = getNearbyNode(clickPoint)
if state is STATE_IDLE:
if nearbyNode is None:
placeNewNode(clickPoint)
else:
startMovingNode(nearbyNode)
elif state is STATE_MOVING:
stopMovingNode()
elif state is STATE_CONNECTING_START:
if nearbyNode is not None:
startConnection(nearbyNode)
elif state is STATE_CONNECTING_FINISH:
if nearbyNode is not None:
finishConnection(nearbyNode)
elif state is STATE_PACKET_START:
if nearbyNode is not None:
startPacket(nearbyNode)
elif state is STATE_PACKET_FINISH:
if nearbyNode is not None:
finishPacket(nearbyNode)
def mouseMoved(event):
global state
global nodes
mousePos = Point(event.x, event.y)
clampPointToBounds(mousePos, WINDOW_WIDTH, WINDOW_HEIGHT, BORDER_MARGIN)
if state is STATE_MOVING:
nodeBeingMoved.location.setToPoint(mousePos)
def getNearbyNode(clickPoint):
global nodes
closestNode = None
closestDistance = None
for node in nodes:
distance = node.location.distanceTo(clickPoint)
if closestDistance is None or distance < closestDistance:
closestNode = node
closestDistance = distance
if closestDistance is not None and closestDistance < CLICK_RADIUS:
return closestNode
else:
return None
# Placing new nodes
def placeNewNode(clickPoint):
global nodes
newNode = Node(clickPoint)
nodes.append(newNode)
# Moving nodes
def startMovingNode(node):
global state
global nodeBeingMoved
nodeBeingMoved = node
state = STATE_MOVING
def stopMovingNode():
global state
global nodeBeingMoved
nodeBeingMoved = None
state = STATE_IDLE
# Connecting nodes
def toggleConnecting(event):
global state
global newConnectionSourceNode
if state is STATE_IDLE:
state = STATE_CONNECTING_START
elif state is STATE_CONNECTING_START:
state = STATE_IDLE
elif state is STATE_CONNECTING_FINISH:
state = STATE_IDLE
newConnectionSourceNode.clearPendingAction()
newConnectionSourceNode = None
def startConnection(sourceNode):
global state
global newConnectionSourceNode
newConnectionSourceNode = sourceNode
newConnectionSourceNode.setPendingAction()
state = STATE_CONNECTING_FINISH
def finishConnection(destNode):
global state
global newConnectionSourceNode
try:
newConnectionSourceNode.connectTo(destNode)
destNode.connectTo(newConnectionSourceNode)
except NodeError:
pass
    finally:
        newConnectionSourceNode.clearPendingAction()
        newConnectionSourceNode = None
        state = STATE_IDLE
# Making and routing packets
def togglePacketRouting(event):
global state
global newPacketSourceNode
if state is STATE_IDLE:
state = STATE_PACKET_START
elif state is STATE_PACKET_START:
state = STATE_IDLE
elif state is STATE_PACKET_FINISH:
state = STATE_IDLE
        newPacketSourceNode.clearPendingAction()
        newPacketSourceNode = None
def startPacket(sourceNode):
global state
global newPacketSourceNode
newPacketSourceNode = sourceNode
newPacketSourceNode.setPendingAction()
state = STATE_PACKET_FINISH
def finishPacket(destNode):
global state
global newPacketSourceNode
packet = Packet(newPacketSourceNode, destNode, "Hello, world!")
newPacketSourceNode.addPacketToBuffer(packet, None)
newPacketSourceNode.clearPendingAction()
    newPacketSourceNode = None
state = STATE_IDLE
# Generating a random network
def autogenerateNetwork(event):
global nodes
# First get rid of the existing nodes
reset(None)
# Generate all of the nodes at random positions
numberOfNodes = random.randint(50, 75)
for i in range(0, numberOfNodes):
xCoord = int(WINDOW_WIDTH * ((random.random() * 0.5) + 0.25))
yCoord = int(WINDOW_HEIGHT * ((random.random() * 0.5) + 0.25))
nodeLocation = Point(xCoord, yCoord)
clampPointToBounds(nodeLocation, WINDOW_WIDTH, WINDOW_HEIGHT, BORDER_MARGIN)
placeNewNode(nodeLocation)
# Randomly connect nodes to other nodes nearby
numberOfConnectionsToMake = random.randint(int(numberOfNodes * 0.9), int(numberOfNodes * 1.3))
maxNodeDistance = 90
while numberOfConnectionsToMake > 0:
try:
randomNode = random.choice(nodes)
otherNode = random.choice(nodes)
if randomNode.distanceTo(otherNode) < maxNodeDistance:
randomNode.connectTo(otherNode)
otherNode.connectTo(randomNode)
numberOfConnectionsToMake = numberOfConnectionsToMake - 1
except NodeError:
pass
# Remove nodes without any connections
nodes = [node for node in nodes if len(node.connections) > 0]
# Clearing the screen, quitting, pausing
def reset(event):
global nodes
if state is STATE_IDLE:
        del nodes[:]
def togglePause(event):
global paused
paused = not paused
def quit(event):
master.quit()
# Bind some I/O
master.bind("<Button-1>", mouseClicked)
master.bind("<Motion>", mouseMoved)
master.bind("g", autogenerateNetwork)
master.bind("x", toggleGeneratePackets)
master.bind("z", togglePause)
master.bind("s", manualUpdate)
master.bind("c", toggleConnecting)
master.bind("p", togglePacketRouting)
master.bind("r", reset)
master.bind("q", quit)
# Start rendering things
drawAll()
updateAll()
mainloop()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import time
from collections import defaultdict
from django.utils import html
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST
import desktop.conf
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from desktop.models import Document, DocumentTag, Document2, Directory
LOG = logging.getLogger(__name__)
def _get_docs(user):
history_tag = DocumentTag.objects.get_history_tag(user)
dir_ids = [directory.doc.get().id for directory in Directory.objects.filter(doc__isnull=False)]
editor_ids = [document.doc.get().id for document in Document2.objects.filter(type__startswith='query', doc__isnull=False)]
query = Document.objects.get_docs(user) \
.exclude(tags__in=[history_tag]) \
.exclude(id__in=dir_ids) \
.exclude(id__in=editor_ids)
# Work around Oracle not supporting SELECT DISTINCT with the CLOB type.
if desktop.conf.DATABASE.ENGINE.get() == 'django.db.backends.oracle':
query = query.only('id')
else:
query = query.defer(None)
docs = query.order_by('-last_modified')[:100]
if desktop.conf.DATABASE.ENGINE.get() == 'django.db.backends.oracle':
ids = [doc.id for doc in docs]
docs = Document.objects.filter(id__in=ids).defer(None)
docs = docs \
.select_related('owner', 'content_type') \
.prefetch_related('tags', 'documentpermission_set')
return docs
def massaged_tags_for_json(docs, user):
"""
var TAGS_DEFAULTS = {
'history': {'name': 'History', 'id': 1, 'docs': [1], 'type': 'history'},
'trash': {'name': 'Trash', 'id': 3, 'docs': [2]},
'mine': [{'name': 'default', 'id': 2, 'docs': [3]}, {'name': 'web', 'id': 3, 'docs': [3]}],
'notmine': [{'name': 'example', 'id': 20, 'docs': [10]}, {'name': 'ex2', 'id': 30, 'docs': [10, 11]}]
};
"""
ts = {
'trash': {},
'history': {},
'mine': [],
'notmine': [],
}
sharers = defaultdict(list)
trash_tag = DocumentTag.objects.get_trash_tag(user)
history_tag = DocumentTag.objects.get_history_tag(user)
  tag_doc_mapping = defaultdict(set)  # Set of documents available in each tag
for doc in docs:
for tag in doc.tags.all():
tag_doc_mapping[tag].add(doc)
ts['trash'] = massaged_tags(trash_tag, tag_doc_mapping)
ts['history'] = massaged_tags(history_tag, tag_doc_mapping)
tags = list(set(tag_doc_mapping.keys() + [tag for tag in DocumentTag.objects.get_tags(user=user)])) # List of all personal and shared tags
for tag in tags:
massaged_tag = massaged_tags(tag, tag_doc_mapping)
if tag == trash_tag:
ts['trash'] = massaged_tag
elif tag == history_tag:
ts['history'] = massaged_tag
elif tag.owner == user:
ts['mine'].append(massaged_tag)
else:
sharers[tag.owner].append(massaged_tag)
ts['notmine'] = [{'name': sharer.username, 'projects': projects} for sharer, projects in sharers.iteritems()]
# Remove from my tags the trashed and history ones
mine_filter = set(ts['trash']['docs'] + ts['history']['docs'])
for tag in ts['mine']:
tag['docs'] = [doc_id for doc_id in tag['docs'] if doc_id not in mine_filter]
return ts
def massaged_tags(tag, tag_doc_mapping):
return {
'id': tag.id,
'name': html.conditional_escape(tag.tag),
'owner': tag.owner.username,
    'docs': [doc.id for doc in tag_doc_mapping[tag]]  # Could fetch these with one grouped query
}
def massage_permissions(document):
"""
Returns the permissions for a given document as a dictionary
"""
read_perms = document.list_permissions(perm='read')
write_perms = document.list_permissions(perm='write')
return {
'perms': {
'read': {
'users': [{'id': perm_user.id, 'username': perm_user.username} \
for perm_user in read_perms.users.all()],
'groups': [{'id': perm_group.id, 'name': perm_group.name} \
for perm_group in read_perms.groups.all()]
},
'write': {
'users': [{'id': perm_user.id, 'username': perm_user.username} \
for perm_user in write_perms.users.all()],
'groups': [{'id': perm_group.id, 'name': perm_group.name} \
for perm_group in write_perms.groups.all()]
}
}
}
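# Illustrative shape of the returned dict (ids and names are made up):
#
#   {'perms': {'read':  {'users': [{'id': 1, 'username': 'bob'}], 'groups': []},
#              'write': {'users': [], 'groups': [{'id': 2, 'name': 'admins'}]}}}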
def massaged_documents_for_json(documents, user):
"""
var DOCUMENTS_DEFAULTS = {
'1': {
'id': 1,
'name': 'my query history', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'2': {
'id': 2,
'name': 'my query 2 trashed', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'3': {
'id': 3,
'name': 'my query 3 tagged twice', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'10': {
'id': 10,
'name': 'my query 3 shared', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'11': {
'id': 11,
'name': 'my query 4 shared', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
}
};
"""
docs = {}
for document in documents:
try:
url = document.content_object and hasattr(document.content_object, 'get_absolute_url') and document.content_object.get_absolute_url() or ''
    except Exception:
LOG.exception('failed to get absolute url')
# If app of document is disabled
url = ''
docs[document.id] = massage_doc_for_json(document, user, url)
return docs
@require_GET
def get_document(request):
response = {'status': -1, 'message': ''}
doc_id = request.GET.get('id', '')
if doc_id.isdigit():
doc = None
try:
doc = Document.objects.get(id=doc_id)
except Document.DoesNotExist:
pass
if doc and doc.can_read(request.user):
response = massage_doc_for_json(doc, request.user)
else:
      response['message'] = _('get_document requires read privilege or document does not exist for: %s') % doc_id
else:
response['message'] = _('get_document requires an id integer parameter: %s') % doc_id
return JsonResponse(response)
def massage_doc_for_json(document, user, url=''):
read_perms = document.list_permissions(perm='read')
write_perms = document.list_permissions(perm='write')
massaged_doc = {
'id': document.id,
'contentType': html.conditional_escape(document.content_type.name),
'icon': document.icon,
'name': html.conditional_escape(document.name),
'url': html.conditional_escape(url),
'description': html.conditional_escape(document.description),
'tags': [{'id': tag.id, 'name': html.conditional_escape(tag.tag)} \
for tag in document.tags.all()],
'owner': document.owner.username,
'isMine': document.owner == user,
'lastModified': document.last_modified.strftime("%x %X"),
'lastModifiedInMillis': time.mktime(document.last_modified.timetuple())
}
permissions = massage_permissions(document)
massaged_doc.update(permissions)
return massaged_doc
def valid_project(name):
project_doc = DocumentTag.objects.filter(tag=name)
return len(project_doc) == 0
@require_POST
def add_tag(request):
response = {'status': -1, 'message': ''}
try:
validstatus = valid_project(name=request.POST['name'])
if validstatus:
tag = DocumentTag.objects.create_tag(request.user, request.POST['name'])
response['name'] = request.POST['name']
response['id'] = tag.id
response['docs'] = []
response['owner'] = request.user.username
response['status'] = 0
else:
response['status'] = -1
except KeyError, e:
response['message'] = _('Form is missing %s field') % e.message
except Exception, e:
response['message'] = force_unicode(e)
return JsonResponse(response)
@require_POST
def tag(request):
response = {'status': -1, 'message': ''}
request_json = json.loads(request.POST['data'])
try:
tag = DocumentTag.objects.tag(request.user, request_json['doc_id'], request_json.get('tag'), request_json.get('tag_id'))
response['tag_id'] = tag.id
response['status'] = 0
except KeyError, e:
response['message'] = _('Form is missing %s field') % e.message
except Exception, e:
response['message'] = force_unicode(e)
return JsonResponse(response)
@require_POST
def update_tags(request):
response = {'status': -1, 'message': ''}
request_json = json.loads(request.POST['data'])
try:
doc = DocumentTag.objects.update_tags(request.user, request_json['doc_id'], request_json['tag_ids'])
response['doc'] = massage_doc_for_json(doc, request.user)
response['status'] = 0
except KeyError, e:
response['message'] = _('Form is missing %s field') % e.message
except Exception, e:
response['message'] = force_unicode(e)
return JsonResponse(response)
@require_POST
def remove_tag(request):
response = {'status': -1, 'message': _('Error')}
try:
DocumentTag.objects.delete_tag(request.POST['tag_id'], request.user)
response['message'] = _('Project removed!')
response['status'] = 0
except KeyError, e:
response['message'] = _('Form is missing %s field') % e.message
except Exception, e:
response['message'] = force_unicode(e)
return JsonResponse(response)
@require_POST
def update_permissions(request):
response = {'status': -1, 'message': _('Error')}
data = json.loads(request.POST['data'])
doc_id = request.POST['doc_id']
try:
doc = Document.objects.get_doc_for_writing(doc_id, request.user)
doc.sync_permissions(data)
response['message'] = _('Permissions updated!')
response['status'] = 0
response['doc'] = massage_doc_for_json(doc, request.user)
except KeyError, e:
response['message'] = _('Form is missing %s field') % e.message
except Exception, e:
LOG.exception(e.message)
response['message'] = force_unicode(e)
return JsonResponse(response)
|
|
#!/usr/bin/env python
# Copyright 2012 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Runs the whole set of swarming client unit tests on swarming itself.
This is done in a few steps:
- Archive the whole directory as a single .isolated file.
- Create one test-specific .isolated for each test to run. The file is created
directly and archived manually with isolateserver.py.
- Trigger each of these test-specific .isolated file per OS.
- Get all results out of order.
"""
__version__ = '0.1'
import glob
import logging
import os
import subprocess
import sys
import tempfile
import time
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Must be first import.
import parallel_execution
from third_party import colorama
from third_party.depot_tools import fix_encoding
from utils import file_path
from utils import tools
def check_output(cmd):
return subprocess.check_output([sys.executable] + cmd, cwd=ROOT_DIR)
def archive_tree(isolate_server):
"""Archives a whole tree and return the sha1 of the .isolated file.
Manually creates a temporary isolated file and archives it.
"""
cmd = [
'isolateserver.py', 'archive', '--isolate-server', isolate_server,
ROOT_DIR, '--blacklist="\\.git"',
]
if logging.getLogger().isEnabledFor(logging.INFO):
cmd.append('--verbose')
out = check_output(cmd)
return out.split()[0]
def archive_isolated_triggers(isolate_server, tree_isolated, tests):
"""Creates and archives all the .isolated files for the tests at once.
Archiving them in one batch is faster than archiving each file individually.
Also the .isolated files can be reused across OSes, reducing the amount of
I/O.
Returns:
list of (test, sha1) tuples.
"""
logging.info('archive_isolated_triggers(%s, %s)', tree_isolated, tests)
tempdir = tempfile.mkdtemp(prefix=u'run_swarming_tests_on_swarming_')
try:
isolateds = []
for test in tests:
test_name = os.path.basename(test)
# Creates a manual .isolated file. See
# https://code.google.com/p/swarming/wiki/IsolatedDesign for more details.
isolated = {
'algo': 'sha-1',
'command': ['python', test],
'includes': [tree_isolated],
'read_only': 0,
'version': '1.4',
}
v = os.path.join(tempdir, test_name + '.isolated')
tools.write_json(v, isolated, True)
isolateds.append(v)
cmd = [
'isolateserver.py', 'archive', '--isolate-server', isolate_server,
] + isolateds
if logging.getLogger().isEnabledFor(logging.INFO):
cmd.append('--verbose')
items = [i.split() for i in check_output(cmd).splitlines()]
assert len(items) == len(tests)
assert all(
items[i][1].endswith(os.path.basename(tests[i]) + '.isolated')
for i in xrange(len(tests)))
return zip(tests, [i[0] for i in items])
finally:
file_path.rmtree(tempdir)
def run_swarming_tests_on_swarming(
swarming_server, isolate_server, priority, oses, tests, logs,
no_idempotent):
"""Archives, triggers swarming jobs and gets results."""
print('Archiving the whole tree.')
start = time.time()
tree_isolated = archive_tree(isolate_server)
# Create and archive all the .isolated files.
isolateds = archive_isolated_triggers(isolate_server, tree_isolated, tests)
print('Archival took %3.2fs' % (time.time() - start))
exploded = []
for test_path, isolated_hash in isolateds:
logging.debug('%s: %s', test_path, isolated_hash)
test_name = os.path.basename(test_path).split('.')[0]
for platform in oses:
exploded.append((test_name, platform, isolated_hash))
tasks = [
(
parallel_execution.task_to_name(name, {'os': platform}, isolated_hash),
isolated_hash,
{'os': platform},
) for name, platform, isolated_hash in exploded
]
extra_args = [
'--hard-timeout', '180',
]
if not no_idempotent:
extra_args.append('--idempotent')
if priority:
extra_args.extend(['--priority', str(priority)])
print('Using priority %s' % priority)
result = 0
for failed_task in parallel_execution.run_swarming_tasks_parallel(
swarming_server, isolate_server, extra_args, tasks):
test_name, dimensions, stdout = failed_task
if logs:
      # Write the logs as they are retrieved.
if not os.path.isdir(logs):
os.makedirs(logs)
name = '%s_%s.log' % (dimensions['os'], test_name.split('/', 1)[0])
with open(os.path.join(logs, name), 'wb') as f:
f.write(stdout)
result = 1
return result
def main():
parser = parallel_execution.OptionParser(
usage='%prog [options]', version=__version__)
parser.add_option(
'--logs',
help='Destination where to store the failure logs (recommended!)')
parser.add_option('-o', '--os', help='Run tests only on this OS')
parser.add_option(
'-t', '--test', action='append',
      help='Run only these tests; can be specified multiple times')
parser.add_option(
'--no-idempotent', action='store_true',
help='Do not use --idempotent to detect flaky tests')
options, args = parser.parse_args()
if args:
parser.error('Unsupported argument %s' % args)
oses = ['Linux', 'Mac', 'Windows']
tests = [
os.path.relpath(i, ROOT_DIR)
for i in (
glob.glob(os.path.join(ROOT_DIR, 'tests', '*_test.py')) +
glob.glob(os.path.join(ROOT_DIR, 'googletest', 'tests', '*_test.py')))
]
valid_tests = sorted(map(os.path.basename, tests))
assert len(valid_tests) == len(set(valid_tests)), (
'Can\'t have 2 tests with the same base name')
if options.test:
for t in options.test:
if not t in valid_tests:
parser.error(
'--test %s is unknown. Valid values are:\n%s' % (
t, '\n'.join(' ' + i for i in valid_tests)))
filters = tuple(os.path.sep + t for t in options.test)
tests = [t for t in tests if t.endswith(filters)]
if options.os:
if options.os not in oses:
parser.error(
'--os %s is unknown. Valid values are %s' % (
options.os, ', '.join(sorted(oses))))
oses = [options.os]
if sys.platform in ('win32', 'cygwin'):
# If we are on Windows, don't generate the tests for Linux and Mac since
# they use symlinks and we can't create symlinks on windows.
oses = ['Windows']
    if options.os != 'Windows':
print('Linux and Mac tests skipped since running on Windows.')
return run_swarming_tests_on_swarming(
options.swarming,
options.isolate_server,
options.priority,
oses,
tests,
options.logs,
options.no_idempotent)
if __name__ == '__main__':
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
sys.exit(main())
|
|
from __future__ import print_function, unicode_literals
import logging
import pkg_resources
import re
import six
import sys
from rbtools.utils.process import die, execute
# The clients are lazy loaded via load_scmclients()
SCMCLIENTS = None
class PatchResult(object):
"""The result of a patch operation.
This stores state on whether the patch could be applied (fully or
partially), whether there are conflicts that can be resolved (as in
conflict markers, not reject files), which files conflicted, and the
patch output.
"""
def __init__(self, applied, has_conflicts=False,
conflicting_files=[], patch_output=None):
self.applied = applied
self.has_conflicts = has_conflicts
self.conflicting_files = conflicting_files
self.patch_output = patch_output
class SCMClient(object):
"""A base representation of an SCM tool.
These are used for fetching repository information and generating diffs.
"""
name = None
supports_diff_extra_args = False
supports_diff_exclude_patterns = False
supports_patch_revert = False
can_amend_commit = False
can_merge = False
can_push_upstream = False
can_delete_branch = False
def __init__(self, config=None, options=None):
self.config = config or {}
self.options = options
self.capabilities = None
def get_repository_info(self):
return None
def check_options(self):
pass
def get_changenum(self, revisions):
"""Return the change number for the given revisions.
This is only used when the client is supposed to send a change number
to the server (such as with Perforce).
Args:
revisions (dict):
A revisions dictionary as returned by ``parse_revision_spec``.
Returns:
unicode:
The change number to send to the Review Board server.
"""
return None
def scan_for_server(self, repository_info):
"""Find the server path.
This will search for the server name in the .reviewboardrc config
files. These are loaded with the current directory first, and searching
through each parent directory, and finally $HOME/.reviewboardrc last.
"""
return self._get_server_from_config(self.config, repository_info)
def parse_revision_spec(self, revisions=[]):
"""Parses the given revision spec.
The 'revisions' argument is a list of revisions as specified by the
user. Items in the list do not necessarily represent a single revision,
since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
SCMTool-specific overrides of this method are expected to deal with
such syntaxes.
This will return a dictionary with the following keys:
'base': A revision to use as the base of the resulting diff.
'tip': A revision to use as the tip of the resulting diff.
'parent_base': (optional) The revision to use as the base of a
parent diff.
These will be used to generate the diffs to upload to Review Board (or
print). The diff for review will include the changes in (base, tip],
and the parent diff (if necessary) will include (parent, base].
If a single revision is passed in, this will return the parent of that
revision for 'base' and the passed-in revision for 'tip'.
If zero revisions are passed in, this will return revisions relevant
for the "current change". The exact definition of what "current" means
is specific to each SCMTool backend, and documented in the
implementation classes.
"""
return {
'base': None,
'tip': None,
}
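    # Illustrative result for a two-revision spec like "r1..r2", as handled by
    # SCMTool-specific subclasses (revision names are hypothetical):
    #
    #     {'base': 'r1', 'tip': 'r2'}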
def diff(self, revisions, include_files=[], exclude_patterns=[],
extra_args=[]):
"""
Returns the generated diff and optional parent diff for this
repository.
The return value must be a dictionary, and must have, at a minimum,
a 'diff' field. A 'parent_diff' can also be provided.
It may also return 'base_commit_id', representing the revision/ID of
the commit that the diff or parent diff is based on. This exists
        because in some diff formats, this may differ from what's provided
in the diff.
"""
return {
'diff': None,
'parent_diff': None,
'base_commit_id': None,
}
def _get_server_from_config(self, config, repository_info):
if 'REVIEWBOARD_URL' in config:
return config['REVIEWBOARD_URL']
elif 'TREES' in config:
trees = config['TREES']
if not isinstance(trees, dict):
die('Warning: "TREES" in config file is not a dict!')
# If repository_info is a list, check if any one entry is in trees.
path = None
if isinstance(repository_info.path, list):
for path in repository_info.path:
if path in trees:
break
else:
path = None
elif repository_info.path in trees:
path = repository_info.path
if path and 'REVIEWBOARD_URL' in trees[path]:
return trees[path]['REVIEWBOARD_URL']
return None
def _get_p_number(self, base_path, base_dir):
"""Return the appropriate value for the -p argument to patch.
This function returns an integer. If the integer is -1, then the -p
option should not be provided to patch. Otherwise, the return value is
the argument to patch -p.
"""
if base_path and base_dir.startswith(base_path):
return base_path.count('/') + 1
else:
return -1
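    # Worked example: base_path='/trunk/myproject' and
    # base_dir='/trunk/myproject/src' give '/trunk/myproject'.count('/') + 1 == 3,
    # so the diff would be applied with `patch -p3`.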
def _strip_p_num_slashes(self, files, p_num):
"""Strips the smallest prefix containing p_num slashes from file names.
To match the behavior of the patch -pX option, adjacent slashes are
counted as a single slash.
"""
if p_num > 0:
regex = re.compile(r'[^/]*/+')
return [regex.sub('', f, p_num) for f in files]
else:
return files
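    # Worked example: _strip_p_num_slashes(['a//b/c.txt'], 2) -> ['c.txt'];
    # the doubled slash counts as one, matching `patch -p2` semantics.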
def _execute(self, cmd, *args, **kwargs):
"""
Prints the results of the executed command and returns
the data result from execute.
"""
return execute(cmd, ignore_errors=True, *args, **kwargs)
def has_pending_changes(self):
"""Checks if there are changes waiting to be committed.
Derived classes should override this method if they wish to support
checking for pending changes.
"""
raise NotImplementedError
def apply_patch(self, patch_file, base_path, base_dir, p=None,
revert=False):
"""Apply the patch and return a PatchResult indicating its success."""
# Figure out the -p argument for patch. We override the calculated
# value if it is supplied via a commandline option.
p_num = p or self._get_p_number(base_path, base_dir)
cmd = ['patch']
if revert:
cmd.append('-R')
if p_num >= 0:
cmd.append('-p%d' % p_num)
cmd.extend(['-i', six.text_type(patch_file)])
# Ignore return code 2 in case the patch file consists of only empty
# files, which 'patch' can't handle. Other 'patch' errors also give
# return code 2, so we must check the command output.
rc, patch_output = execute(cmd, extra_ignore_errors=(2,),
return_error_code=True)
only_garbage_in_patch = ('patch: **** Only garbage was found in the '
'patch input.\n')
if (patch_output and patch_output.startswith('patch: **** ') and
patch_output != only_garbage_in_patch):
die('Failed to execute command: %s\n%s' % (cmd, patch_output))
# Check the patch for any added/deleted empty files to handle.
if self.supports_empty_files():
try:
with open(patch_file, 'rb') as f:
patch = f.read()
except IOError as e:
logging.error('Unable to read file %s: %s', patch_file, e)
return
patched_empty_files = self.apply_patch_for_empty_files(
patch, p_num, revert=revert)
# If there are no empty files in a "garbage-only" patch, the patch
# is probably malformed.
if (patch_output == only_garbage_in_patch and
not patched_empty_files):
die('Failed to execute command: %s\n%s' % (cmd, patch_output))
# TODO: Should this take into account apply_patch_for_empty_files ?
# The return value of that function is False both when it fails
# and when there are no empty files.
return PatchResult(applied=(rc == 0), patch_output=patch_output)
def create_commit(self, message, author, run_editor,
files=[], all_files=False):
"""Creates a commit based on the provided message and author.
Derived classes should override this method if they wish to support
committing changes to their repositories.
"""
raise NotImplementedError
def get_commit_message(self, revisions):
"""Returns the commit message from the commits in the given revisions.
This pulls out the first line from the commit messages of the
given revisions. That is then used as the summary.
"""
commit_message = self.get_raw_commit_message(revisions)
lines = commit_message.splitlines()
if not lines:
return None
result = {
'summary': lines[0],
}
# Try to pull the body of the commit out of the full commit
# description, so that we can skip the summary.
if len(lines) >= 3 and lines[0] and not lines[1]:
result['description'] = '\n'.join(lines[2:]).strip()
else:
result['description'] = commit_message
return result
def delete_branch(self, branch_name, merged_only=True):
"""Deletes the specified branch.
If merged_only is False, then the branch will be deleted even if not
yet merged into an upstream branch.
"""
raise NotImplementedError
def merge(self, target, destination, message, author, squash=False,
run_editor=False):
"""Merges the target branch with destination branch."""
raise NotImplementedError
def push_upstream(self, remote_branch):
"""Pushes the current branch to upstream."""
raise NotImplementedError
def get_raw_commit_message(self, revisions):
"""Extracts the commit messages on the commits in the given revisions.
Derived classes should override this method in order to allow callers
to fetch commit messages. This is needed for description guessing.
If a derived class is unable to fetch the description, ``None`` should
be returned.
Callers that need to differentiate the summary from the description
should instead use get_commit_message().
"""
raise NotImplementedError
def get_current_branch(self):
"""Returns the repository branch name of the current directory.
Derived classes should override this method if they are able to
determine the current branch of the working directory.
        If a derived class is unable to determine the branch,
``None`` should be returned.
"""
raise NotImplementedError
def supports_empty_files(self):
"""Check if the RB server supports added/deleted empty files.
This method returns False. To change this behaviour, override it in a
subclass.
"""
return False
def apply_patch_for_empty_files(self, patch, p_num, revert=False):
"""Return True if any empty files in the patch are applied.
If there are no empty files in the patch or if an error occurs while
applying the patch, we return False.
"""
raise NotImplementedError
def amend_commit_description(self, message, revisions=None):
"""Update a commit message to the given string.
The optional revisions argument exists to provide compatibility with
SCMs that allow modification of multiple changesets at any given time.
It takes a parsed revision spec, and will amend the change referenced
by the tip revision therein.
"""
raise NotImplementedError
class RepositoryInfo(object):
"""
A representation of a source code repository.
"""
def __init__(self, path=None, base_path=None, supports_changesets=False,
supports_parent_diffs=False):
self.path = path
self.base_path = base_path
self.supports_changesets = supports_changesets
self.supports_parent_diffs = supports_parent_diffs
        logging.debug('repository info: %s', self)
def __str__(self):
return 'Path: %s, Base path: %s, Supports changesets: %s' % \
(self.path, self.base_path, self.supports_changesets)
def set_base_path(self, base_path):
if not base_path.startswith('/'):
base_path = '/' + base_path
        logging.debug('changing repository info base_path from %s to %s',
                      self.base_path, base_path)
self.base_path = base_path
def find_server_repository_info(self, server):
"""
Try to find the repository from the list of repositories on the server.
For Subversion, this could be a repository with a different URL. For
all other clients, this is a noop.
"""
return self
def load_scmclients(config, options):
global SCMCLIENTS
SCMCLIENTS = {}
for ep in pkg_resources.iter_entry_points(group='rbtools_scm_clients'):
try:
SCMCLIENTS[ep.name] = ep.load()(config=config, options=options)
except Exception:
logging.exception('Could not load SCM Client "%s"', ep.name)
def scan_usable_client(config, options, client_name=None):
from rbtools.clients.perforce import PerforceClient
repository_info = None
tool = None
# TODO: We should only load all of the scm clients if the
# client_name isn't provided.
if SCMCLIENTS is None:
load_scmclients(config, options)
if client_name:
if client_name not in SCMCLIENTS:
logging.error('The provided repository type "%s" is invalid.' %
client_name)
sys.exit(1)
else:
scmclients = {
client_name: SCMCLIENTS[client_name]
}
else:
scmclients = SCMCLIENTS
for name, tool in six.iteritems(scmclients):
logging.debug('Checking for a %s repository...' % tool.name)
repository_info = tool.get_repository_info()
if repository_info:
break
if not repository_info:
if client_name:
logging.error('The provided repository type was not detected '
'in the current directory.')
elif getattr(options, 'repository_url', None):
logging.error('No supported repository could be accessed at '
'the supplied url.')
else:
logging.error('The current directory does not contain a checkout '
'from a supported source code repository.')
sys.exit(1)
# Verify that options specific to an SCM Client have not been mis-used.
if (getattr(options, 'change_only', False) and
not repository_info.supports_changesets):
sys.stderr.write('The --change-only option is not valid for the '
'current SCM client.\n')
sys.exit(1)
if (getattr(options, 'parent_branch', None) and
not repository_info.supports_parent_diffs):
sys.stderr.write('The --parent option is not valid for the '
'current SCM client.\n')
sys.exit(1)
if (not isinstance(tool, PerforceClient) and
(getattr(options, 'p4_client', None) or
getattr(options, 'p4_port', None))):
sys.stderr.write('The --p4-client and --p4-port options are not valid '
'for the current SCM client.\n')
sys.exit(1)
return (repository_info, tool)
def print_clients(config, options):
"""Print the supported detected SCM clients.
Each SCM client, including those provided by third party packages,
will be printed. Additionally, SCM clients which are detected in
the current directory will be highlighted.
"""
print('The following repository types are supported by this installation')
print('of RBTools. Each "<type>" may be used as a value for the')
print('"--repository-type=<type>" command line argument. Repository types')
print('which are detected in the current directory are marked with a "*"')
print('[*] "<type>": <Name>')
if SCMCLIENTS is None:
load_scmclients(config, options)
for name, tool in six.iteritems(SCMCLIENTS):
repository_info = tool.get_repository_info()
if repository_info:
print(' * "%s": %s' % (name, tool.name))
else:
print(' "%s": %s' % (name, tool.name))
|
|
# Django settings for eggtimer project.
import os
from django.utils.dateparse import parse_datetime
import dj_database_url
from email.utils import formataddr
HOME_DIR = os.path.expanduser("~")
BASE_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
ADMINS = (
(os.environ.get('ADMIN_NAME', 'admin'), os.environ.get('ADMIN_EMAIL', 'example@example.com')),
)
# Export a secret value in production; for local development, the default is good enough
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY',
'psu&83=i(4wgd@9*go=nps9=1rw#9b_w6psy4mp6yoxqv1i5g')
# Use the env setting if available, otherwise default DEBUG to False
DEBUG = bool(int(os.environ.get('DJANGO_DEBUG', '0')))
ALLOWED_HOSTS = ['eggtimer.herokuapp.com', 'localhost', '127.0.0.1']
CORS_ORIGIN_ALLOW_ALL = True
SECURE_SSL_REDIRECT = bool(int(os.environ.get('DJANGO_ENABLE_SSL', '1')))
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'custom_user',
'settings_context_processor',
'gunicorn',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.github',
'rest_framework',
'rest_framework.authtoken',
'floppyforms',
'bootstrapform',
'timezone_field',
'periods',
]
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'periods.middleware.AddAuthTokenMiddleware',
)
ROOT_URLCONF = 'eggtimer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'eggtimer', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.contrib.auth.context_processors.auth",
'django.template.context_processors.debug',
'django.template.context_processors.request',
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"settings_context_processor.context_processors.settings",
],
'debug': DEBUG,
},
},
]
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'eggtimer.wsgi.application'
# Parse database configuration from DATABASE_URL environment variable
DATABASES = {
'default': dj_database_url.config(
default="sqlite:///%s" % os.path.join(HOME_DIR, 'eggtimer', 'eggtimer.sqlite')
)
}
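# Example DATABASE_URL (placeholder credentials):
#   DATABASE_URL=postgres://user:password@localhost:5432/eggtimer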
SITE_ID = 1
# https://docs.djangoproject.com/en/1.8/topics/i18n/
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'bower_components'),
os.path.join(BASE_DIR, 'eggtimer', 'static'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend"
)
# auth and allauth
AUTH_USER_MODEL = 'periods.User'
LOGIN_REDIRECT_URL = '/calendar/'
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_LOGOUT_ON_GET = True
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email'],
'METHOD': 'oauth2',
}
}
ACCOUNT_ACTIVATION_DAYS = 14
DEFAULT_FROM_EMAIL = formataddr(ADMINS[0])
REPLY_TO = (
os.environ.get('REPLY_TO_EMAIL', 'example@example.com'),
)
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_PORT = 587
EMAIL_HOST_USER = "apikey"
EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_API_KEY')
EMAIL_USE_TLS = True
if not EMAIL_HOST_PASSWORD:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(HOME_DIR, 'eggtimer', 'emails')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
}
# US Navy API is used for moon phases
# http://aa.usno.navy.mil/data/docs/api.php#phase
MOON_PHASE_URL = 'http://api.usno.navy.mil'
API_DATE_FORMAT = '%Y-%m-%d'
US_DATE_FORMAT = '%-m/%-d/%Y'
# TODO maybe this could be a django plugin?
DEPLOY_DATE = parse_datetime(os.environ.get('DEPLOY_DATE', ''))
VERSION = '0.6'
TEMPLATE_VISIBLE_SETTINGS = ['DEPLOY_DATE', 'VERSION', 'ADMINS']
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# if DEBUG:
# INSTALLED_APPS.extend([
# 'django_extensions',
# ])
|
|
"""Support for Dutch Smart Meter (also known as Smartmeter or P1 port)."""
from __future__ import annotations
import asyncio
from asyncio import CancelledError
from contextlib import suppress
from datetime import timedelta
from functools import partial
import logging
from dsmr_parser import obis_references as obis_ref
from dsmr_parser.clients.protocol import create_dsmr_reader, create_tcp_dsmr_reader
import serial
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
TIME_HOURS,
)
from homeassistant.core import CoreState, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import Throttle
from .const import (
CONF_DSMR_VERSION,
CONF_PRECISION,
CONF_RECONNECT_INTERVAL,
CONF_SERIAL_ID,
CONF_SERIAL_ID_GAS,
CONF_TIME_BETWEEN_UPDATE,
DATA_TASK,
DEFAULT_DSMR_VERSION,
DEFAULT_PORT,
DEFAULT_PRECISION,
DEFAULT_RECONNECT_INTERVAL,
DEFAULT_TIME_BETWEEN_UPDATE,
DEVICE_NAME_ENERGY,
DEVICE_NAME_GAS,
DOMAIN,
ICON_GAS,
ICON_POWER,
ICON_POWER_FAILURE,
ICON_SWELL_SAG,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_DSMR_VERSION, default=DEFAULT_DSMR_VERSION): vol.All(
cv.string, vol.In(["5L", "5B", "5", "4", "2.2"])
),
vol.Optional(CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL): int,
vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
}
)
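# Example configuration.yaml entry for this platform (values are
# illustrative); async_setup_platform below imports it into a config entry:
#
#   sensor:
#     - platform: dsmr
#       port: /dev/ttyUSB0
#       dsmr_version: "5"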
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the platform into a config entry."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the DSMR sensor."""
config = entry.data
options = entry.options
dsmr_version = config[CONF_DSMR_VERSION]
# Define list of name,obis,force_update mappings to generate entities
obis_mapping = [
["Power Consumption", obis_ref.CURRENT_ELECTRICITY_USAGE, True],
["Power Production", obis_ref.CURRENT_ELECTRICITY_DELIVERY, True],
["Power Tariff", obis_ref.ELECTRICITY_ACTIVE_TARIFF, False],
["Energy Consumption (tarif 1)", obis_ref.ELECTRICITY_USED_TARIFF_1, True],
["Energy Consumption (tarif 2)", obis_ref.ELECTRICITY_USED_TARIFF_2, True],
["Energy Production (tarif 1)", obis_ref.ELECTRICITY_DELIVERED_TARIFF_1, True],
["Energy Production (tarif 2)", obis_ref.ELECTRICITY_DELIVERED_TARIFF_2, True],
[
"Power Consumption Phase L1",
obis_ref.INSTANTANEOUS_ACTIVE_POWER_L1_POSITIVE,
False,
],
[
"Power Consumption Phase L2",
obis_ref.INSTANTANEOUS_ACTIVE_POWER_L2_POSITIVE,
False,
],
[
"Power Consumption Phase L3",
obis_ref.INSTANTANEOUS_ACTIVE_POWER_L3_POSITIVE,
False,
],
[
"Power Production Phase L1",
obis_ref.INSTANTANEOUS_ACTIVE_POWER_L1_NEGATIVE,
False,
],
[
"Power Production Phase L2",
obis_ref.INSTANTANEOUS_ACTIVE_POWER_L2_NEGATIVE,
False,
],
[
"Power Production Phase L3",
obis_ref.INSTANTANEOUS_ACTIVE_POWER_L3_NEGATIVE,
False,
],
["Short Power Failure Count", obis_ref.SHORT_POWER_FAILURE_COUNT, False],
["Long Power Failure Count", obis_ref.LONG_POWER_FAILURE_COUNT, False],
["Voltage Sags Phase L1", obis_ref.VOLTAGE_SAG_L1_COUNT, False],
["Voltage Sags Phase L2", obis_ref.VOLTAGE_SAG_L2_COUNT, False],
["Voltage Sags Phase L3", obis_ref.VOLTAGE_SAG_L3_COUNT, False],
["Voltage Swells Phase L1", obis_ref.VOLTAGE_SWELL_L1_COUNT, False],
["Voltage Swells Phase L2", obis_ref.VOLTAGE_SWELL_L2_COUNT, False],
["Voltage Swells Phase L3", obis_ref.VOLTAGE_SWELL_L3_COUNT, False],
["Voltage Phase L1", obis_ref.INSTANTANEOUS_VOLTAGE_L1, False],
["Voltage Phase L2", obis_ref.INSTANTANEOUS_VOLTAGE_L2, False],
["Voltage Phase L3", obis_ref.INSTANTANEOUS_VOLTAGE_L3, False],
["Current Phase L1", obis_ref.INSTANTANEOUS_CURRENT_L1, False],
["Current Phase L2", obis_ref.INSTANTANEOUS_CURRENT_L2, False],
["Current Phase L3", obis_ref.INSTANTANEOUS_CURRENT_L3, False],
]
if dsmr_version == "5L":
obis_mapping.extend(
[
[
"Energy Consumption (total)",
obis_ref.LUXEMBOURG_ELECTRICITY_USED_TARIFF_GLOBAL,
True,
],
[
"Energy Production (total)",
obis_ref.LUXEMBOURG_ELECTRICITY_DELIVERED_TARIFF_GLOBAL,
True,
],
]
)
else:
obis_mapping.extend(
[["Energy Consumption (total)", obis_ref.ELECTRICITY_IMPORTED_TOTAL, True]]
)
# Generate device entities
devices = [
DSMREntity(
name, DEVICE_NAME_ENERGY, config[CONF_SERIAL_ID], obis, config, force_update
)
for name, obis, force_update in obis_mapping
]
# Protocol version specific obis
if CONF_SERIAL_ID_GAS in config:
if dsmr_version in ("4", "5", "5L"):
gas_obis = obis_ref.HOURLY_GAS_METER_READING
elif dsmr_version in ("5B",):
gas_obis = obis_ref.BELGIUM_HOURLY_GAS_METER_READING
else:
gas_obis = obis_ref.GAS_METER_READING
# Add gas meter reading and derivative for usage
devices += [
DSMREntity(
"Gas Consumption",
DEVICE_NAME_GAS,
config[CONF_SERIAL_ID_GAS],
gas_obis,
config,
True,
),
DerivativeDSMREntity(
"Hourly Gas Consumption",
DEVICE_NAME_GAS,
config[CONF_SERIAL_ID_GAS],
gas_obis,
config,
False,
),
]
async_add_entities(devices)
min_time_between_updates = timedelta(
seconds=options.get(CONF_TIME_BETWEEN_UPDATE, DEFAULT_TIME_BETWEEN_UPDATE)
)
@Throttle(min_time_between_updates)
def update_entities_telegram(telegram):
"""Update entities with latest telegram and trigger state update."""
# Make all device entities aware of new telegram
for device in devices:
device.update_data(telegram)
    # Create an asyncio.Protocol factory that reads DSMR telegrams and calls
    # update_entities_telegram for every telegram that arrives
if CONF_HOST in config:
reader_factory = partial(
create_tcp_dsmr_reader,
config[CONF_HOST],
config[CONF_PORT],
config[CONF_DSMR_VERSION],
update_entities_telegram,
loop=hass.loop,
keep_alive_interval=60,
)
else:
reader_factory = partial(
create_dsmr_reader,
config[CONF_PORT],
config[CONF_DSMR_VERSION],
update_entities_telegram,
loop=hass.loop,
)
async def connect_and_reconnect():
"""Connect to DSMR and keep reconnecting until Home Assistant stops."""
stop_listener = None
transport = None
protocol = None
while hass.state != CoreState.stopping:
# Start DSMR asyncio.Protocol reader
try:
transport, protocol = await hass.loop.create_task(reader_factory())
if transport:
# Register listener to close transport on HA shutdown
stop_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, transport.close
)
# Wait for reader to close
await protocol.wait_closed()
# Unexpected disconnect
if not hass.is_stopping:
stop_listener()
transport = None
protocol = None
                    # Reflect the disconnect in the entities' state by
                    # pushing an empty telegram, resulting in `unknown` states
update_entities_telegram({})
# throttle reconnect attempts
await asyncio.sleep(config[CONF_RECONNECT_INTERVAL])
except (serial.serialutil.SerialException, OSError):
# Log any error while establishing connection and drop to retry
# connection wait
_LOGGER.exception("Error connecting to DSMR")
transport = None
protocol = None
except CancelledError:
if stop_listener:
stop_listener() # pylint: disable=not-callable
if transport:
transport.close()
if protocol:
await protocol.wait_closed()
return
# Can't be hass.async_add_job because job runs forever
task = asyncio.create_task(connect_and_reconnect())
# Save the task to be able to cancel it when unloading
hass.data[DOMAIN][entry.entry_id][DATA_TASK] = task
class DSMREntity(SensorEntity):
"""Entity reading values from DSMR telegram."""
def __init__(self, name, device_name, device_serial, obis, config, force_update):
"""Initialize entity."""
self._name = name
self._obis = obis
self._config = config
self.telegram = {}
self._device_name = device_name
self._device_serial = device_serial
self._force_update = force_update
self._unique_id = f"{device_serial}_{name}".replace(" ", "_")
@callback
def update_data(self, telegram):
"""Update data."""
self.telegram = telegram
if self.hass and self._obis in self.telegram:
self.async_write_ha_state()
def get_dsmr_object_attr(self, attribute):
"""Read attribute from last received telegram for this DSMR object."""
        # Make sure telegram contains an object for this entity's obis
if self._obis not in self.telegram:
return None
# Get the attribute value if the object has it
dsmr_object = self.telegram[self._obis]
return getattr(dsmr_object, attribute, None)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if "Sags" in self._name or "Swells" in self.name:
return ICON_SWELL_SAG
if "Failure" in self._name:
return ICON_POWER_FAILURE
if "Power" in self._name:
return ICON_POWER
if "Gas" in self._name:
return ICON_GAS
@property
def state(self):
"""Return the state of sensor, if available, translate if needed."""
value = self.get_dsmr_object_attr("value")
if self._obis == obis_ref.ELECTRICITY_ACTIVE_TARIFF:
return self.translate_tariff(value, self._config[CONF_DSMR_VERSION])
        with suppress(TypeError):
            value = round(float(value), self._config[CONF_PRECISION])
        # None propagates as-is; any other value is returned (rounded if numeric)
        return value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self.get_dsmr_object_attr("unit")
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def device_info(self) -> dict[str, any]:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._device_serial)},
"name": self._device_name,
}
@property
def force_update(self):
"""Force update."""
return self._force_update
@property
def should_poll(self):
"""Disable polling."""
return False
@staticmethod
def translate_tariff(value, dsmr_version):
"""Convert 2/1 to normal/low depending on DSMR version."""
# DSMR V5B: Note: In Belgium values are swapped:
# Rate code 2 is used for low rate and rate code 1 is used for normal rate.
if dsmr_version in ("5B",):
if value == "0001":
value = "0002"
elif value == "0002":
value = "0001"
# DSMR V2.2: Note: Rate code 1 is used for low rate and rate code 2 is
# used for normal rate.
if value == "0002":
return "normal"
if value == "0001":
return "low"
return None
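    # Example (illustrative values): translate_tariff("0001", "5") returns
    # "low", while translate_tariff("0001", "5B") returns "normal" because
    # Belgian meters swap the rate codes.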
class DerivativeDSMREntity(DSMREntity):
"""Calculated derivative for values where the DSMR doesn't offer one.
Gas readings are only reported per hour and don't offer a rate only
the current meter reading. This entity converts subsequents readings
into a hourly rate.
"""
_previous_reading = None
_previous_timestamp = None
_state = None
@property
def state(self):
"""Return the calculated current hourly rate."""
return self._state
@property
def force_update(self):
"""Disable force update."""
return False
@property
def should_poll(self):
"""Enable polling."""
return True
async def async_update(self):
"""Recalculate hourly rate if timestamp has changed.
DSMR updates gas meter reading every hour. Along with the new
value a timestamp is provided for the reading. Test if the last
known timestamp differs from the current one then calculate a
new rate for the previous hour.
"""
# check if the timestamp for the object differs from the previous one
timestamp = self.get_dsmr_object_attr("datetime")
if timestamp and timestamp != self._previous_timestamp:
current_reading = self.get_dsmr_object_attr("value")
if self._previous_reading is None:
# Can't calculate rate without previous datapoint
# just store current point
pass
else:
# Recalculate the rate
diff = current_reading - self._previous_reading
timediff = timestamp - self._previous_timestamp
total_seconds = timediff.total_seconds()
self._state = round(float(diff) / total_seconds * 3600, 3)
self._previous_reading = current_reading
self._previous_timestamp = timestamp
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, per hour, if any."""
unit = self.get_dsmr_object_attr("unit")
if unit:
return f"{unit}/{TIME_HOURS}"
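    # Worked example (hypothetical readings): consecutive hourly gas readings
    # of 100.000 m3 at 10:00 and 100.250 m3 at 11:00 give diff = 0.250 and
    # timediff.total_seconds() = 3600, so the derived state becomes
    # round(0.250 / 3600 * 3600, 3) == 0.25, with unit "m3/h".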
|
|
import asyncio
import asyncio.streams
import traceback
import warnings
from collections import deque
from contextlib import suppress
from html import escape as html_escape
from http import HTTPStatus
from logging import Logger
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Optional,
Type,
cast,
)
import yarl
from .abc import AbstractAccessLogger, AbstractStreamWriter
from .base_protocol import BaseProtocol
from .helpers import CeilTimeout, current_task
from .http import (
HttpProcessingError,
HttpRequestParser,
HttpVersion10,
RawRequestMessage,
StreamWriter,
)
from .log import access_logger, server_logger
from .streams import EMPTY_PAYLOAD, StreamReader
from .tcp_helpers import tcp_keepalive
from .web_exceptions import HTTPException
from .web_log import AccessLogger
from .web_request import BaseRequest
from .web_response import Response, StreamResponse
__all__ = ('RequestHandler', 'RequestPayloadError', 'PayloadAccessError')
if TYPE_CHECKING: # pragma: no cover
from .web_server import Server # noqa
_RequestFactory = Callable[[RawRequestMessage,
StreamReader,
'RequestHandler',
AbstractStreamWriter,
'asyncio.Task[None]'],
BaseRequest]
_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
ERROR = RawRequestMessage(
'UNKNOWN', '/', HttpVersion10, {},
{}, True, False, False, False, yarl.URL('/'))
class RequestPayloadError(Exception):
"""Payload parsing error."""
class PayloadAccessError(Exception):
"""Payload was accessed after response was sent."""
class RequestHandler(BaseProtocol):
"""HTTP protocol implementation.
    RequestHandler handles incoming HTTP requests. It reads the request
    line, request headers and request payload, then calls the
    handle_request() method. By default it always returns a 404 response.
    RequestHandler handles errors in incoming requests, like a bad
    status line, bad headers or an incomplete payload. If any error
    occurs, the connection is closed.
:param keepalive_timeout: number of seconds before closing
keep-alive connection
:type keepalive_timeout: int or None
:param bool tcp_keepalive: TCP keep-alive is on, default is on
:param bool debug: enable debug mode
:param logger: custom logger object
:type logger: aiohttp.log.server_logger
:param access_log_class: custom class for access_logger
:type access_log_class: aiohttp.abc.AbstractAccessLogger
:param access_log: custom logging object
:type access_log: aiohttp.log.server_logger
:param str access_log_format: access log format string
:param loop: Optional event loop
:param int max_line_size: Optional maximum header line size
:param int max_field_size: Optional maximum header field size
:param int max_headers: Optional maximum header size
"""
KEEPALIVE_RESCHEDULE_DELAY = 1
    __slots__ = ('_request_count', '_keepalive', '_manager',
'_request_handler', '_request_factory', '_tcp_keepalive',
'_keepalive_time', '_keepalive_handle', '_keepalive_timeout',
'_lingering_time', '_messages', '_message_tail',
'_waiter', '_error_handler', '_task_handler',
'_upgrade', '_payload_parser', '_request_parser',
'_reading_paused', 'logger', 'debug', 'access_log',
'access_logger', '_close', '_force_close')
def __init__(self, manager: 'Server', *,
loop: asyncio.AbstractEventLoop,
keepalive_timeout: float=75., # NGINX default is 75 secs
tcp_keepalive: bool=True,
logger: Logger=server_logger,
access_log_class: Type[AbstractAccessLogger]=AccessLogger,
access_log: Logger=access_logger,
access_log_format: str=AccessLogger.LOG_FORMAT,
debug: bool=False,
max_line_size: int=8190,
max_headers: int=32768,
max_field_size: int=8190,
lingering_time: float=10.0):
super().__init__(loop)
self._request_count = 0
self._keepalive = False
self._manager = manager # type: Optional[Server]
self._request_handler = manager.request_handler # type: Optional[_RequestHandler] # noqa
self._request_factory = manager.request_factory # type: Optional[_RequestFactory] # noqa
self._tcp_keepalive = tcp_keepalive
# placeholder to be replaced on keepalive timeout setup
self._keepalive_time = 0.0
self._keepalive_handle = None # type: Optional[asyncio.Handle]
self._keepalive_timeout = keepalive_timeout
self._lingering_time = float(lingering_time)
self._messages = deque() # type: Any # Python 3.5 has no typing.Deque
self._message_tail = b''
self._waiter = None # type: Optional[asyncio.Future[None]]
self._error_handler = None # type: Optional[asyncio.Task[None]]
self._task_handler = None # type: Optional[asyncio.Task[None]]
self._upgrade = False
self._payload_parser = None # type: Any
self._request_parser = HttpRequestParser(
self, loop,
max_line_size=max_line_size,
max_field_size=max_field_size,
max_headers=max_headers,
payload_exception=RequestPayloadError) # type: Optional[HttpRequestParser] # noqa
self.logger = logger
self.debug = debug
self.access_log = access_log
if access_log:
self.access_logger = access_log_class(
access_log, access_log_format) # type: Optional[AbstractAccessLogger] # noqa
else:
self.access_logger = None
self._close = False
self._force_close = False
def __repr__(self) -> str:
return "<{} {}>".format(
self.__class__.__name__,
'connected' if self.transport is not None else 'disconnected')
@property
def keepalive_timeout(self) -> float:
return self._keepalive_timeout
async def shutdown(self, timeout: Optional[float]=15.0) -> None:
"""Worker process is about to exit, we need cleanup everything and
stop accepting requests. It is especially important for keep-alive
connections."""
self._force_close = True
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._waiter:
self._waiter.cancel()
# wait for handlers
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with CeilTimeout(timeout, loop=self._loop):
if (self._error_handler is not None and
not self._error_handler.done()):
await self._error_handler
if (self._task_handler is not None and
not self._task_handler.done()):
await self._task_handler
# force-close non-idle handler
if self._task_handler is not None:
self._task_handler.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
super().connection_made(transport)
real_transport = cast(asyncio.Transport, transport)
if self._tcp_keepalive:
tcp_keepalive(real_transport)
self._task_handler = self._loop.create_task(self.start())
assert self._manager is not None
self._manager.connection_made(self, real_transport)
def connection_lost(self, exc: Optional[BaseException]) -> None:
if self._manager is None:
return
self._manager.connection_lost(self, exc)
super().connection_lost(exc)
self._manager = None
self._force_close = True
self._request_factory = None
self._request_handler = None
self._request_parser = None
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._task_handler is not None:
self._task_handler.cancel()
if self._error_handler is not None:
self._error_handler.cancel()
self._task_handler = None
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
def set_parser(self, parser: Any) -> None:
# Actual type is WebReader
assert self._payload_parser is None
self._payload_parser = parser
if self._message_tail:
self._payload_parser.feed_data(self._message_tail)
self._message_tail = b''
def eof_received(self) -> None:
pass
def data_received(self, data: bytes) -> None:
if self._force_close or self._close:
return
# parse http messages
if self._payload_parser is None and not self._upgrade:
assert self._request_parser is not None
try:
messages, upgraded, tail = self._request_parser.feed_data(data)
except HttpProcessingError as exc:
# something happened during parsing
self._error_handler = self._loop.create_task(
self.handle_parse_error(
StreamWriter(self, self._loop),
400, exc, exc.message))
self.close()
except Exception as exc:
# 500: internal error
self._error_handler = self._loop.create_task(
self.handle_parse_error(
StreamWriter(self, self._loop),
500, exc))
self.close()
else:
if messages:
# sometimes the parser returns no messages
for (msg, payload) in messages:
self._request_count += 1
self._messages.append((msg, payload))
waiter = self._waiter
if waiter is not None:
if not waiter.done():
# don't set result twice
waiter.set_result(None)
self._upgrade = upgraded
if upgraded and tail:
self._message_tail = tail
# no parser, just store
elif self._payload_parser is None and self._upgrade and data:
self._message_tail += data
# feed payload
elif data:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self.close()
def keep_alive(self, val: bool) -> None:
"""Set keep-alive connection mode.
:param bool val: new state.
"""
self._keepalive = val
if self._keepalive_handle:
self._keepalive_handle.cancel()
self._keepalive_handle = None
def close(self) -> None:
"""Stop accepting new pipelinig messages and close
connection when handlers done processing messages"""
self._close = True
if self._waiter:
self._waiter.cancel()
def force_close(self) -> None:
"""Force close connection"""
self._force_close = True
if self._waiter:
self._waiter.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def log_access(self,
request: BaseRequest,
response: StreamResponse,
time: float) -> None:
if self.access_logger is not None:
self.access_logger.log(request, response, time)
def log_debug(self, *args: Any, **kw: Any) -> None:
if self.debug:
self.logger.debug(*args, **kw)
def log_exception(self, *args: Any, **kw: Any) -> None:
self.logger.exception(*args, **kw)
def _process_keepalive(self) -> None:
if self._force_close or not self._keepalive:
return
        next_time = self._keepalive_time + self._keepalive_timeout
        # handler in idle state
        if self._waiter:
            if self._loop.time() > next_time:
self.force_close()
return
# not all request handlers are done,
# reschedule itself to next second
self._keepalive_handle = self._loop.call_later(
self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive)
async def start(self) -> None:
"""Process incoming request.
It reads request line, request headers and request payload, then
calls handle_request() method. Subclass has to override
handle_request(). start() handles various exceptions in request
or response handling. Connection is being closed always unless
keep_alive(True) specified.
"""
loop = self._loop
handler = self._task_handler
assert handler is not None
manager = self._manager
assert manager is not None
keepalive_timeout = self._keepalive_timeout
resp = None
assert self._request_factory is not None
assert self._request_handler is not None
while not self._force_close:
if not self._messages:
try:
# wait for next request
self._waiter = loop.create_future()
await self._waiter
except asyncio.CancelledError:
break
finally:
self._waiter = None
message, payload = self._messages.popleft()
if self.access_log:
now = loop.time()
manager.requests_count += 1
writer = StreamWriter(self, loop)
request = self._request_factory(
message, payload, self, writer, handler)
try:
try:
# a new task is used for copy context vars (#3406)
task = self._loop.create_task(
self._request_handler(request))
resp = await task
except HTTPException as exc:
resp = exc
except asyncio.CancelledError:
self.log_debug('Ignored premature client disconnection')
break
except asyncio.TimeoutError as exc:
self.log_debug('Request handler timed out.', exc_info=exc)
resp = self.handle_error(request, 504)
except Exception as exc:
resp = self.handle_error(request, 500, exc)
else:
# Deprecation warning (See #2415)
if getattr(resp, '__http_exception__', False):
warnings.warn(
"returning HTTPException object is deprecated "
"(#2415) and will be removed, "
"please raise the exception instead",
DeprecationWarning)
if self.debug:
if not isinstance(resp, StreamResponse):
if resp is None:
raise RuntimeError("Missing return "
"statement on request handler")
else:
raise RuntimeError("Web-handler should return "
"a response instance, "
"got {!r}".format(resp))
await resp.prepare(request)
await resp.write_eof()
# notify server about keep-alive
self._keepalive = bool(resp.keep_alive)
# log access
if self.access_log:
self.log_access(request, resp, loop.time() - now)
# check payload
if not payload.is_eof():
lingering_time = self._lingering_time
if not self._force_close and lingering_time:
self.log_debug(
'Start lingering close timer for %s sec.',
lingering_time)
now = loop.time()
end_t = now + lingering_time
with suppress(
asyncio.TimeoutError, asyncio.CancelledError):
while not payload.is_eof() and now < end_t:
with CeilTimeout(end_t - now, loop=loop):
# read and ignore
await payload.readany()
now = loop.time()
# if payload still uncompleted
if not payload.is_eof() and not self._force_close:
self.log_debug('Uncompleted request.')
self.close()
payload.set_exception(PayloadAccessError())
except asyncio.CancelledError:
                self.log_debug('Ignored premature client disconnection')
break
except RuntimeError as exc:
if self.debug:
self.log_exception(
'Unhandled runtime exception', exc_info=exc)
self.force_close()
except Exception as exc:
self.log_exception('Unhandled exception', exc_info=exc)
self.force_close()
finally:
if self.transport is None and resp is not None:
self.log_debug('Ignored premature client disconnection.')
elif not self._force_close:
if self._keepalive and not self._close:
# start keep-alive timer
if keepalive_timeout is not None:
now = self._loop.time()
self._keepalive_time = now
if self._keepalive_handle is None:
self._keepalive_handle = loop.call_at(
now + keepalive_timeout,
self._process_keepalive)
else:
break
# remove handler, close transport if no handlers left
if not self._force_close:
self._task_handler = None
if self.transport is not None and self._error_handler is None:
self.transport.close()
def handle_error(self,
request: BaseRequest,
status: int=500,
exc: Optional[BaseException]=None,
message: Optional[str]=None) -> StreamResponse:
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection."""
self.log_exception("Error handling request", exc_info=exc)
ct = 'text/plain'
if status == HTTPStatus.INTERNAL_SERVER_ERROR:
title = '{0.value} {0.phrase}'.format(
HTTPStatus.INTERNAL_SERVER_ERROR
)
msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
tb = None
if self.debug:
with suppress(Exception):
tb = traceback.format_exc()
if 'text/html' in request.headers.get('Accept', ''):
if tb:
tb = html_escape(tb)
msg = '<h2>Traceback:</h2>\n<pre>{}</pre>'.format(tb)
message = (
"<html><head>"
"<title>{title}</title>"
"</head><body>\n<h1>{title}</h1>"
"\n{msg}\n</body></html>\n"
).format(title=title, msg=msg)
ct = 'text/html'
else:
if tb:
msg = tb
message = title + '\n\n' + msg
resp = Response(status=status, text=message, content_type=ct)
resp.force_close()
# some data already got sent, connection is broken
if request.writer.output_size > 0 or self.transport is None:
self.force_close()
return resp
async def handle_parse_error(self,
writer: AbstractStreamWriter,
status: int,
exc: Optional[BaseException]=None,
message: Optional[str]=None) -> None:
request = BaseRequest( # type: ignore
ERROR,
EMPTY_PAYLOAD,
self, writer,
current_task(),
self._loop)
resp = self.handle_error(request, status, exc, message)
await resp.prepare(request)
await resp.write_eof()
if self.transport is not None:
self.transport.close()
self._error_handler = None
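# A minimal usage sketch (not part of this module, kept in comments so the
# module's import-time behavior is unchanged): RequestHandler instances are
# normally created per-connection by aiohttp's low-level web.Server, which
# supplies the request_factory/request_handler used above. Host and port
# below are illustrative.
#
#     import asyncio
#     from aiohttp import web
#
#     async def handler(request: web.BaseRequest) -> web.Response:
#         return web.Response(text="OK")
#
#     async def main() -> None:
#         server = web.Server(handler)      # wraps RequestHandler per conn
#         runner = web.ServerRunner(server)
#         await runner.setup()
#         site = web.TCPSite(runner, "localhost", 8080)
#         await site.start()
#         await asyncio.Event().wait()      # serve until cancelled
#
#     asyncio.run(main())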
|
|
from django.test import TestCase
from unittest.mock import patch
from wellsfargo.security import (
encrypt_account_number,
decrypt_account_number,
WFRS_SECURITY,
)
from wellsfargo.security.fernet import FernetEncryption
from wellsfargo.security.kms import KMSEncryption
from wellsfargo.security.multi import MultiEncryption
import botocore
import base64
import binascii
FERNET_KEY_1 = b"U3Nyi57e55H2weKVmEPzrGdv18b0bGt3e542rg1J1N8="
FERNET_KEY_2 = b"mbgOpeXTyhhy1DgXreVOt6QMNu2Eem0RmPvJLCndpIw="
FERNET_KEY_3 = b"uK00vxMv9IG-FWvJPxZ4nz5AG3FuvdRj9XMhC8AWY2A="
KMS_KEY_ARN = (
"arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
)
_orig_make_api_call = botocore.client.BaseClient._make_api_call
def mock_make_api_call(self, operation_name, kwargs):
if operation_name == "Encrypt":
return {
"CiphertextBlob": base64.b64encode(kwargs["Plaintext"]),
}
if operation_name == "Decrypt":
resp = {}
try:
resp["Plaintext"] = base64.b64decode(kwargs["CiphertextBlob"])
except binascii.Error:
pass
return resp
return _orig_make_api_call(self, operation_name, kwargs)
def mock_kms(fn):
def wrapper(*args, **kwargs):
with patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call):
resp = fn(*args, **kwargs)
return resp
return wrapper
def patch_encryptor(encryptor, **encryptor_kwargs):
def decorate(fn):
def wrapper(*args, **kwargs):
_old_encryptor = WFRS_SECURITY["encryptor"]
_old_encryptor_kwargs = WFRS_SECURITY["encryptor_kwargs"]
WFRS_SECURITY["encryptor"] = encryptor
WFRS_SECURITY["encryptor_kwargs"] = encryptor_kwargs
resp = fn(*args, **kwargs)
WFRS_SECURITY["encryptor"] = _old_encryptor
WFRS_SECURITY["encryptor_kwargs"] = _old_encryptor_kwargs
return resp
return wrapper
return decorate
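# For reference, patch_encryptor above implies that WFRS_SECURITY is a dict
# with "encryptor" and "encryptor_kwargs" keys; a hypothetical settings-style
# example (key value is one of the test keys, not a production secret):
#
#   WFRS_SECURITY = {
#       "encryptor": "wellsfargo.security.fernet.FernetEncryption",
#       "encryptor_kwargs": {"key": FERNET_KEY_1},
#   }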
class AccountNumberTokenizationTest(TestCase):
@patch_encryptor("wellsfargo.security.fernet.FernetEncryption", key=FERNET_KEY_1)
def test_facade_fernet(self):
acct1 = "9999999999999991"
acct2 = "9999999999999992"
blob1 = encrypt_account_number(acct1)
blob2 = encrypt_account_number(acct2)
self.assertNotEqual(blob1, acct1)
self.assertEqual(decrypt_account_number(blob1), acct1)
self.assertNotEqual(blob2, acct2)
self.assertEqual(decrypt_account_number(blob2), acct2)
def test_fernet_round_trip(self):
encryptor = FernetEncryption(FERNET_KEY_1)
acct1 = "9999999999999991"
acct2 = "9999999999999992"
blob1 = encryptor.encrypt(acct1)
blob2 = encryptor.encrypt(acct2)
self.assertNotEqual(blob1, acct1)
self.assertEqual(encryptor.decrypt(blob1), acct1)
self.assertNotEqual(blob2, acct2)
self.assertEqual(encryptor.decrypt(blob2), acct2)
def test_fernet_decrypt(self):
encryptor = FernetEncryption(FERNET_KEY_1)
acct1 = "9999999999999991"
acct2 = "9999999999999992"
blob1 = b"gAAAAABYxsYabw7ChX1dF66SEsRHmIBZeyTHVvEpSKpS90267Jnxeo2egoNC2By9GrAja9GhccTVzHWYNOI5Kps7U3vcr7D2OKGnrVbe3lpL3rDtYrh3JBg="
blob2 = b"gAAAAABYxsYapFR9zH883pTKPh0y8SoXPzSSOzYIZIR-06HinrqPfK8BQ0iiEeCMTXlAvaw6yzEM1wjLIBlZtRxpzO5E-tMsxVSn9k02yFr_McU8-t_CN1c="
self.assertEqual(encryptor.decrypt(blob1), acct1)
self.assertEqual(encryptor.decrypt(blob2), acct2)
@mock_kms
def test_kms_encrypt(self):
encryptor = KMSEncryption(
KMS_KEY_ARN,
region_name="us-east-1",
encryption_context={"AppName": "Oscar E-Commerce"},
)
self.assertEqual(
encryptor.encrypt("9999999999999991"), b"T1RrNU9UazVPVGs1T1RrNU9UazVNUT09"
)
self.assertEqual(
encryptor.encrypt("9999999999999992"), b"T1RrNU9UazVPVGs1T1RrNU9UazVNZz09"
)
@mock_kms
def test_kms_decrypt(self):
encryptor = KMSEncryption(
KMS_KEY_ARN,
region_name="us-east-1",
encryption_context={"AppName": "Oscar E-Commerce"},
)
self.assertEqual(
encryptor.decrypt(b"T1RrNU9UazVPVGs1T1RrNU9UazVNUT09"), "9999999999999991"
)
self.assertEqual(
encryptor.decrypt(b"T1RrNU9UazVPVGs1T1RrNU9UazVNZz09"), "9999999999999992"
)
@mock_kms
def test_multi_round_trip(self):
# Make some data encrypted with key 1
fernet1 = FernetEncryption(FERNET_KEY_1)
acct1 = "9999999999999991"
blob1 = fernet1.encrypt(acct1)
# Make some data encrypted with key 2
fernet2 = FernetEncryption(FERNET_KEY_2)
acct2 = "9999999999999992"
blob2 = fernet2.encrypt(acct2)
# Make some data encrypted with key 3
fernet3 = FernetEncryption(FERNET_KEY_3)
acct3 = "9999999999999993"
blob3 = fernet3.encrypt(acct3)
# Make some data encrypted with KMS
kms1 = KMSEncryption(
KMS_KEY_ARN,
region_name="us-east-1",
encryption_context={"AppName": "Oscar E-Commerce"},
)
acct4 = "9999999999999994"
blob4 = kms1.encrypt(acct4)
        # Ensure each encryptor can decrypt only the data encrypted with its own key
self.assertEqual(fernet1.decrypt(blob1), acct1)
self.assertIsNone(fernet1.decrypt(blob2))
self.assertIsNone(fernet1.decrypt(blob3))
self.assertIsNone(fernet1.decrypt(blob4))
self.assertIsNone(fernet2.decrypt(blob1))
self.assertEqual(fernet2.decrypt(blob2), acct2)
self.assertIsNone(fernet2.decrypt(blob3))
self.assertIsNone(fernet2.decrypt(blob4))
self.assertIsNone(fernet3.decrypt(blob1))
self.assertIsNone(fernet3.decrypt(blob2))
self.assertEqual(fernet3.decrypt(blob3), acct3)
self.assertIsNone(fernet3.decrypt(blob4))
self.assertIsNone(kms1.decrypt(blob1))
self.assertIsNone(kms1.decrypt(blob2))
self.assertIsNone(kms1.decrypt(blob3))
self.assertEqual(kms1.decrypt(blob4), acct4)
# Build a multi-encryptor that has knowledge of all the keys
multi = MultiEncryption(
encryptors=[
{
"encryptor": "wellsfargo.security.kms.KMSEncryption",
"encryptor_kwargs": {
"key_id": KMS_KEY_ARN,
"region_name": "us-east-1",
"encryption_context": {
"AppName": "Oscar E-Commerce",
},
},
},
{
"encryptor": "wellsfargo.security.fernet.FernetEncryption",
"encryptor_kwargs": {
"key": FERNET_KEY_3,
},
},
{
"encryptor": "wellsfargo.security.fernet.FernetEncryption",
"encryptor_kwargs": {
"key": FERNET_KEY_2,
},
},
{
"encryptor": "wellsfargo.security.fernet.FernetEncryption",
"encryptor_kwargs": {
"key": FERNET_KEY_1,
},
},
]
)
# Ensure the multi-encryptor can decrypt all of the blobs
self.assertEqual(multi.decrypt(blob1), acct1)
self.assertEqual(multi.decrypt(blob2), acct2)
self.assertEqual(multi.decrypt(blob3), acct3)
self.assertEqual(multi.decrypt(blob4), acct4)
        # Ensure the multi-encryptor encrypts new data with KMS, since it's
        # listed first (most preferred)
acct5 = "9999999999999995"
blob5 = multi.encrypt(acct5)
self.assertEqual(multi.decrypt(blob5), acct5)
self.assertIsNone(fernet1.decrypt(blob5))
self.assertIsNone(fernet2.decrypt(blob5))
self.assertIsNone(fernet3.decrypt(blob5))
self.assertEqual(kms1.decrypt(blob5), acct5)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
var = var + accum * lr * momentum
accum = accum * momentum + g
var = var - lr * accum
var = var - accum * lr * momentum
return var, accum
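  # Derivation note (algebra only, not an API guarantee): the four update
  # lines above are equivalent to the closed form used with use_nesterov=True:
  #   accum_t = momentum * accum_{t-1} + g
  #   var_t   = var_{t-1} - lr * (g + momentum * accum_t)
  # since the first and last lines just shift to/from the lookahead point.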
def doTestBasic(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
def testBasic(self):
self.doTestBasic(use_resource=False)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
cost = 5 * var0 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
def testSparseNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
grads = []
for t in range(1, 5):
grads.append(var0_np * 10)
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
loss = 5 * var0 * var0 + 3 * var1
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
x_feed = array_ops.placeholder(dtype)
y_feed = ops.IndexedSlices(
x_feed, constant_op.constant([0, 1]), constant_op.constant([2]))
grads_and_vars = [(y_feed, var0), (constant_op.constant(
[3.0, 3.0], dtype=dtype), var1)]
opt_update = mom_op.apply_gradients(grads_and_vars)
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_update.run(feed_dict={x_feed: grads[t - 1]})
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = momentum_lib.MomentumOptimizer(
learning_rate=1.0, momentum=0.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[-111, -138]], var0.eval())
def testTensorLearningRateAndMomentum(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
    The return values have been generated from the dist-belief momentum
    unittest, running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [
0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,
0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615
]
db_out[0] = [
-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018,
-0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618
]
db_grad[1] = [
0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378,
0.5513742, 0.94687688, 0.16012503, 0.22159521
]
db_out[1] = [
-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884,
-0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544
]
db_grad[2] = [
0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965,
0.31168157, 0.43203235, 0.16792089, 0.24644311
]
db_out[2] = [
-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978,
-0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189
]
db_grad[3] = [
0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098,
0.81454384, 0.03848977, 0.89759839, 0.93665648
]
db_out[3] = [
-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105,
-0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303
]
db_grad[4] = [
0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359,
0.69107032, 0.81897682, 0.5433259, 0.67860287
]
db_out[4] = [
-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165,
-0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544
]
db_grad[5] = [
0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563,
0.84163809, 0.41172323, 0.83259648, 0.44941229
]
db_out[5] = [
-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094,
-0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717
]
db_grad[6] = [
0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221,
0.73577434, 0.16014607, 0.57500273, 0.071136251
]
db_out[6] = [
-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685,
-0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997
]
db_grad[7] = [
0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646,
0.74053431, 0.16033, 0.66625422, 0.73515922
]
db_out[7] = [
-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838,
-0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418
]
db_grad[8] = [
0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039,
0.55561525, 0.22567581, 0.93331909, 0.29438227
]
db_out[8] = [
-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527,
-0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781
]
db_grad[9] = [
0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893,
0.68593478, 0.50580865, 0.12602448, 0.093537711
]
db_out[9] = [
-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302,
-0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295
]
# pylint: enable=line-too-long
return db_grad, db_out
def testLikeDistBeliefMom01(self):
with self.test_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = variables.Variable([0.0] * num_samples)
grads0 = constant_op.constant([0.0] * num_samples)
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
variables.global_variables_initializer().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), var0.eval())
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
grads0 = ops.IndexedSlices(
constant_op.constant(
[[.1, .1]], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([4, 2]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[[.01, .01], [.01, .01]], dtype=dtype),
constant_op.constant([2, 3]),
constant_op.constant([4, 2]))
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], var0.eval()[0])
self.assertAllClose([0, 0], var0.eval()[1])
self.assertAllClose([1, 1], var1.eval()[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(np.array([.1, .1]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]), slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]), var0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), var1.eval()[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), -(0.1 * 2.0) - (
(0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval()[2])
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
# This fixes db inconsistencies, such as the votes and tags of linkages (making them the same as the sum of the included notes)
import sys, os
#TODO: no hardcoding of home_path
#home_path = '/home/leon/projects/notebookWebapp/notebook_src'
from env_settings import HOME_PATH
sys.path.append(HOME_PATH)
from django.core import management; import notebook; import notebook.settings as settings;management.setup_environ(settings)
from django.db import models
from notebook.notes.views import getL
from notebook.bookmarks.views import getL as getL2
from notebook.notes.models import get_storage_loc, fs, create_model #Note_Backup,
from notebook.snippets.models import Snippet
from notebook.notes.views import getT, getNote
#from notebook.bookmarks.views import getN as getB
#from notebook.scraps.views import getN as getS
from notebook.social.models import Social_Frame, Social_Snippet, Social_Tag, Social_Bookmark, Social_Scrap, Social_Note
from notebook.notes.models import Tag, Note
from django.contrib.auth.models import User
from django.db.utils import ConnectionDoesNotExist
import datetime
from datetime import date
#def getNB(username):
# return create_model("NB_"+str(username), Note_Backup, username)
# NOTE: Bookmark_Backup and Scrap_Backup are not imported above, so these two
# helpers will raise NameError until the backup models are available again.
def getBB(username):
    return create_model("BB_"+str(username), Bookmark_Backup, username)
def getSB(username):
    return create_model("SB_"+str(username), Scrap_Backup, username)
def getSnip(username):
return create_model("Snip_"+str(username), Snippet, username)
#insert snippets backed up to snippet table after letting snippet subclass note
#def restore_snippet(username):
# NB = getNB(username)
# nbs = NB.objects.all()
# Snip = getSnip(username)
# for nb in nbs:
# s = Snip(desc=nb.desc, title=nb.title, private=nb.private, deleted=nb.deleted, attachment=nb.attachment, \
#init_date=nb.init_date, last_modi_date=nb.last_modi_date, vote=nb.vote)
# s.save()
# s.tags = nb.tags.all()
def fix_linkage_vote(username):
L = getL(username)
ls = L.objects.all()
for l in ls:
print 'linkage is:', l
l.vote = l.get_vote()
l.save()
L2 = getL2(username)
ls2 = L2.objects.all()
for l2 in ls2:
l2.vote = l2.get_vote()
l2.save()
#TODO: not tested. Also not sure if the linkage shouldn't have its own unique tags
def fix_linkage_tags(username):
L = getL(username)
ls = L.objects.all()
for l in ls:
l.update_tags(l.get_t_display_of_sum_of_note_tags())
l.save()
L2 = getL2(username)
ls2 = L2.objects.all()
for l2 in ls2:
        l2.update_tags(l2.get_t_display_of_sum_of_note_tags())
l2.save()
#this was written temporarily to fix db attachment path due to change of db structure
#from notebook.notes.views import getN
#def fix_db_attachment_path():
# N = getN(username)
# ns = N.objects.all(attachment__startswith='noteattachments/')
# for n in ns:
# if ns.attachment:
# pass #TODO:
#TODO: add other users' notes into the Social_Note
def init_social_db_snippet(username):
print 'init social db...'
user = User.objects.get(username=username)
N = getNote(username, 'snippetbook')
ns = N.objects.all()
for n in ns:
print 'note is:', n
if not n.private and not n.deleted:
sts = []
for t in n.tags.all():
if not t.private:
st, created = Social_Tag.objects.get_or_create(name=t.name)
sts.append(st)
s, created = Social_Snippet.objects.get_or_create(owner=user, owner_note_id=n.id, desc=n.desc, title=n.title,\
init_date=n.init_date, last_modi_date=n.last_modi_date, vote=n.vote) #attachment
if created:
for t in sts:
s.tags.add(t)
s.save()
#print 'a new sn saved'
def init_social_db_bookmark(username):
user = User.objects.get(username=username)
N = getNote(username, 'bookmarkbook')
ns = N.objects.all()
for n in ns:
print 'bookmark is:', n
if not n.private and not n.deleted:
sts = []
for t in n.tags.all():
if not t.private:
st, created = Social_Tag.objects.get_or_create(name=t.name)
sts.append(st)
s, created = Social_Bookmark.objects.get_or_create(owner=user, owner_note_id=n.id, url=n.url, desc=n.desc, title=n.title,\
init_date=n.init_date, last_modi_date=n.last_modi_date, vote=n.vote) #attachment
if created:
for t in sts:
s.tags.add(t)
s.save()
def init_social_db_scrap(username):
user = User.objects.get(username=username)
N = getNote(username, 'scrapbook')
ns = N.objects.all()
for n in ns:
print 'scrap is:', n
if not n.private and not n.deleted:
sts = []
for t in n.tags.all():
if not t.private:
st, created = Social_Tag.objects.get_or_create(name=t.name)
sts.append(st)
s, created = Social_Scrap.objects.get_or_create(owner=user, url=n.url, owner_note_id=n.id, desc=n.desc, title=n.title,\
init_date=n.init_date, last_modi_date=n.last_modi_date, vote=n.vote) #attachment
if created:
for t in sts:
s.tags.add(t)
s.save()
from django.db import connections, transaction
from django.db.utils import DatabaseError
def fix_table(sql):
for alias in connections.databases.keys():
if alias not in ['leon', 'default']: #TODO: get rid of this
print 'for db ', alias
cursor = connections[alias].cursor()
try:
cursor.execute(sql)
transaction.commit_unless_managed(using=alias)
except DatabaseError, e:
print 'Error updating db ', alias, 'probably it already had the change.'
print e
#show notes that are out of sync btw personal and social
#For now, just check notes that shouldn't be in social
def clean_out_of_sync(username):
sns = Social_Note.objects.filter(owner__username=username)
for sn in sns:
n = Note.objects.using(username).get(id=sn.owner_note_id)
if n.deleted == True:
print 'social:',sn.id, 'personal:',n.id
print 'removing this note from social...'
sn.delete()
def check_frames(username):
sfs = Social_Frame.objects.filter(owner__username=username)
def sync_last_ndays(username, ndays):
N = getNote(username, 'notebook')
now = date.today()
    ndays_ago = now - timedelta(days=ndays)
note_list = N.objects.filter(init_date__gte=ndays_ago.strftime('%Y-%m-%d'), init_date__lte=now.strftime('%Y-%m-%d 23:59:59'))
    print 'Sync '+str(len(note_list))+' notes...'
for n in note_list:
n.save()
print 'Sync finished for this user!'
#Notes deleted (or made private) in the personal notebook before the
#"withdraw from social notebook on delete/private" behaviour was implemented
#are still in the social notebook (for example, those with private=True,
#deleted=False). They should be removed from the db by the following script
#(although they can no longer be viewed in the social notebook). TODO:privacy
#def cleanup_social():
#    #check db inconsistencies between personal and social, and fix them
#    pass
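# A minimal sketch of the cleanup described above (not the original
# implementation), assuming the same Social_Note/Note models used by
# clean_out_of_sync(): drop social copies whose personal note has since
# been deleted or made private.
def cleanup_social_sketch(username):
    for sn in Social_Note.objects.filter(owner__username=username):
        n = Note.objects.using(username).get(id=sn.owner_note_id)
        if n.deleted or n.private:
            print 'removing out-of-sync social note:', sn.id
            sn.delete()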
if __name__ == "__main__":
if len(sys.argv) == 1:
pass
else:
username = sys.argv[1]
command = sys.argv[2]
if command=='all':
fix_linkage_vote(username)
fix_linkage_tags(username)
if command=='vote':
fix_linkage_vote(username)
if command=='tags':
fix_linkage_tags(username)
if command=='init_social_snippet':
init_social_db_snippet(username)
if command=='init_social_bookmark':
init_social_db_bookmark(username)
if command=='init_social_scrap':
init_social_db_scrap(username)
if command=='restore_snippet':
restore_snippet(username)
if command=='restore_bookmark':
restore_bookmark(username)
if command=='restore_scrap':
restore_scrap(username)
if command=='fix_table':
print "add _order column to all db's notes_frame_notes table..."
#TODO: get sql from command input
fix_table('ALTER TABLE notes_frame_notes ADD COLUMN _order integer;')
if command=='show_out_of_sync':
            clean_out_of_sync(username)
if command=='sync_last_ndays':
ndays = int(sys.argv[3])
if username=='allusers':
print "Sync the last "+str(ndays)+" for all users..."
users = [u.username for u in User.objects.all()]
for u in users:
try:
print 'Sync for user:', u
sync_last_ndays(u, ndays)
except ConnectionDoesNotExist, e:
print e
else:
print "Sync the last "+str(ndays)+" for user "+username+'...'
sync_last_ndays(username, ndays)
|
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for subtopic page domain objects."""
from constants import constants
from core.domain import state_domain
from core.domain import subtopic_page_domain
from core.tests import test_utils
import feconf
import utils
class SubtopicPageDomainUnitTests(test_utils.GenericTestBase):
"""Tests for subtopic page domain objects."""
topic_id = 'topic_id'
subtopic_id = 1
def setUp(self):
super(SubtopicPageDomainUnitTests, self).setUp()
self.subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
self.subtopic_id, self.topic_id))
def test_to_dict(self):
expected_subtopic_page_dict = {
'id': 'topic_id-1',
'topic_id': 'topic_id',
'page_contents': {
'subtitled_html': {
'html': '',
'content_id': 'content'
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {}
}
},
'written_translations': {
'translations_mapping': {
'content': {}
}
}
},
'page_contents_schema_version': (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'version': 0
}
self.assertEqual(self.subtopic_page.to_dict(),
expected_subtopic_page_dict)
def test_create_default_subtopic_page(self):
"""Tests the create_default_topic() function."""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
self.subtopic_id, self.topic_id))
expected_subtopic_page_dict = {
'id': 'topic_id-1',
'topic_id': 'topic_id',
'page_contents': {
'subtitled_html': {
'html': '',
'content_id': 'content'
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {}
}
},
'written_translations': {
'translations_mapping': {
'content': {}
}
}
},
'page_contents_schema_version': (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'version': 0
}
self.assertEqual(subtopic_page.to_dict(), expected_subtopic_page_dict)
def test_get_subtopic_page_id(self):
self.assertEqual(
subtopic_page_domain.SubtopicPage.get_subtopic_page_id('abc', 1),
'abc-1')
def test_get_subtopic_id_from_subtopic_page_id(self):
self.assertEqual(
self.subtopic_page.get_subtopic_id_from_subtopic_page_id(), 1)
def _assert_validation_error(self, expected_error_substring):
"""Checks that the topic passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.subtopic_page.validate()
def test_subtopic_topic_id_validation(self):
self.subtopic_page.topic_id = 1
self._assert_validation_error('Expected topic_id to be a string')
def test_language_code_validation(self):
self.subtopic_page.language_code = 0
self._assert_validation_error('Expected language code to be a string')
self.subtopic_page.language_code = 'xz'
self._assert_validation_error('Invalid language code')
def test_update_audio(self):
recorded_voiceovers_dict = {
'voiceovers_mapping': {
'content': {
'en': {
'filename': 'test.mp3',
'file_size_bytes': 100,
'needs_update': False
}
}
}
}
expected_subtopic_page_dict = {
'id': 'topic_id-1',
'topic_id': 'topic_id',
'page_contents': {
'subtitled_html': {
'html': '',
'content_id': 'content'
},
'recorded_voiceovers': recorded_voiceovers_dict,
'written_translations': {
'translations_mapping': {
'content': {}
}
}
},
'page_contents_schema_version': (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'version': 0
}
self.subtopic_page.update_page_contents_audio(
state_domain.RecordedVoiceovers.from_dict(
recorded_voiceovers_dict))
self.assertEqual(self.subtopic_page.to_dict(),
expected_subtopic_page_dict)
def test_update_html(self):
expected_subtopic_page_dict = {
'id': 'topic_id-1',
'topic_id': 'topic_id',
'page_contents': {
'subtitled_html': {
'html': '<p>hello world</p>',
'content_id': 'content'
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {}
}
},
'written_translations': {
'translations_mapping': {
'content': {}
}
}
},
'page_contents_schema_version': (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'version': 0
}
self.subtopic_page.update_page_contents_html(
state_domain.SubtitledHtml.from_dict({
'html': '<p>hello world</p>',
'content_id': 'content'
}))
self.assertEqual(self.subtopic_page.to_dict(),
expected_subtopic_page_dict)
def test_update_written_translations(self):
written_translations_dict = {
'translations_mapping': {
'content': {
'en': {
'html': 'Translation in hindi.',
'needs_update': False
}
}
}
}
expected_subtopic_page_dict = {
'id': 'topic_id-1',
'topic_id': 'topic_id',
'page_contents': {
'subtitled_html': {
'html': '',
'content_id': 'content'
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {}
}
},
'written_translations': written_translations_dict
},
'page_contents_schema_version': (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'version': 0
}
self.subtopic_page.update_page_contents_written_translations(
written_translations_dict)
self.assertEqual(
self.subtopic_page.to_dict(), expected_subtopic_page_dict)
def test_create_subtopic_page_change(self):
subtopic_page_change_object = subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': self.topic_id,
'subtopic_id': 'subtopic_id'
})
self.assertEqual(
subtopic_page_change_object.to_dict(), {
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': self.topic_id,
'subtopic_id': 'subtopic_id'
})
def test_validate_version_number(self):
self.subtopic_page.version = 'invalid_version'
with self.assertRaisesRegexp(
Exception, 'Expected version number to be an int'):
self.subtopic_page.validate()
def test_validate_page_contents_schema_version_type(self):
self.subtopic_page.page_contents_schema_version = 'invalid_version'
with self.assertRaisesRegexp(
Exception,
'Expected page contents schema version to be an integer'):
self.subtopic_page.validate()
def test_validate_page_contents_schema_version(self):
self.subtopic_page.page_contents_schema_version = 0
with self.assertRaisesRegexp(
Exception,
'Expected page contents schema version to be %s'
% feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
self.subtopic_page.validate()
class SubtopicPageContentsDomainUnitTests(test_utils.GenericTestBase):
def setUp(self):
super(SubtopicPageContentsDomainUnitTests, self).setUp()
self.subtopic_page_contents = (
subtopic_page_domain.SubtopicPageContents
.create_default_subtopic_page_contents())
def _assert_validation_error(self, expected_error_substring):
"""Checks that the topic passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.subtopic_page_contents.validate()
def test_create_default_subtopic_page(self):
subtopic_page_contents = (
subtopic_page_domain.SubtopicPageContents
.create_default_subtopic_page_contents())
expected_subtopic_page_contents_dict = {
'subtitled_html': {
'html': '',
'content_id': 'content'
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {}
}
},
'written_translations': {
'translations_mapping': {
'content': {}
}
}
}
self.assertEqual(subtopic_page_contents.to_dict(),
expected_subtopic_page_contents_dict)
def test_to_and_from_dict(self):
subtopic_page_contents_dict = {
'subtitled_html': {
'html': '<p>test</p>',
'content_id': 'content'
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {
'en': {
'filename': 'test.mp3',
'file_size_bytes': 100,
'needs_update': False
}
}
}
},
'written_translations': {
'translations_mapping': {
'content': {
'en': {
'html': 'Translation.',
'needs_update': False
}
}
}
}
}
subtopic_page_contents = (
subtopic_page_domain.SubtopicPageContents.from_dict(
subtopic_page_contents_dict))
self.assertEqual(subtopic_page_contents.to_dict(),
subtopic_page_contents_dict)
class SubtopicPageChangeTests(test_utils.GenericTestBase):
def test_subtopic_page_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
subtopic_page_domain.SubtopicPageChange({'invalid': 'data'})
def test_subtopic_page_change_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
subtopic_page_domain.SubtopicPageChange({'cmd': 'invalid'})
def test_subtopic_page_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'new_value, old_value')):
subtopic_page_domain.SubtopicPageChange({
'cmd': 'update_subtopic_page_property',
'property_name': '<p>page_contents_html</p>',
})
def test_subtopic_page_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
subtopic_page_domain.SubtopicPageChange({
'cmd': 'create_new',
'topic_id': 'topic_id',
'subtopic_id': 'subtopic_id',
'invalid': 'invalid'
})
def test_subtopic_page_change_object_with_invalid_subtopic_page_property(
self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd update_subtopic_page_property: '
'invalid is not allowed')):
subtopic_page_domain.SubtopicPageChange({
'cmd': 'update_subtopic_page_property',
'subtopic_id': 'subtopic_id',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_subtopic_page_change_object_with_update_subtopic_page_property(
self):
subtopic_page_change_object = subtopic_page_domain.SubtopicPageChange({
'cmd': 'update_subtopic_page_property',
'subtopic_id': 'subtopic_id',
'property_name': 'page_contents_html',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(
subtopic_page_change_object.cmd, 'update_subtopic_page_property')
self.assertEqual(subtopic_page_change_object.subtopic_id, 'subtopic_id')
self.assertEqual(
subtopic_page_change_object.property_name, 'page_contents_html')
self.assertEqual(subtopic_page_change_object.new_value, 'new_value')
self.assertEqual(subtopic_page_change_object.old_value, 'old_value')
def test_subtopic_page_change_object_with_create_new(self):
subtopic_page_change_object = (
subtopic_page_domain.SubtopicPageChange({
'cmd': 'create_new',
'topic_id': 'topic_id',
'subtopic_id': 'subtopic_id'
}))
self.assertEqual(subtopic_page_change_object.cmd, 'create_new')
self.assertEqual(subtopic_page_change_object.topic_id, 'topic_id')
self.assertEqual(subtopic_page_change_object.subtopic_id, 'subtopic_id')
def test_to_dict(self):
subtopic_page_change_dict = {
'cmd': 'create_new',
'topic_id': 'topic_id',
'subtopic_id': 'subtopic_id'
}
subtopic_page_change_object = subtopic_page_domain.SubtopicPageChange(
subtopic_page_change_dict)
self.assertEqual(
subtopic_page_change_object.to_dict(), subtopic_page_change_dict)
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iLO Power Driver
"""
from oslo_config import cfg
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
ilo_error = importutils.try_import('proliantutils.exception')
opts = [
cfg.IntOpt('power_retry',
default=6,
help='Number of times a power operation needs to be retried'),
cfg.IntOpt('power_wait',
default=2,
help='Amount of time in seconds to wait in between power '
'operations'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='ilo')
LOG = logging.getLogger(__name__)
def _attach_boot_iso(task):
"""Attaches boot ISO for a deployed node.
This method checks the instance info of the baremetal node for a
boot iso. It attaches the boot ISO on the baremetal node, and then
sets the node to boot from virtual media cdrom.
:param task: a TaskManager instance containing the node to act on.
"""
i_info = task.node.instance_info
node_state = task.node.provision_state
# NOTE: On instance rebuild, ilo_boot_iso will be present in
# instance_info but the node will be in DEPLOYING state.
# In such a scenario, the ilo_boot_iso shouldn't be
# attached to the node while powering on the node (the node
# should boot from deploy ramdisk instead, which will already
# be attached by the deploy driver).
if 'ilo_boot_iso' in i_info and node_state == states.ACTIVE:
ilo_common.setup_vmedia_for_boot(task, i_info['ilo_boot_iso'])
manager_utils.node_set_boot_device(task, boot_devices.CDROM)
def _get_power_state(node):
"""Returns the current power state of the node.
:param node: The node.
:returns: power state, one of :mod: `ironic.common.states`.
:raises: InvalidParameterValue if required iLO credentials are missing.
:raises: IloOperationError on an error from IloClient library.
"""
ilo_object = ilo_common.get_ilo_object(node)
# Check the current power state.
try:
power_status = ilo_object.get_host_power_status()
except ilo_error.IloError as ilo_exception:
LOG.error(_LE("iLO get_power_state failed for node %(node_id)s with "
"error: %(error)s."),
{'node_id': node.uuid, 'error': ilo_exception})
operation = _('iLO get_power_status')
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
if power_status == "ON":
return states.POWER_ON
elif power_status == "OFF":
return states.POWER_OFF
else:
return states.ERROR
def _wait_for_state_change(node, target_state):
"""Wait for the power state change to get reflected."""
state = [None]
retries = [0]
def _wait(state):
state[0] = _get_power_state(node)
# NOTE(rameshg87): For reboot operations, initially the state
# will be same as the final state. So defer the check for one retry.
if retries[0] != 0 and state[0] == target_state:
raise loopingcall.LoopingCallDone()
if retries[0] > CONF.ilo.power_retry:
state[0] = states.ERROR
raise loopingcall.LoopingCallDone()
retries[0] += 1
# Start a timer and wait for the operation to complete.
timer = loopingcall.FixedIntervalLoopingCall(_wait, state)
timer.start(interval=CONF.ilo.power_wait).wait()
return state[0]
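# With the defaults registered above (power_wait=2, power_retry=6), the loop
# polls roughly every 2 seconds and gives up after about power_wait *
# power_retry = 12 seconds, which matches the timeout reported by
# _set_power_state() below.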
def _set_power_state(task, target_state):
"""Turns the server power on/off or do a reboot.
:param task: a TaskManager instance containing the node to act on.
:param target_state: target state of the node.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: IloOperationError on an error from IloClient library.
:raises: PowerStateFailure if the power couldn't be set to target_state.
"""
node = task.node
ilo_object = ilo_common.get_ilo_object(node)
# Trigger the operation based on the target state.
try:
if target_state == states.POWER_OFF:
ilo_object.hold_pwr_btn()
elif target_state == states.POWER_ON:
_attach_boot_iso(task)
ilo_object.set_host_power('ON')
elif target_state == states.REBOOT:
_attach_boot_iso(task)
ilo_object.reset_server()
target_state = states.POWER_ON
else:
msg = _("_set_power_state called with invalid power state "
"'%s'") % target_state
raise exception.InvalidParameterValue(msg)
except ilo_error.IloError as ilo_exception:
LOG.error(_LE("iLO set_power_state failed to set state to %(tstate)s "
" for node %(node_id)s with error: %(error)s"),
{'tstate': target_state, 'node_id': node.uuid,
'error': ilo_exception})
operation = _('iLO set_power_state')
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
# Wait till the state change gets reflected.
state = _wait_for_state_change(node, target_state)
if state != target_state:
timeout = (CONF.ilo.power_wait) * (CONF.ilo.power_retry)
LOG.error(_LE("iLO failed to change state to %(tstate)s "
"within %(timeout)s sec"),
{'tstate': target_state, 'timeout': timeout})
raise exception.PowerStateFailure(pstate=target_state)
class IloPower(base.PowerInterface):
def get_properties(self):
return ilo_common.COMMON_PROPERTIES
def validate(self, task):
"""Check if node.driver_info contains the required iLO credentials.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if required iLO credentials are missing.
"""
ilo_common.parse_driver_info(task.node)
def get_power_state(self, task):
"""Gets the current power state.
:param task: a TaskManager instance.
:returns: one of :mod:`ironic.common.states` POWER_OFF,
POWER_ON or ERROR.
:raises: InvalidParameterValue if required iLO credentials are missing.
:raises: IloOperationError on an error from IloClient library.
"""
return _get_power_state(task.node)
@task_manager.require_exclusive_lock
def set_power_state(self, task, power_state):
"""Turn the current power state on or off.
:param task: a TaskManager instance.
        :param power_state: The desired power state POWER_ON, POWER_OFF or
REBOOT from :mod:`ironic.common.states`.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: IloOperationError on an error from IloClient library.
:raises: PowerStateFailure if the power couldn't be set to power_state.
"""
_set_power_state(task, power_state)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Reboot the node
:param task: a TaskManager instance.
:param node: The Node.
:raises: PowerStateFailure if the final state of the node is not
POWER_ON.
:raises: IloOperationError on an error from IloClient library.
"""
node = task.node
current_pstate = _get_power_state(node)
if current_pstate == states.POWER_ON:
_set_power_state(task, states.REBOOT)
elif current_pstate == states.POWER_OFF:
_set_power_state(task, states.POWER_ON)
|
|
# "Tiled" TMX loader/renderer and more
# Copyright 2012 Richard Jones <richard@mechanicalcat.net>
# This code is placed in the Public Domain.
# TODO: support properties on more things
import sys
import struct
import collections
import pygame
from pygame.locals import *
from pygame import Rect
from xml.etree import ElementTree
class Tile(object):
def __init__(self, gid, surface, tileset):
self.gid = gid
self.surface = surface
self.tile_width = tileset.tile_width
self.tile_height = tileset.tile_height
self.properties = {}
@classmethod
def fromSurface(cls, surface):
'''Create a new Tile object straight from a pygame Surface.
Its tile_width and tile_height will be set using the Surface dimensions.
Its gid will be 0.
'''
class ts:
tile_width, tile_height = surface.get_size()
return cls(0, surface, ts)
def loadxml(self, tag):
props = tag.find('properties')
if props is None:
return
for c in props.findall('property'):
# store additional properties.
name = c.attrib['name']
value = c.attrib['value']
# TODO hax
if value.isdigit():
value = int(value)
self.properties[name] = value
def __repr__(self):
return '<Tile %d>' % self.gid
class Tileset(object):
def __init__(self, name, tile_width, tile_height, firstgid):
self.name = name
self.tile_width = tile_width
self.tile_height = tile_height
self.firstgid = firstgid
self.tiles = []
self.properties = {}
@classmethod
def fromxml(cls, tag, firstgid=None):
if 'source' in tag.attrib:
firstgid = int(tag.attrib['firstgid'])
with open(tag.attrib['source']) as f:
tileset = ElementTree.fromstring(f.read())
return cls.fromxml(tileset, firstgid)
name = tag.attrib['name']
if firstgid is None:
firstgid = int(tag.attrib['firstgid'])
tile_width = int(tag.attrib['tilewidth'])
tile_height = int(tag.attrib['tileheight'])
tileset = cls(name, tile_width, tile_height, firstgid)
for c in tag.getchildren():
if c.tag == "image":
# create a tileset
tileset.add_image(c.attrib['source'])
elif c.tag == 'tile':
gid = tileset.firstgid + int(c.attrib['id'])
tileset.get_tile(gid).loadxml(c)
return tileset
def add_image(self, file):
image = pygame.image.load(file).convert_alpha()
if not image:
sys.exit("Error creating new Tileset: file %s not found" % file)
id = self.firstgid
for line in xrange(image.get_height()/self.tile_height):
for column in xrange(image.get_width()/self.tile_width):
pos = Rect(column*self.tile_width,
line*self.tile_height,
self.tile_width,
self.tile_height )
self.tiles.append(Tile(id, image.subsurface(pos), self))
id += 1
def get_tile(self, gid):
return self.tiles[gid - self.firstgid]
class Tilesets(dict):
def add(self, tileset):
for i, tile in enumerate(tileset.tiles):
i += tileset.firstgid
self[i] = tile
class Cell(object):
'''Layers are made of Cells (or empty space).
Cells have some basic properties:
x, y - the cell's index in the layer
px, py - the cell's pixel position
left, right, top, bottom - the cell's pixel boundaries
Additionally the cell may have other properties which are accessed using
standard dictionary methods:
cell['property name']
You may assign a new value for a property to or even delete an existing
property from the cell - this will not affect the Tile or any other Cells
using the Cell's Tile.
'''
def __init__(self, x, y, px, py, tile):
self.x, self.y = x, y
self.px, self.py = px, py
self.tile = tile
self.topleft = (px, py)
self.left = px
self.right = px + tile.tile_width
self.top = py
self.bottom = py + tile.tile_height
self.center = (px + tile.tile_width//2, py + tile.tile_height//2)
self._added_properties = {}
self._deleted_properties = set()
def __repr__(self):
return '<Cell %s,%s %d>' % (self.px, self.py, self.tile.gid)
def __contains__(self, key):
if key in self._deleted_properties:
return False
return key in self._added_properties or key in self.tile.properties
def __getitem__(self, key):
if key in self._deleted_properties:
raise KeyError(key)
if key in self._added_properties:
return self._added_properties[key]
if key in self.tile.properties:
return self.tile.properties[key]
raise KeyError(key)
def __setitem__(self, key, value):
self._added_properties[key] = value
def __delitem__(self, key):
self._deleted_properties.add(key)
def intersects(self, other):
'''Determine whether this Cell intersects with the other rect (which has
.x, .y, .width and .height attributes.)
'''
if self.px + self.tile.tile_width < other.x: return False
if other.x + other.width < self.px: return False
if self.py + self.tile.tile_height < other.y: return False
if other.y + other.height < self.py: return False
return True
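# A minimal usage sketch (not part of the original loader) of the per-cell
# property semantics described in the Cell docstring; `layer` is assumed to
# be a loaded Layer and 'blocker' a hypothetical property name.
def _cell_property_demo(layer):
    cell = layer[0, 0]
    if cell is not None and 'blocker' in cell:
        cell['blocker'] = 'none'   # per-cell override; the shared Tile is untouched
        del cell['blocker']        # per-cell deletion mask; other Cells still see it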
class LayerIterator(object):
'''Iterates over all the cells in a layer in column,row order.
'''
def __init__(self, layer):
self.layer = layer
self.i, self.j = 0, 0
    def next(self):
        if self.j == self.layer.height:
            raise StopIteration()
        value = self.layer[self.i, self.j]
        self.i += 1
        if self.i == self.layer.width:
            self.i = 0
            self.j += 1
        return value
class Layer(object):
'''A 2d grid of Cells.
Layers have some basic properties:
width, height - the dimensions of the Layer in cells
tile_width, tile_height - the dimensions of each cell
px_width, px_height - the dimensions of the Layer in pixels
tilesets - the tilesets used in this Layer (a Tilesets instance)
properties - any properties set for this Layer
cells - a dict of all the Cell instances for this Layer, keyed off
(x, y) index.
Additionally you may look up a cell using direct item access:
layer[x, y] is layer.cells[x, y]
Note that empty cells will be set to None instead of a Cell instance.
'''
def __init__(self, name, visible, map):
self.name = name
self.visible = visible
self.position = (0, 0)
# TODO get from TMX?
self.px_width = map.px_width
self.px_height = map.px_height
self.tile_width = map.tile_width
self.tile_height = map.tile_height
self.width = map.width
self.height = map.height
self.tilesets = map.tilesets
self.group = pygame.sprite.Group()
self.properties = {}
self.cells = {}
def __repr__(self):
return '<Layer "%s" at 0x%x>' % (self.name, id(self))
def __getitem__(self, pos):
return self.cells.get(pos)
def __setitem__(self, pos, tile):
x, y = pos
px = x * self.tile_width
        py = y * self.tile_height
self.cells[pos] = Cell(x, y, px, py, tile)
def __iter__(self):
return LayerIterator(self)
@classmethod
def fromxml(cls, tag, map):
layer = cls(tag.attrib['name'], int(tag.attrib.get('visible', 1)), map)
data = tag.find('data')
if data is None:
raise ValueError('layer %s does not contain <data>' % layer.name)
data = data.text.strip()
data = data.decode('base64').decode('zlib')
data = struct.unpack('<%di' % (len(data)/4,), data)
assert len(data) == layer.width * layer.height
for i, gid in enumerate(data):
if gid < 1: continue # not set
tile = map.tilesets[gid]
x = i % layer.width
y = i // layer.width
layer.cells[x,y] = Cell(x, y, x*map.tile_width, y*map.tile_height, tile)
return layer
def update(self, dt, *args):
pass
def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0):
self.view_x, self.view_y = x, y
self.view_w, self.view_h = w, h
x -= viewport_ox
y -= viewport_oy
self.position = (x, y)
def draw(self, surface):
'''Draw this layer, limited to the current viewport, to the Surface.
'''
ox, oy = self.position
w, h = self.view_w, self.view_h
for x in range(ox, ox+w+self.tile_width, self.tile_width):
i = x // self.tile_width
for y in range(oy, oy+h+self.tile_height, self.tile_height):
j = y // self.tile_height
if (i, j) not in self.cells:
continue
cell = self.cells[i, j]
surface.blit(cell.tile.surface, (cell.px-ox, cell.py-oy))
def find(self, *properties):
'''Find all cells with the given properties set.
'''
r = []
for propname in properties:
for cell in self.cells.values():
if cell and propname in cell:
r.append(cell)
return r
def match(self, **properties):
'''Find all cells with the given properties set to the given values.
'''
r = []
for propname in properties:
for cell in self.cells.values():
if propname not in cell:
continue
if properties[propname] == cell[propname]:
r.append(cell)
return r
def collide(self, rect, propname):
'''Find all cells the rect is touching that have the indicated property
name set.
'''
r = []
for cell in self.get_in_region(rect.left, rect.top, rect.right, rect.bottom):
if not cell.intersects(rect):
continue
if propname in cell:
r.append(cell)
return r
def get_in_region(self, x1, y1, x2, y2):
        '''Return cells (in [column][row]) that are within the map-space
        pixel bounds specified by the top-left (x1, y1) and bottom-right
        (x2, y2) corners.
Return a list of Cell instances.
'''
i1 = max(0, x1 // self.tile_width)
j1 = max(0, y1 // self.tile_height)
i2 = min(self.width, x2 // self.tile_width + 1)
j2 = min(self.height, y2 // self.tile_height + 1)
return [self.cells[i, j]
for i in range(int(i1), int(i2))
for j in range(int(j1), int(j2))
if (i, j) in self.cells]
def get_at(self, x, y):
'''Return the cell at the nominated (x, y) coordinate.
Return a Cell instance or None.
'''
i = x // self.tile_width
j = y // self.tile_height
return self.cells.get((i, j))
def neighbors(self, index):
'''Return the indexes of the valid (ie. within the map) cardinal (ie.
North, South, East, West) neighbors of the nominated cell index.
Returns a list of 2-tuple indexes.
'''
i, j = index
n = []
if i < self.width-1:
n.append((i+1, j))
if i > 0:
n.append((i-1, j))
if j < self.height-1:
n.append((i, j+1))
if j > 0:
n.append((i, j-1))
return n
class SpriteLayer(pygame.sprite.AbstractGroup):
def __init__(self):
super(SpriteLayer, self).__init__()
self.visible = True
def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0):
self.view_x, self.view_y = x, y
self.view_w, self.view_h = w, h
x -= viewport_ox
y -= viewport_oy
self.position = (x, y)
def draw(self, screen):
ox, oy = self.position
w, h = self.view_w, self.view_h
for sprite in self.sprites():
sx, sy = sprite.rect.topleft
screen.blit(sprite.image, (sx-ox, sy-oy))
class Layers(list):
def __init__(self):
self.by_name = {}
def add_named(self, layer, name):
self.append(layer)
self.by_name[name] = layer
    def __getitem__(self, item):
        if isinstance(item, int):
            return list.__getitem__(self, item)
        return self.by_name[item]
class TileMap(object):
'''A TileMap is a collection of Layers which contain gridded maps or sprites
which are drawn constrained by a viewport.
And breathe.
TileMaps are loaded from TMX files which sets the .layers and .tilesets
properties. After loading additional SpriteLayers may be added.
A TileMap's rendering is restricted by a viewport which is defined by the
size passed in at construction time and the focus set by set_focus() or
force_focus().
TileMaps have a number of properties:
width, height - the dimensions of the tilemap in cells
tile_width, tile_height - the dimensions of the cells in the map
px_width, px_height - the dimensions of the tilemap in pixels
properties - any properties set on the tilemap in the TMX file
layers - all layers of this tilemap as a Layers instance
tilesets - all tilesets of this tilemap as a Tilesets instance
fx, fy - viewport focus point
view_w, view_h - viewport size
view_x, view_y - viewport offset (origin)
viewport - a Rect instance giving the current viewport specification
'''
def __init__(self, size, origin=(0,0)):
self.px_width = 0
self.px_height = 0
self.tile_width = 0
self.tile_height = 0
self.width = 0
self.height = 0
self.properties = {}
self.layers = Layers()
self.tilesets = Tilesets()
self.fx, self.fy = 0, 0 # viewport focus point
self.view_w, self.view_h = size # viewport size
self.view_x, self.view_y = origin # viewport offset
self.viewport = Rect(origin, size)
def update(self, dt, *args):
for layer in self.layers:
layer.update(dt, *args)
def draw(self, screen):
for layer in self.layers:
if layer.visible:
layer.draw(screen)
@classmethod
def load(cls, filename, viewport):
with open(filename) as f:
map = ElementTree.fromstring(f.read())
        # read the general map information from the root <map> tag
tilemap = TileMap(viewport)
tilemap.width = int(map.attrib['width'])
tilemap.height = int(map.attrib['height'])
tilemap.tile_width = int(map.attrib['tilewidth'])
tilemap.tile_height = int(map.attrib['tileheight'])
tilemap.px_width = tilemap.width * tilemap.tile_width
tilemap.px_height = tilemap.height * tilemap.tile_height
for tag in map.findall('tileset'):
tilemap.tilesets.add(Tileset.fromxml(tag))
for tag in map.findall('layer'):
layer = Layer.fromxml(tag, tilemap)
tilemap.layers.add_named(layer, layer.name)
return tilemap
_old_focus = None
def set_focus(self, fx, fy, force=False):
'''Determine the viewport based on a desired focus pixel in the
Layer space (fx, fy) and honoring any bounding restrictions of
child layers.
The focus will always be shifted to ensure no child layers display
out-of-bounds data, as defined by their dimensions px_width and px_height.
'''
        # The result is that all children will have their viewport set, defining
# which of their pixels should be visible.
fx, fy = int(fx), int(fy)
self.fx, self.fy = fx, fy
a = (fx, fy)
# check for NOOP (same arg passed in)
if not force and self._old_focus == a:
return
self._old_focus = a
# get our viewport information, scaled as appropriate
w = int(self.view_w)
h = int(self.view_h)
w2, h2 = w//2, h//2
if self.px_width <= w:
# this branch for centered view and no view jump when
# crossing the center; both when world width <= view width
restricted_fx = self.px_width / 2
else:
if (fx - w2) < 0:
restricted_fx = w2 # hit minimum X extent
elif (fx + w2) > self.px_width:
restricted_fx = self.px_width - w2 # hit maximum X extent
else:
restricted_fx = fx
if self.px_height <= h:
# this branch for centered view and no view jump when
# crossing the center; both when world height <= view height
restricted_fy = self.px_height / 2
else:
if (fy - h2) < 0:
restricted_fy = h2 # hit minimum Y extent
elif (fy + h2) > self.px_height:
restricted_fy = self.px_height - h2 # hit maximum Y extent
else:
restricted_fy = fy
# ... and this is our focus point, center of screen
self.restricted_fx = int(restricted_fx)
self.restricted_fy = int(restricted_fy)
# determine child view bounds to match that focus point
x, y = int(restricted_fx - w2), int(restricted_fy - h2)
self.viewport.x = x
self.viewport.y = y
self.childs_ox = x - self.view_x
self.childs_oy = y - self.view_y
for layer in self.layers:
layer.set_view(x, y, w, h, self.view_x, self.view_y)
def force_focus(self, fx, fy):
'''Force the manager to focus on a point, regardless of any managed layer
visible boundaries.
'''
# This calculation takes into account the scaling of this Layer (and
# therefore also its children).
        # The result is that all children will have their viewport set, defining
# which of their pixels should be visible.
        self.fx, self.fy = map(int, (fx, fy))
# get our view size
w = int(self.view_w)
h = int(self.view_h)
w2, h2 = w//2, h//2
# bottom-left corner of the viewport
x, y = fx - w2, fy - h2
self.viewport.x = x
self.viewport.y = y
self.childs_ox = x - self.view_x
self.childs_oy = y - self.view_y
for layer in self.layers:
layer.set_view(x, y, w, h, self.view_x, self.view_y)
def pixel_from_screen(self, x, y):
'''Look up the Layer-space pixel matching the screen-space pixel.
'''
vx, vy = self.childs_ox, self.childs_oy
return int(vx + x), int(vy + y)
def pixel_to_screen(self, x, y):
'''Look up the screen-space pixel matching the Layer-space pixel.
'''
screen_x = x-self.childs_ox
screen_y = y-self.childs_oy
return int(screen_x), int(screen_y)
def index_at(self, x, y):
'''Return the map index at the (screen-space) pixel position.
'''
sx, sy = self.pixel_from_screen(x, y)
return int(sx//self.tile_width), int(sy//self.tile_height)
def load(filename, viewport):
return TileMap.load(filename, viewport)
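# A minimal render sketch (not part of the original file): load a map, focus
# the viewport on its centre and blit a single frame. Assumes `filename`
# points at a valid TMX file.
def _demo_render(filename):
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    tilemap = load(filename, (640, 480))
    tilemap.set_focus(tilemap.px_width // 2, tilemap.px_height // 2)
    tilemap.draw(screen)
    pygame.display.flip()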
if __name__ == '__main__':
# allow image load to work
pygame.init()
pygame.display.set_mode((640, 480))
t = load(sys.argv[1], (0, 0))
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TaskError
from pants.build_graph.build_graph import BuildGraph
from pants.goal.products import UnionProducts
class ClasspathEntry(object):
"""Represents a java classpath entry."""
def __init__(self, path):
self._path = path
@property
def path(self):
"""Returns the pants internal path of this classpath entry.
Suitable for use in constructing classpaths for pants executions and pants generated artifacts.
:rtype: string
"""
return self._path
def is_excluded_by(self, excludes):
"""Returns `True` if this classpath entry should be excluded given the `excludes` in play.
:param excludes: The excludes to check this classpath entry against.
:type excludes: list of :class:`pants.backend.jvm.targets.exclude.Exclude`
:rtype: bool
"""
return False
def __hash__(self):
return hash(self.path)
def __eq__(self, other):
return isinstance(other, ClasspathEntry) and self.path == other.path
def __ne__(self, other):
return not self == other
def __repr__(self):
return 'ClasspathEntry(path={!r})'.format(self.path)
@classmethod
def is_artifact_classpath_entry(cls, classpath_entry):
return isinstance(classpath_entry, ArtifactClasspathEntry)
@classmethod
def is_internal_classpath_entry(cls, classpath_entry):
return not cls.is_artifact_classpath_entry(classpath_entry)
class ArtifactClasspathEntry(ClasspathEntry):
"""Represents a resolved third party classpath entry."""
def __init__(self, path, coordinate, cache_path):
super(ArtifactClasspathEntry, self).__init__(path)
self._coordinate = coordinate
self._cache_path = cache_path
@property
def coordinate(self):
"""Returns the maven coordinate that used to resolve this classpath entry's artifact.
:rtype: :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
"""
return self._coordinate
@property
def cache_path(self):
"""Returns the external cache path of this classpath entry.
For example, the `~/.m2/repository` or `~/.ivy2/cache` location of the resolved artifact for
maven and ivy resolvers respectively.
Suitable for use in constructing classpaths for external tools that should not be subject to
potential volatility in pants own internal caches.
:rtype: string
"""
return self._cache_path
def is_excluded_by(self, excludes):
return any(_matches_exclude(self.coordinate, exclude) for exclude in excludes)
def __hash__(self):
return hash((self.path, self.coordinate, self.cache_path))
def __eq__(self, other):
return (isinstance(other, ArtifactClasspathEntry) and
self.path == other.path and
self.coordinate == other.coordinate and
self.cache_path == other.cache_path)
def __ne__(self, other):
return not self == other
def __repr__(self):
return ('ArtifactClasspathEntry(path={!r}, coordinate={!r}, cache_path={!r})'
.format(self.path, self.coordinate, self.cache_path))
def _matches_exclude(coordinate, exclude):
if not coordinate.org == exclude.org:
return False
if not exclude.name:
return True
if coordinate.name == exclude.name:
return True
return False
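# Illustration of the rule above (coordinates are hypothetical):
#   Exclude(org='com.example')              excludes every com.example artifact
#   Exclude(org='com.example', name='foo')  excludes only com.example#foo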
def _not_excluded_filter(excludes):
def not_excluded(product_to_target):
path_tuple = product_to_target[0]
conf, classpath_entry = path_tuple
return not classpath_entry.is_excluded_by(excludes)
return not_excluded
class ClasspathProducts(object):
def __init__(self, pants_workdir, classpaths=None, excludes=None):
self._classpaths = classpaths or UnionProducts()
self._excludes = excludes or UnionProducts()
self._pants_workdir = pants_workdir
@staticmethod
def init_func(pants_workdir):
return lambda: ClasspathProducts(pants_workdir)
def copy(self):
"""Returns a copy of this ClasspathProducts.
Edits to the copy's classpaths or exclude associations will not affect the classpaths or
    excludes in the original. The copy is shallow though, so edits to the copy's product values
will mutate the original's product values. See `UnionProducts.copy`.
:rtype: :class:`ClasspathProducts`
"""
return ClasspathProducts(pants_workdir=self._pants_workdir,
classpaths=self._classpaths.copy(),
excludes=self._excludes.copy())
def add_for_targets(self, targets, classpath_elements):
"""Adds classpath path elements to the products of all the provided targets."""
for target in targets:
self.add_for_target(target, classpath_elements)
def add_for_target(self, target, classpath_elements):
"""Adds classpath path elements to the products of the provided target."""
self._add_elements_for_target(target, self._wrap_path_elements(classpath_elements))
def add_jars_for_targets(self, targets, conf, resolved_jars):
"""Adds jar classpath elements to the products of the provided targets.
The resolved jars are added in a way that works with excludes.
"""
classpath_entries = []
for jar in resolved_jars:
if not jar.pants_path:
raise TaskError('Jar: {!s} has no specified path.'.format(jar.coordinate))
cp_entry = ArtifactClasspathEntry(jar.pants_path, jar.coordinate, jar.cache_path)
classpath_entries.append((conf, cp_entry))
for target in targets:
self._add_elements_for_target(target, classpath_entries)
def add_excludes_for_targets(self, targets):
"""Add excludes from the provided targets.
Does not look up transitive excludes.
:param targets: The targets to add excludes for.
:type targets: list of :class:`pants.build_graph.target.Target`
"""
for target in targets:
self._add_excludes_for_target(target)
def remove_for_target(self, target, classpath_elements):
"""Removes the given entries for the target."""
self._classpaths.remove_for_target(target, self._wrap_path_elements(classpath_elements))
def get_for_target(self, target):
"""Gets the classpath products for the given target.
Products are returned in order, respecting target excludes.
:param target: The target to lookup classpath products for.
:returns: The ordered (conf, path) tuples, with paths being either classfile directories or
jars.
:rtype: list of (string, string)
"""
return self.get_for_targets([target])
def get_for_targets(self, targets):
"""Gets the classpath products for the given targets.
Products are returned in order, respecting target excludes.
:param targets: The targets to lookup classpath products for.
:returns: The ordered (conf, path) tuples, with paths being either classfile directories or
jars.
:rtype: list of (string, string)
"""
cp_entries = self.get_classpath_entries_for_targets(targets)
return [(conf, cp_entry.path) for conf, cp_entry in cp_entries]
def get_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the classpath products for the given targets.
Products are returned in order, optionally respecting target excludes.
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
"""
# remove the duplicate, preserve the ordering.
return list(OrderedSet([cp for cp, target in self.get_product_target_mappings_for_targets(
targets, respect_excludes)]))
def get_product_target_mappings_for_targets(self, targets, respect_excludes=True):
"""Gets the classpath products-target associations for the given targets.
Product-target tuples are returned in order, optionally respecting target excludes.
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (classpath products, target) tuples.
"""
classpath_target_tuples = self._classpaths.get_product_target_mappings_for_targets(targets)
if respect_excludes:
return self._filter_by_excludes(classpath_target_tuples, targets)
else:
return classpath_target_tuples
def get_artifact_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the artifact classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include external artifact classpath elements (ie: resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ArtifactClasspathEntry`)
"""
classpath_tuples = self.get_classpath_entries_for_targets(targets,
respect_excludes=respect_excludes)
return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
if ClasspathEntry.is_artifact_classpath_entry(cp_entry)]
def get_internal_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the internal classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include internal artifact classpath elements (ie: no resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
"""
classpath_tuples = self.get_classpath_entries_for_targets(targets,
respect_excludes=respect_excludes)
return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
if ClasspathEntry.is_internal_classpath_entry(cp_entry)]
def _filter_by_excludes(self, classpath_target_tuples, root_targets):
# Excludes are always applied transitively, so regardless of whether a transitive
# set of targets was included here, their closure must be included.
closure = BuildGraph.closure(root_targets, bfs=True)
excludes = self._excludes.get_for_targets(closure)
return filter(_not_excluded_filter(excludes), classpath_target_tuples)
def _add_excludes_for_target(self, target):
if target.is_exported:
self._excludes.add_for_target(target, [Exclude(target.provides.org,
target.provides.name)])
if isinstance(target, JvmTarget) and target.excludes:
self._excludes.add_for_target(target, target.excludes)
def _wrap_path_elements(self, classpath_elements):
return [(element[0], ClasspathEntry(element[1])) for element in classpath_elements]
def _add_elements_for_target(self, target, elements):
self._validate_classpath_tuples(elements, target)
self._classpaths.add_for_target(target, elements)
def _validate_classpath_tuples(self, classpath, target):
"""Validates that all files are located within the working directory, to simplify relativization.
:param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
ClasspathEntry.
:param target: The target that the classpath tuple is being registered for.
:raises: `TaskError` when the path is outside the work directory
"""
for classpath_tuple in classpath:
conf, classpath_entry = classpath_tuple
path = classpath_entry.path
if os.path.relpath(path, self._pants_workdir).startswith(os.pardir):
raise TaskError(
'Classpath entry {} for target {} is located outside the working directory "{}".'
.format(path, target.address.spec, self._pants_workdir))
|
|
from __future__ import print_function
import logging
import numpy
from six.moves import xrange
import theano
from theano.tensor import as_tensor_variable
from theano.gof import Op, Apply
from theano.gradient import DisconnectedType
from theano.tensor import basic as tensor
logger = logging.getLogger(__name__)
class MatrixPinv(Op):
"""Computes the pseudo-inverse of a matrix :math:`A`.
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
Note that :math:`Ax=AA^+b`, so :math:`AA^+` is close to the identity matrix.
    This method is not faster than `matrix_inverse`. Its strength comes from
    the fact that it works for non-square matrices.
If you have a square matrix though, `matrix_inverse` can be both more
exact and faster to compute. Also this op does not get optimized into a
solve op.
"""
__props__ = ()
def __init__(self):
pass
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = numpy.linalg.pinv(x).astype(x.dtype)
pinv = MatrixPinv()
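# A minimal usage sketch (assumed, not from the original module):
#   x = theano.tensor.dmatrix('x')
#   f = theano.function([x], pinv(x))
#   f(numpy.ones((4, 3)))  # returns the 3x4 pseudo-inverse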
class MatrixInverse(Op):
"""Computes the inverse of a matrix :math:`A`.
Given a square matrix :math:`A`, ``matrix_inverse`` returns a square
    matrix :math:`A_{inv}` such that the dot products :math:`A \cdot A_{inv}`
    and :math:`A_{inv} \cdot A` equal the identity matrix :math:`I`.
:note: When possible, the call to this op will be optimized to the call
of ``solve``.
"""
__props__ = ()
def __init__(self):
pass
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
z[0] = numpy.linalg.inv(x).astype(x.dtype)
def grad(self, inputs, g_outputs):
r"""The gradient function should return
.. math:: V\frac{\partial X^{-1}}{\partial X},
where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
``inputs``. Using the `matrix cookbook
<http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
        one can deduce that the relation corresponds to
.. math:: (X^{-1} \cdot V^{T} \cdot X^{-1})^T.
"""
x, = inputs
xi = self(x)
gz, = g_outputs
# TT.dot(gz.T,xi)
return [-matrix_dot(xi, gz.T, xi).T]
def R_op(self, inputs, eval_points):
r"""The gradient function should return
.. math:: \frac{\partial X^{-1}}{\partial X}V,
where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
``inputs``. Using the `matrix cookbook
<http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
        one can deduce that the relation corresponds to
.. math:: X^{-1} \cdot V \cdot X^{-1}.
"""
x, = inputs
xi = self(x)
ev, = eval_points
if ev is None:
return [None]
return [-matrix_dot(xi, ev, xi)]
def infer_shape(self, node, shapes):
return shapes
matrix_inverse = MatrixInverse()
def matrix_dot(*args):
""" Shorthand for product between several dots
Given :math:`N` matrices :math:`A_0, A_1, .., A_N`, ``matrix_dot`` will
generate the matrix product between all in the given order, namely
:math:`A_0 \cdot A_1 \cdot A_2 \cdot .. \cdot A_N`.
"""
rval = args[0]
for a in args[1:]:
rval = theano.tensor.dot(rval, a)
return rval
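# For illustration: matrix_dot(A, B, C) builds the graph
# theano.tensor.dot(theano.tensor.dot(A, B), C).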
class AllocDiag(Op):
"""
Allocates a square matrix with the given vector as its diagonal.
"""
__props__ = ()
def make_node(self, _x):
x = as_tensor_variable(_x)
if x.type.ndim != 1:
raise TypeError('AllocDiag only works on vectors', _x)
return Apply(self, [x], [theano.tensor.matrix(dtype=x.type.dtype)])
def grad(self, inputs, g_outputs):
return [extract_diag(g_outputs[0])]
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if x.ndim != 1:
raise TypeError(x)
z[0] = numpy.diag(x)
def infer_shape(self, node, shapes):
x_s, = shapes
return [(x_s[0], x_s[0])]
alloc_diag = AllocDiag()
class ExtractDiag(Op):
""" Return the diagonal of a matrix.
    :note: works on the GPU.
"""
__props__ = ("view",)
def __init__(self, view=False):
self.view = view
if self.view:
self.view_map = {0: [0]}
def make_node(self, _x):
if not isinstance(_x, theano.Variable):
x = as_tensor_variable(_x)
else:
x = _x
if x.type.ndim != 2:
raise TypeError('ExtractDiag only works on matrices', _x)
return Apply(self, [x], [x.type.__class__(broadcastable=(False,),
dtype=x.type.dtype)()])
def perform(self, node, ins, outs):
""" For some reason numpy.diag(x) is really slow, so we
implemented our own. """
x, = ins
z, = outs
# zero-dimensional matrices ...
if x.shape[0] == 0 or x.shape[1] == 0:
z[0] = node.outputs[0].type.value_zeros((0,))
return
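        # Stride trick: start from the first row (or column) and widen the
        # stride so each step advances one row and one column at once; the
        # result is a view of the diagonal, with no copy.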
if x.shape[0] < x.shape[1]:
rval = x[:, 0]
else:
rval = x[0]
rval.strides = (x.strides[0] + x.strides[1],)
if self.view:
z[0] = rval
else:
z[0] = rval.copy()
def __str__(self):
return 'ExtractDiag{view=%s}' % self.view
def grad(self, inputs, g_outputs):
x = theano.tensor.zeros_like(inputs[0])
xdiag = alloc_diag(g_outputs[0])
return [theano.tensor.set_subtensor(
x[:xdiag.shape[0], :xdiag.shape[1]],
xdiag)]
def infer_shape(self, node, shapes):
x_s, = shapes
shp = theano.tensor.min(node.inputs[0].shape)
return [(shp,)]
extract_diag = ExtractDiag()
# TODO: optimization to insert ExtractDiag with view=True
def diag(x):
"""
Numpy-compatibility method
If `x` is a matrix, return its diagonal.
If `x` is a vector return a matrix with it as its diagonal.
* This method does not support the `k` argument that numpy supports.
"""
xx = as_tensor_variable(x)
if xx.type.ndim == 1:
return alloc_diag(xx)
elif xx.type.ndim == 2:
return extract_diag(xx)
else:
raise TypeError('diag requires vector or matrix argument', x)
def trace(X):
"""
Returns the sum of diagonal elements of matrix X.
    :note: works on GPU since 0.6rc4.
"""
return extract_diag(X).sum()
class Det(Op):
"""Matrix determinant
Input should be a square matrix
"""
__props__ = ()
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2
o = theano.tensor.scalar(dtype=x.dtype)
return Apply(self, [x], [o])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
try:
z[0] = numpy.asarray(numpy.linalg.det(x), dtype=x.dtype)
except Exception:
print('Failed to compute determinant', x)
raise
def grad(self, inputs, g_outputs):
gz, = g_outputs
x, = inputs
return [gz * self(x) * matrix_inverse(x).T]
def infer_shape(self, node, shapes):
return [()]
def __str__(self):
return "Det"
det = Det()
class Eig(Op):
"""Compute the eigenvalues and right eigenvectors of a square array.
"""
_numop = staticmethod(numpy.linalg.eig)
__props__ = ()
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2
w = theano.tensor.vector(dtype=x.dtype)
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, v) = outputs
w[0], v[0] = [z.astype(x.dtype) for z in self._numop(x)]
def infer_shape(self, node, shapes):
n = shapes[0][0]
return [(n,), (n, n)]
eig = Eig()
class Eigh(Eig):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
"""
_numop = staticmethod(numpy.linalg.eigh)
__props__ = ('UPLO',)
def __init__(self, UPLO='L'):
assert UPLO in ['L', 'U']
self.UPLO = UPLO
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2
# Numpy's linalg.eigh may return either double or single
        # precision eigenvalues depending on installed version of
# LAPACK. Rather than trying to reproduce the (rather
# involved) logic, we just probe linalg.eigh with a trivial
# input.
w_dtype = self._numop([[numpy.dtype(x.dtype).type()]])[0].dtype.name
w = theano.tensor.vector(dtype=w_dtype)
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, v])
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, v) = outputs
w[0], v[0] = self._numop(x, self.UPLO)
def grad(self, inputs, g_outputs):
r"""The gradient function should return
.. math:: \sum_n\left(W_n\frac{\partial\,w_n}
{\partial a_{ij}} +
\sum_k V_{nk}\frac{\partial\,v_{nk}}
{\partial a_{ij}}\right),
where [:math:`W`, :math:`V`] corresponds to ``g_outputs``,
:math:`a` to ``inputs``, and :math:`(w, v)=\mbox{eig}(a)`.
Analytic formulae for eigensystem gradients are well-known in
perturbation theory:
.. math:: \frac{\partial\,w_n}
{\partial a_{ij}} = v_{in}\,v_{jn}
.. math:: \frac{\partial\,v_{kn}}
{\partial a_{ij}} =
\sum_{m\ne n}\frac{v_{km}v_{jn}}{w_n-w_m}
"""
x, = inputs
w, v = self(x)
# Replace gradients wrt disconnected variables with
# zeros. This is a work-around for issue #1063.
gw, gv = _zero_disconnected([w, v], g_outputs)
return [EighGrad(self.UPLO)(x, w, v, gw, gv)]
def _zero_disconnected(outputs, grads):
l = []
for o, g in zip(outputs, grads):
if isinstance(g.type, DisconnectedType):
l.append(o.zeros_like())
else:
l.append(g)
return l
class EighGrad(Op):
"""Gradient of an eigensystem of a Hermitian matrix.
"""
__props__ = ('UPLO',)
def __init__(self, UPLO='L'):
assert UPLO in ['L', 'U']
self.UPLO = UPLO
if UPLO == 'L':
self.tri0 = numpy.tril
self.tri1 = lambda a: numpy.triu(a, 1)
else:
self.tri0 = numpy.triu
self.tri1 = lambda a: numpy.tril(a, -1)
def make_node(self, x, w, v, gw, gv):
x, w, v, gw, gv = map(as_tensor_variable, (x, w, v, gw, gv))
assert x.ndim == 2
assert w.ndim == 1
assert v.ndim == 2
assert gw.ndim == 1
assert gv.ndim == 2
out_dtype = theano.scalar.upcast(x.dtype, w.dtype, v.dtype,
gw.dtype, gv.dtype)
out = theano.tensor.matrix(dtype=out_dtype)
return Apply(self, [x, w, v, gw, gv], [out])
def perform(self, node, inputs, outputs):
"""
Implements the "reverse-mode" gradient for the eigensystem of
a square matrix.
"""
x, w, v, W, V = inputs
N = x.shape[0]
outer = numpy.outer
def G(n):
return sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])
for m in xrange(N) if m != n)
g = sum(outer(v[:, n], v[:, n] * W[n] + G(n))
for n in xrange(N))
# Numpy's eigh(a, 'L') (eigh(a, 'U')) is a function of tril(a)
# (triu(a)) only. This means that partial derivative of
# eigh(a, 'L') (eigh(a, 'U')) with respect to a[i,j] is zero
# for i < j (i > j). At the same time, non-zero components of
# the gradient must account for the fact that variation of the
# opposite triangle contributes to variation of two elements
# of Hermitian (symmetric) matrix. The following line
# implements the necessary logic.
out = self.tri0(g) + self.tri1(g).T
# The call to self.tri0 in perform upcast from float32 to
# float64 or from int* to int64 in numpy 1.6.1 but not in
# 1.6.2. We do not want version dependent dtype in Theano.
# We think it should be the same as the output.
outputs[0][0] = numpy.asarray(out, dtype=node.outputs[0].dtype)
def infer_shape(self, node, shapes):
return [shapes[0]]
def eigh(a, UPLO='L'):
return Eigh(UPLO)(a)
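# Illustrative usage of `eigh` (a sketch; only the triangle selected by
# UPLO is read, matching numpy.linalg.eigh):
#
#     a = theano.tensor.dmatrix('a')
#     w, v = eigh(a)                      # eigenvalues and eigenvectors
#     f = theano.function([a], [w, v])
#     f(numpy.array([[2., 1.], [1., 2.]]))
#     # -> eigenvalues [1., 3.] with orthonormal eigenvectors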
class QRFull(Op):
"""
Full QR Decomposition.
Computes the QR decomposition of a matrix.
Factor the matrix a as qr, where q is orthonormal
and r is upper-triangular.
"""
_numop = staticmethod(numpy.linalg.qr)
__props__ = ('mode',)
def __init__(self, mode):
self.mode = mode
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2, "The input of qr function should be a matrix."
q = theano.tensor.matrix(dtype=x.dtype)
if self.mode != 'raw':
r = theano.tensor.matrix(dtype=x.dtype)
else:
r = theano.tensor.vector(dtype=x.dtype)
return Apply(self, [x], [q, r])
def perform(self, node, inputs, outputs):
(x,) = inputs
(q, r) = outputs
assert x.ndim == 2, "The input of qr function should be a matrix."
q[0], r[0] = self._numop(x, self.mode)
class QRIncomplete(Op):
"""
Incomplete QR Decomposition.
Computes the QR decomposition of a matrix.
Factor the matrix a as qr and return a single matrix.
"""
_numop = staticmethod(numpy.linalg.qr)
__props__ = ('mode',)
def __init__(self, mode):
self.mode = mode
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2, "The input of qr function should be a matrix."
q = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [q])
def perform(self, node, inputs, outputs):
(x,) = inputs
(q,) = outputs
assert x.ndim == 2, "The input of qr function should be a matrix."
q[0] = self._numop(x,
self.mode)
def qr(a, mode="full"):
"""
Computes the QR decomposition of a matrix.
Factor the matrix a as qr, where q
is orthonormal and r is upper-triangular.
:type a:
array_like, shape (M, N)
:param a:
Matrix to be factored.
:type mode:
one of 'reduced', 'complete', 'r', 'raw', 'full' and
'economic', optional
:keyword mode:
If K = min(M, N), then
'reduced'
returns q, r with dimensions (M, K), (K, N)
'complete'
returns q, r with dimensions (M, M), (M, N)
'r'
returns r only with dimensions (K, N)
'raw'
returns h, tau with dimensions (N, M), (K,)
'full'
alias of 'reduced', deprecated (default)
'economic'
returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy
1.8, see the notes for more information. The default is
'reduced' and to maintain backward compatibility with earlier
versions of numpy both it and the old default 'full' can be
omitted. Note that array h returned in 'raw' mode is
transposed for calling Fortran. The 'economic' mode is
deprecated. The modes 'full' and 'economic' may be passed
using only the first letter for backwards compatibility, but
all others must be spelled out.
        Default mode is 'full', which is also the default for numpy 1.6.1.
    :note: The default mode was left as 'full' because 'full' and 'reduced'
        do the same thing in newer numpy versions, while only 'full' works
        on older numpy versions.
:rtype q:
matrix of float or complex, optional
:return q:
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or
not a is real/complex. The determinant may be either +/- 1 in
that case.
:rtype r:
matrix of float or complex, optional
:return r:
The upper-triangular matrix.
"""
x = [[2, 1], [3, 4]]
if isinstance(numpy.linalg.qr(x, mode), tuple):
return QRFull(mode)(a)
else:
return QRIncomplete(mode)(a)
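# Illustrative usage of `qr` (a sketch): tuple-returning modes go through
# QRFull, single-output modes through QRIncomplete.
#
#     a = theano.tensor.dmatrix('a')
#     q, r = qr(a, mode='complete')   # two outputs (QRFull)
#     r_only = qr(a, mode='r')        # single output (QRIncomplete)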
class SVD(Op):
# See doc in the docstring of the function just after this class.
_numop = staticmethod(numpy.linalg.svd)
__props__ = ('full_matrices', 'compute_uv')
def __init__(self, full_matrices=True, compute_uv=True):
"""
full_matrices : bool, optional
If True (default), u and v have the shapes (M, M) and (N, N),
respectively.
Otherwise, the shapes are (M, K) and (K, N), respectively,
where K = min(M, N).
compute_uv : bool, optional
Whether or not to compute u and v in addition to s.
True by default.
"""
self.full_matrices = full_matrices
self.compute_uv = compute_uv
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2, "The input of svd function should be a matrix."
w = theano.tensor.matrix(dtype=x.dtype)
u = theano.tensor.vector(dtype=x.dtype)
v = theano.tensor.matrix(dtype=x.dtype)
return Apply(self, [x], [w, u, v])
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, u, v) = outputs
assert x.ndim == 2, "The input of svd function should be a matrix."
w[0], u[0], v[0] = self._numop(x,
self.full_matrices,
self.compute_uv)
def svd(a, full_matrices=1, compute_uv=1):
"""
This function performs the SVD on CPU.
:type full_matrices: bool, optional
:param full_matrices:
If True (default), u and v have the shapes (M, M) and (N, N),
respectively.
Otherwise, the shapes are (M, K) and (K, N), respectively,
where K = min(M, N).
:type compute_uv: bool, optional
:param compute_uv:
Whether or not to compute u and v in addition to s.
True by default.
    :returns: The matrices U, s (the singular values) and V^T, in the
        order returned by numpy.linalg.svd.
"""
return SVD(full_matrices, compute_uv)(a)
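# Illustrative usage of `svd` (a sketch): the three outputs follow
# numpy.linalg.svd's (u, s, v^T) order.
#
#     a = theano.tensor.dmatrix('a')
#     u, s, vt = svd(a)
#     f = theano.function([a], [u, s, vt])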
class lstsq(Op):
__props__ = ()
def make_node(self, x, y, rcond):
x = theano.tensor.as_tensor_variable(x)
y = theano.tensor.as_tensor_variable(y)
rcond = theano.tensor.as_tensor_variable(rcond)
return theano.Apply(self, [x, y, rcond],
[theano.tensor.matrix(), theano.tensor.dvector(),
theano.tensor.lscalar(), theano.tensor.dvector()])
def perform(self, node, inputs, outputs):
zz = numpy.linalg.lstsq(inputs[0], inputs[1], inputs[2])
outputs[0][0] = zz[0]
outputs[1][0] = zz[1]
outputs[2][0] = numpy.array(zz[2])
outputs[3][0] = zz[3]
def matrix_power(M, n):
result = 1
for i in xrange(n):
result = theano.dot(result, M)
return result
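# `matrix_power` chains n `theano.dot` calls (linear in n, unlike
# numpy.linalg.matrix_power's binary exponentiation). Sketch:
#
#     M = theano.tensor.dmatrix('M')
#     f = theano.function([M], matrix_power(M, 3))   # M.dot(M).dot(M)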
def norm(x, ord):
x = as_tensor_variable(x)
ndim = x.ndim
if ndim == 0:
raise ValueError("'axis' entry is out of bounds.")
elif ndim == 1:
if ord is None:
return tensor.sum(x**2)**0.5
elif ord == 'inf':
return tensor.max(abs(x))
elif ord == '-inf':
return tensor.min(abs(x))
elif ord == 0:
return x[x.nonzero()].shape[0]
else:
try:
z = tensor.sum(abs(x**ord))**(1. / ord)
except TypeError:
raise ValueError("Invalid norm order for vectors.")
return z
elif ndim == 2:
if ord is None or ord == 'fro':
return tensor.sum(abs(x**2))**(0.5)
elif ord == 'inf':
return tensor.max(tensor.sum(abs(x), 1))
elif ord == '-inf':
return tensor.min(tensor.sum(abs(x), 1))
elif ord == 1:
return tensor.max(tensor.sum(abs(x), 0))
elif ord == -1:
return tensor.min(tensor.sum(abs(x), 0))
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
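# Illustrative usage of `norm` (a sketch):
#
#     v = theano.tensor.dvector('v')
#     f = theano.function([v], norm(v, 2))
#     f(numpy.array([3., 4.]))   # -> 5.0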
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.resourcesettings_v1.services.resource_settings_service import pagers
from google.cloud.resourcesettings_v1.types import resource_settings
from .transports.base import ResourceSettingsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ResourceSettingsServiceGrpcAsyncIOTransport
from .client import ResourceSettingsServiceClient
class ResourceSettingsServiceAsyncClient:
"""An interface to interact with resource settings and setting values
throughout the resource hierarchy.
Services may surface a number of settings for users to control how
their resources behave. Values of settings applied on a given Cloud
resource are evaluated hierarchically and inherited by all
descendants of that resource.
For all requests, returns a ``google.rpc.Status`` with
``google.rpc.Code.PERMISSION_DENIED`` if the IAM check fails or the
``parent`` resource is not in a Cloud Organization. For all
requests, returns a ``google.rpc.Status`` with
``google.rpc.Code.INVALID_ARGUMENT`` if the request is malformed.
"""
_client: ResourceSettingsServiceClient
DEFAULT_ENDPOINT = ResourceSettingsServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ResourceSettingsServiceClient.DEFAULT_MTLS_ENDPOINT
setting_path = staticmethod(ResourceSettingsServiceClient.setting_path)
parse_setting_path = staticmethod(ResourceSettingsServiceClient.parse_setting_path)
common_billing_account_path = staticmethod(
ResourceSettingsServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
ResourceSettingsServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(ResourceSettingsServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
ResourceSettingsServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
ResourceSettingsServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
ResourceSettingsServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(
ResourceSettingsServiceClient.common_project_path
)
parse_common_project_path = staticmethod(
ResourceSettingsServiceClient.parse_common_project_path
)
common_location_path = staticmethod(
ResourceSettingsServiceClient.common_location_path
)
parse_common_location_path = staticmethod(
ResourceSettingsServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ResourceSettingsServiceAsyncClient: The constructed client.
"""
return ResourceSettingsServiceClient.from_service_account_info.__func__(ResourceSettingsServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ResourceSettingsServiceAsyncClient: The constructed client.
"""
return ResourceSettingsServiceClient.from_service_account_file.__func__(ResourceSettingsServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return ResourceSettingsServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> ResourceSettingsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ResourceSettingsServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(ResourceSettingsServiceClient).get_transport_class,
type(ResourceSettingsServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ResourceSettingsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the resource settings service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ResourceSettingsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ResourceSettingsServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_settings(
self,
request: Union[resource_settings.ListSettingsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSettingsAsyncPager:
r"""Lists all the settings that are available on the Cloud resource
``parent``.
.. code-block:: python
            from google.cloud import resourcesettings_v1
            async def sample_list_settings():
                # Create a client
                client = resourcesettings_v1.ResourceSettingsServiceAsyncClient()
                # Initialize request argument(s)
                request = resourcesettings_v1.ListSettingsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = await client.list_settings(request=request)
                # Handle the response
                async for response in page_result:
                    print(response)
Args:
request (Union[google.cloud.resourcesettings_v1.types.ListSettingsRequest, dict]):
The request object. The request for ListSettings.
parent (:class:`str`):
Required. The Cloud resource that parents the setting.
Must be in one of the following forms:
- ``projects/{project_number}``
- ``projects/{project_id}``
- ``folders/{folder_id}``
- ``organizations/{organization_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcesettings_v1.services.resource_settings_service.pagers.ListSettingsAsyncPager:
The response from ListSettings.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = resource_settings.ListSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_settings,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListSettingsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
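    # Illustrative end-to-end use of the paged call above (a sketch; the
    # parent value is a placeholder and Application Default Credentials are
    # assumed to be configured in the environment):
    #
    #     import asyncio
    #
    #     async def main():
    #         client = ResourceSettingsServiceAsyncClient()
    #         pager = await client.list_settings(
    #             request=resource_settings.ListSettingsRequest(parent="organizations/123")
    #         )
    #         async for setting in pager:
    #             print(setting)
    #
    #     asyncio.run(main())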
async def get_setting(
self,
request: Union[resource_settings.GetSettingRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resource_settings.Setting:
r"""Gets a setting.
Returns a ``google.rpc.Status`` with
``google.rpc.Code.NOT_FOUND`` if the setting does not exist.
.. code-block:: python
            from google.cloud import resourcesettings_v1
            async def sample_get_setting():
                # Create a client
                client = resourcesettings_v1.ResourceSettingsServiceAsyncClient()
                # Initialize request argument(s)
                request = resourcesettings_v1.GetSettingRequest(
                    name="name_value",
                )
                # Make the request
                response = await client.get_setting(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.resourcesettings_v1.types.GetSettingRequest, dict]):
The request object. The request for GetSetting.
name (:class:`str`):
Required. The name of the setting to get. See
[Setting][google.cloud.resourcesettings.v1.Setting] for
naming requirements.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcesettings_v1.types.Setting:
The schema for settings.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = resource_settings.GetSettingRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_setting,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def update_setting(
self,
request: Union[resource_settings.UpdateSettingRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resource_settings.Setting:
r"""Updates a setting.
Returns a ``google.rpc.Status`` with
``google.rpc.Code.NOT_FOUND`` if the setting does not exist.
Returns a ``google.rpc.Status`` with
``google.rpc.Code.FAILED_PRECONDITION`` if the setting is
flagged as read only. Returns a ``google.rpc.Status`` with
``google.rpc.Code.ABORTED`` if the etag supplied in the request
does not match the persisted etag of the setting value.
On success, the response will contain only ``name``,
``local_value`` and ``etag``. The ``metadata`` and
``effective_value`` cannot be updated through this API.
Note: the supplied setting will perform a full overwrite of the
``local_value`` field.
.. code-block:: python
            from google.cloud import resourcesettings_v1
            async def sample_update_setting():
                # Create a client
                client = resourcesettings_v1.ResourceSettingsServiceAsyncClient()
                # Initialize request argument(s)
                request = resourcesettings_v1.UpdateSettingRequest(
                )
                # Make the request
                response = await client.update_setting(request=request)
                # Handle the response
                print(response)
Args:
request (Union[google.cloud.resourcesettings_v1.types.UpdateSettingRequest, dict]):
The request object. The request for UpdateSetting.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcesettings_v1.types.Setting:
The schema for settings.
"""
# Create or coerce a protobuf request object.
request = resource_settings.UpdateSettingRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_setting,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("setting.name", request.setting.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-resource-settings",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ResourceSettingsServiceAsyncClient",)
|
|
# -*- coding: utf-8 -*-
"""Interface to FTP-servers."""
import ftplib
import tempfile
import os
import stat
from stashutils.core import get_stash
from stashutils.fsi.base import BaseFSI, make_stat, calc_mode
from stashutils.fsi.errors import OperationFailure, IsDir, IsFile
from stashutils.fsi.errors import AlreadyExists
_stash = get_stash()
class FTPFSI(BaseFSI):
"""
    An FSI for FTP servers.
    Unfortunately, FTP was designed as a human-readable protocol, and as a
    result the protocol is not completely unified.
    This means that this FSI may not work on all FTP servers.
"""
def __init__(self, logger=None):
self.logger = logger
self.path = "/"
self.ftp = None
self.host = None
def abspath(self, name):
"""returns the absolute path of name"""
return os.path.join(self.path, name)
def connect(self, *args):
if self.ftp is not None:
return "Interface already connected"
if len(args) < 1 or len(args) > 5:
return "Invalid argument count"
user, pswd = None, None
debug = 0
# TODO: make the following code less ugly
if len(args) == 1:
host = args[0]
port = 21
secure = False
elif len(args) == 2:
host, port = args
secure = False
elif len(args) == 5 or len(args) == 4:
user = args[2]
pswd = args[3]
secure = False
host = args[0]
port = args[1]
if len(args) not in (3, 5):
                # this prevents the following elif branches from being executed
pass
elif args[-1] == "-s":
host, port = args[:2]
secure = True
elif args[-1] == "-n":
host, port = args[:2]
secure = False
elif args[-1] == "-d":
host, port = args[:2]
secure = True
debug = 2
else:
return "Unknown argument(s)"
self.host = host
self.port = port
self.user = user
        try:
            port = int(port)
        except (TypeError, ValueError):
            return "Invalid port-argument"
if secure:
self.ftp = ftplib.FTP_TLS()
else:
self.ftp = ftplib.FTP()
self.ftp.set_debuglevel(debug)
try:
self.ftp.connect(host, port)
except Exception as e:
self.close()
if isinstance(e, EOFError):
return "EOF"
return e.message
else:
if secure:
self.log(_stash.text_color("Done", "green"))
self.log(".\nSecuring Connection... ")
try:
self.ftp.prot_p()
except Exception as e:
self.close()
return e.message
self.log(_stash.text_color("Done", "green"))
self.log(".\nLogging in... ")
try:
self.ftp.login(user, pswd)
except Exception as e:
self.close()
return e.message
else:
self.path = self.ftp.pwd()
return True
def close(self):
if self.ftp is not None:
try:
self.ftp.quit()
except:
try:
self.ftp.close()
except:
pass
def repr(self):
raw = "FTP-Session for {u} on {h}:{p}"
fo = raw.format(u=self.user, h=self.host, p=self.port)
return fo
def cd(self, name):
ap = self.abspath(name)
try:
self.ftp.cwd(ap)
except Exception as e:
raise OperationFailure(str(e))
else:
self.path = ap
def mkdir(self, name):
ap = self.abspath(name)
try:
self.ftp.mkd(ap)
except Exception as e:
            # test whether the dir already exists
self.get_path()
try:
self.cd(ap)
except Exception:
raise e
else:
raise AlreadyExists("Already exists!")
raise OperationFailure(str(e))
def listdir(self, path="."):
ap = self.abspath(path)
try:
content = self.ftp.nlst(ap)
ret = [e.split("/")[-1] for e in content]
return ret
except Exception as e:
raise OperationFailure(str(e))
def remove(self, name):
ap = self.abspath(name)
        # we don't know whether the target is a directory or a file, so try both
try:
self.ftp.delete(ap)
except Exception as e:
try:
self.ftp.rmd(ap)
except Exception as e2:
text = _stash.text_color("Error trying to delete file: {e}!\n".format(e=e.message), "red")
self.log(text)
text = _stash.text_color("Error trying to delete dir (after file-deletion failed)!\n", "red")
self.log(text)
raise OperationFailure(e2.message)
def open(self, name, mode="rb", buffering=0):
mode = mode.replace("+", "").replace("U", "")
ap = self.abspath(name)
self.log("Opening '{p}' with mode '{m}'...\n".format(p=ap, m=mode))
if mode in ("r", "rb"):
try:
tf = tempfile.TemporaryFile()
self.ftp.retrbinary("RETR " + ap, tf.write, 4096)
tf.seek(0)
except Exception as e:
self.log('Error during open("{p}","r"): {e}\n'.format(p=ap, e=e.message))
raise OperationFailure(e.message)
return tf
elif "w" in mode:
return FTP_Upload(self.ftp, ap, mode, ap)
else:
raise OperationFailure("Mode not supported!")
def get_path(self):
return self.ftp.pwd()
def isdir(self, name):
ap = self.abspath(name)
op = self.get_path()
try:
self.ftp.cwd(ap)
return True
except:
return False
finally:
self.ftp.cwd(op)
def _get_total_size_and_type(self, path):
"""
returns the file/dir size and the type. Copied from:
http://stackoverflow.com/questions/22090001/get-folder-size-using-ftplib
This is a modified version.
"""
size = 0
op = self.ftp.pwd()
try:
self.ftp.cwd(path)
self.log("stat: cwd worked (->IsDir)\n")
except:
# TODO: raise Exception if file does not exists
self.log("stat: cwd failed (->IsFile or NotFound)\n")
try:
size = self.ftp.size(path)
except:
size = None
if size is None:
self.log("stat: size failed (->NotFound)\n")
raise OperationFailure("NotFound!")
self.log("stat: size worked (->IsFile)\n")
return (size, stat.S_IFREG)
finally:
self.ftp.cwd(op)
return (1, stat.S_IFDIR)
def stat(self, name):
ap = self.abspath(name)
self.log("stat: {p}\n".format(p=ap))
op = self.path
try:
size, type = self._get_total_size_and_type(ap)
except Exception as e:
self.log("Error during stat: {e}\n".format(e=e.message))
raise OperationFailure(e.message)
finally:
self.ftp.cwd(op)
# todo: check permissions
m = calc_mode(type=type)
return make_stat(size=size, mode=m)
class FTP_Upload(object):
"""utility class used for FTP-uploads.
this class creates a tempfile, which is uploaded to the server when closed."""
def __init__(self, ftp, path, mode, name):
self.ftp = ftp
self.path = path
self.mode = mode
self.closed = False
self.name = name
self.tf = tempfile.TemporaryFile()
def write(self, data):
self.tf.write(data)
def flush(self):
pass
def tell(self):
return self.tf.tell()
def seek(self, offset, whence=os.SEEK_SET):
self.tf.seek(offset, whence)
def close(self):
if self.closed:
return
self.closed = True
self.tf.seek(0)
try:
self.ftp.storbinary("STOR " + self.path, self.tf, 4096)
except Exception as e:
raise OperationFailure(e.message)
finally:
self.tf.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __del__(self):
self.close()
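# Illustrative usage (a sketch; host and credentials are placeholders):
#
#     fsi = FTPFSI()
#     err = fsi.connect("ftp.example.com", 21, "user", "password")
#     if err is True:           # connect() returns True on success,
#         print(fsi.listdir())  # and an error string otherwise
#         fsi.close()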
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Data iterators for common data formats."""
from __future__ import absolute_import
from collections import namedtuple
import sys
import ctypes
import logging
import threading
import numpy as np
from ..base import _LIB
from ..base import c_str_array, mx_uint, py_str
from ..base import DataIterHandle, NDArrayHandle
from ..base import mx_real_t
from ..base import check_call, build_param_doc as _build_param_doc
from ..ndarray import NDArray
from ..ndarray.sparse import CSRNDArray
from ..ndarray import _ndarray_cls
from ..ndarray import array
from ..ndarray import concat
from .utils import _init_data, _has_instance, _getdata_by_idx
class DataDesc(namedtuple('DataDesc', ['name', 'shape'])):
"""DataDesc is used to store name, shape, type and layout
information of the data or the label.
    The `layout` describes how the axes in `shape` should be interpreted;
    for example, for image data, setting `layout=NCHW` indicates
    that the first axis is the number of examples in the batch (N),
    C is the number of channels, H is the height and W is the width of the image.
For sequential data, by default `layout` is set to ``NTC``, where
N is number of examples in the batch, T the temporal axis representing time
and C is the number of channels.
Parameters
----------
cls : DataDesc
The class.
name : str
Data name.
shape : tuple of int
Data shape.
dtype : np.dtype, optional
Data type.
layout : str, optional
Data layout.
"""
def __new__(cls, name, shape, dtype=mx_real_t, layout='NCHW'): # pylint: disable=super-on-old-class
ret = super(cls, DataDesc).__new__(cls, name, shape)
ret.dtype = dtype
ret.layout = layout
return ret
def __repr__(self):
return "DataDesc[%s,%s,%s,%s]" % (self.name, self.shape, self.dtype,
self.layout)
@staticmethod
def get_batch_axis(layout):
"""Get the dimension that corresponds to the batch size.
When data parallelism is used, the data will be automatically split and
concatenated along the batch-size dimension. Axis can be -1, which means
the whole array will be copied for each data-parallelism device.
Parameters
----------
layout : str
layout string. For example, "NCHW".
Returns
-------
int
An axis indicating the batch_size dimension.
"""
if layout is None:
return 0
return layout.find('N')
@staticmethod
def get_list(shapes, types):
"""Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name_, shape_)
types : a tuple of (name_, np.dtype)
"""
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes]
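# Illustrative call to `DataDesc.get_list` (a sketch; names and shapes are
# hypothetical):
#
#     DataDesc.get_list([('data', (32, 3, 224, 224))], [('data', np.float32)])
#     # -> [DataDesc[data,(32, 3, 224, 224),<class 'numpy.float32'>,NCHW]]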
class DataBatch(object):
"""A data batch.
MXNet's data iterator returns a batch of data for each `next` call.
This data contains `batch_size` number of examples.
If the input data consists of images, then shape of these images depend on
the `layout` attribute of `DataDesc` object in `provide_data` parameter.
If `layout` is set to 'NCHW' then, images should be stored in a 4-D matrix
of shape ``(batch_size, num_channel, height, width)``.
If `layout` is set to 'NHWC' then, images should be stored in a 4-D matrix
of shape ``(batch_size, height, width, num_channel)``.
The channels are often in RGB order.
Parameters
----------
data : list of `NDArray`, each array containing `batch_size` examples.
A list of input data.
label : list of `NDArray`, each array often containing a 1-dimensional array. optional
A list of input labels.
pad : int, optional
The number of examples padded at the end of a batch. It is used when the
total number of examples read is not divisible by the `batch_size`.
These extra padded examples are ignored in prediction.
index : numpy.array, optional
The example indices in this batch.
bucket_key : int, optional
The bucket key, used for bucketing module.
provide_data : list of `DataDesc`, optional
A list of `DataDesc` objects. `DataDesc` is used to store
name, shape, type and layout information of the data.
The *i*-th element describes the name and shape of ``data[i]``.
provide_label : list of `DataDesc`, optional
A list of `DataDesc` objects. `DataDesc` is used to store
name, shape, type and layout information of the label.
The *i*-th element describes the name and shape of ``label[i]``.
"""
def __init__(self, data, label=None, pad=None, index=None,
bucket_key=None, provide_data=None, provide_label=None):
if data is not None:
assert isinstance(data, (list, tuple)), "Data must be list of NDArrays"
if label is not None:
assert isinstance(label, (list, tuple)), "Label must be list of NDArrays"
self.data = data
self.label = label
self.pad = pad
self.index = index
self.bucket_key = bucket_key
self.provide_data = provide_data
self.provide_label = provide_label
def __str__(self):
data_shapes = [d.shape for d in self.data]
if self.label:
label_shapes = [l.shape for l in self.label]
else:
label_shapes = None
return "{}: data shapes: {} label shapes: {}".format(
self.__class__.__name__,
data_shapes,
label_shapes)
class DataIter(object):
"""The base class for an MXNet data iterator.
All I/O in MXNet is handled by specializations of this class. Data iterators
in MXNet are similar to standard-iterators in Python. On each call to `next`
they return a `DataBatch` which represents the next batch of data. When
there is no more data to return, it raises a `StopIteration` exception.
Parameters
----------
batch_size : int, optional
The batch size, namely the number of items in the batch.
See Also
--------
NDArrayIter : Data-iterator for MXNet NDArray or numpy-ndarray objects.
CSVIter : Data-iterator for csv data.
LibSVMIter : Data-iterator for libsvm data.
ImageIter : Data-iterator for images.
"""
def __init__(self, batch_size=0):
self.batch_size = batch_size
def __iter__(self):
return self
def reset(self):
"""Reset the iterator to the begin of the data."""
pass
def next(self):
"""Get next data batch from iterator.
Returns
-------
DataBatch
The data of next batch.
Raises
------
StopIteration
If the end of the data is reached.
"""
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=self.getindex())
else:
raise StopIteration
def __next__(self):
return self.next()
def iter_next(self):
"""Move to the next batch.
Returns
-------
boolean
Whether the move is successful.
"""
pass
def getdata(self):
"""Get data of current batch.
Returns
-------
list of NDArray
The data of the current batch.
"""
pass
def getlabel(self):
"""Get label of the current batch.
Returns
-------
list of NDArray
The label of the current batch.
"""
pass
def getindex(self):
"""Get index of the current batch.
Returns
-------
index : numpy.array
The indices of examples in the current batch.
"""
return None
def getpad(self):
"""Get the number of padding examples in the current batch.
Returns
-------
int
Number of padding examples in the current batch.
"""
pass
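# A minimal custom iterator sketch (illustrative, not part of this module):
# it yields `num_batches` batches of ones, after which the base-class `next`
# raises StopIteration.
#
#     class OnesIter(DataIter):
#         def __init__(self, batch_size, num_batches):
#             super(OnesIter, self).__init__(batch_size)
#             self.num_batches = num_batches
#             self.cur = 0
#         def reset(self):
#             self.cur = 0
#         def iter_next(self):
#             self.cur += 1
#             return self.cur <= self.num_batches
#         def getdata(self):
#             return [array(np.ones((self.batch_size, 10)))]
#         def getlabel(self):
#             return []
#         def getpad(self):
#             return 0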
class ResizeIter(DataIter):
"""Resize a data iterator to a given number of batches.
Parameters
----------
data_iter : DataIter
The data iterator to be resized.
size : int
The number of batches per epoch to resize to.
reset_internal : bool
Whether to reset internal iterator on ResizeIter.reset.
Examples
--------
>>> nd_iter = mx.io.NDArrayIter(mx.nd.ones((100,10)), batch_size=25)
>>> resize_iter = mx.io.ResizeIter(nd_iter, 2)
>>> for batch in resize_iter:
... print(batch.data)
[<NDArray 25x10 @cpu(0)>]
[<NDArray 25x10 @cpu(0)>]
"""
def __init__(self, data_iter, size, reset_internal=True):
super(ResizeIter, self).__init__()
self.data_iter = data_iter
self.size = size
self.reset_internal = reset_internal
self.cur = 0
self.current_batch = None
self.provide_data = data_iter.provide_data
self.provide_label = data_iter.provide_label
self.batch_size = data_iter.batch_size
if hasattr(data_iter, 'default_bucket_key'):
self.default_bucket_key = data_iter.default_bucket_key
def reset(self):
self.cur = 0
if self.reset_internal:
self.data_iter.reset()
def iter_next(self):
if self.cur == self.size:
return False
try:
self.current_batch = self.data_iter.next()
except StopIteration:
self.data_iter.reset()
self.current_batch = self.data_iter.next()
self.cur += 1
return True
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
class PrefetchingIter(DataIter):
"""Performs pre-fetch for other data iterators.
This iterator will create another thread to perform ``iter_next`` and then
store the data in memory. It potentially accelerates the data read, at the
cost of more memory usage.
Parameters
----------
iters : DataIter or list of DataIter
The data iterators to be pre-fetched.
rename_data : None or list of dict
The *i*-th element is a renaming map for the *i*-th iter, in the form of
{'original_name' : 'new_name'}. Should have one entry for each entry
in iter[i].provide_data.
rename_label : None or list of dict
Similar to ``rename_data``.
Examples
--------
>>> iter1 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
>>> iter2 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
>>> piter = mx.io.PrefetchingIter([iter1, iter2],
... rename_data=[{'data': 'data_1'}, {'data': 'data_2'}])
>>> print(piter.provide_data)
[DataDesc[data_1,(25, 10L),<type 'numpy.float32'>,NCHW],
DataDesc[data_2,(25, 10L),<type 'numpy.float32'>,NCHW]]
"""
def __init__(self, iters, rename_data=None, rename_label=None):
super(PrefetchingIter, self).__init__()
if not isinstance(iters, list):
iters = [iters]
self.n_iter = len(iters)
assert self.n_iter > 0
self.iters = iters
self.rename_data = rename_data
self.rename_label = rename_label
self.batch_size = self.provide_data[0][1][0]
self.data_ready = [threading.Event() for i in range(self.n_iter)]
self.data_taken = [threading.Event() for i in range(self.n_iter)]
for i in self.data_taken:
i.set()
self.started = True
self.current_batch = [None for i in range(self.n_iter)]
self.next_batch = [None for i in range(self.n_iter)]
def prefetch_func(self, i):
"""Thread entry"""
while True:
self.data_taken[i].wait()
if not self.started:
break
try:
self.next_batch[i] = self.iters[i].next()
except StopIteration:
self.next_batch[i] = None
self.data_taken[i].clear()
self.data_ready[i].set()
self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
for i in range(self.n_iter)]
for thread in self.prefetch_threads:
thread.setDaemon(True)
thread.start()
def __del__(self):
self.started = False
for i in self.data_taken:
i.set()
for thread in self.prefetch_threads:
thread.join()
@property
def provide_data(self):
if self.rename_data is None:
return sum([i.provide_data for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_data
] for r, i in zip(self.rename_data, self.iters)], [])
@property
def provide_label(self):
if self.rename_label is None:
return sum([i.provide_label for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_label
] for r, i in zip(self.rename_label, self.iters)], [])
def reset(self):
for i in self.data_ready:
i.wait()
for i in self.iters:
i.reset()
for i in self.data_ready:
i.clear()
for i in self.data_taken:
i.set()
def iter_next(self):
for i in self.data_ready:
i.wait()
if self.next_batch[0] is None:
for i in self.next_batch:
assert i is None, "Number of entry mismatches between iterators"
return False
else:
for batch in self.next_batch:
assert batch.pad == self.next_batch[0].pad, \
"Number of entry mismatches between iterators"
self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []),
sum([batch.label for batch in self.next_batch], []),
self.next_batch[0].pad,
self.next_batch[0].index,
provide_data=self.provide_data,
provide_label=self.provide_label)
for i in self.data_ready:
i.clear()
for i in self.data_taken:
i.set()
return True
def next(self):
if self.iter_next():
return self.current_batch
else:
raise StopIteration
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
class NDArrayIter(DataIter):
"""Returns an iterator for ``mx.nd.NDArray``, ``numpy.ndarray``, ``h5py.Dataset``
``mx.nd.sparse.CSRNDArray`` or ``scipy.sparse.csr_matrix``.
Examples
--------
>>> data = np.arange(40).reshape((10,2,2))
>>> labels = np.ones([10, 1])
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
>>> for batch in dataiter:
... print batch.data[0].asnumpy()
... batch.data[0].shape
...
[[[ 36. 37.]
[ 38. 39.]]
[[ 16. 17.]
[ 18. 19.]]
[[ 12. 13.]
[ 14. 15.]]]
(3L, 2L, 2L)
[[[ 32. 33.]
[ 34. 35.]]
[[ 4. 5.]
[ 6. 7.]]
[[ 24. 25.]
[ 26. 27.]]]
(3L, 2L, 2L)
[[[ 8. 9.]
[ 10. 11.]]
[[ 20. 21.]
[ 22. 23.]]
[[ 28. 29.]
[ 30. 31.]]]
(3L, 2L, 2L)
>>> dataiter.provide_data # Returns a list of `DataDesc`
[DataDesc[data,(3, 2L, 2L),<type 'numpy.float32'>,NCHW]]
>>> dataiter.provide_label # Returns a list of `DataDesc`
[DataDesc[softmax_label,(3, 1L),<type 'numpy.float32'>,NCHW]]
    In the above example, data is shuffled because the `shuffle` parameter is set to `True`,
    and the remaining examples are discarded because the `last_batch_handle` parameter is set to `discard`.
Usage of `last_batch_handle` parameter:
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='pad')
>>> batchidx = 0
>>> for batch in dataiter:
... batchidx += 1
...
>>> batchidx # Padding added after the examples read are over. So, 10/3+1 batches are created.
4
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
>>> batchidx = 0
>>> for batch in dataiter:
... batchidx += 1
...
>>> batchidx # Remaining examples are discarded. So, 10/3 batches are created.
3
>>> dataiter = mx.io.NDArrayIter(data, labels, 3, False, last_batch_handle='roll_over')
>>> batchidx = 0
>>> for batch in dataiter:
... batchidx += 1
...
>>> batchidx # Remaining examples are rolled over to the next iteration.
3
>>> dataiter.reset()
>>> dataiter.next().data[0].asnumpy()
[[[ 36. 37.]
[ 38. 39.]]
[[ 0. 1.]
[ 2. 3.]]
[[ 4. 5.]
[ 6. 7.]]]
(3L, 2L, 2L)
`NDArrayIter` also supports multiple input and labels.
>>> data = {'data1':np.zeros(shape=(10,2,2)), 'data2':np.zeros(shape=(20,2,2))}
>>> label = {'label1':np.zeros(shape=(10,1)), 'label2':np.zeros(shape=(20,1))}
>>> dataiter = mx.io.NDArrayIter(data, label, 3, True, last_batch_handle='discard')
`NDArrayIter` also supports ``mx.nd.sparse.CSRNDArray``
with `last_batch_handle` set to `discard`.
>>> csr_data = mx.nd.array(np.arange(40).reshape((10,4))).tostype('csr')
>>> labels = np.ones([10, 1])
>>> dataiter = mx.io.NDArrayIter(csr_data, labels, 3, last_batch_handle='discard')
>>> [batch.data[0] for batch in dataiter]
[
<CSRNDArray 3x4 @cpu(0)>,
<CSRNDArray 3x4 @cpu(0)>,
<CSRNDArray 3x4 @cpu(0)>]
Parameters
----------
data: array or list of array or dict of string to array
The input data.
label: array or list of array or dict of string to array, optional
The input label.
batch_size: int
Batch size of data.
shuffle: bool, optional
Whether to shuffle the data.
Only supported if no h5py.Dataset inputs are used.
last_batch_handle : str, optional
How to handle the last batch. This parameter can be 'pad', 'discard' or
'roll_over'.
        If 'pad', the last batch will be padded with data starting from the beginning.
        If 'discard', the last batch will be discarded.
        If 'roll_over', the remaining elements will be rolled over to the next iteration;
        note that this is intended for training and can cause problems if used for prediction.
data_name : str, optional
The data name.
label_name : str, optional
The label name.
"""
def __init__(self, data, label=None, batch_size=1, shuffle=False,
last_batch_handle='pad', data_name='data',
label_name='softmax_label'):
super(NDArrayIter, self).__init__(batch_size)
self.data = _init_data(data, allow_empty=False, default_name=data_name)
self.label = _init_data(label, allow_empty=True, default_name=label_name)
if ((_has_instance(self.data, CSRNDArray) or
_has_instance(self.label, CSRNDArray)) and
(last_batch_handle != 'discard')):
raise NotImplementedError("`NDArrayIter` only supports ``CSRNDArray``" \
" with `last_batch_handle` set to `discard`.")
self.idx = np.arange(self.data[0][1].shape[0])
self.shuffle = shuffle
self.last_batch_handle = last_batch_handle
self.batch_size = batch_size
self.cursor = -self.batch_size
self.num_data = self.idx.shape[0]
# shuffle
self.reset()
self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
self.num_source = len(self.data_list)
# used for 'roll_over'
self._cache_data = None
self._cache_label = None
@property
def provide_data(self):
"""The name and shape of data provided by this iterator."""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.data
]
@property
def provide_label(self):
"""The name and shape of label provided by this iterator."""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.label
]
def hard_reset(self):
"""Ignore roll over data and set to start."""
if self.shuffle:
self._shuffle_data()
self.cursor = -self.batch_size
self._cache_data = None
self._cache_label = None
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.shuffle:
self._shuffle_data()
        # the range below indicates the last batch
if self.last_batch_handle == 'roll_over' and \
self.num_data - self.batch_size < self.cursor < self.num_data:
# (self.cursor - self.num_data) represents the data we have for the last batch
self.cursor = self.cursor - self.num_data - self.batch_size
else:
self.cursor = -self.batch_size
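    # Worked example of the roll-over arithmetic above (illustrative): with
    # num_data=10, batch_size=3 and cursor=9 at the end of an epoch, reset()
    # sets cursor = 9 - 10 - 3 = -4; the following iter_next() moves it to -1,
    # and _batchify() then concatenates the 1 cached example with the first
    # 2 examples of the new epoch to form a full batch of 3.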
def iter_next(self):
"""Increments the coursor by batch_size for next batch
and check current cursor if it exceed the number of data points."""
self.cursor += self.batch_size
return self.cursor < self.num_data
def next(self):
"""Returns the next batch of data."""
if not self.iter_next():
raise StopIteration
data = self.getdata()
label = self.getlabel()
# iter should stop when last batch is not complete
if data[0].shape[0] != self.batch_size:
# in this case, cache it for next epoch
self._cache_data = data
self._cache_label = label
raise StopIteration
return DataBatch(data=data, label=label, \
pad=self.getpad(), index=None)
def _getdata(self, data_source, start=None, end=None):
"""Load data from underlying arrays."""
assert start is not None or end is not None, 'should at least specify start or end'
start = start if start is not None else 0
if end is None:
end = data_source[0][1].shape[0] if data_source else 0
s = slice(start, end)
return [
x[1][s]
if isinstance(x[1], (np.ndarray, NDArray)) else
# h5py (only supports indices in increasing order)
array(x[1][sorted(self.idx[s])][[
list(self.idx[s]).index(i)
for i in sorted(self.idx[s])
]]) for x in data_source
]
def _concat(self, first_data, second_data):
"""Helper function to concat two NDArrays."""
assert len(first_data) == len(
second_data), 'data source should contain the same size'
if first_data and second_data:
return [
concat(
first_data[x],
second_data[x],
dim=0
) for x in range(len(first_data))
]
elif (not first_data) and (not second_data):
return []
else:
return [
first_data[0] if first_data else second_data[0]
for x in range(len(first_data))
]
def _batchify(self, data_source):
"""Load data from underlying arrays, internal use only."""
assert self.cursor < self.num_data, 'DataIter needs reset.'
# first batch of next epoch with 'roll_over'
if self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
assert self._cache_data is not None or self._cache_label is not None, \
'next epoch should have cached data'
cache_data = self._cache_data if self._cache_data is not None else self._cache_label
second_data = self._getdata(
data_source, end=self.cursor + self.batch_size)
if self._cache_data is not None:
self._cache_data = None
else:
self._cache_label = None
return self._concat(cache_data, second_data)
# last batch with 'pad'
elif self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
pad = self.batch_size - self.num_data + self.cursor
first_data = self._getdata(data_source, start=self.cursor)
second_data = self._getdata(data_source, end=pad)
return self._concat(first_data, second_data)
# normal case
else:
if self.cursor + self.batch_size < self.num_data:
end_idx = self.cursor + self.batch_size
# get incomplete last batch
else:
end_idx = self.num_data
return self._getdata(data_source, self.cursor, end_idx)
def getdata(self):
"""Get data."""
return self._batchify(self.data)
def getlabel(self):
"""Get label."""
return self._batchify(self.label)
def getpad(self):
"""Get pad value of DataBatch."""
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
# check the first batch
elif self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
return -self.cursor
else:
return 0
def _shuffle_data(self):
"""Shuffle the data."""
# shuffle index
np.random.shuffle(self.idx)
# get the data by corresponding index
self.data = _getdata_by_idx(self.data, self.idx)
self.label = _getdata_by_idx(self.label, self.idx)
class MXDataIter(DataIter):
"""A python wrapper a C++ data iterator.
This iterator is the Python wrapper to all native C++ data iterators, such
as `CSVIter`, `ImageRecordIter`, `MNISTIter`, etc. When initializing
`CSVIter` for example, you will get an `MXDataIter` instance to use in your
Python code. Calls to `next`, `reset`, etc will be delegated to the
underlying C++ data iterators.
Usually you don't need to interact with `MXDataIter` directly unless you are
implementing your own data iterators in C++. To do that, please refer to
examples under the `src/io` folder.
Parameters
----------
handle : DataIterHandle, required
The handle to the underlying C++ Data Iterator.
data_name : str, optional
Data name. Default to "data".
label_name : str, optional
Label name. Default to "softmax_label".
See Also
--------
src/io : The underlying C++ data iterator implementation, e.g., `CSVIter`.
"""
def __init__(self, handle, data_name='data', label_name='softmax_label', **_):
super(MXDataIter, self).__init__()
self.handle = handle
# debug option, used to test the speed with io effect eliminated
self._debug_skip_load = False
# load the first batch to get shape information
self.first_batch = None
self.first_batch = self.next()
data = self.first_batch.data[0]
label = self.first_batch.label[0]
# properties
self.provide_data = [DataDesc(data_name, data.shape, data.dtype)]
self.provide_label = [DataDesc(label_name, label.shape, label.dtype)]
self.batch_size = data.shape[0]
def __del__(self):
check_call(_LIB.MXDataIterFree(self.handle))
def debug_skip_load(self):
        # Set the iterator to always return the first batch. This can be used
        # to test the speed of the network without taking the loading delay
        # into account.
self._debug_skip_load = True
logging.info('Set debug_skip_load to be true, will simply return first batch')
def reset(self):
self._debug_at_begin = True
self.first_batch = None
check_call(_LIB.MXDataIterBeforeFirst(self.handle))
def next(self):
if self._debug_skip_load and not self._debug_at_begin:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
if self.first_batch is not None:
batch = self.first_batch
self.first_batch = None
return batch
self._debug_at_begin = False
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
if next_res.value:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
else:
raise StopIteration
def iter_next(self):
if self.first_batch is not None:
return True
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
return next_res.value
def getdata(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl)))
return _ndarray_cls(hdl, False)
def getlabel(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl)))
return _ndarray_cls(hdl, False)
def getindex(self):
index_size = ctypes.c_uint64(0)
index_data = ctypes.POINTER(ctypes.c_uint64)()
check_call(_LIB.MXDataIterGetIndex(self.handle,
ctypes.byref(index_data),
ctypes.byref(index_size)))
if index_size.value:
address = ctypes.addressof(index_data.contents)
dbuffer = (ctypes.c_uint64* index_size.value).from_address(address)
np_index = np.frombuffer(dbuffer, dtype=np.uint64)
return np_index.copy()
else:
return None
def getpad(self):
pad = ctypes.c_int(0)
check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad)))
return pad.value
def _make_io_iterator(handle):
"""Create an io iterator by handle."""
name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXDataIterGetIterInfo( \
handle, ctypes.byref(name), ctypes.byref(desc), \
ctypes.byref(num_args), \
ctypes.byref(arg_names), \
ctypes.byref(arg_types), \
ctypes.byref(arg_descs)))
iter_name = py_str(name.value)
narg = int(num_args.value)
param_str = _build_param_doc(
[py_str(arg_names[i]) for i in range(narg)],
[py_str(arg_types[i]) for i in range(narg)],
[py_str(arg_descs[i]) for i in range(narg)])
doc_str = ('%s\n\n' +
'%s\n' +
'Returns\n' +
'-------\n' +
'MXDataIter\n'+
' The result iterator.')
doc_str = doc_str % (desc.value, param_str)
def creator(*args, **kwargs):
"""Create an iterator.
The parameters listed below can be passed in as keyword arguments.
Parameters
----------
name : string, required.
Name of the resulting data iterator.
Returns
-------
dataiter: Dataiter
The resulting data iterator.
"""
        # reject positional arguments before creating the native iterator, so
        # that no C++ handle is leaked on error
        if len(args):
            raise TypeError('%s can only accept keyword arguments' % iter_name)
        param_keys = []
        param_vals = []
        for k, val in kwargs.items():
            param_keys.append(k)
            param_vals.append(str(val))
        # create the underlying C++ data iterator
        param_keys = c_str_array(param_keys)
        param_vals = c_str_array(param_vals)
        iter_handle = DataIterHandle()
        check_call(_LIB.MXDataIterCreateIter(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(iter_handle)))
        return MXDataIter(iter_handle, **kwargs)
creator.__name__ = iter_name
creator.__doc__ = doc_str
return creator
def _init_io_module():
"""List and add all the data iterators to current module."""
plist = ctypes.POINTER(ctypes.c_void_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = ctypes.c_void_p(plist[i])
dataiter = _make_io_iterator(hdl)
setattr(module_obj, dataiter.__name__, dataiter)
_init_io_module()
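# Hedged usage sketch: `CSVIter` and the keyword arguments below are
# assumptions about iterators that _init_io_module() typically registers; the
# name only resolves at call time, after this module has been initialized.
def _example_generated_iter_usage(csv_path='data.csv'):
    """Illustrative only: iterate a generated CSVIter in batches of two."""
    data_iter = CSVIter(data_csv=csv_path, data_shape=(3,), batch_size=2)  # pylint: disable=undefined-variable
    for batch in data_iter:
        # each batch is a DataBatch whose `data` list holds NDArrays
        print(batch.data[0].asnumpy())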
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Meta-architecture definition.
General tensorflow implementation of convolutional Multibox/SSD detection
models with LSTM states, for use on video data.
See https://arxiv.org/abs/1711.06368 for details.
"""
import re
import tensorflow as tf
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import ops
from object_detection.utils import shape_utils
slim = tf.contrib.slim
class LSTMMetaArch(ssd_meta_arch.SSDMetaArch):
"""LSTM Meta-architecture definition."""
def __init__(self,
is_training,
anchor_generator,
box_predictor,
box_coder,
feature_extractor,
encode_background_as_zeros,
image_resizer_fn,
non_max_suppression_fn,
score_conversion_fn,
classification_loss,
localization_loss,
classification_loss_weight,
localization_loss_weight,
normalize_loss_by_num_matches,
hard_example_miner,
unroll_length,
target_assigner_instance,
add_summaries=True):
super(LSTMMetaArch, self).__init__(
is_training=is_training,
anchor_generator=anchor_generator,
box_predictor=box_predictor,
box_coder=box_coder,
feature_extractor=feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=add_summaries)
self._unroll_length = unroll_length
@property
def unroll_length(self):
return self._unroll_length
@unroll_length.setter
def unroll_length(self, unroll_length):
self._unroll_length = unroll_length
def predict(self, preprocessed_inputs, true_image_shapes, states=None,
state_name='lstm_state', feature_scope=None):
with tf.variable_scope(self._extract_features_scope,
values=[preprocessed_inputs], reuse=tf.AUTO_REUSE):
feature_maps = self._feature_extractor.extract_features(
preprocessed_inputs, states, state_name,
unroll_length=self._unroll_length, scope=feature_scope)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
    self._batch_size = preprocessed_inputs.shape[0].value // self._unroll_length
self._states = states
self._anchors = box_list_ops.concatenate(
self._anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2]))
prediction_dict = self._box_predictor.predict(
feature_maps, self._anchor_generator.num_anchors_per_location())
    # Multiscale_anchor_generator currently has a different dim compared to
    # ssd_anchor_generator. The current fix is to check the rank of the
    # box_encodings tensor: if the rank is not 3 (multiscale_anchor_generator),
    # squeeze the third dim.
    # TODO(yinxiao): Remove this check once the anchor generator has unified
    # dimension.
if len(prediction_dict['box_encodings'][0].get_shape().as_list()) == 3:
box_encodings = tf.concat(prediction_dict['box_encodings'], axis=1)
else:
box_encodings = tf.squeeze(
tf.concat(prediction_dict['box_encodings'], axis=1), axis=2)
class_predictions_with_background = tf.concat(
prediction_dict['class_predictions_with_background'], axis=1)
predictions_dict = {
'preprocessed_inputs': preprocessed_inputs,
'box_encodings': box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'feature_maps': feature_maps,
'anchors': self._anchors.get(),
'states_and_outputs': self._feature_extractor.states_and_outputs,
}
    # In cases such as exporting the model, the states are always zero, so the
    # step should be ignored.
if states is not None:
predictions_dict['step'] = self._feature_extractor.step
return predictions_dict
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
a dictionary mapping loss keys (`localization_loss` and
`classification_loss`) to scalar tensors representing corresponding loss
values.
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
keypoints = None
if self.groundtruth_has_field(fields.BoxListFields.keypoints):
keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
weights = None
if self.groundtruth_has_field(fields.BoxListFields.weights):
weights = self.groundtruth_lists(fields.BoxListFields.weights)
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list) = self._assign_targets(
self.groundtruth_lists(fields.BoxListFields.boxes),
self.groundtruth_lists(fields.BoxListFields.classes),
keypoints, weights)
if self._add_summaries:
self._summarize_target_assignment(
self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
location_losses = self._localization_loss(
prediction_dict['box_encodings'],
batch_reg_targets,
ignore_nan_targets=True,
weights=batch_reg_weights)
cls_losses = ops.reduce_sum_trailing_dimensions(
self._classification_loss(
prediction_dict['class_predictions_with_background'],
batch_cls_targets,
weights=batch_cls_weights),
ndims=2)
if self._hard_example_miner:
(loc_loss_list, cls_loss_list) = self._apply_hard_mining(
location_losses, cls_losses, prediction_dict, match_list)
localization_loss = tf.reduce_sum(tf.stack(loc_loss_list))
classification_loss = tf.reduce_sum(tf.stack(cls_loss_list))
if self._add_summaries:
self._hard_example_miner.summarize()
else:
if self._add_summaries:
class_ids = tf.argmax(batch_cls_targets, axis=2)
flattened_class_ids = tf.reshape(class_ids, [-1])
flattened_classification_losses = tf.reshape(cls_losses, [-1])
self._summarize_anchor_classification_loss(
flattened_class_ids, flattened_classification_losses)
localization_loss = tf.reduce_sum(location_losses)
classification_loss = tf.reduce_sum(cls_losses)
# Optionally normalize by number of positive matches
normalizer = tf.constant(1.0, dtype=tf.float32)
if self._normalize_loss_by_num_matches:
normalizer = tf.maximum(tf.to_float(tf.reduce_sum(batch_reg_weights)),
1.0)
with tf.name_scope('localization_loss'):
localization_loss_normalizer = normalizer
if self._normalize_loc_loss_by_codesize:
localization_loss_normalizer *= self._box_coder.code_size
localization_loss = ((self._localization_loss_weight / (
localization_loss_normalizer)) * localization_loss)
with tf.name_scope('classification_loss'):
classification_loss = ((self._classification_loss_weight / normalizer) *
classification_loss)
loss_dict = {
'localization_loss': localization_loss,
'classification_loss': classification_loss
}
return loss_dict
def restore_map(self, fine_tune_checkpoint_type='lstm'):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
    Args:
      fine_tune_checkpoint_type: the type of checkpoint to restore from, either
        an SSD/LSTM detection checkpoint (with compatible variable names) or a
        classification checkpoint for initialization prior to training.
        Available options: `classification`, `detection`, and `lstm`.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
Raises:
      ValueError: if fine_tune_checkpoint_type is not among
        `classification`/`detection`/`lstm`.
"""
if fine_tune_checkpoint_type not in [
'classification', 'detection', 'lstm'
]:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
variables_to_restore = {}
for variable in tf.global_variables():
var_name = variable.op.name
if 'global_step' in var_name:
continue
# Remove FeatureExtractor prefix for classification checkpoints.
if fine_tune_checkpoint_type == 'classification':
var_name = (
re.split('^' + self._extract_features_scope + '/', var_name)[-1])
# When loading from single frame detection checkpoints, we need to
# remap FeatureMaps variable names.
if ('FeatureMaps' in var_name and
fine_tune_checkpoint_type == 'detection'):
var_name = var_name.replace('FeatureMaps',
self.get_base_network_scope())
variables_to_restore[var_name] = variable
return variables_to_restore
def get_base_network_scope(self):
"""Returns the variable scope of the base network.
Returns:
The variable scope of the feature extractor base network, e.g. MobilenetV1
"""
return self._feature_extractor.get_base_network_scope()
class LSTMFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""LSTM Meta-architecture Feature Extractor definition."""
@property
def depth_multipliers(self):
return self._depth_multipliers
@depth_multipliers.setter
def depth_multipliers(self, depth_multipliers):
self._depth_multipliers = depth_multipliers
@property
def lstm_state_depth(self):
return self._lstm_state_depth
@lstm_state_depth.setter
def lstm_state_depth(self, lstm_state_depth):
self._lstm_state_depth = lstm_state_depth
@property
def states_and_outputs(self):
"""LSTM states and outputs.
This variable includes both LSTM states {C_t} and outputs {h_t}.
Returns:
states_and_outputs: A list of 4-D float tensors, including the lstm state
and output at each timestep.
"""
return self._states_out
@property
def step(self):
return self._step
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
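  # Worked arithmetic (illustrative): under the (2.0 / 255.0) * x - 1.0
  # transform above, pixel 0 maps to -1.0, 127.5 to 0.0, and 255 to 1.0.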
def get_base_network_scope(self):
"""Returns the variable scope of the base network.
Returns:
The variable scope of the base network, e.g. MobilenetV1
"""
return self._base_network_scope
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
"""Creates an ndarray where each element is the binary of its linear index.
Args:
num_dims: The number of dimensions to create.
Returns:
An ndarray of shape [2] * num_dims.
"""
formatter = "{:0%db}" % num_dims
strings = [formatter.format(i) for i in xrange(2**num_dims)]
return np.array(strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
def _joined_array(num_dims, reduce_dim):
"""Creates an ndarray with the result from reduce_join on input_array.
Args:
num_dims: The number of dimensions of the original input array.
reduce_dim: The dimension to reduce.
Returns:
An ndarray of shape [2] * (num_dims - 1).
"""
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
class UnicodeTestCase(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype("U"), np.array(actual).astype("U"))
class ReduceJoinTestHelperTest(UnicodeTestCase):
"""Tests for helper functions."""
def testInputArray(self):
num_dims = 3
truth = ["{:03b}".format(i) for i in xrange(2**num_dims)]
output_array = _input_array(num_dims).reshape([-1])
self.assertAllEqualUnicode(truth, output_array)
def testJoinedArray(self):
num_dims = 3
truth_dim_zero = [["000100", "001101"], ["010110", "011111"]]
truth_dim_one = [["000010", "001011"], ["100110", "101111"]]
truth_dim_two = [["000001", "010011"], ["100101", "110111"]]
output_array_dim_zero = _joined_array(num_dims, reduce_dim=0)
output_array_dim_one = _joined_array(num_dims, reduce_dim=1)
output_array_dim_two = _joined_array(num_dims, reduce_dim=2)
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqualUnicode(truth_dim_two, output_array_dim_two)
class ReduceJoinTest(UnicodeTestCase):
def _testReduceJoin(self,
input_array,
truth,
truth_shape,
axis,
keep_dims=False,
separator=""):
"""Compares the output of reduce_join to an expected result.
Args:
input_array: The string input to be joined.
truth: An array or np.array of the expected result.
truth_shape: An array or np.array of the expected shape.
axis: The indices to reduce over.
keep_dims: Whether or not to retain reduced dimensions.
separator: The separator to use for joining.
"""
with self.cached_session():
output = string_ops.reduce_join(
inputs=input_array,
axis=axis,
keep_dims=keep_dims,
separator=separator)
output_array = self.evaluate(output)
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, output.get_shape())
def _testMultipleReduceJoin(self, input_array, axis, separator=" "):
"""Tests reduce_join for one input and multiple axes.
    Does so by comparing the output to that of nested single-axis reduce_join ops.
The correctness of single-dimension reduce_join is verified by other
tests below using _testReduceJoin.
Args:
input_array: The input to test.
axis: The indices to reduce.
separator: The separator to use when joining.
"""
with self.cached_session():
output = string_ops.reduce_join(
inputs=input_array, axis=axis, keep_dims=False, separator=separator)
output_keep_dims = string_ops.reduce_join(
inputs=input_array, axis=axis, keep_dims=True, separator=separator)
truth = input_array
for index in axis:
truth = string_ops.reduce_join(
inputs=truth, axis=index, keep_dims=True, separator=separator)
if not axis:
truth = constant_op.constant(truth)
truth_squeezed = array_ops.squeeze(truth, axis=axis)
output_array = self.evaluate(output)
output_keep_dims_array = self.evaluate(output_keep_dims)
truth_array = self.evaluate(truth)
truth_squeezed_array = self.evaluate(truth_squeezed)
self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
self.assertAllEqualUnicode(truth_squeezed_array, output_array)
self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
def testRankOne(self):
input_array = ["this", "is", "a", "test"]
truth = "thisisatest"
truth_shape = []
self._testReduceJoin(input_array, truth, truth_shape, axis=0)
def testRankTwo(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array, truth_dim_zero, truth_shape_dim_zero, axis=0)
self._testReduceJoin(
input_array, truth_dim_one, truth_shape_dim_one, axis=1)
expected_val = "thisisatestpleasedonotpanic"
expected_shape = []
self._testReduceJoin(input_array, expected_val, expected_shape, axis=None)
# Using axis=[] is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(input_array, expected_val, expected_shape, axis=[])
def testRankFive(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(input_array, truths[i], truth_shape, axis=i)
def testNegative(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(input_array, truths[i], truth_shape, axis=i - 5)
def testSingletonDimension(self):
input_arrays = [
_input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
for i in xrange(6)
]
truth = _input_array(num_dims=5)
truth_shape = [2] * 5
for i in xrange(6):
self._testReduceJoin(input_arrays[i], truth, truth_shape, axis=i)
def testSeparator(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["this please", "is do", "a not", "test panic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["this is a test", "please do not panic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
axis=0,
separator=" ")
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
axis=1,
separator=" ")
@test_util.run_deprecated_v1
def testUnknownShape(self):
input_array = [["a"], ["b"]]
truth = ["ab"]
truth_shape = None
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
reduced = string_ops.reduce_join(placeholder, axis=0)
output_array = reduced.eval(feed_dict={placeholder.name: input_array})
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, reduced.get_shape())
@test_util.run_deprecated_v1
def testUnknownIndices(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape = None
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(input_array, axis=placeholder)
output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testKeepDims(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
truth_shape_dim_zero = [1, 4]
truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
truth_shape_dim_one = [2, 1]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
axis=0,
keep_dims=True)
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
axis=1,
keep_dims=True)
expected_val = [["thisisatestpleasedonotpanic"]]
expected_shape = [1, 1]
self._testReduceJoin(
constant_op.constant(input_array), expected_val, expected_shape,
keep_dims=True, axis=None)
# Using axis=[] is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(
input_array, expected_val, expected_shape, keep_dims=True, axis=[])
def testMultiIndex(self):
num_dims = 3
input_array = _input_array(num_dims=num_dims)
# Also tests [].
for i in xrange(num_dims + 1):
for permutation in itertools.permutations(xrange(num_dims), i):
self._testMultipleReduceJoin(input_array, axis=permutation)
@test_util.run_deprecated_v1
def testInvalidReductionIndices(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
string_ops.reduce_join(inputs="", axis=0)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], axis=-3)
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], axis=2)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], axis=[0, -3])
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], axis=[0, 2])
def testZeroDims(self):
with self.cached_session():
inputs = np.zeros([0, 1], dtype=str)
# Reduction that drops the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, axis=0)
self.assertAllEqualUnicode([""], self.evaluate(output))
# Reduction that keeps the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, axis=1)
output_shape = self.evaluate(output).shape
self.assertAllEqual([0], output_shape)
@test_util.run_deprecated_v1
def testInvalidArgsUnknownShape(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
index_too_high = string_ops.reduce_join(placeholder, axis=1)
duplicate_index = string_ops.reduce_join(placeholder, axis=[-1, 1])
with self.assertRaisesOpError("Invalid reduction dimension 1"):
index_too_high.eval(feed_dict={placeholder.name: [""]})
with self.assertRaisesOpError("Duplicate reduction dimension 1"):
duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
@test_util.run_deprecated_v1
def testInvalidArgsUnknownIndices(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(["test", "test2"], axis=placeholder)
with self.assertRaisesOpError("reduction dimension -2"):
reduced.eval(feed_dict={placeholder.name: -2})
with self.assertRaisesOpError("reduction dimension 2"):
reduced.eval(feed_dict={placeholder.name: 2})
if __name__ == "__main__":
test.main()
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WALS model input data, training and predict functions."""
import datetime
import numpy as np
import os
import pandas as pd
from scipy.sparse import coo_matrix
import sh
import tensorflow as tf
import wals
# ratio of total data set size to test set size
TEST_SET_RATIO = 10
# default hyperparameters
DEFAULT_PARAMS = {
'weights': True,
'latent_factors': 5,
'num_iters': 20,
'regularization': 0.07,
'unobs_weight': 0.01,
'wt_type': 0,
'feature_wt_factor': 130.0,
'feature_wt_exp': 0.08,
'delimiter': '\t'
}
# parameters optimized with hypertuning for the MovieLens data set
OPTIMIZED_PARAMS = {
'latent_factors': 34,
'regularization': 9.83,
'unobs_weight': 0.001,
'feature_wt_factor': 189.8,
}
# parameters optimized with hypertuning for the included web views data set
OPTIMIZED_PARAMS_WEB = {
'latent_factors': 30,
'regularization': 7.27,
'unobs_weight': 0.01,
'feature_wt_exp': 5.05,
}
def create_test_and_train_sets(args, input_file, data_type='ratings'):
"""Create test and train sets, for different input data types.
Args:
args: input args for job
input_file: path to csv data file
data_type: 'ratings': MovieLens style ratings matrix
'web_views': Google Analytics time-on-page data
Returns:
array of user IDs for each row of the ratings matrix
array of item IDs for each column of the rating matrix
sparse coo_matrix for training
sparse coo_matrix for test
Raises:
ValueError: if invalid data_type is supplied
"""
if data_type == 'ratings':
return _ratings_train_and_test(args['headers'], args['delimiter'],
input_file)
elif data_type == 'web_views':
return _page_views_train_and_test(input_file)
else:
raise ValueError('data_type arg value %s not supported.' % data_type)
def _ratings_train_and_test(use_headers, delimiter, input_file):
"""Load data set. Assumes Movielens header, format etc.
MovieLens data starts with user_id=1. The max user id is close to
the number of users, but there may be missing user_id's or item ids
(i.e. movies). For our sparse matrices we need to map the user/item ids
down to a zero-based set of indices, without missing values.
Args:
use_headers: (boolean) true = headers, false = no headers
delimiter: (string) delimiter to use for csv
input_file: path to csv data file
Returns:
array of user IDs for each row of the ratings matrix
array of item IDs for each column of the rating matrix
sparse coo_matrix for training
sparse coo_matrix for test
"""
headers = ['user_id', 'item_id', 'rating', 'timestamp']
header_row = 0 if use_headers else None
ratings_df = pd.read_csv(input_file,
sep=delimiter,
names=headers,
header=header_row,
dtype={
'user_id': np.int32,
'item_id': np.int32,
'rating': np.float32,
'timestamp': np.int32,
})
np_users = ratings_df.user_id.as_matrix()
np_items = ratings_df.item_id.as_matrix()
unique_users = np.unique(np_users)
unique_items = np.unique(np_items)
n_users = unique_users.shape[0]
n_items = unique_items.shape[0]
# make indexes for users and items if necessary
max_user = unique_users[-1]
max_item = unique_items[-1]
if n_users != max_user or n_items != max_item:
# make an array of 0-indexed unique user ids corresponding to the dataset
# stack of user ids
z = np.zeros(max_user+1, dtype=int)
z[unique_users] = np.arange(n_users)
u_r = z[np_users]
# make an array of 0-indexed unique item ids corresponding to the dataset
# stack of item ids
z = np.zeros(max_item+1, dtype=int)
z[unique_items] = np.arange(n_items)
i_r = z[np_items]
# construct the ratings set from the three stacks
np_ratings = ratings_df.rating.as_matrix()
ratings = np.zeros((np_ratings.shape[0], 3), dtype=object)
ratings[:, 0] = u_r
ratings[:, 1] = i_r
ratings[:, 2] = np_ratings
else:
ratings = ratings_df.as_matrix(['user_id', 'item_id', 'rating'])
# deal with 1-based user indices
ratings[:, 0] -= 1
ratings[:, 1] -= 1
tr_sparse, test_sparse = _create_sparse_train_and_test(ratings,
n_users, n_items)
return ratings[:, 0], ratings[:, 1], tr_sparse, test_sparse
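# Minimal sketch (illustrative helper, not called by the training job) of the
# zero-based re-indexing trick used above: scattering into a dense lookup
# array maps sparse, possibly gappy ids onto contiguous indices.
def _example_zero_based_reindex():
  ids = np.array([3, 7, 3, 12])  # raw ids with gaps
  unique = np.unique(ids)  # [3, 7, 12]
  lookup = np.zeros(unique[-1] + 1, dtype=int)
  lookup[unique] = np.arange(unique.shape[0])
  return lookup[ids]  # [0, 1, 0, 2]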
def _page_views_train_and_test(input_file):
"""Load page views dataset, and create train and set sparse matrices.
Assumes 'clientId', 'contentId', and 'timeOnPage' columns.
Args:
input_file: path to csv data file
Returns:
array of user IDs for each row of the ratings matrix
array of item IDs for each column of the rating matrix
sparse coo_matrix for training
sparse coo_matrix for test
"""
views_df = pd.read_csv(input_file, sep=',', header=0)
df_items = pd.DataFrame({'contentId': views_df.contentId.unique()})
df_sorted_items = df_items.sort_values('contentId').reset_index()
pds_items = df_sorted_items.contentId
# preprocess data. df.groupby.agg sorts clientId and contentId
df_user_items = views_df.groupby(['clientId', 'contentId']
).agg({'timeOnPage': 'sum'})
  # create a list of (userId, itemId, timeOnPage) ratings, where userId and
  # itemId are 0-indexed
current_u = -1
ux = -1
pv_ratings = []
user_ux = []
for timeonpg in df_user_items.itertuples():
user = timeonpg[0][0]
item = timeonpg[0][1]
# as we go, build a (sorted) list of user ids
if user != current_u:
user_ux.append(user)
ux += 1
current_u = user
    # this binary search makes the preprocessing time O(r log(i)),
    # r = # ratings, i = # items
ix = pds_items.searchsorted(item)[0]
pv_ratings.append((ux, ix, timeonpg[1]))
# convert ratings list and user map to np array
pv_ratings = np.asarray(pv_ratings)
user_ux = np.asarray(user_ux)
# create train and test sets
tr_sparse, test_sparse = _create_sparse_train_and_test(pv_ratings,
ux + 1,
df_items.size)
return user_ux, pds_items.as_matrix(), tr_sparse, test_sparse
def _create_sparse_train_and_test(ratings, n_users, n_items):
"""Given ratings, create sparse matrices for train and test sets.
Args:
ratings: list of ratings tuples (u, i, r)
n_users: number of users
n_items: number of items
Returns:
train, test sparse matrices in scipy coo_matrix format.
"""
# pick a random test set of entries, sorted ascending
  test_set_size = len(ratings) // TEST_SET_RATIO
test_set_idx = np.random.choice(xrange(len(ratings)),
size=test_set_size, replace=False)
test_set_idx = sorted(test_set_idx)
# sift ratings into train and test sets
ts_ratings = ratings[test_set_idx]
tr_ratings = np.delete(ratings, test_set_idx, axis=0)
# create training and test matrices as coo_matrix's
u_tr, i_tr, r_tr = zip(*tr_ratings)
tr_sparse = coo_matrix((r_tr, (u_tr, i_tr)), shape=(n_users, n_items))
u_ts, i_ts, r_ts = zip(*ts_ratings)
test_sparse = coo_matrix((r_ts, (u_ts, i_ts)), shape=(n_users, n_items))
return tr_sparse, test_sparse
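# Tiny coo_matrix sketch (illustrative): three ratings placed in a 2x3 matrix,
#   coo_matrix(([5.0, 3.0, 1.0], ([0, 0, 1], [0, 2, 1])), shape=(2, 3))
# yields row 0 = [5.0, 0.0, 3.0] and row 1 = [0.0, 1.0, 0.0].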
def train_model(args, tr_sparse):
"""Instantiate WALS model and use "simple_train" to factorize the matrix.
Args:
args: training args containing hyperparams
tr_sparse: sparse training matrix
Returns:
the row and column factors in numpy format.
"""
dim = args['latent_factors']
num_iters = args['num_iters']
reg = args['regularization']
unobs = args['unobs_weight']
wt_type = args['wt_type']
feature_wt_exp = args['feature_wt_exp']
obs_wt = args['feature_wt_factor']
tf.logging.info('Train Start: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
# generate model
input_tensor, row_factor, col_factor, model = wals.wals_model(tr_sparse,
dim,
reg,
unobs,
args['weights'],
wt_type,
feature_wt_exp,
obs_wt)
# factorize matrix
session = wals.simple_train(model, input_tensor, num_iters)
tf.logging.info('Train Finish: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
# evaluate output factor matrices
output_row = row_factor.eval(session=session)
output_col = col_factor.eval(session=session)
# close the training session now that we've evaluated the output
session.close()
return output_row, output_col
def save_model(args, user_map, item_map, row_factor, col_factor):
"""Save the user map, item map, row factor and column factor matrices in numpy format.
These matrices together constitute the "recommendation model."
Args:
args: input args to training job
user_map: user map numpy array
item_map: item map numpy array
row_factor: row_factor numpy array
col_factor: col_factor numpy array
"""
model_dir = os.path.join(args['output_dir'], 'model')
# if our output directory is a GCS bucket, write model files to /tmp,
# then copy to GCS
gs_model_dir = None
if model_dir.startswith('gs://'):
gs_model_dir = model_dir
model_dir = '/tmp/{0}'.format(args['job_name'])
os.makedirs(model_dir)
np.save(os.path.join(model_dir, 'user'), user_map)
np.save(os.path.join(model_dir, 'item'), item_map)
np.save(os.path.join(model_dir, 'row'), row_factor)
np.save(os.path.join(model_dir, 'col'), col_factor)
if gs_model_dir:
sh.gsutil('cp', '-r', os.path.join(model_dir, '*'), gs_model_dir)
def generate_recommendations(user_idx, user_rated, row_factor, col_factor, k):
"""Generate recommendations for a user.
Args:
user_idx: the row index of the user in the ratings matrix,
user_rated: the list of item indexes (column indexes in the ratings matrix)
previously rated by that user (which will be excluded from the
recommendations)
row_factor: the row factors of the recommendation model
col_factor: the column factors of the recommendation model
k: number of recommendations requested
Returns:
list of k item indexes with the predicted highest rating, excluding
those that the user has already rated
"""
  # bounds checking: there must be at least k unrated items left to recommend
  assert (col_factor.shape[0] - len(user_rated)) >= k
# retrieve user factor
user_f = row_factor[user_idx]
# dot product of item factors with user factor gives predicted ratings
pred_ratings = col_factor.dot(user_f)
# find candidate recommended item indexes sorted by predicted rating
k_r = k + len(user_rated)
candidate_items = np.argsort(pred_ratings)[-k_r:]
# remove previously rated items and take top k
recommended_items = [i for i in candidate_items if i not in user_rated]
recommended_items = recommended_items[-k:]
# flip to sort highest rated first
recommended_items.reverse()
return recommended_items
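# Hedged usage sketch (factor shapes and values are illustrative): with 4
# users, 5 items and 3 latent factors, recommend 2 unseen items for user 0.
def _example_generate_recommendations():
  rng = np.random.RandomState(0)
  row_factor = rng.rand(4, 3)  # user factors
  col_factor = rng.rand(5, 3)  # item factors
  return generate_recommendations(user_idx=0, user_rated=[1],
                                  row_factor=row_factor,
                                  col_factor=col_factor, k=2)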
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dsnparse
import logging
from datetime import datetime
from pymongo import DESCENDING
from motor.motor_asyncio import AsyncIOMotorClient
from bson.objectid import ObjectId
from livebridge.storages.base import BaseStorage
logger = logging.getLogger(__name__)
class MongoStorage(BaseStorage):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(MongoStorage, cls).__new__(cls)
logger.debug("MongoDB client: {}".format(cls._instance))
return cls._instance
def __init__(self, **kwargs):
self.dsn = kwargs.get("dsn", None)
self.table_name = kwargs.get("table_name")
self.control_table_name = kwargs.get("control_table_name")
# get db name
info = dsnparse.parse(self.dsn)
self.db_name = info.paths.pop() if len(info.paths) == 1 else ""
assert self.db_name != "", "No database name provided in DSN connection string"
@property
async def db(self):
if hasattr(self, "_db") and self._db:
return self._db
logger.debug("Connecting to {}".format(self.dsn))
client = AsyncIOMotorClient(self.dsn)
self._db = client[self.db_name]
return self._db
async def setup(self):
"""Setting up MongoDB collections, if they not exist."""
try:
db = await self.db
collections = await db.list_collection_names()
created = False
if self.table_name not in collections:
# create table
logger.info("Creating MongoDB collection [{}]".format(self.table_name))
await db.create_collection(self.table_name)
await db[self.table_name].create_index([("target_id", DESCENDING), ("post_id", DESCENDING)])
created = True
# create control collection if not already created.
if self.control_table_name and self.control_table_name not in collections:
# create table
logger.info("Creating MongoDB control data collection [{}]".format(self.control_table_name))
await db.create_collection(self.control_table_name)
created = True
return created
except Exception as exc:
logger.error("[DB] Error when setting up MongoDB collections: {}".format(exc))
return False
async def get_last_updated(self, source_id):
try:
coll = (await self.db)[self.table_name]
cursor = coll.find({"source_id": source_id})
cursor.sort("updated", -1).limit(1)
async for doc in cursor:
if doc.get("updated"):
return doc["updated"]
except Exception as exc:
logger.error("[DB] Error when querying for last updated item on {}".format(source_id))
logger.exception(exc)
return None
async def get_known_posts(self, source_id, post_ids):
results = []
try:
object_ids = list(map(lambda x: ObjectId(x), post_ids))
coll = (await self.db)[self.table_name]
cursor = coll.find({"source_id": source_id, "_id": {"$in": object_ids}})
async for doc in cursor:
results.append(str(doc["_id"]))
except Exception as exc:
logger.error("[DB] Error when querying for posts {}".format(post_ids))
logger.exception(exc)
return results
async def get_post(self, target_id, post_id):
try:
coll = (await self.db)[self.table_name]
doc = await coll.find_one({"target_id": target_id, "post_id": post_id})
if doc:
doc["_id"] = str(doc["_id"])
return doc
except Exception as exc:
logger.error("[DB] Error when querying for a post [{}] on {}".format(post_id, target_id))
logger.error(exc)
return None
async def insert_post(self, **kwargs):
try:
target_id = kwargs.get("target_id")
            doc = {
                "target_id": target_id,
                "post_id": str(kwargs.get("post_id")),
                "source_id": kwargs.get("source_id"),
                "text": kwargs.get("text") or " ",
                "sticky": str(int(kwargs.get("sticky", False))),
                "created": kwargs.get("created"),
                "updated": kwargs.get("updated"),
                "target_doc": kwargs.get("target_doc", "")
            }
coll = (await self.db)[self.table_name]
await coll.insert_one(doc)
logger.info("[DB] Post {} {} was saved!".format(kwargs["source_id"], kwargs["post_id"]))
return True
except Exception as exc:
logger.error("[DB] Error when saving {}".format(kwargs))
logger.error(exc)
return False
async def update_post(self, **kwargs):
try:
target_id = kwargs.get("target_id")
            doc = {
                "target_id": target_id,
                "post_id": str(kwargs.get("post_id")),
                "source_id": kwargs.get("source_id"),
                "text": kwargs.get("text") or " ",
                "sticky": str(int(kwargs.get("sticky", 0))),
                "created": kwargs.get("created"),
                "updated": kwargs.get("updated"),
                "target_doc": kwargs.get("target_doc", "")
            }
coll = (await self.db)[self.table_name]
await coll.replace_one({"target_id": kwargs.get("target_id"), "post_id": kwargs.get("post_id")}, doc)
logger.info("[DB] Post {} {} was updated!".format(kwargs.get("post_id"), kwargs.get("target_id")))
return True
except Exception as exc:
logger.error("[DB] Error when updating for a post [{}] on {}".format(
kwargs.get("post_id"), kwargs.get("target_id")))
logger.error(exc)
return False
async def delete_post(self, target_id, post_id):
try:
coll = (await self.db)[self.table_name]
await coll.delete_one({"target_id": target_id, "post_id": post_id}) #, {"justOne": True})
logger.info("[DB] Post {} {} was deleted!".format(target_id, post_id))
return True
except Exception as exc:
logger.error("[DB] Error when deleting for a post [{}] on {}".format(post_id, target_id))
logger.error(exc)
return False
async def get_control(self, updated=None):
try:
query = {"type": "control"}
if updated:
query["updated"] = {"$gt": updated}
coll = (await self.db)[self.control_table_name]
doc = await coll.find_one(query)
if doc:
return doc
except Exception as exc:
logger.error("[DB] Error when querying for a control data on {}".format(self.control_table_name))
logger.error(exc)
return False
async def save_control(self, data):
try:
query = {"type": "control"}
doc = {"type": "control", "data": data, "updated": datetime.now()}
coll = (await self.db)[self.control_table_name]
res = await coll.replace_one(query, doc, upsert=True)
if res.modified_count != 1 and not res.upserted_id:
logger.error("[DB] Control data was not saved.")
else:
logger.info("[DB] Control data was saved.")
return True
except Exception as exc:
logger.error("[DB] Error when saving control data on {}".format(self.control_table_name))
logger.error(exc)
return False
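# Hedged usage sketch (not executed on import): the DSN and table names below
# are illustrative assumptions; MongoStorage is normally wired up by the
# livebridge storage factory.
async def _example_mongo_storage_usage():
    storage = MongoStorage(dsn="mongodb://localhost:27017/livebridge",
                           table_name="posts", control_table_name="control")
    await storage.setup()
    await storage.insert_post(target_id="t1", post_id="p1", source_id="s1",
                              text="hello", created=datetime.now(),
                              updated=datetime.now())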
|
|
#!/usr/bin/env python2
import argparse
import copy
import pyximport
pyximport.install()
import os
import itertools
import operator
from model import DupGenome, DupChromosome, Ext
import networkx as nx
from networkx.algorithms import connected_components
import matplotlib.pyplot as plt
# TODO: this is a hack to import from other directory; should use packages
ringo_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../ringo"))
os.sys.path.insert(0, ringo_path)
import file_ops
# HELPER functions:
def vertex_name(genome, gene, copy, ext):
return "%s%s_%s%s" % (genome, gene, copy, ext)
def matching_edge_name(gene, copyA, copyB, ext):
return "x_%s,%s" % (vertex_name("A", gene, copyA, ext), vertex_name("B", gene, copyB, ext))
def balancing_edge_name(genome, gene1, copy1, ext1, gene2, copy2, ext2):
if (gene1, copy1, ext1) > (gene2, copy2, ext2):
gene1, copy1, ext1, gene2, copy2, ext2 = gene2, copy2, ext2, gene1, copy1, ext1
return "w_%s,%s" % (vertex_name(genome, gene1, copy1, ext1), vertex_name(genome, gene2, copy2, ext2))
def define_y_label(gene_count):
y_label = {}
idx = 1
for genome in ["A", "B"]:
for gene, copies in gene_count.iteritems():
for i in xrange(1, copies + 1):
y_label[vertex_name(genome, gene, i, Ext.HEAD)] = idx
idx += 1
y_label[vertex_name(genome, gene, i, Ext.TAIL)] = idx
idx += 1
return y_label
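# Illustrative: with gene_count {1: 2}, define_y_label assigns labels 1..8,
# walking genome A then genome B, and within each genome the head and tail
# extremities of copy 1 and then copy 2 of gene 1, in that order.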
def build_extremity_order_lists(genome, gene_count):
copy_number = {gene: 1 for gene in gene_count.iterkeys()}
ext_order_list = []
for idx, chrom in enumerate(genome.chromosomes):
ext_order_list.append(build_chromosome_ext_order(copy_number, chrom))
return ext_order_list
def build_chromosome_ext_order(copy_number, chromosome):
    # returns a list of tuples (gene, copy, extremity) for the extremities of the given chromosome
ext_list = []
for gene in chromosome.gene_order:
if gene >= 0:
orientation = [Ext.TAIL, Ext.HEAD]
else:
orientation = [Ext.HEAD, Ext.TAIL]
ext_list.extend([(abs(gene), copy_number[abs(gene)], ext) for ext in orientation])
copy_number[abs(gene)] += 1
return ext_list
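# Worked example (illustrative): for gene_order [1, -2] with copy_number
# {1: 1, 2: 1}, build_chromosome_ext_order yields
# [(1, 1, TAIL), (1, 1, HEAD), (2, 1, HEAD), (2, 1, TAIL)]; the negative sign
# flips the head/tail orientation of gene 2.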
def adjacency_list(genome, gene_count):
    # using the tuple lists generated by "build_extremity_order_lists", yields
    # pairs of tuples representing the adjacencies.
ext_order_list = build_extremity_order_lists(genome, gene_count)
for ext_order in ext_order_list:
# rotate 1 to make the adjacencies:
a = iter(ext_order[1:] + ext_order[:1])
# yield
for i, j in itertools.izip(a, a):
yield i, j
def balancing_extremities(balancing, exclude=None):
if exclude is None:
exclude = set()
for gene, copies in balancing.iteritems():
for copy in copies:
if (gene, copy, Ext.HEAD) not in exclude:
yield gene, copy, Ext.HEAD
if (gene, copy, Ext.TAIL) not in exclude:
yield gene, copy, Ext.TAIL
######################################################################
# MAIN function
######################################################################
def dcj_dupindel_ilp(genome_a, genome_b, output):
# copy genomes to possibly make some changes:
genome_a = copy.deepcopy(genome_a)
genome_b = copy.deepcopy(genome_b)
max_chromosomes = max(genome_a.n_chromosomes(), genome_b.n_chromosomes())
# add capping genes:
for genome in [genome_a, genome_b]:
for c in genome.chromosomes:
if not c.circular:
c.gene_order.append(0)
c.circular = True
for i in range(genome.n_chromosomes(), max_chromosomes):
genome.add_chromosome(DupChromosome([0], circular=True))
# count of each gene on each genome
gene_count = {"A": genome_a.gene_count(), "B": genome_b.gene_count()}
    # for all genes, the total "balanced" count:
total_gene_count = {g: max(gene_count["A"][g], gene_count["B"][g]) for g in
set(gene_count["A"].keys()).union(set(gene_count["B"].keys()))}
# define the y labels -> integer 1..n
y_label = define_y_label(total_gene_count)
# list of possible edges for each vertex:
edges = {}
for gene, copies in total_gene_count.iteritems():
for i in xrange(1, copies + 1):
edges[(gene, i)] = set(range(1, copies + 1))
# try to fix variables:
# Build the BP graph of fixed elements to try to find more variables to fix:
master_graph = nx.Graph()
# fixed vars:
y_fix = {}
z_fix = {}
balancing_fix = {"A": {}, "B": {}}
# add matching edges of genes with single copy:
for (gene, copy_a), set_y in edges.iteritems():
if len(set_y) == 1:
copy_b = list(set_y)[0]
for ext in [Ext.HEAD, Ext.TAIL]:
master_graph.add_edge(("A", gene, copy_a, ext), ("B", gene, copy_b, ext))
# add adjacency edges:
for genome, genome_name in [(genome_a, "A"), (genome_b, "B")]:
for (g_i, copy_a, e_i), (g_j, copy_b, e_j) in adjacency_list(genome, total_gene_count):
master_graph.add_edge((genome_name, g_i, copy_a, e_i), (genome_name, g_j, copy_b, e_j))
# Search components to fix:
rescan = True
edges_to_add = []
vertices_to_remove = []
while rescan:
rescan = False
master_graph.add_edges_from(edges_to_add)
master_graph.remove_nodes_from(vertices_to_remove)
edges_to_add = []
vertices_to_remove = []
# check each connected component:
for comp in connected_components(master_graph):
# get degree-1 vertices:
degree_one = [v for v in comp if master_graph.degree(v) == 1]
            # if there are two degree-one vertices, it is a path;
if len(degree_one) == 2:
genome_i, g_i, copy_a, e_i = degree_one[0]
genome_j, g_j, copy_b, e_j = degree_one[1]
# 1 - check if nodes are balancing, to find AA-, BB- and AB- paths that can be fixed.
i_is_balancing = g_i != 0 and copy_a > gene_count[genome_i][g_i]
j_is_balancing = g_j != 0 and copy_b > gene_count[genome_j][g_j]
if i_is_balancing and j_is_balancing:
if genome_i == genome_j: # AA- or BB-path, close it
balancing_fix[genome_i][degree_one[0][1:]] = degree_one[1][1:]
balancing_fix[genome_i][degree_one[1][1:]] = degree_one[0][1:]
degree_one = []
else:
# TODO: deal with AB-components;
pass
# if the path has homologous genes at the ends, I can join:
elif genome_i != genome_j and g_i == g_j:
# invert to put genome A always in variables _i :
if genome_j == "A":
genome_i, g_i, copy_a, e_i, genome_j, g_j, copy_b, e_j = genome_j, g_j, copy_b, e_j, genome_i, g_i, copy_a, e_i
# check conflict, only add edge if ok:
if copy_b in edges[(g_i, copy_a)]:
edges[(g_i, copy_a)] = {copy_b}
# save edges to add to graph:
for ext in [Ext.HEAD, Ext.TAIL]:
edges_to_add.append((("A", g_i, copy_a, ext), ("B", g_i, copy_b, ext)))
# new edges, re-scan:
rescan = True
# remove possible edges from other copies:
for idx in xrange(1, total_gene_count[g_i] + 1):
if idx == copy_a:
continue
try:
                                # if not there already, an exception is thrown; that's ok
edges[(g_i, idx)].remove(copy_b)
# Add new edges to graph, if the removal created degree 1 vertices:
if len(edges[(g_i, idx)]) == 1:
idx_c = list(edges[(g_i, idx)])[0]
for ext in [Ext.HEAD, Ext.TAIL]:
edges_to_add.append((("A", g_i, idx, ext), ("B", g_i, idx_c, ext)))
except KeyError:
pass
            # if there are no degree-one vertices, it is a cycle, and I can fix the y_i:
elif len(degree_one) == 0:
# get indexes of the y_i:
indexes = [(v, y_label[vertex_name(*v)]) for v in comp]
min_label = min([x[1] for x in indexes])
for v, label in indexes:
y_fix[label] = min_label
z_fix[label] = 0
z_fix[min_label] = 1
vertices_to_remove.extend(comp)
# DRAW?
# nx.draw_circular(master_graph, font_size=8, width=0.5, node_shape="8", node_size=1, with_labels=True)
# nx.draw_spring(master_graph, font_size=8, width=0.5, node_shape="8", node_size=20, with_labels=True)
# nx.draw_spectral(master_graph, font_size=8, width=0.5, node_shape="8", node_size=20, with_labels=True)
# nx.draw_graphviz(master_graph, font_size=8, width=0.5, node_shape="8", node_size=20, with_labels=True)
# plt.savefig('graph.pdf', bbox_inches='tight')
# all fixed, generate ILP:
constraints = []
# consistency and matching 1-to-1
constraints.append("\ Matching and consistency constraints")
# sorting just to make it nicer looking:
for (gene, copy_a) in sorted(edges):
copy_set_b = edges[(gene, copy_a)]
if len(copy_set_b) > 1:
for copy_b in copy_set_b:
constraints.append("%s - %s = 0" % (
matching_edge_name(gene, copy_a, copy_b, Ext.TAIL),
matching_edge_name(gene, copy_a, copy_b, Ext.HEAD)))
constraints.append(
" + ".join([matching_edge_name(gene, copy_a, copy_b, Ext.TAIL) for copy_b in copy_set_b]) + " = 1")
constraints.append("\ Balancing:")
balancing_genes_A = {g: range(gene_count["A"][g] + 1, gene_count["B"][g] + 1) for g in total_gene_count.iterkeys()
if gene_count["A"][g] < gene_count["B"][g]}
balancing_genes_B = {g: range(gene_count["B"][g] + 1, gene_count["A"][g] + 1) for g in total_gene_count.iterkeys()
if gene_count["B"][g] < gene_count["A"][g]}
for genome, balancing in [("A", balancing_genes_A), ("B", balancing_genes_B)]:
constraints.append("\ Genome %s" % genome)
for gene_i, copy_i, ext_i in balancing_extremities(balancing):
# check if fixed:
if (gene_i, copy_i, ext_i) in balancing_fix[genome]:
gene_j, copy_j, ext_j = balancing_fix[genome][(gene_i, copy_i, ext_i)]
if (gene_i, copy_i, ext_i) < (gene_j, copy_j, ext_j):
constraints.append(
"%s = 1" % balancing_edge_name(genome, gene_i, copy_i, ext_i, gene_j, copy_j, ext_j))
# if not, matching 1-to-1:
else:
constraints.append(
" + ".join([balancing_edge_name(genome, gene_i, copy_i, ext_i, gene_j, copy_j, ext_j) for
gene_j, copy_j, ext_j in
balancing_extremities(balancing, exclude=balancing_fix[genome].keys()) if
(gene_i, copy_i, ext_i) != (gene_j, copy_j, ext_j)]) + " = 1")
constraints.append("\ Labelling")
#
# for each adjacency, fix label:
constraints.append("\\ Adjacency have the same label:")
for genome, genome_name in [(genome_a, "A"), (genome_b, "B")]:
for i, j in adjacency_list(genome, total_gene_count):
v_i = vertex_name(genome_name, *i)
v_j = vertex_name(genome_name, *j)
# if already fixed, skip
if y_label[v_i] in y_fix and y_label[v_j] in y_fix:
continue
            # (a possible optimization: edges known to be 0 could also be skipped)
constraints.append("y_%s - y_%s = 0 \\ %s <-> %s " % (y_label[v_i], y_label[v_j], v_i, v_j))
#
constraints.append("\\ Matching edges with the same label:")
for (gene, copy_a) in sorted(edges):
copy_set_b = edges[(gene, copy_a)]
for ext in [Ext.HEAD, Ext.TAIL]:
y_i = y_label[vertex_name("A", gene, copy_a, ext)]
# if edge is set, just make the y_i's equal;
if len(copy_set_b) == 1:
y_j = y_label[vertex_name("B", gene, list(copy_set_b)[0], ext)]
# skip if this y_i's are already fixed
if y_i in y_fix and y_j in y_fix:
continue
constraints.append("y_%s - y_%s = 0 " % (y_i, y_j))
else:
# if edge not set, add both ineqs.
for copy_b in copy_set_b:
y_j = y_label[vertex_name("B", gene, copy_b, ext)]
constraints.append(
"y_%s - y_%s + %s %s <= %d" % (
y_i, y_j, y_i, matching_edge_name(gene, copy_a, copy_b, ext), y_i))
constraints.append(
"y_%s - y_%s + %s %s <= %d" % (
y_j, y_i, y_j, matching_edge_name(gene, copy_a, copy_b, ext), y_j))
constraints.append("\\ Balancing edges with same label:")
for genome, balancing in [("A", balancing_genes_A), ("B", balancing_genes_B)]:
constraints.append("\\ Genome %s" % genome)
for gene_i, copy_i, ext_i in balancing_extremities(balancing, exclude=balancing_fix[genome].keys()):
for gene_j, copy_j, ext_j in balancing_extremities(balancing, exclude=balancing_fix[genome].keys()):
if (gene_i, copy_i, ext_i) >= (gene_j, copy_j, ext_j):
continue
y_i = y_label[vertex_name(genome, gene_i, copy_i, ext_i)]
y_j = y_label[vertex_name(genome, gene_j, copy_j, ext_j)]
                # no fixed labels should show up here, since fixed edges are excluded:
if y_i in y_fix and y_j in y_fix:
continue
constraints.append("y_%s - y_%s + %s %s <= %d" % (
y_i, y_j, y_i, balancing_edge_name(genome, gene_i, copy_i, ext_i, gene_j, copy_j, ext_j), y_i))
constraints.append("y_%s - y_%s + %s %s <= %d" % (
y_j, y_i, y_j, balancing_edge_name(genome, gene_i, copy_i, ext_i, gene_j, copy_j, ext_j), y_j))
    # z variables: since every cycle has to contain vertices from both genomes,
    # we only add z variables for genome A, which has the smaller labels, so a
    # genome B z variable would never be 1.
constraints.append("\\ Z variables")
for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
if vertex[0] == "A":
# if i in z_fix and z_fix[i] == 0:
# continue
# if i in z_fix and z_fix[i] == 1:
# constraints.append("z_%s = 1" % i)
if i not in z_fix:
constraints.append("%d z_%s - y_%s <= 0" % (i, i, i))
#
# # number of genes, to fix distance:
constraints.append("n = %d" % (sum(total_gene_count.itervalues())))
# # number of fixed cycles
constraints.append("c = %d" % (sum(z_fix.itervalues())))
# for g in sorted(total_gene_count):
# print g,total_gene_count[g]
#
# # bounds:
bounds = []
for i in sorted(y_label.itervalues()):
if i not in y_fix:
bounds.append("y_%d <= %d" % (i, i))
#
# # variables:
binary = []
#
# # matching edges
# matching edges, skipping fixed pairs.
matching = ["\ match"]
# for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
for (gene, copy_a), copy_set_b in sorted(edges.items(), key=operator.itemgetter(0)):
if len(copy_set_b) > 1:
for copy_b in copy_set_b:
for ext in [Ext.HEAD, Ext.TAIL]:
matching.append(matching_edge_name(gene, copy_a, copy_b, ext))
print "%d matching edges" % len(matching)
# print "Potentially %d matching edges" % sum([2*x ** 2 for x in gene_count.itervalues()])
binary.extend(matching)
#
# balancing edges:
balancing_edges = [balancing_edge_name(genome, gene_i, copy_i, ext_i, gene_j, copy_j, ext_j) for genome, balancing
in [("A", balancing_genes_A), ("B", balancing_genes_B)] for gene_i, copy_i, ext_i in
balancing_extremities(balancing, exclude=balancing_fix[genome].keys()) for gene_j, copy_j, ext_j
in balancing_extremities(balancing, exclude=balancing_fix[genome].keys()) if
(gene_i, copy_i, ext_i) < (gene_j, copy_j, ext_j)]
print "%d balancing edges" % len(balancing_edges)
binary.extend(balancing_edges)
#
# z cycles:
for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
if i in z_fix: # and z_fix[i] == 0:
continue
if vertex[0] == "B":
continue
binary.append("z_%d" % i)
#
# y labels are general integer variables:
# TODO: remove unused y's and z's from the model. If a y is fixed at 1, it can be removed; just set its z to 1.
general = []
for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
if i not in y_fix:
general.append("y_%d" % i)
#
# number of genes and fixed cycles:
general.append("n")
general.append("c")
# objective function:
objective = ["obj: n - c - " + " - ".join(
["z_%d" % i for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)) if
vertex[0] == "A" and i not in z_fix])]
# write:
with open(output, "w") as f:
for header, lines in [("Minimize", objective), ("Subject to", constraints),
("Bounds", bounds), ("Binary", binary), ("General", general)]:
print >> f, header
print >> f, "\n".join(lines)
def solve_ilp(filename, timelimit=60):
    # import here, so Gurobi is only required when actually solving.
    from gurobipy import read, GRB
    # PyCharm flags the Gurobi names because it cannot resolve them from this import.
model = read(filename)
# set some options:
# time limit in seconds:
model.params.timeLimit = timelimit
# not verbose:
# model.setParam('OutputFlag', False)
# MIP focus, from 0 to 3:
model.params.MIPFocus = 1 # best solutions, less focus on bounds.
model.optimize()
    # A time limit can end the run without an incumbent; querying objVal or
    # writing a .sol file would then raise, so guard on SolCount rather than status.
    if model.SolCount > 0:
        print('FINISHED: Best objective: %g' % model.objVal)
        model.write(filename + '.sol')
    print('Optimization ended with status %d' % model.status)
    if model.status == GRB.INFEASIBLE:
        model.computeIIS()
        # the .ilp extension makes Gurobi write the computed IIS instead of the full model:
        model.write("infeasible.ilp")
        print('\nThe following constraint(s) cannot be satisfied:')
        for c in model.getConstrs():
            if c.IISConstr:
                print('%s' % c.constrName)
    elif model.SolCount > 0:
        z = 0
        n = 0
        for v in model.getVars():
            if v.varName == "n":
                n = v.x
            # MIP solution values can come back as 0.999...; use a tolerance instead of == 1.
            if v.varName.startswith("z") and v.x > 0.5:
                z += 1
        print "N: %d cycles: %d" % (n, z)
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Generates and optionally solve an ILP for the DCJ duplication and indel distance.")
parser.add_argument("-s", "--solve", action="store_true", default=False, help="Solve the model with Gurobi.")
parser.add_argument("file", type=str, help="Genomes file.")
parser.add_argument("g1", type=int, help="Index of genome 1 in the file. (0-indexed).")
parser.add_argument("g2", type=int, help="Index of genome 1 in the file. (0-indexed).")
param = parser.parse_args()
genomes = file_ops.open_genome_file(param.file, as_list=True)
filename = "%s_%d_%d.lp" % (os.path.basename(param.file), param.g1, param.g2)
dcj_dupindel_ilp(genomes[param.g1], genomes[param.g2], filename)
if param.solve:
model = solve_ilp(filename)
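# Example invocation (hypothetical script and file names; -s also solves the
# model with Gurobi):
#
#   python dcj_dupindel_ilp.py -s genomes.txt 0 1
#
# This writes genomes.txt_0_1.lp and, when solving, genomes.txt_0_1.lp.sol.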
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._availability_sets_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_available_sizes_request, build_list_by_subscription_request, build_list_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailabilitySetsOperations:
"""AvailabilitySetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: "_models.AvailabilitySet",
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.AvailabilitySet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilitySet')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
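    # Usage sketch (assumed names, not generated code): "location" is the only
    # required field of the AvailabilitySet payload.
    #
    #     from azure.mgmt.compute.models import AvailabilitySet, Sku
    #     avset = await client.availability_sets.create_or_update(
    #         "my-resource-group", "my-avset",
    #         AvailabilitySet(location="eastus",
    #                         platform_fault_domain_count=2,
    #                         platform_update_domain_count=5,
    #                         sku=Sku(name="Aligned")))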
@distributed_trace_async
async def update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: "_models.AvailabilitySetUpdate",
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.AvailabilitySetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilitySetUpdate')
request = build_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> None:
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.AvailabilitySetListResult"]:
"""Lists all availability sets in a subscription.
:param expand: The expand expression to apply to the operation. Allowed values are
'instanceView'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_03_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AvailabilitySetListResult"]:
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_03_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
@distributed_trace
def list_available_sizes(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes that can be used to create a new virtual machine in
an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.list_available_sizes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes'} # type: ignore
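# Illustrative usage sketch (not generated code): driving this operation group
# through the public async client. ComputeManagementClient and
# DefaultAzureCredential are assumptions based on the standard
# azure-mgmt-compute / azure-identity packages, not names taken from this file.
async def _example_list_availability_sets(subscription_id):
    from azure.identity.aio import DefaultAzureCredential  # assumed dependency
    from azure.mgmt.compute.aio import ComputeManagementClient
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, subscription_id) as client:
            # AsyncItemPaged follows next_link transparently; each item is a
            # deserialized AvailabilitySet model.
            async for avset in client.availability_sets.list_by_subscription():
                print(avset.name, avset.location)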
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/packet-counters/lsp/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines LSP PDU counters.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__received",
"__processed",
"__dropped",
"__sent",
"__retransmit",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"packet-counters",
"lsp",
"state",
]
def _get_received(self):
"""
Getter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/received (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface.
"""
return self.__received
def _set_received(self, v, load=False):
"""
Setter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/received (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_received is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_received() directly.
YANG Description: The number of the specified type of PDU received on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """received must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__received = t
if hasattr(self, "_set"):
self._set()
def _unset_received(self):
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_processed(self):
"""
Getter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/processed (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
return self.__processed
def _set_processed(self, v, load=False):
"""
Setter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/processed (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_processed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_processed() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """processed must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="processed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__processed = t
if hasattr(self, "_set"):
self._set()
def _unset_processed(self):
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_dropped(self):
"""
Getter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/dropped (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
return self.__dropped
def _set_dropped(self, v, load=False):
"""
Setter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/dropped (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_dropped is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dropped() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """dropped must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dropped", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__dropped = t
if hasattr(self, "_set"):
self._set()
def _unset_dropped(self):
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_sent(self):
"""
Getter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/sent (yang:counter32)
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
return self.__sent
def _set_sent(self, v, load=False):
"""
Setter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/sent (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sent is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sent() directly.
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sent must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__sent = t
if hasattr(self, "_set"):
self._set()
def _unset_sent(self):
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_retransmit(self):
"""
Getter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/retransmit (yang:counter32)
YANG Description: The number of the specified type of PDU that have been
retransmitted by the local system on the interface.
"""
return self.__retransmit
def _set_retransmit(self, v, load=False):
"""
Setter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/retransmit (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_retransmit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_retransmit() directly.
YANG Description: The number of the specified type of PDU that have been
retransmitted by the local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """retransmit must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="retransmit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__retransmit = t
if hasattr(self, "_set"):
self._set()
def _unset_retransmit(self):
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
received = __builtin__.property(_get_received)
processed = __builtin__.property(_get_processed)
dropped = __builtin__.property(_get_dropped)
sent = __builtin__.property(_get_sent)
retransmit = __builtin__.property(_get_retransmit)
_pyangbind_elements = OrderedDict(
[
("received", received),
("processed", processed),
("dropped", dropped),
("sent", sent),
("retransmit", retransmit),
]
)
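# Illustrative usage sketch (not generated code): these leaves are config
# false, so ordinary code reads them through the properties while backends
# populate them through the private setters; values are range-checked
# counter32 integers.
#
#     s = state()
#     s._set_received(42)
#     print(s.received)          # -> 42
#     s._set_received(2 ** 32)   # outside 0..4294967295 -> ValueError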
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/packet-counters/lsp/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines LSP PDU counters.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__received",
"__processed",
"__dropped",
"__sent",
"__retransmit",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"packet-counters",
"lsp",
"state",
]
def _get_received(self):
"""
Getter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/received (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface.
"""
return self.__received
def _set_received(self, v, load=False):
"""
Setter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/received (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_received is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_received() directly.
YANG Description: The number of the specified type of PDU received on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """received must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__received = t
if hasattr(self, "_set"):
self._set()
def _unset_received(self):
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_processed(self):
"""
Getter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/processed (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
return self.__processed
def _set_processed(self, v, load=False):
"""
Setter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/processed (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_processed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_processed() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been processed by the local system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """processed must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="processed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__processed = t
if hasattr(self, "_set"):
self._set()
def _unset_processed(self):
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_dropped(self):
"""
Getter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/dropped (yang:counter32)
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
return self.__dropped
def _set_dropped(self, v, load=False):
"""
Setter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/dropped (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_dropped is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dropped() directly.
YANG Description: The number of the specified type of PDU received on the interface
that have been dropped.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """dropped must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dropped", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__dropped = t
if hasattr(self, "_set"):
self._set()
def _unset_dropped(self):
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_sent(self):
"""
Getter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/sent (yang:counter32)
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
return self.__sent
def _set_sent(self, v, load=False):
"""
Setter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/sent (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sent is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sent() directly.
YANG Description: The number of the specified type of PDU that have been sent by the
local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sent must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__sent = t
if hasattr(self, "_set"):
self._set()
def _unset_sent(self):
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
def _get_retransmit(self):
"""
Getter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/retransmit (yang:counter32)
YANG Description: The number of the specified type of PDU that have been
retransmitted by the local system on the interface.
"""
return self.__retransmit
def _set_retransmit(self, v, load=False):
"""
Setter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/lsp/state/retransmit (yang:counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_retransmit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_retransmit() directly.
YANG Description: The number of the specified type of PDU that have been
retransmitted by the local system on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """retransmit must be of a type compatible with yang:counter32""",
"defined-type": "yang:counter32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="retransmit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
}
)
self.__retransmit = t
if hasattr(self, "_set"):
self._set()
def _unset_retransmit(self):
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
received = __builtin__.property(_get_received)
processed = __builtin__.property(_get_processed)
dropped = __builtin__.property(_get_dropped)
sent = __builtin__.property(_get_sent)
retransmit = __builtin__.property(_get_retransmit)
_pyangbind_elements = OrderedDict(
[
("received", received),
("processed", processed),
("dropped", dropped),
("sent", sent),
("retransmit", retransmit),
]
)
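# Illustrative sketch (not generated code): _path() reconstructs the YANG
# data-tree location even for an unparented instance.
#
#     print(state()._path())
#     # ['network-instances', 'network-instance', 'protocols', 'protocol',
#     #  'isis', 'interfaces', 'interface', 'levels', 'level',
#     #  'packet-counters', 'lsp', 'state']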
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of methods for running perf tests on the legacy browser."""
import collections
import fnmatch
import optparse
import pexpect
import random
import re
import sys
import time
import urlparse
import android_commands
from base_test_runner import BaseTestRunner
from perf_tests_helper import PrintPerfResult
from run_tests_helper import *
# Match logcat output that corresponds to console.log() in JavaScript.
LEGACY_BROWSER_CONSOLE_FORMAT_RE = r'.*Console: %s: ([^\s]+).*'
CHROME_CONSOLE_FORMAT_RE = r'.*INFO:CONSOLE.*"%s: ([^\s"]+)".*'
# Identify browser crashes in logcat.
ACTIVITY_CRASH_RE = r'ActivityManager: Process %s \(pid \d+\) has died.'
# Log marker controlling monitor of page flip count of Surface.
SURFACE_FPS_MONITOR_START = 'startSurfaceFpsMonitor'
SURFACE_FPS_MONITOR_STOP = 'stopSurfaceFpsMonitor'
class PerfTestRunner(BaseTestRunner):
"""Class for running performance tests.
Args:
device: Tests will run on the device of this ID.
"""
TARGET_TRACE_FILE = '/sdcard/prof.dat'
def __init__(self, device):
BaseTestRunner.__init__(self, device, 0)
self.trace = None
@classmethod
def GetAllTests(cls, test_filter=None):
"""Returns a list of all tests available in the test suite."""
all_tests = [f for f in dir(cls) if f.endswith('Benchmark')]
if not test_filter:
return all_tests
re_filter = None
try:
re_filter = re.compile(test_filter)
except re.error as e:
print 'Bad filter: ', e
return None
return [t for t in all_tests if re_filter.match(t)]
@staticmethod
def OutputFailure(msg):
print msg
print '[ FAILED ]'
def _SetupBrowserPreferences(self, package):
"""Sets up the browser's preferences for perf testing.
This includes suppressing the "restore tabs" prompt and allowing replay of
SSL content with WPR.
"""
# Only necessary on legacy browser, as Chrome uses command line flags.
if package != LEGACY_BROWSER_PACKAGE:
return
# After force-stopping the android browser, it will display a "restore tabs"
# prompt any time it is opened over the next 30 minutes. Since we need the
# tabs to be restored but don't want a prompt, we set this pref to make
# the browser think it crashed longer than 30 minutes ago.
self.adb.SetFileContents(
'/data/data/%s/shared_prefs/browser_recovery_prefs.xml' % package,
"""<?xml version="1.0" encoding="utf-8" standalone="yes" ?>
<map>
<long name="last_recovered" value="0" />
</map>
""")
# Avoid security prompts to allow WPR to serve SSL content.
self.adb.SetFileContents(
'/data/data/%s/shared_prefs/%s_preferences.xml' % (package, package),
"""<?xml version="1.0" encoding="utf-8" standalone="yes" ?>
<map>
<boolean name="show_security_warnings" value="false" />
</map>
""")
def WaitForLogMatchOrPackageCrash(self, success_re, package, crash_msg):
"""Blocks until a matching line is logged, package crash or timeout.
Args:
success_re: A compiled re to search each line for.
package: Package to monitor for crash.
crash_msg: Additional message to be output upon crash.
Raises:
pexpect.TIMEOUT upon the timeout specified by StartMonitoringLogcat().
Returns:
The re match object if |success_re| is matched first or None if crashed.
"""
error_re = re.compile(ACTIVITY_CRASH_RE % re.escape(package))
m = self.adb.WaitForLogMatch(success_re, error_re)
if m:
return m
# TODO(tonyg): Dump crash stack here (b/5915899).
PerfTestRunner.OutputFailure(
'%s CRASHED while waiting for %s' % (package, crash_msg))
return None
def RunChromeTestLauncherPerfTest(self, url, perf_test_param, test_activity,
expected_results, trace_tag='', timeout=30):
"""Runs a JavaScript based performance test on Chrome's TestLauncher.
The results are printed to the console in a format suitable for the perfbot.
Args:
url: The URL of the JavaScript performance test. The caller is responsible
for ensuring this URL is accessible on the phone (either by copying
locally or starting an HTTP server + forwarder).
perf_test_param: A param to be used by this test (such as fling speed,
zoom distance, etc.).
test_activity: Name of the test activity.
expected_results: A list of tuples of (log_marker, chart_name, trace_name,
units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunPerfTest(
CHROME_PACKAGE, '%s.%s' % (CHROME_TESTS_PACKAGE, test_activity),
r'.*ChromeTest.*%s: ([^\s]+)$', url, expected_results,
trace_tag=trace_tag, browser_extras={'speed': perf_test_param},
timeout=timeout) != None
def RunChromePerfTest(self, url, expected_results, trace_tag='', timeout=30):
"""Runs a JavaScript based performance test on Chrome.
The results are printed to the console in a format suitable for the perfbot.
Args:
url: The URL of the JavaScript performance test. The caller is responsible
for ensuring this URL is accessible on the phone (either by copying
locally or starting an HTTP server + forwarder).
expected_results: A list of tuples of (log_marker, chart_name, trace_name,
units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunPerfTest(
CHROME_PACKAGE, CHROME_ACTIVITY, CHROME_CONSOLE_FORMAT_RE,
url, expected_results, trace_tag=trace_tag, timeout=timeout) != None
def RunChromePerfTestResults(self, url, expected_results, trace_tag='', timeout=30):
"""Runs a JavaScript based performance test on Chrome.
Same as RunChromePerfTest, except that this returns a list of results
in case of success, or None on failure.
Args:
url: The URL of the JavaScript performance test. The caller is responsible
for ensuring this URL is accessible on the phone (either by copying
locally or starting an HTTP server + forwarder).
expected_results: A list of tuples of (log_marker, chart_name, trace_name,
units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
A list of results if the test ran successfully, or None on failure.
"""
return self._RunPerfTest(
CHROME_PACKAGE, CHROME_ACTIVITY, CHROME_CONSOLE_FORMAT_RE,
url, expected_results, trace_tag=trace_tag, timeout=timeout)
def RunChromeUrlCyclerPerfTest(self, urls, expected_results, trace_tag='',
timeout=30):
"""Runs a page loading performance test on Chrome.
The results are printed to the console in a format suitable for the perfbot.
Args:
urls: List of URLs to load. The caller is responsible for ensuring these
URLs are accessible on the phone.
expected_results: A list of tuples of (log_marker, units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunUrlCyclerPerfTest(
CHROME_PACKAGE, CHROME_ACTIVITY, CHROME_CONSOLE_FORMAT_RE,
urls, expected_results, trace_tag=trace_tag, timeout=timeout)
def RunChromeBackgroundMemoryPerfTest(self, urls, expected_results,
trace_tag='', timeout=60):
"""Measure memory usage while Chrome has the given URLs open but is hidden.
The results are printed to the console in a format suitable for the perfbot.
Args:
urls: List of URLs to load. The caller is responsible for ensuring these
URLs are accessible on the phone.
expected_results: A list of log marker strings.
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunBackgroundMemoryPerfTest(
CHROME_PACKAGE, CHROME_ACTIVITY, CHROME_CONSOLE_FORMAT_RE,
urls, expected_results, trace_tag=trace_tag, timeout=timeout)
def RunLegacyBrowserBackgroundMemoryPerfTest(self, urls, expected_results,
trace_tag='', timeout=60):
"""Measure memory usage while Browser has the given URLs open but is hidden.
The results are printed to the console in a format suitable for the perfbot.
Args:
urls: List of URLs to load. The caller is responsible for ensuring these
URLs are accessible on the phone.
expected_results: A list of log marker strings.
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunBackgroundMemoryPerfTest(
LEGACY_BROWSER_PACKAGE, LEGACY_BROWSER_ACTIVITY,
LEGACY_BROWSER_CONSOLE_FORMAT_RE, urls, expected_results,
trace_tag=trace_tag, timeout=timeout)
def RunLegacyBrowserUrlCyclerPerfTest(self, urls, expected_results,
trace_tag='', timeout=30):
"""Runs a page loading performance test on the legacy browser.
The results are printed to the console in a format suitable for the perfbot.
Args:
urls: List of URLs to load. The caller is responsible for ensuring these
URLs are accessible on the phone.
expected_results: A list of tuples of (log_marker, units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunUrlCyclerPerfTest(
LEGACY_BROWSER_PACKAGE, LEGACY_BROWSER_ACTIVITY,
LEGACY_BROWSER_CONSOLE_FORMAT_RE, urls, expected_results,
trace_tag=trace_tag, timeout=timeout)
def RunLegacyBrowserPerfTest(self, url, expected_results, trace_tag='',
timeout=30):
"""Runs a JavaScript based performance test on the legacy Android browser.
The results are printed to the console in a format suitable for the perfbot.
Args:
url: The URL of the JavaScript performance test. The caller is responsible
for ensuring this URL is accessible on the phone (either by copying
locally or starting an HTTP server + forwarder).
expected_results: A list of tuples of (log_marker, chart_name, trace_name,
units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
return self._RunPerfTest(
LEGACY_BROWSER_PACKAGE, LEGACY_BROWSER_ACTIVITY,
LEGACY_BROWSER_CONSOLE_FORMAT_RE, url, expected_results,
trace_tag=trace_tag, timeout=timeout) != None
def StartupBrowser(self, browser_package, browser_activity, token=None,
action='android.intent.action.VIEW', url=None,
browser_extras=None):
"""Starts the given browser to url.
Args:
browser_package: The package of the browser to start (e.g.
'com.google.android.apps.chrome').
browser_activity: The activity of the browser to start (e.g. 'Main').
token: A unique token to identify this load.
action: The action to pass to the browser Intent.
url: The URL to start in the browser, may be empty.
browser_extras: Extra data to pass to the browser Intent.
Returns:
The time at which the intent to start the browser was sent.
"""
benchmark_url = None
log_match_url_re = ''
if url:
benchmark_url = url
if token:
benchmark_url += '#' + token
# We do not match the token here because Android no longer logs the
# fragment or query params (for privacy reasons), or logs them in a
# different encoding. The token is still visible in the console log if
# you need to identify a specific load. So we remove any fragment or
# query params from the original URL before matching.
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(benchmark_url)
log_match_url = urlparse.urlunsplit((scheme, netloc, path, '', ''))
# Android may log URLs unescaped, so we must tolerate the mismatches.
# Note that re.escape also backslash-escapes '%', so the pattern below
# consumes that optional backslash to keep the generated group valid.
log_match_url_re = re.sub(
r'(\\?%[0-9A-Fa-f][0-9A-Fa-f])', r'(\1|.)', re.escape(log_match_url))
activity_started_re = re.compile(
'.*ActivityManager: START.*%s.*%s.*' % (
log_match_url_re, re.escape(browser_package)))
self.adb.StartActivity(browser_package, browser_activity, action=action,
data=benchmark_url, extras=browser_extras,
trace_file_name=PerfTestRunner.TARGET_TRACE_FILE
if self.trace else None)
m = self.WaitForLogMatchOrPackageCrash(
activity_started_re, browser_package, url)
assert m
start_line = m.group(0)
print 'Starting %s...' % browser_package
return android_commands.GetLogTimestamp(start_line)
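# Illustrative helper (hypothetical; mirrors the matching logic above):
# build a logcat-tolerant pattern for a URL whose %XX escapes may be logged
# verbatim or already decoded.
def _example_tolerant_url_re(url):
    escaped = re.escape(url)  # note: this also backslash-escapes '%' itself
    return re.compile(
        re.sub(r'(\\?%[0-9A-Fa-f][0-9A-Fa-f])', r'(\1|.)', escaped))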
def CollectTraceIfNeeded(self, host_file_name, delay_until_trace_ready):
"""Collect both traceview and chrome trace files from the device."""
if not self.trace:
return
host_file_name = os.path.join(CHROME_DIR, host_file_name.replace('/', '_'))
print 'Waiting for tracing to complete...'
time.sleep(max(delay_until_trace_ready, 2.5))
print 'Collecting traceview file: %s' % host_file_name
host_trace_view = host_file_name + '.traceview'
for i in xrange(sys.maxint):
if not os.path.exists(host_trace_view):
break
host_trace_view = host_file_name + '_' + str(i) + '.traceview'
self.adb.Adb().Pull(PerfTestRunner.TARGET_TRACE_FILE, host_trace_view)
CHROME_TRACE_DIRECTORY = '/sdcard/Download/'
CHROME_TRACE_FILE_PATTERN = 'chrome-profile-results-*'
device_contents = self.adb.ListPathContents(CHROME_TRACE_DIRECTORY)
for device_content in device_contents:
if fnmatch.fnmatch(device_content, CHROME_TRACE_FILE_PATTERN):
print 'Collecting chrome_trace: %s' % device_content
device_content = os.path.join(CHROME_TRACE_DIRECTORY, device_content)
trace_event_name = (host_file_name + '_' +
os.path.basename(device_content) +
'.chrometrace')
self.adb.Adb().Pull(device_content, trace_event_name)
self.adb.RunShellCommand('rm %s' % device_content)
def _RunUrlCyclerPerfTest(self, browser_package, browser_activity,
browser_console_log_re, urls, expected_results,
trace_tag='', timeout=30):
"""Runs a JavaScript based performance test on a list of cycling URLs.
The results are printed to the console in a format suitable for the perfbot.
Args:
browser_package: The package of the browser to start (e.g.
'com.google.android.apps.chrome').
browser_activity: The activity of the browser to start (e.g. 'Main').
browser_console_log_re: Regular expression string which identifies
console.log output in adb logcat. Must contain a %s placeholder for
the log_marker.
urls: List of URLs to load. The caller is responsible for ensuring these
URLs are accessible on the phone.
expected_results: A list of tuples of (log_marker, units).
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
self.adb.StartMonitoringLogcat(timeout=timeout)
self.adb.ClearApplicationState(browser_package)
self.StartupBrowser(browser_package, browser_activity, url=urls[0])
results = collections.defaultdict(list)
pss_usages = []
private_dirty_usages = []
try:
for i, url in enumerate(urls):
group_floor = 1 + 10 * (i / 10)
group_ceil = group_floor + 9
group = '%d-%d' % (group_floor, group_ceil)
display_url = url.replace('http://', '')
for log_marker, units in expected_results:
result_re_str = browser_console_log_re % (
log_marker + '_' + re.escape(url))
result_re = re.compile(result_re_str)
m = self.WaitForLogMatchOrPackageCrash(
result_re, browser_package, url)
if not m:
return False
result = m.group(1).split(',')
results[log_marker] += result
# Create chart tabs for 1-10, 11-20, etc.
# Each tab displays 10 individual page traces.
PrintPerfResult(log_marker + trace_tag + '_' + group,
log_marker + '_' + display_url,
result, units)
# Sample memory usage after each collection of expectations.
memory_usage = self.adb.GetMemoryUsage(browser_package)
pss_usages.append(memory_usage['Pss'])
private_dirty_usages.append(memory_usage['Private_Dirty'])
PrintPerfResult('pss' + trace_tag + '_' + group, 'pss_' + display_url,
[memory_usage['Pss']], 'kb')
PrintPerfResult('private_dirty' + trace_tag + '_' + group,
'private_dirty_' + display_url,
[memory_usage['Private_Dirty']], 'kb')
if 'Nvidia' in memory_usage:
PrintPerfResult('nvidia' + trace_tag + '_' + group,
'nvidia_' + display_url,
[memory_usage['Nvidia']], 'kb')
# Create chart tabs for averages across all pages.
for log_marker, units in expected_results:
PrintPerfResult(log_marker + '_avg', log_marker + '_avg' + trace_tag,
results[log_marker], units)
PrintPerfResult('pss_avg', 'pss_avg' + trace_tag, pss_usages, 'kb')
PrintPerfResult('private_dirty_avg', 'private_dirty_avg' + trace_tag,
private_dirty_usages, 'kb')
except pexpect.TIMEOUT:
PerfTestRunner.OutputFailure(
'Timed out after %d seconds while waiting for %s' % (timeout,
result_re_str))
return False
finally:
self.adb.CloseApplication(browser_package)
return True
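# Chart grouping sketch: URLs are bucketed ten per chart tab, so with the
# arithmetic above i = 0..9 lands in group '1-10' and i = 12 in '11-20'
# (group_floor = 1 + 10 * (12 / 10) = 11 under Python 2 integer division).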
def _RunBackgroundMemoryPerfTest(self, browser_package, browser_activity,
browser_console_log_re, urls,
expected_results, trace_tag='', timeout=30):
"""Measure memory usage while browser has the given URLs open but is hidden.
The results are printed to the console in a format suitable for the perfbot.
Args:
browser_package: The package of the browser to start (e.g.
'com.google.android.apps.chrome').
browser_activity: The activity of the browser to start (e.g. 'Main').
browser_console_log_re: Regular expression string which identifies
console.log output in adb logcat. Must contain a %s placeholder for
the log_marker.
urls: List of URLs to load. The caller is responsible for ensuring these
URLs are accessible on the phone.
expected_results: A list of log marker strings.
trace_tag: An optional tag string to append to all trace_names.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
True if the test ran successfully.
"""
self.adb.StartMonitoringLogcat(timeout=timeout)
self.adb.ClearApplicationState(browser_package)
try:
for url in urls:
self.StartupBrowser(browser_package, browser_activity, url=url,
browser_extras={'create_new_tab': True})
# Wait for results to ensure the page loaded. We don't log any values.
for expected_result in expected_results:
result_re_str = browser_console_log_re % re.escape(expected_result)
result_re = re.compile(result_re_str)
m = self.WaitForLogMatchOrPackageCrash(
result_re, browser_package, url)
if not m:
return False
self.adb.StartActivity(package=None, activity=None,
action="android.intent.action.MAIN",
category="android.intent.category.HOME",
wait_for_completion=True)
memory_usage = self.adb.GetMemoryUsage(browser_package)
PrintPerfResult('pss' + trace_tag, 'pss',
[memory_usage['Pss']], 'kb')
PrintPerfResult('private_dirty' + trace_tag, 'private_dirty',
[memory_usage['Private_Dirty']], 'kb')
if 'Nvidia' in memory_usage:
PrintPerfResult('nvidia' + trace_tag, 'nvidia',
[memory_usage['Nvidia']], 'kb')
time.sleep(5)
# TODO(tonyg, husky): factor out this common code! (across all methods)
memory_usage = self.adb.GetMemoryUsage(browser_package)
PrintPerfResult('pss' + trace_tag, 'pss_after_5_secs',
[memory_usage['Pss']], 'kb')
PrintPerfResult('private_dirty' + trace_tag, 'private_dirty_after_5_secs',
[memory_usage['Private_Dirty']], 'kb')
if 'Nvidia' in memory_usage:
PrintPerfResult('nvidia' + trace_tag, 'nvidia_after_5_secs',
[memory_usage['Nvidia']], 'kb')
except pexpect.TIMEOUT:
PerfTestRunner.OutputFailure(
'Timed out after %d seconds while waiting for page to load' % timeout)
return False
finally:
self.adb.CloseApplication(browser_package)
return True
def _RunPerfTest(self, browser_package, browser_activity,
browser_console_log_re, url, expected_results,
trace_tag='', browser_extras=None, timeout=30):
"""Runs a JavaScript based performance test.
The results are printed to the console in a format suitable for the perfbot.
Args:
browser_package: The package of the browser to start (e.g.
'com.google.android.apps.chrome').
browser_activity: The activity of the browser to start (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
browser_console_log_re: Regular expression string which identifies
console.log output in adb logcat. Must contain a %s placeholder for
the log_marker.
url: The URL of the JavaScript performance test. The caller is responsible
for ensuring this URL is accessible on the phone (either by copying
locally or starting an HTTP server + forwarder).
expected_results: A list of tuples of (log_marker, chart_name, trace_name,
units). log_marker is usually shown as the graph name, with the
exception of SURFACE_FPS_MONITOR_START/STOP which are used to control
the monitor of page flip count of Surface.
trace_tag: An optional tag string to append to all trace_names.
browser_extras: Extra data to pass to the browser Intent.
timeout: The browser is killed after this many seconds of inactivity.
Returns:
List of results if test ran successfully. None upon failure.
"""
self.adb.StartMonitoringLogcat(timeout=timeout)
self.adb.ClearApplicationState(browser_package)
error_re = re.compile(ACTIVITY_CRASH_RE % re.escape(browser_package))
results = []
io_stats_before = self.adb.GetIoStats()
self.StartupBrowser(browser_package, browser_activity,
token=str(random.randint(100000, 999999)), url=url,
browser_extras=browser_extras)
try:
for log_marker, chart_name, trace_name, units in expected_results:
result_re_str = browser_console_log_re % re.escape(log_marker)
result_re = re.compile(result_re_str)
m = self.WaitForLogMatchOrPackageCrash(result_re, browser_package, url)
if not m:
return None
# For certain tests, the result is a list enclosed in braces, as in
# '{3.134553, 40389443}'; strip these before splitting, otherwise
# converting result[0] to a float below raises an error. The same
# applies to square brackets, which also occur.
result = m.group(1)
if len(result) > 2:
if result[0] == '{' and result[-1] == '}':
result = result[1:-1]
elif result[0] == '[' and result[-1] == ']':
result = result[1:-1]
result = result.split(',')
results.append(float(result[0]))
if log_marker == SURFACE_FPS_MONITOR_START:
surface_before = self.adb.GetSurfaceStats()
elif log_marker == SURFACE_FPS_MONITOR_STOP:
surface_after = self.adb.GetSurfaceStats()
td = surface_after['timestamp'] - surface_before['timestamp']
seconds = td.seconds + td.microseconds / 1e6
print 'SurfaceMonitorTime: %fsecs' % seconds
surface_fps = (surface_after['page_flip_count'] -
surface_before['page_flip_count']) / seconds
PrintPerfResult('avg_surface_fps', 'avg_surface_fps' + trace_tag,
[int(round(surface_fps))], 'fps')
else:
PrintPerfResult(chart_name, trace_name + trace_tag, result, units)
memory_usage = self.adb.GetMemoryUsage(browser_package)
PrintPerfResult('pss_final_t', 'pss_final_t' + trace_tag,
[memory_usage['Pss']], 'kb')
PrintPerfResult('private_dirty_final_t',
'private_dirty_final_t' + trace_tag,
[memory_usage['Private_Dirty']], 'kb')
if 'Nvidia' in memory_usage:
PrintPerfResult('nvidia_final_t', 'nvidia_final_t' + trace_tag,
[memory_usage['Nvidia']], 'kb')
io_stats_after = self.adb.GetIoStats()
for stat in io_stats_after:
PrintPerfResult(stat, stat + trace_tag,
[io_stats_after[stat] - io_stats_before[stat]],
stat.split('_')[1])
except pexpect.TIMEOUT:
PerfTestRunner.OutputFailure(
'Timed out after %d seconds while waiting for %s' % (timeout,
result_re_str))
return None
finally:
self.adb.CloseApplication(browser_package)
return results
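# Illustrative driver (hypothetical device ID, URL and markers): each
# expected_results tuple names the console.log marker to wait for and how
# the captured value is charted.
#
#   runner = PerfTestRunner('0123456789ABCDEF')
#   runner.RunChromePerfTest(
#       'http://localhost:8000/sunspider.html',
#       [('total', 'sunspider', 'score', 'ms')],
#       trace_tag='_cold')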
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to volumes."""
import collections
import datetime
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from cinder.api import common
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import keymgr
from cinder import objects
from cinder.objects import base as objects_base
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume.flows.api import manage_existing
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
allow_force_upload = cfg.BoolOpt('enable_force_upload',
default=False,
help='Enables the Force option on '
'upload_to_image. This enables '
'running upload_volume on in-use '
'volumes for backends that support it.')
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
default=3600,
help='Cache volume availability zones in '
'memory for the provided duration in '
'seconds')
CONF = cfg.CONF
CONF.register_opt(allow_force_upload)
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
if isinstance(target_obj, objects_base.CinderObject):
# Turn object into dict so target.update can work
target.update(
target_obj.obj_to_primitive()['versioned_object.data'] or {})
else:
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
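# Illustrative sketch: the decorator derives the policy action from the
# wrapped method's name, so for example
#
#   @wrap_check_policy
#   def extend(self, context, volume, new_size):
#       ...
#
# enforces 'volume:extend' against the target volume before the body runs.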
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zones = []
self.availability_zones_last_fetched = None
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
"""Describe the known availability zones
:retval tuple of dicts, each with a 'name' and 'available' key
"""
refresh_cache = False
if enable_cache:
if self.availability_zones_last_fetched is None:
refresh_cache = True
else:
cache_age = timeutils.delta_seconds(
self.availability_zones_last_fetched,
timeutils.utcnow())
if cache_age >= CONF.az_cache_duration:
refresh_cache = True
if refresh_cache or not enable_cache:
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled'])
for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
if refresh_cache:
now = timeutils.utcnow()
self.availability_zones = azs
self.availability_zones_last_fetched = now
LOG.debug("Availability zone cache updated, next update will"
" occur around %s.", now + datetime.timedelta(
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
LOG.info(_LI("Availability Zones retrieved successfully."))
return tuple(azs)
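# Example return value (illustrative): a tuple of dicts such as
#   ({'name': 'nova', 'available': True},
#    {'name': 'az2', 'available': False})
# where a zone is 'available' iff at least one of its services is enabled.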
def _retype_is_possible(self, context,
first_type_id, second_type_id,
first_type=None, second_type=None):
safe = False
if len(self.db.service_get_all_by_topic(context,
'cinder-volume',
disabled=True)) == 1:
safe = True
else:
type_a = first_type or volume_types.get_volume_type(
context,
first_type_id)
type_b = second_type or volume_types.get_volume_type(
context,
second_type_id)
if volume_utils.matching_backend_name(type_a['extra_specs'],
type_b['extra_specs']):
safe = True
return safe
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None,
source_replica=None, consistencygroup=None,
cgsnapshot=None, multiattach=False, source_cg=None):
# NOTE(jdg): we can have a create without size if we're
# doing a create from snap or volume. Currently
# the taskflow api will handle this and pull in the
# size from the source.
# NOTE(jdg): cinderclient sends in a string representation
# of the size value. BUT there is a possibility that somebody
# could call the API directly so the is_int_like check
# handles both cases (string representation of true float or int).
if size and (not utils.is_int_like(size) or int(size) <= 0):
msg = _('Invalid volume size provided for create request: %s '
'(size argument must be an integer (or string '
'representation of an integer) and greater '
'than zero).') % size
raise exception.InvalidInput(reason=msg)
if consistencygroup and (not cgsnapshot and not source_cg):
if not volume_type:
msg = _("volume_type must be provided when creating "
"a volume in a consistency group.")
raise exception.InvalidInput(reason=msg)
cg_voltypeids = consistencygroup.get('volume_type_id')
if volume_type.get('id') not in cg_voltypeids:
msg = _("Invalid volume_type provided: %s (requested "
"type must be supported by this consistency "
"group).") % volume_type
raise exception.InvalidInput(reason=msg)
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
if not self._retype_is_possible(
context,
volume_type['id'],
source_volume['volume_type_id'],
volume_type):
msg = _("Invalid volume_type provided: %s (requested type "
"is not compatible; either match source volume, "
"or omit type argument).") % volume_type['id']
raise exception.InvalidInput(reason=msg)
# When cloning replica (for testing), volume type must be omitted
if source_replica and volume_type:
msg = _("No volume_type should be provided when creating test "
"replica.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot['volume_type_id']:
if not self._retype_is_possible(context,
volume_type['id'],
snapshot['volume_type_id'],
volume_type):
msg = _("Invalid volume_type provided: %s (requested "
"type is not compatible; recommend omitting "
"the type argument).") % volume_type['id']
raise exception.InvalidInput(reason=msg)
# Determine the valid availability zones that the volume could be
# created in (a task in the flow will/can use this information to
# ensure that the availability zone requested is valid).
raw_zones = self.list_availability_zones(enable_cache=True)
availability_zones = set([az['name'] for az in raw_zones])
if CONF.storage_availability_zone:
availability_zones.add(CONF.storage_availability_zone)
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata or {},
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'source_replica': source_replica,
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,
'multiattach': multiattach,
}
try:
sched_rpcapi = (self.scheduler_rpcapi if (not cgsnapshot and
not source_cg) else None)
volume_rpcapi = (self.volume_rpcapi if (not cgsnapshot and
not source_cg) else None)
flow_engine = create_volume.get_flow(self.db,
self.image_service,
availability_zones,
create_what,
sched_rpcapi,
volume_rpcapi)
except Exception:
msg = _('Failed to create api volume flow.')
LOG.exception(msg)
raise exception.CinderException(msg)
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# Cinder's debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Volume created successfully."), resource=vref)
return vref
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if not volume.host:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update quota while "
"deleting volume."))
volume.destroy()
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."),
resource={'type': 'volume',
'id': volume.id})
return
if volume.attach_status == "attached":
# Volume is still attached, need to detach first
LOG.info(_LI('Unable to delete volume: %s, '
'volume is attached.'), volume.id)
raise exception.VolumeAttached(volume_id=volume.id)
if not force and volume.status not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s.") % volume.status
LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
'volume must be available or '
'error, but is %(vol_status)s.'),
{'vol_id': volume.id,
'vol_status': volume.status})
raise exception.InvalidVolume(reason=msg)
if volume.migration_status not in (None, 'deleting'):
# Volume is migrating, wait until done
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently migrating.'), volume.id)
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
if volume.consistencygroup_id is not None:
msg = _("Volume cannot be deleted while in a consistency group.")
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently part of a '
'consistency group.'), volume.id)
raise exception.InvalidVolume(reason=msg)
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if len(snapshots):
LOG.info(_LI('Unable to delete volume: %s, '
'volume currently has snapshots.'), volume.id)
msg = _("Volume still has %d dependent "
"snapshots.") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
volume.status = 'deleting'
volume.terminated_at = timeutils.utcnow()
volume.save()
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
LOG.info(_LI("Delete volume request issued successfully."),
resource=volume)
@wrap_check_policy
def update(self, context, volume, fields):
volume.update(fields)
volume.save()
LOG.info(_LI("Volume updated successfully."), resource=volume)
def get(self, context, volume_id, viewable_admin_meta=False):
volume = objects.Volume.get_by_id(context, volume_id)
if viewable_admin_meta:
ctxt = context.elevated()
admin_metadata = self.db.volume_admin_metadata_get(ctxt,
volume_id)
volume.admin_metadata = admin_metadata
volume.obj_reset_changes()
try:
check_policy(context, 'get', volume)
except exception.PolicyNotAuthorized:
# raise VolumeNotFound instead to make sure Cinder behaves
# as it used to
raise exception.VolumeNotFound(volume_id=volume_id)
LOG.info(_LI("Volume info retrieved successfully."), resource=volume)
return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
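# Examples (illustrative):
#   {}                       -> False
#   {'all_tenants': 'TRUE'}  -> True    (case-insensitive)
#   {'all_tenants': 0}       -> False
#   {'all_tenants': 'yes'}   -> raises InvalidInput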
def get_all(self, context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, viewable_admin_meta=False,
offset=None):
check_policy(context, 'get_all')
if filters is None:
filters = {}
allTenants = self._get_all_tenants_value(filters)
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug("Searching by: %s.", six.text_type(filters))
if context.is_admin and allTenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = objects.VolumeList.get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
else:
if viewable_admin_meta:
context = context.elevated()
volumes = objects.VolumeList.get_all_by_project(
context, context.project_id, marker, limit,
sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
offset=offset)
LOG.info(_LI("Get all volumes completed successfully."))
return volumes
def get_snapshot(self, context, snapshot_id):
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
# FIXME(jdg): The objects don't have the db name entries
# so build the resource tag manually for now.
LOG.info(_LI("Snapshot retrieved successfully."),
resource={'type': 'snapshot',
'id': snapshot.id})
return snapshot
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
volume = objects.Volume.get_by_id(context, volume_id)
LOG.info(_LI("Volume retrieved successfully."), resource=volume)
return volume
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = objects.SnapshotList.get_all(context,
search_opts)
else:
snapshots = objects.SnapshotList.get_all_by_project(
context, context.project_id, search_opts)
LOG.info(_LI("Get all snaphsots completed successfully."))
return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
# NOTE(jdg): check for Race condition bug 1096983
# explicitly get updated ref and check
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
elif volume['status'] == 'in-use':
if volume['multiattach']:
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume must be multiattachable to reserve again.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
else:
msg = _("Volume status must be available to reserve.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Reserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def unreserve_volume(self, context, volume):
if volume['status'] == 'attaching':
attaches = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if attaches:
self.update(context, volume, {"status": "in-use"})
else:
self.update(context, volume, {"status": "available"})
LOG.info(_LI("Unreserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def begin_detaching(self, context, volume):
# NOTE(vbala): The volume status might be 'detaching' already due to
# a previous begin_detaching call. Get updated volume status so that
# we fail such cases.
volume.refresh()
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if volume.migration_status:
return
if (volume.status != 'in-use' or
volume.attach_status != 'attached'):
msg = (_("Unable to detach volume. Volume status must be 'in-use' "
"and attach_status must be 'attached' to detach. "
"Currently: status: '%(status)s', "
"attach_status: '%(attach_status)s.'") %
{'status': volume.status,
'attach_status': volume.attach_status})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.update(context, volume, {"status": "detaching"})
LOG.info(_LI("Begin detaching volume completed successfully."),
resource=volume)
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
LOG.info(_LI("Roll detaching of volume completed successfully."),
resource=volume)
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
attach_results = self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attach_results
@wrap_check_policy
def detach(self, context, volume, attachment_id):
detach_results = self.volume_rpcapi.detach_volume(context, volume,
attachment_id)
LOG.info(_LI("Detach volume completed successfully."),
resource=volume)
return detach_results
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
init_results = self.volume_rpcapi.initialize_connection(context,
volume,
connector)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return init_results
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
results = self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
return results
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
results = self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume)
return results
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None,
cgsnapshot_id=None):
snapshot = self.create_snapshot_in_db(
context, volume, name,
description, force, metadata, cgsnapshot_id)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot_in_db(self, context,
volume, name, description,
force, metadata,
cgsnapshot_id):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if volume['status'].startswith('replica_'):
# Can't snapshot secondary replica
msg = _("Snapshot of secondary replica is not allowed.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Volume %(vol_id)s status must be available, "
"but current status is: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
snapshot = None
try:
kwargs = {
'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata or {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
if hasattr(snapshot, 'id'):
snapshot.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot
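# The try/except above is the usual quota bracket: reserve before the DB
# record is created, commit only once create() succeeds, and roll back
# (after destroying any partially created snapshot) on failure. A condensed
# sketch with illustrative values:
#
#   reservations = QUOTAS.reserve(context, snapshots=1, gigabytes=size)
#   try:
#       snapshot.create()
#       QUOTAS.commit(context, reservations)
#   except Exception:
#       QUOTAS.rollback(context, reservations)
#       raise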
def create_snapshots_in_db(self, context,
volume_list,
name, description,
force, cgsnapshot_id):
snapshot_list = []
for volume in volume_list:
self._create_snapshot_in_db_validate(context, volume, force)
reservations = self._create_snapshots_in_db_reserve(
context, volume_list)
options_list = []
for volume in volume_list:
options = self._create_snapshot_in_db_options(
context, volume, name, description, cgsnapshot_id)
options_list.append(options)
try:
for options in options_list:
snapshot = objects.Snapshot(context=context, **options)
snapshot.create()
snapshot_list.append(snapshot)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
for snap in snapshot_list:
snap.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Snapshot cannot be created because volume %(vol_id)s "
"is not available, current volume status: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
def _create_snapshots_in_db_reserve(self, context, volume_list):
reserve_opts_list = []
total_reserve_opts = {}
try:
for volume in volume_list:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1,
'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reserve_opts_list.append(reserve_opts)
for reserve_opts in reserve_opts_list:
for (key, value) in reserve_opts.items():
if key not in total_reserve_opts:
total_reserve_opts[key] = value
else:
total_reserve_opts[key] += value
reservations = QUOTAS.reserve(context, **total_reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
return reservations
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
result = self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
LOG.info(_LI("Snapshot create request issued successfully."),
resource=result)
return result
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
result = self._create_snapshot(context, volume, name, description,
True, metadata)
LOG.info(_LI("Snapshot force create request issued successfully."),
resource=result)
return result
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
'due to invalid status. '
'Status must be available or '
'error, not %(snap_status)s.'),
{'snap_id': snapshot['id'],
'snap_status': snapshot['status']})
msg = _("Volume Snapshot status must be available or error.")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
if cgsnapshot_id:
msg = _('Unable to delete snapshot %s because it is part of a '
'consistency group.') % snapshot['id']
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
snapshot_obj = self.get_snapshot(context, snapshot['id'])
snapshot_obj.status = 'deleting'
snapshot_obj.save()
volume = objects.Volume.get_by_id(context, snapshot_obj.volume_id)
self.volume_rpcapi.delete_snapshot(context, snapshot_obj,
volume.host)
LOG.info(_LI("Snapshot delete request issued successfully."),
resource=snapshot)
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
snapshot.update(fields)
snapshot.save()
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume metadata completed successfully."),
resource=volume)
return dict(rv)
@wrap_check_policy
def delete_volume_metadata(self, context, volume,
key, meta_type=common.METADATA_TYPES.user):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key, meta_type)
LOG.info(_LI("Delete volume metadata completed successfully."),
resource=volume)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
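# Examples (illustrative): a blank key ('') raises InvalidVolumeMetadata,
# a key or value longer than 255 characters raises
# InvalidVolumeMetadataSize, and e.g. {'bootable': 'true'} passes.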
@wrap_check_policy
def update_volume_metadata(self, context, volume,
metadata, delete=False,
meta_type=common.METADATA_TYPES.user):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
if meta_type == common.METADATA_TYPES.user:
orig_meta = self.get_volume_metadata(context, volume)
elif meta_type == common.METADATA_TYPES.image:
try:
orig_meta = self.get_volume_image_metadata(context,
volume)
except exception.GlanceMetadataNotFound:
orig_meta = {}
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume['id'])
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata,
delete,
meta_type)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume metadata completed successfully."),
resource=volume)
return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
LOG.info(_LI("Get volume metadata key completed successfully."),
resource=volume)
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume admin metadata completed successfully."),
resource=volume)
return dict(rv)
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
LOG.info(_LI("Delete volume admin metadata completed successfully."),
resource=volume)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume admin metadata completed successfully."),
resource=volume)
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot['id'])
LOG.info(_LI("Get snapshot metadata completed successfully."),
resource=snapshot)
return snapshot_obj.metadata
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot['id'])
snapshot_obj.delete_metadata_key(context, key)
LOG.info(_LI("Delete snapshot metadata completed successfully."),
resource=snapshot)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = snapshot.metadata
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
snapshot.metadata = _metadata
snapshot.save()
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update snapshot metadata completed successfully."),
resource=snapshot)
return snapshot.metadata
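# Merge semantics (illustrative): with existing metadata {'a': '1'},
# passing metadata={'b': '2'} yields {'a': '1', 'b': '2'}, while the same
# call with delete=True replaces the mapping, leaving just {'b': '2'}.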
def get_snapshot_metadata_value(self, snapshot, key):
LOG.info(_LI("Get snapshot metadata value not implemented."),
resource=snapshot)
# FIXME(jdg): Huh? Pass?
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume image-metadata completed successfully."),
resource=volume)
return {meta_entry.key: meta_entry.value for meta_entry in db_data}
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be '
'available or in-use, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
if not CONF.enable_force_upload and force:
LOG.info(_LI("Force upload to image is disabled, "
"Force option will be ignored."),
resource={'type': 'volume', 'id': volume['id']})
force = False
self._check_volume_availability(volume, force)
glance_core_properties = CONF.glance_core_properties
if glance_core_properties:
try:
volume_image_metadata = self.get_volume_image_metadata(context,
volume)
custom_property_set = (set(volume_image_metadata).difference
(set(glance_core_properties)))
if custom_property_set:
properties = {custom_property:
volume_image_metadata[custom_property]
for custom_property in custom_property_set}
metadata.update(dict(properties=properties))
except exception.GlanceMetadataNotFound:
# If the volume was not created from an image, no Glance metadata
# is available for that volume in the volume glance
# metadata table.
pass
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
LOG.info(_LI("Copy image to volume completed successfully."),
resource=volume)
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to extend, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s).") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reserve_opts = {'gigabytes': size_increase}
QUOTAS.add_volume_type_opts(context, reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=volume['project_id'],
**reserve_opts)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
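            # A project's consumption counts both committed usage and
            # outstanding reservations.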
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
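        # The reservations are only held, not committed; they are passed to
        # the volume manager, which commits them once the backend finishes
        # the resize or rolls them back on failure.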
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
LOG.info(_LI("Extend volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be available or in-use, '
'but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume %s is already part of an active "
"migration.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = objects.SnapshotList.get_all_for_volume(context, volume['id'])
if snaps:
msg = _("Volume %s must not have snapshots.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle non-replicated volumes for now
rep_status = volume['replication_status']
if rep_status is not None and rep_status != 'disabled':
msg = _("Volume %s must not be replicated.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume %s must not be part of a consistency "
"group.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
        svc_host = volume_utils.extract_host(host, 'backend')
        found = False
        for service in services:
            if utils.service_is_up(service) and service['host'] == svc_host:
                found = True
                break
if not found:
msg = _('No available service named %s') % host
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different '
'than the current host.')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
LOG.info(_LI("Migrate volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
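        # During a migration the destination volume carries
        # migration_status 'target:<source volume id>'; anything else means
        # these two volumes are not a matching migration pair.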
        if new_volume['migration_status'] != expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Migrate volume completion issued successfully."),
resource=volume)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to update readonly flag, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
LOG.info(_LI("Update readonly setting on volume "
"completed successfully."),
resource=volume)
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status: '
'%(vol_status)s on volume: %(vol_id)s. Volume status '
'must be available or '
'in-use.') % {'vol_status': volume['status'],
'vol_id': volume['id']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
            msg = _('migration_policy must be \'on-demand\' or \'never\', '
                    'passed: %s') % migration_policy
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = _('New volume_type same as original: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements.')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volume: %s.') % volume['id']
raise exception.InvalidInput(reason=msg)
        # We're checking here so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
LOG.info(_LI("Retype volume request issued successfully."),
resource=volume)
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None, bootable=False):
if availability_zone is None:
elevated = context.elevated()
try:
svc_host = volume_utils.extract_host(host, 'backend')
service = self.db.service_get_by_host_and_topic(
elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
manage_what = {
'context': context,
'name': name,
'description': description,
'host': host,
'ref': ref,
'volume_type': volume_type,
'metadata': metadata,
'availability_zone': availability_zone,
'bootable': bootable,
}
try:
flow_engine = manage_existing.get_flow(self.scheduler_rpcapi,
self.db,
manage_what)
except Exception:
msg = _('Failed to manage api volume flow.')
LOG.exception(msg)
raise exception.CinderException(msg)
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# cinder's debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vol_ref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Manage volume request issued successfully."),
resource=vol_ref)
return vol_ref
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window.
On start, it triggers volume evacuation.
"""
raise NotImplementedError()
|
|
import os
import sys
import subprocess
import argparse
import json
import psutil
import time
import six
import signal
import pdb
from apscheduler.schedulers.background import BackgroundScheduler
from studio import fs_tracker, model
from studio.util import logs
from studio.queues.local_queue import LocalQueue
from studio.util.gpu_util import get_available_gpus, get_gpu_mapping, get_gpus_summary
from studio.artifacts.artifact import Artifact
from studio.experiments.experiment import Experiment
from studio.util.util import sixdecode, str2duration, retry,\
parse_verbosity, check_for_kb_interrupt
logs.get_logger('apscheduler.scheduler').setLevel(logs.ERROR)
class LocalExecutor(object):
"""Runs job while capturing environment and logs results.
"""
def __init__(self, queue, args):
self.config = args.config
if args.guest:
self.config['database']['guest'] = True
self.task_queue = queue
self.logger = logs.get_logger('LocalExecutor')
self.logger.setLevel(model.parse_verbosity(self.config.get('verbose', None)))
self.logger.debug("Config: ")
self.logger.debug(self.config)
def run(self, experiment):
        if isinstance(experiment, six.string_types):
            # LocalExecutor keeps no persistent db handle, so resolve the
            # experiment key through a short-lived provider.
            with model.get_db_provider(self.config) as db:
                experiment = db.get_experiment(experiment)
elif not isinstance(experiment, Experiment):
raise ValueError("Unknown type of experiment: " +
str(type(experiment)))
self.logger.info("Experiment key: " + experiment.key)
with model.get_db_provider(self.config) as db:
db.start_experiment(experiment)
""" Override env variables with those inside the queued message
"""
env = dict(os.environ)
if 'env' in self.config.keys():
for k, v in six.iteritems(self.config['env']):
if v is not None:
env[str(k)] = str(v)
env['PYTHONUNBUFFERED'] = 'TRUE'
fs_tracker.setup_experiment(env, experiment, clean=False)
log_path = fs_tracker.get_artifact_cache('output', experiment.key)
self.logger.debug('Child process environment:')
self.logger.debug(str(env))
sched = BackgroundScheduler()
sched.start()
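            # This scheduler drives the periodic jobs registered below:
            # experiment checkpoints, metrics snapshots and the stop-watchdog.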
with open(log_path, 'w') as output_file:
python = 'python'
if experiment.pythonver[0] == '3':
python = 'python3'
python = which(python)
cmd = [python, experiment.filename] + experiment.args
cwd = experiment.artifacts['workspace'].local_path
container_artifact = experiment.artifacts.get('_singularity')
if container_artifact:
container = container_artifact.get('local')
if not container:
container = container_artifact.get('qualified')
cwd = fs_tracker.get_artifact_cache(
'workspace', experiment.key)
for tag, art in six.iteritems(experiment.artifacts):
                        local_path = art.get('local')
                        if not art['mutable'] and local_path and \
                                os.path.exists(local_path):
                            os.symlink(
                                local_path,
                                os.path.join(os.path.dirname(cwd), tag)
                            )
if experiment.filename is not None:
cmd = [
'singularity',
'exec',
container,
] + cmd
else:
cmd = ['singularity', 'run', container]
self.logger.info('Running cmd: {0} in {1}'.format(cmd, cwd))
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
cwd=cwd,
text=True
)
def kill_subprocess():
p.kill()
def get_duration(tag: str):
value = self.config.get(tag, '0m')
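                    # Config entries such as saveWorkspaceFrequency are
                    # duration strings ('0m' default; str2duration parses
                    # forms like '30m'), converted to whole minutes for
                    # apscheduler's interval trigger.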
return int(str2duration(value).total_seconds() / 60)
def checkpoint():
try:
db.checkpoint_experiment(experiment)
except BaseException as e:
self.logger.info(e)
check_for_kb_interrupt()
minutes = get_duration('saveWorkspaceFrequency')
sched.add_job(checkpoint, 'interval', minutes=minutes)
metrics_path = fs_tracker.get_artifact_cache(
'_metrics', experiment.key)
minutes = get_duration('saveMetricsFrequency')
sched.add_job(lambda: save_metrics(metrics_path),
'interval', minutes=minutes)
def kill_if_stopped():
try:
db_expr = db.get_experiment(
experiment.key,
getinfo=False)
                    except BaseException:
                        check_for_kb_interrupt()
                        db_expr = None
                    # Transient issues with fetching experiment data may
                    # return None. In that case leave the experiment
                    # running: we cannot act safely on missing state, and
                    # an experiment that runs too long will exceed its
                    # allocated time and be stopped anyway.
if db_expr is not None:
if db_expr.status == 'stopped':
kill_subprocess()
return
if experiment.max_duration is not None and \
time.time() > experiment.time_started + \
int(str2duration(experiment.max_duration)
.total_seconds()):
kill_subprocess()
return
                    # If our task queue is signalled inactive while the
                    # work process is executing, drop the current
                    # execution and exit.
if not self.task_queue.is_active():
kill_subprocess()
sched.add_job(kill_if_stopped, 'interval', seconds=10)
while True:
output = p.stdout.readline()
if output == '' and p.poll() is not None:
break
if output:
line_out = output.strip()
print(line_out)
                        output_file.write(line_out + '\n')
try:
p.wait()
finally:
save_metrics(metrics_path)
sched.shutdown()
db.checkpoint_experiment(experiment)
db.finish_experiment(experiment)
return p.returncode
def allocate_resources(experiment, config=None, verbose=10):
logger = logs.get_logger('allocate_resources')
logger.setLevel(verbose)
logger.info('Allocating resources {} for experiment {}'
.format(experiment.resources_needed, experiment.key))
ret_val = True
    gpus_needed = int(experiment.resources_needed.get('gpus', 0)) \
        if experiment.resources_needed else 0
if gpus_needed > 0:
ret_val = ret_val and allocate_gpus(gpus_needed,
experiment.resources_needed,
config)
else:
allocate_gpus(0)
return ret_val
def allocate_gpus(gpus_needed, resources_needed=None, config=None):
    # Avoid a shared mutable default argument.
    resources_needed = resources_needed or {}
# Only disable gpus if gpus_needed < 0
if gpus_needed < 0:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
return True
elif gpus_needed == 0:
return True
gpu_mem_needed = resources_needed.get('gpuMem', None)
strict = resources_needed.get('gpuMemStrict', False)
available_gpus = get_available_gpus(gpu_mem_needed, strict)
gpu_mapping = get_gpu_mapping()
mapped_gpus = [str(gpu_mapping[g]) for g in available_gpus]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
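    # PCI_BUS_ID makes CUDA enumerate devices in the same order as
    # nvidia-smi, so the mapped indices below select the intended GPUs.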
if len(mapped_gpus) >= gpus_needed:
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
mapped_gpus[:gpus_needed])
return True
else:
return False
def main(args=sys.argv):
    parser = argparse.ArgumentParser(
        description='Studio worker. Usage: studio-local-worker')
parser.add_argument('--config', help='configuration file', default=None)
parser.add_argument(
'--guest',
help='Guest mode (does not require db credentials)',
action='store_true')
parser.add_argument(
'--timeout',
default=0, type=int)
parser.add_argument(
'--verbose',
default='error')
# Register signal handler for signal.SIGUSR1
# which will invoke built-in Python debugger:
signal.signal(signal.SIGUSR1, lambda sig, stack: pdb.set_trace())
parsed_args, script_args = parser.parse_known_args(args)
verbose = parse_verbosity(parsed_args.verbose)
# worker_config = None
# if parsed_args.config is not None:
# print("Local Runner configuration file = {0}".format(parsed_args.config))
# with open(parsed_args.config) as f:
# worker_config = json.load(f)
queue = LocalQueue('local')
# queue = glob.glob(fs_tracker.get_queue_directory() + "/*")
# wait_for_messages(queue, parsed_args.timeout)
    returncode = worker_loop(queue, parsed_args,
                             timeout=parsed_args.timeout,
                             verbose=verbose)
sys.exit(returncode)
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def worker_loop(queue, parsed_args,
single_experiment=False,
timeout=0,
verbose=None):
fetch_artifacts = True
logger = logs.get_logger('worker_loop')
hold_period = 4
retval = 0
while True:
msg = queue.dequeue(acknowledge=False, timeout=timeout)
if not msg:
break
first_exp, ack_key = msg
data_dict = json.loads(sixdecode(first_exp))
experiment_key = data_dict['experiment']['key']
config = data_dict['config']
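        # Each queued message is a JSON document that carries the
        # experiment key together with the full config to run it under.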
parsed_args.config = config
if verbose:
config['verbose'] = verbose
else:
verbose = model.parse_verbosity(config.get('verbose', None))
logger.setLevel(verbose)
logger.debug('Received message: \n{}'.format(data_dict))
executor = LocalExecutor(queue, parsed_args)
with model.get_db_provider(config) as db:
# experiment = experiment_from_dict(data_dict['experiment'])
def try_get_experiment():
experiment = db.get_experiment(experiment_key)
if experiment is None:
raise ValueError(
'experiment is not found - indicates storage failure')
return experiment
experiment = retry(
try_get_experiment,
sleep_time=10,
logger=logger)
if config.get('experimentLifetime', None) and \
int(str2duration(config['experimentLifetime'])
.total_seconds()) + experiment.time_added < time.time():
logger.info(
'Experiment expired (max lifetime of {0} was exceeded)'
.format(config.get('experimentLifetime', None))
)
queue.acknowledge(ack_key)
continue
if allocate_resources(experiment, config, verbose=verbose):
def hold_job():
queue.hold(ack_key, hold_period)
hold_job()
sched = BackgroundScheduler()
sched.add_job(hold_job, 'interval', minutes=hold_period / 2)
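                # Re-holding at half the hold period keeps the message
                # invisible to other workers while the experiment runs;
                # it is only acknowledged in the finally block below.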
sched.start()
try:
python = 'python'
if experiment.pythonver[0] == '3':
python = 'python3'
if '_singularity' not in experiment.artifacts.keys():
pip_diff = pip_needed_packages(
experiment.pythonenv, python)
if any(pip_diff):
logger.info(
'Setting up python packages for experiment')
if pip_install_packages(
pip_diff,
python,
logger
) != 0:
logger.info(
"Installation of all packages together " +
" failed, "
"trying one package at a time")
for pkg in pip_diff:
pip_install_packages([pkg], python, logger)
for tag, item in experiment.artifacts.items():
art: Artifact = item
if fetch_artifacts or art.local_path is None:
get_only_newer: bool = True
if tag == 'workspace':
get_only_newer = False
if not art.is_mutable:
logger.info('Fetching artifact ' + tag)
art.local_path = retry(
lambda: db.get_artifact(art, only_newer=get_only_newer),
sleep_time=10,
logger=logger
)
else:
logger.info('Skipping mutable artifact ' + tag)
returncode = executor.run(experiment)
if returncode != 0:
retval = returncode
finally:
sched.shutdown()
queue.acknowledge(ack_key)
if single_experiment:
logger.info('single_experiment is True, quitting')
return retval
else:
                logger.info('Cannot run experiment ' + experiment.key +
                            ' due to lack of resources. Will retry')
# Debounce failed requests we cannot service yet
time.sleep(config.get('sleep_time', 5))
logger.info("Queue in {0} is empty, quitting"
.format(fs_tracker.get_queue_directory()))
return retval
def pip_install_packages(packages, python='python', logger=None):
pipp = subprocess.Popen(
        [python, '-m', 'pip', 'install'] + list(packages),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pipout, _ = pipp.communicate()
pipout = pipout.decode('utf-8')
# return pip.main(['install'] + list(packages))
if logger:
logger.info("pip output: \n" + pipout)
return pipp.returncode
def wait_for_messages(queue, timeout, logger=None):
wait_time = 0
wait_step = 5
timeout = int(timeout)
if timeout == 0:
return
while not queue.has_next():
if logger:
logger.info(
'No messages found, sleeping for {} s (total wait time {} s)'
.format(wait_step, wait_time))
time.sleep(wait_step)
wait_time += wait_step
if timeout > 0 and timeout < wait_time:
if logger:
logger.info('No jobs found in the queue during {} s'.
format(timeout))
return
def save_metrics(path):
cpu_load = psutil.cpu_percent()
cpu_mem = psutil.virtual_memory().used
timestamp = time.time()
with open(path, 'a') as f:
entry = 'time: {} CPU: {} mem: {} {} \n' \
.format(
timestamp,
cpu_load,
cpu_mem,
get_gpus_summary())
f.write(entry)
def pip_needed_packages(packages, python='python'):
pipp = subprocess.Popen(
[python, '-m', 'pip', 'freeze'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pipout, _ = pipp.communicate()
pipout = pipout.decode('utf-8')
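    # `pip freeze` emits one 'name==version' line per installed package;
    # the set difference below leaves only the requirements that still
    # need installing.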
current_packages = {l.strip() for l in pipout.strip().split('\n')}
# current_packages = {p._key + '==' + p._version for p in
# pip.pip.get_installed_distributions(local_only=True)}
    return set(packages) - current_packages
if __name__ == "__main__":
main()
|
|
from __future__ import unicode_literals
import datetime
import unittest
import warnings
from decimal import Decimal
import pytest
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.dateparse import parse_date
from django.utils.six.moves import reload_module
from rest_framework import filters, generics, serializers, status
from rest_framework.compat import django_filters, reverse
from rest_framework.test import APIRequestFactory
from .models import BaseFilterableItem, BasicModel, FilterableItem
factory = APIRequestFactory()
if django_filters:
class FilterableItemSerializer(serializers.ModelSerializer):
class Meta:
model = FilterableItem
fields = '__all__'
# Basic filter on a list view.
class FilterFieldsRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_fields = ['decimal', 'date']
filter_backends = (filters.DjangoFilterBackend,)
    # These classes are used to test a filter class.
class SeveralFieldsFilter(django_filters.FilterSet):
text = django_filters.CharFilter(lookup_expr='icontains')
decimal = django_filters.NumberFilter(lookup_expr='lt')
date = django_filters.DateFilter(lookup_expr='gt')
class Meta:
model = FilterableItem
fields = ['text', 'decimal', 'date']
class FilterClassRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = SeveralFieldsFilter
filter_backends = (filters.DjangoFilterBackend,)
# These classes are used to test a misconfigured filter class.
class MisconfiguredFilter(django_filters.FilterSet):
text = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = BasicModel
fields = ['text']
class IncorrectlyConfiguredRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = MisconfiguredFilter
filter_backends = (filters.DjangoFilterBackend,)
class FilterClassDetailView(generics.RetrieveAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = SeveralFieldsFilter
filter_backends = (filters.DjangoFilterBackend,)
# These classes are used to test base model filter support
class BaseFilterableItemFilter(django_filters.FilterSet):
text = django_filters.CharFilter()
class Meta:
model = BaseFilterableItem
fields = '__all__'
# Test the same filter using the deprecated internal FilterSet class.
class BaseFilterableItemFilterWithProxy(filters.FilterSet):
text = django_filters.CharFilter()
class Meta:
model = BaseFilterableItem
fields = '__all__'
class BaseFilterableItemFilterRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = BaseFilterableItemFilter
filter_backends = (filters.DjangoFilterBackend,)
class BaseFilterableItemFilterWithProxyRootView(BaseFilterableItemFilterRootView):
filter_class = BaseFilterableItemFilterWithProxy
# Regression test for #814
class FilterFieldsQuerysetView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_fields = ['decimal', 'date']
filter_backends = (filters.DjangoFilterBackend,)
class GetQuerysetView(generics.ListCreateAPIView):
serializer_class = FilterableItemSerializer
filter_class = SeveralFieldsFilter
filter_backends = (filters.DjangoFilterBackend,)
def get_queryset(self):
return FilterableItem.objects.all()
urlpatterns = [
url(r'^(?P<pk>\d+)/$', FilterClassDetailView.as_view(), name='detail-view'),
url(r'^$', FilterClassRootView.as_view(), name='root-view'),
url(r'^get-queryset/$', GetQuerysetView.as_view(),
name='get-queryset-view'),
]
class BaseFilterTests(TestCase):
def setUp(self):
self.original_coreapi = filters.coreapi
filters.coreapi = True # mock it, because not None value needed
self.filter_backend = filters.BaseFilterBackend()
def tearDown(self):
filters.coreapi = self.original_coreapi
def test_filter_queryset_raises_error(self):
with pytest.raises(NotImplementedError):
self.filter_backend.filter_queryset(None, None, None)
def test_get_schema_fields_checks_for_coreapi(self):
filters.coreapi = None
with pytest.raises(AssertionError):
self.filter_backend.get_schema_fields({})
filters.coreapi = True
assert self.filter_backend.get_schema_fields({}) == []
class CommonFilteringTestCase(TestCase):
def _serialize_object(self, obj):
return {'id': obj.id, 'text': obj.text, 'decimal': str(obj.decimal), 'date': obj.date.isoformat()}
def setUp(self):
"""
Create 10 FilterableItem instances.
"""
base_data = ('a', Decimal('0.25'), datetime.date(2012, 10, 8))
for i in range(10):
text = chr(i + ord(base_data[0])) * 3 # Produces string 'aaa', 'bbb', etc.
decimal = base_data[1] + i
date = base_data[2] - datetime.timedelta(days=i * 2)
FilterableItem(text=text, decimal=decimal, date=date).save()
self.objects = FilterableItem.objects
self.data = [
self._serialize_object(obj)
for obj in self.objects.all()
]
class IntegrationTestFiltering(CommonFilteringTestCase):
"""
Integration tests for filtered list views.
"""
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_backend_deprecation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
view = FilterFieldsRootView.as_view()
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertIn("'rest_framework.filters.DjangoFilterBackend' is deprecated.", str(w[-1].message))
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_no_df_deprecation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
import django_filters.rest_framework
class DFFilterFieldsRootView(FilterFieldsRootView):
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
view = DFFilterFieldsRootView.as_view()
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
assert len(w) == 0
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_backend_mro(self):
class CustomBackend(filters.DjangoFilterBackend):
def filter_queryset(self, request, queryset, view):
assert False, "custom filter_queryset should run"
class DFFilterFieldsRootView(FilterFieldsRootView):
filter_backends = (CustomBackend,)
view = DFFilterFieldsRootView.as_view()
request = factory.get('/')
with pytest.raises(AssertionError, message="custom filter_queryset should run"):
view(request).render()
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_get_filtered_fields_root_view(self):
"""
GET requests to paginated ListCreateAPIView should return paginated results.
"""
view = FilterFieldsRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
# Tests that the decimal filter works.
search_decimal = Decimal('2.25')
request = factory.get('/', {'decimal': '%s' % search_decimal})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal]
assert response.data == expected_data
# Tests that the date filter works.
search_date = datetime.date(2012, 9, 22)
request = factory.get('/', {'date': '%s' % search_date}) # search_date str: '2012-09-22'
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if parse_date(f['date']) == search_date]
assert response.data == expected_data
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_filter_with_queryset(self):
"""
Regression test for #814.
"""
view = FilterFieldsQuerysetView.as_view()
# Tests that the decimal filter works.
search_decimal = Decimal('2.25')
request = factory.get('/', {'decimal': '%s' % search_decimal})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal]
assert response.data == expected_data
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_filter_with_get_queryset_only(self):
"""
Regression test for #834.
"""
view = GetQuerysetView.as_view()
request = factory.get('/get-queryset/')
view(request).render()
# Used to raise "issubclass() arg 2 must be a class or tuple of classes"
# here when neither `model' nor `queryset' was specified.
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_get_filtered_class_root_view(self):
"""
GET requests to filtered ListCreateAPIView that have a filter_class set
should return filtered results.
"""
view = FilterClassRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
# Tests that the decimal filter set with 'lt' in the filter class works.
search_decimal = Decimal('4.25')
request = factory.get('/', {'decimal': '%s' % search_decimal})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if Decimal(f['decimal']) < search_decimal]
assert response.data == expected_data
# Tests that the date filter set with 'gt' in the filter class works.
search_date = datetime.date(2012, 10, 2)
request = factory.get('/', {'date': '%s' % search_date}) # search_date str: '2012-10-02'
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if parse_date(f['date']) > search_date]
assert response.data == expected_data
# Tests that the text filter set with 'icontains' in the filter class works.
search_text = 'ff'
request = factory.get('/', {'text': '%s' % search_text})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if search_text in f['text'].lower()]
assert response.data == expected_data
        # Tests that multiple filters work.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
request = factory.get('/', {
'decimal': '%s' % (search_decimal,),
'date': '%s' % (search_date,)
})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if parse_date(f['date']) > search_date and
Decimal(f['decimal']) < search_decimal]
assert response.data == expected_data
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_incorrectly_configured_filter(self):
"""
An error should be displayed when the filter class is misconfigured.
"""
view = IncorrectlyConfiguredRootView.as_view()
request = factory.get('/')
self.assertRaises(AssertionError, view, request)
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_base_model_filter(self):
"""
The `get_filter_class` model checks should allow base model filters.
"""
view = BaseFilterableItemFilterRootView.as_view()
request = factory.get('/?text=aaa')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert len(response.data) == 1
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_base_model_filter_with_proxy(self):
"""
The `get_filter_class` model checks should allow base model filters.
"""
view = BaseFilterableItemFilterWithProxyRootView.as_view()
request = factory.get('/?text=aaa')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert len(response.data) == 1
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_unknown_filter(self):
"""
GET requests with filters that aren't configured should return 200.
"""
view = FilterFieldsRootView.as_view()
search_integer = 10
request = factory.get('/', {'integer': '%s' % search_integer})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
@override_settings(ROOT_URLCONF='tests.test_filters')
class IntegrationTestDetailFiltering(CommonFilteringTestCase):
"""
Integration tests for filtered detail views.
"""
def _get_url(self, item):
return reverse('detail-view', kwargs=dict(pk=item.pk))
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_get_filtered_detail_view(self):
"""
GET requests to filtered RetrieveAPIView that have a filter_class set
should return filtered results.
"""
item = self.objects.all()[0]
data = self._serialize_object(item)
# Basic test with no filter.
response = self.client.get(self._get_url(item))
assert response.status_code == status.HTTP_200_OK
assert response.data == data
        # Test a decimal filter that should exclude the item (404).
search_decimal = Decimal('4.25')
high_item = self.objects.filter(decimal__gt=search_decimal)[0]
response = self.client.get(
'{url}'.format(url=self._get_url(high_item)),
{'decimal': '{param}'.format(param=search_decimal)})
assert response.status_code == status.HTTP_404_NOT_FOUND
        # Test a decimal filter that should match the item.
search_decimal = Decimal('4.25')
low_item = self.objects.filter(decimal__lt=search_decimal)[0]
low_item_data = self._serialize_object(low_item)
response = self.client.get(
'{url}'.format(url=self._get_url(low_item)),
{'decimal': '{param}'.format(param=search_decimal)})
assert response.status_code == status.HTTP_200_OK
assert response.data == low_item_data
        # Tests that multiple filters work.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
valid_item = self.objects.filter(decimal__lt=search_decimal, date__gt=search_date)[0]
valid_item_data = self._serialize_object(valid_item)
response = self.client.get(
'{url}'.format(url=self._get_url(valid_item)), {
'decimal': '{decimal}'.format(decimal=search_decimal),
'date': '{date}'.format(date=search_date)
})
assert response.status_code == status.HTTP_200_OK
assert response.data == valid_item_data
class SearchFilterModel(models.Model):
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
class SearchFilterSerializer(serializers.ModelSerializer):
class Meta:
model = SearchFilterModel
fields = '__all__'
class SearchFilterTests(TestCase):
def setUp(self):
# Sequence of title/text is:
#
# z abc
# zz bcd
# zzz cde
# ...
for idx in range(10):
title = 'z' * (idx + 1)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
SearchFilterModel(title=title, text=text).save()
def test_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('title', 'text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'b'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'z', 'text': 'abc'},
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
def test_search_returns_same_queryset_if_no_search_fields_or_terms_provided(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
view = SearchListView.as_view()
request = factory.get('/')
response = view(request)
expected = SearchFilterSerializer(SearchFilterModel.objects.all(),
many=True).data
assert response.data == expected
def test_exact_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('=title', 'text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'zzz'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'zzz', 'text': 'cde'}
]
def test_startswith_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('title', '^text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'b'})
response = view(request)
assert response.data == [
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
def test_regexp_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('$title', '$text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'z{2} ^b'})
response = view(request)
assert response.data == [
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
def test_search_with_nonstandard_search_param(self):
with override_settings(REST_FRAMEWORK={'SEARCH_PARAM': 'query'}):
reload_module(filters)
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('title', 'text')
view = SearchListView.as_view()
request = factory.get('/', {'query': 'b'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'z', 'text': 'abc'},
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
reload_module(filters)
class AttributeModel(models.Model):
label = models.CharField(max_length=32)
class SearchFilterModelFk(models.Model):
title = models.CharField(max_length=20)
attribute = models.ForeignKey(AttributeModel, on_delete=models.CASCADE)
class SearchFilterFkSerializer(serializers.ModelSerializer):
class Meta:
model = SearchFilterModelFk
fields = '__all__'
class SearchFilterFkTests(TestCase):
def test_must_call_distinct(self):
filter_ = filters.SearchFilter()
prefixes = [''] + list(filter_.lookup_prefixes)
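        # Exercise the bare field name plus every supported search-field
        # prefix to confirm plain FK traversals never require DISTINCT.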
for prefix in prefixes:
assert not filter_.must_call_distinct(
SearchFilterModelFk._meta,
["%stitle" % prefix]
)
assert not filter_.must_call_distinct(
SearchFilterModelFk._meta,
["%stitle" % prefix, "%sattribute__label" % prefix]
)
def test_must_call_distinct_restores_meta_for_each_field(self):
# In this test case the attribute of the fk model comes first in the
# list of search fields.
filter_ = filters.SearchFilter()
prefixes = [''] + list(filter_.lookup_prefixes)
for prefix in prefixes:
assert not filter_.must_call_distinct(
SearchFilterModelFk._meta,
["%sattribute__label" % prefix, "%stitle" % prefix]
)
class SearchFilterModelM2M(models.Model):
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
attributes = models.ManyToManyField(AttributeModel)
class SearchFilterM2MSerializer(serializers.ModelSerializer):
class Meta:
model = SearchFilterModelM2M
fields = '__all__'
class SearchFilterM2MTests(TestCase):
def setUp(self):
# Sequence of title/text/attributes is:
#
# z abc [1, 2, 3]
# zz bcd [1, 2, 3]
# zzz cde [1, 2, 3]
# ...
for idx in range(3):
label = 'w' * (idx + 1)
            AttributeModel(label=label).save()
for idx in range(10):
title = 'z' * (idx + 1)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
SearchFilterModelM2M(title=title, text=text).save()
SearchFilterModelM2M.objects.get(title='zz').attributes.add(1, 2, 3)
def test_m2m_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModelM2M.objects.all()
serializer_class = SearchFilterM2MSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('=title', 'text', 'attributes__label')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'zz'})
response = view(request)
assert len(response.data) == 1
def test_must_call_distinct(self):
filter_ = filters.SearchFilter()
prefixes = [''] + list(filter_.lookup_prefixes)
for prefix in prefixes:
assert not filter_.must_call_distinct(
SearchFilterModelM2M._meta,
["%stitle" % prefix]
)
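            # A many-to-many traversal can match the same row several
            # times, so DISTINCT is required once attributes__label is
            # searched.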
assert filter_.must_call_distinct(
SearchFilterModelM2M._meta,
["%stitle" % prefix, "%sattributes__label" % prefix]
)
class OrderingFilterModel(models.Model):
title = models.CharField(max_length=20, verbose_name='verbose title')
text = models.CharField(max_length=100)
class OrderingFilterRelatedModel(models.Model):
related_object = models.ForeignKey(OrderingFilterModel, related_name="relateds", on_delete=models.CASCADE)
class OrderingFilterSerializer(serializers.ModelSerializer):
class Meta:
model = OrderingFilterModel
fields = '__all__'
class DjangoFilterOrderingModel(models.Model):
date = models.DateField()
text = models.CharField(max_length=10)
class Meta:
ordering = ['-date']
class DjangoFilterOrderingSerializer(serializers.ModelSerializer):
class Meta:
model = DjangoFilterOrderingModel
fields = '__all__'
class DjangoFilterOrderingTests(TestCase):
def setUp(self):
data = [{
'date': datetime.date(2012, 10, 8),
'text': 'abc'
}, {
'date': datetime.date(2013, 10, 8),
'text': 'bcd'
}, {
'date': datetime.date(2014, 10, 8),
'text': 'cde'
}]
for d in data:
DjangoFilterOrderingModel.objects.create(**d)
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_default_ordering(self):
class DjangoFilterOrderingView(generics.ListAPIView):
serializer_class = DjangoFilterOrderingSerializer
queryset = DjangoFilterOrderingModel.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ['text']
ordering = ('-date',)
view = DjangoFilterOrderingView.as_view()
request = factory.get('/')
response = view(request)
assert response.data == [
{'id': 3, 'date': '2014-10-08', 'text': 'cde'},
{'id': 2, 'date': '2013-10-08', 'text': 'bcd'},
{'id': 1, 'date': '2012-10-08', 'text': 'abc'}
]
class OrderingFilterTests(TestCase):
def setUp(self):
# Sequence of title/text is:
#
# zyx abc
# yxw bcd
# xwv cde
for idx in range(3):
title = (
chr(ord('z') - idx) +
chr(ord('y') - idx) +
chr(ord('x') - idx)
)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
OrderingFilterModel(title=title, text=text).save()
def test_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
def test_reverse_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '-text'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_incorrectfield_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'foobar'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_default_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('')
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_default_ordering_using_string(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('')
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_ordering_by_aggregate_field(self):
# create some related models to aggregate order by
num_objs = [2, 5, 3]
for obj, num_relateds in zip(OrderingFilterModel.objects.all(),
num_objs):
for _ in range(num_relateds):
new_related = OrderingFilterRelatedModel(
related_object=obj
)
new_related.save()
class OrderingListView(generics.ListAPIView):
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = '__all__'
queryset = OrderingFilterModel.objects.all().annotate(
models.Count("relateds"))
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'relateds__count'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
]
def test_ordering_with_nonstandard_ordering_param(self):
with override_settings(REST_FRAMEWORK={'ORDERING_PARAM': 'order'}):
reload_module(filters)
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'order': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
reload_module(filters)
def test_get_template_context(self):
class OrderingListView(generics.ListAPIView):
ordering_fields = '__all__'
serializer_class = OrderingFilterSerializer
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
request = factory.get('/', {'ordering': 'title'}, HTTP_ACCEPT='text/html')
view = OrderingListView.as_view()
response = view(request)
self.assertContains(response, 'verbose title')
def test_ordering_with_overridden_get_serializer_class(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
# note: no ordering_fields and serializer_class specified
def get_serializer_class(self):
return OrderingFilterSerializer
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
def test_ordering_with_improper_configuration(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
# note: no ordering_fields and serializer_class
# or get_serializer_class specified
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
with self.assertRaises(ImproperlyConfigured):
view(request)
class SensitiveOrderingFilterModel(models.Model):
username = models.CharField(max_length=20)
password = models.CharField(max_length=100)
# Three different styles of serializer.
# All should allow ordering by username, but not by password.
class SensitiveDataSerializer1(serializers.ModelSerializer):
username = serializers.CharField()
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'username')
class SensitiveDataSerializer2(serializers.ModelSerializer):
username = serializers.CharField()
password = serializers.CharField(write_only=True)
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'username', 'password')
class SensitiveDataSerializer3(serializers.ModelSerializer):
user = serializers.CharField(source='username')
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'user')
class SensitiveOrderingFilterTests(TestCase):
def setUp(self):
for idx in range(3):
username = {0: 'userA', 1: 'userB', 2: 'userC'}[idx]
password = {0: 'passA', 1: 'passC', 2: 'passB'}[idx]
SensitiveOrderingFilterModel(username=username, password=password).save()
def test_order_by_serializer_fields(self):
for serializer_cls in [
SensitiveDataSerializer1,
SensitiveDataSerializer2,
SensitiveDataSerializer3
]:
class OrderingListView(generics.ListAPIView):
queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
filter_backends = (filters.OrderingFilter,)
serializer_class = serializer_cls
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '-username'})
response = view(request)
if serializer_cls == SensitiveDataSerializer3:
username_field = 'user'
else:
username_field = 'username'
# Note: Inverse username ordering correctly applied.
assert response.data == [
{'id': 3, username_field: 'userC'},
{'id': 2, username_field: 'userB'},
{'id': 1, username_field: 'userA'},
]
def test_cannot_order_by_non_serializer_fields(self):
for serializer_cls in [
SensitiveDataSerializer1,
SensitiveDataSerializer2,
SensitiveDataSerializer3
]:
class OrderingListView(generics.ListAPIView):
queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
filter_backends = (filters.OrderingFilter,)
serializer_class = serializer_cls
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'password'})
response = view(request)
if serializer_cls == SensitiveDataSerializer3:
username_field = 'user'
else:
username_field = 'username'
# Note: The passwords are not in order. Default ordering is used.
assert response.data == [
{'id': 1, username_field: 'userA'}, # PassB
{'id': 2, username_field: 'userB'}, # PassC
{'id': 3, username_field: 'userC'}, # PassA
]
|
|
from __future__ import unicode_literals
import datetime
import unittest
from decimal import Decimal
from django import forms, test
from django.apps import apps
from django.core import checks, validators
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models, transaction
from django.db.models.fields import (
NOT_PROVIDED, AutoField, BigIntegerField, BinaryField, BooleanField,
CharField, CommaSeparatedIntegerField, DateField, DateTimeField,
DecimalField, EmailField, FilePathField, FloatField, GenericIPAddressField,
IntegerField, IPAddressField, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField,
)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Bar, BigD, BigIntegerModel, BigS, BooleanModel, DataModel, DateTimeModel,
Document, FksToBooleans, FkToChar, FloatModel, Foo, GenericIPAddress,
IntegerModel, NullBooleanModel, PositiveIntegerModel,
PositiveSmallIntegerModel, Post, PrimaryKeyCharModel, RenamedField,
SmallIntegerModel, VerboseNameField, Whiz, WhizIter, WhizIterEmpty,
)
class BasicFieldTests(test.TestCase):
def test_show_hidden_initial(self):
"""
Regression test for #12913. Make sure fields with choices respect
show_hidden_initial as a kwarg to models.Field.formfield()
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=False)
self.assertFalse(form_field.show_hidden_initial)
def test_nullbooleanfield_blank(self):
"""
Regression test for #13071: NullBooleanField should not throw
a validation error when given a value of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
try:
nullboolean.full_clean()
except ValidationError as e:
self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
def test_field_repr(self):
"""
Regression test for #5931: __repr__ of a field also displays its name
"""
f = Foo._meta.get_field('a')
self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
f = models.fields.CharField()
self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
def test_field_name(self):
"""
Regression test for #14695: explicitly defined field name overwritten
by model's attribute name.
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, 'get_fieldname_display'))
self.assertFalse(hasattr(instance, 'get_modelname_display'))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 24):
self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
'verbose field%d' % i)
self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
def test_float_validates_object(self):
instance = FloatModel(size=2.5)
# Try setting float field to unsaved object
instance.size = instance
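        # Wrap the failing save in atomic() so it doesn't leave the test
        # connection in a broken transaction state.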
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Set value to valid and save
instance.size = 2.5
instance.save()
self.assertTrue(instance.id)
# Set field to object on saved instance
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Try setting field to object on retrieved object
obj = FloatModel.objects.get(pk=instance.id)
obj.size = obj
with self.assertRaises(TypeError):
obj.save()
def test_choices_form_class(self):
"""Can supply a custom choices form class. Regression for #20999."""
choices = [('a', 'a')]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
def test_field_str(self):
from django.utils.encoding import force_str
f = Foo._meta.get_field('a')
self.assertEqual(force_str(f), "model_fields.Foo.a")
class DecimalFieldTests(test.TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal("3"))
self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
self.assertRaises(ValidationError, f.to_python, "abc")
def test_default(self):
f = models.DecimalField(default=Decimal("0.00"))
self.assertEqual(f.get_default(), Decimal("0.00"))
def test_format(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f._format(f.to_python(2)), '2.0')
self.assertEqual(f._format(f.to_python('2.6')), '2.6')
self.assertEqual(f._format(None), None)
def test_get_db_prep_lookup(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def test_filter_with_strings(self):
"""
We should be able to filter decimal fields using strings (#8023)
"""
Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
self.assertEqual(list(Foo.objects.filter(d='1.23')), [])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d="12.9")
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal("12.9"))
def test_lookup_really_big_value(self):
"""
Ensure that really big values can be used in a filter statement, even
with older Python versions.
"""
# This should not crash. That counts as a win for our purposes.
Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
def test_callable_default(self):
"""Test the use of a lazy callable for ForeignKey.default"""
a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
b = Bar.objects.create(b="bcd")
self.assertEqual(b.a, a)
@test.skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_empty_string_fk(self):
"""
Test that foreign key values to empty strings don't get converted
to None (#19299)
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string='')
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
self.assertEqual(fk_model_empty.out, char_model_empty)
def test_warning_when_unique_true_on_fk(self):
class FKUniqueTrue(models.Model):
fk_field = models.ForeignKey(Foo, unique=True)
model = FKUniqueTrue()
expected_warnings = [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=FKUniqueTrue.fk_field.field,
id='fields.W342',
)
]
warnings = model.check()
self.assertEqual(warnings, expected_warnings)
def test_related_name_converted_to_text(self):
rel_name = Bar._meta.get_field('a').remote_field.related_name
self.assertIsInstance(rel_name, six.text_type)
def test_abstract_model_pending_operations(self):
"""
        Foreign key fields declared on abstract models should not add lazy
        relations to resolve relationships declared as strings (refs #24215).
"""
pending_ops_before = list(apps._pending_operations.items())
class AbstractForeignKeyModel(models.Model):
fk = models.ForeignKey('missing.FK')
class Meta:
abstract = True
self.assertIs(AbstractForeignKeyModel._meta.apps, apps)
self.assertEqual(
pending_ops_before,
list(apps._pending_operations.items()),
"Pending lookup added for a foreign key on an abstract model"
)
class ManyToManyFieldTests(test.TestCase):
def test_abstract_model_pending_operations(self):
"""
        Many-to-many fields declared on abstract models should not add lazy
        relations to resolve relationships declared as strings (refs #24215).
"""
pending_ops_before = list(apps._pending_operations.items())
class AbstractManyToManyModel(models.Model):
            m2m = models.ManyToManyField('missing.FK')
class Meta:
abstract = True
self.assertIs(AbstractManyToManyModel._meta.apps, apps)
self.assertEqual(
pending_ops_before,
list(apps._pending_operations.items()),
"Pending lookup added for a many-to-many field on an abstract model"
)
class DateTimeFieldTests(test.TestCase):
def test_datetimefield_to_python_usecs(self):
"""DateTimeField.to_python should support usecs"""
f = models.DateTimeField()
self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
def test_timefield_to_python_usecs(self):
"""TimeField.to_python should support usecs"""
f = models.TimeField()
self.assertEqual(f.to_python('01:02:03.000004'),
datetime.time(1, 2, 3, 4))
self.assertEqual(f.to_python('01:02:03.999999'),
datetime.time(1, 2, 3, 999999))
@test.skipUnlessDBFeature("supports_microsecond_precision")
def test_datetimes_save_completely(self):
dat = datetime.date(2014, 3, 12)
datetim = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
tim = datetime.time(21, 22, 23, 240000)
DateTimeModel.objects.create(d=dat, dt=datetim, t=tim)
obj = DateTimeModel.objects.first()
self.assertTrue(obj)
self.assertEqual(obj.d, dat)
self.assertEqual(obj.dt, datetim)
self.assertEqual(obj.t, tim)
class BooleanFieldTests(test.TestCase):
def _test_get_db_prep_lookup(self, f):
self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def _test_to_python(self, f):
self.assertIs(f.to_python(1), True)
self.assertIs(f.to_python(0), False)
def test_booleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.BooleanField())
def test_nullbooleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.NullBooleanField())
def test_booleanfield_to_python(self):
self._test_to_python(models.BooleanField())
def test_nullbooleanfield_to_python(self):
self._test_to_python(models.NullBooleanField())
def test_charfield_textfield_max_length_passed_to_formfield(self):
"""
Test that CharField and TextField pass their max_length attributes to
form fields created using their .formfield() method (#22206).
"""
cf1 = models.CharField()
cf2 = models.CharField(max_length=1234)
self.assertIsNone(cf1.formfield().max_length)
self.assertEqual(1234, cf2.formfield().max_length)
tf1 = models.TextField()
tf2 = models.TextField(max_length=2345)
self.assertIsNone(tf1.formfield().max_length)
self.assertEqual(2345, tf2.formfield().max_length)
def test_booleanfield_choices_blank(self):
"""
Test that BooleanField with choices and defaults doesn't generate a
formfield with the blank option (#9640, #10549).
"""
choices = [(1, 'Si'), (2, 'No')]
f = models.BooleanField(choices=choices, default=1, null=False)
self.assertEqual(f.formfield().choices, choices)
def test_return_type(self):
b = BooleanModel()
b.bfield = True
b.save()
b2 = BooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.bfield, bool)
self.assertEqual(b2.bfield, True)
b3 = BooleanModel()
b3.bfield = False
b3.save()
b4 = BooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.bfield, bool)
self.assertEqual(b4.bfield, False)
b = NullBooleanModel()
b.nbfield = True
b.save()
b2 = NullBooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.nbfield, bool)
self.assertEqual(b2.nbfield, True)
b3 = NullBooleanModel()
b3.nbfield = False
b3.save()
b4 = NullBooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.nbfield, bool)
self.assertEqual(b4.nbfield, False)
# http://code.djangoproject.com/ticket/13293
# Verify that when an extra clause exists, the boolean
# conversions are applied with an offset
b5 = BooleanModel.objects.all().extra(
select={'string_col': 'string'})[0]
self.assertNotIsInstance(b5.pk, bool)
def test_select_related(self):
"""
Test type of boolean fields when retrieved via select_related() (MySQL,
#15040)
"""
bmt = BooleanModel.objects.create(bfield=True)
bmf = BooleanModel.objects.create(bfield=False)
nbmt = NullBooleanModel.objects.create(nbfield=True)
nbmf = NullBooleanModel.objects.create(nbfield=False)
m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
# Test select_related('fk_field_name')
ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
# verify types -- shouldn't be 0/1
self.assertIsInstance(ma.bf.bfield, bool)
self.assertIsInstance(ma.nbf.nbfield, bool)
# verify values
self.assertEqual(ma.bf.bfield, True)
self.assertEqual(ma.nbf.nbfield, True)
# Test select_related()
mb = FksToBooleans.objects.select_related().get(pk=m1.id)
mc = FksToBooleans.objects.select_related().get(pk=m2.id)
# verify types -- shouldn't be 0/1
self.assertIsInstance(mb.bf.bfield, bool)
self.assertIsInstance(mb.nbf.nbfield, bool)
self.assertIsInstance(mc.bf.bfield, bool)
self.assertIsInstance(mc.nbf.nbfield, bool)
# verify values
self.assertEqual(mb.bf.bfield, True)
self.assertEqual(mb.nbf.nbfield, True)
self.assertEqual(mc.bf.bfield, False)
self.assertEqual(mc.nbf.nbfield, False)
def test_null_default(self):
"""
Check that a BooleanField defaults to None -- which isn't
a valid value (#15124).
"""
# Patch the boolean field's default value. We give it a default
# value when defining the model to satisfy the check tests
# #20895.
boolean_field = BooleanModel._meta.get_field('bfield')
self.assertTrue(boolean_field.has_default())
old_default = boolean_field.default
try:
boolean_field.default = NOT_PROVIDED
# check patch was successful
self.assertFalse(boolean_field.has_default())
b = BooleanModel()
self.assertIsNone(b.bfield)
with transaction.atomic():
with self.assertRaises(IntegrityError):
b.save()
finally:
boolean_field.default = old_default
nb = NullBooleanModel()
self.assertIsNone(nb.nbfield)
nb.save() # no error
class ChoicesTests(test.TestCase):
def test_choices_and_field_display(self):
"""
Check that get_choices and get_flatchoices interact with
get_FIELD_display to return the expected values (#7913).
"""
self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value
self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value
self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
self.assertEqual(Whiz(c=None).get_c_display(), None) # Blank value
self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value
def test_iterator_choices(self):
"""
Check that get_choices works with Iterators (#23112).
"""
self.assertEqual(WhizIter(c=1).c, 1) # A nested value
self.assertEqual(WhizIter(c=9).c, 9) # Invalid value
self.assertEqual(WhizIter(c=None).c, None) # Blank value
self.assertEqual(WhizIter(c='').c, '') # Empty value
def test_empty_iterator_choices(self):
"""
Check that get_choices works with empty iterators (#23112).
"""
self.assertEqual(WhizIterEmpty(c="a").c, "a") # A nested value
self.assertEqual(WhizIterEmpty(c="b").c, "b") # Invalid value
self.assertEqual(WhizIterEmpty(c=None).c, None) # Blank value
self.assertEqual(WhizIterEmpty(c='').c, '') # Empty value
class SlugFieldTests(test.TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class ValidationTest(test.TestCase):
def test_charfield_raises_error_on_empty_string(self):
f = models.CharField()
self.assertRaises(ValidationError, f.clean, "", None)
def test_charfield_cleans_empty_string_when_blank_true(self):
f = models.CharField(blank=True)
self.assertEqual('', f.clean('', None))
def test_integerfield_cleans_valid_string(self):
f = models.IntegerField()
self.assertEqual(2, f.clean('2', None))
    def test_integerfield_raises_error_on_invalid_input(self):
f = models.IntegerField()
self.assertRaises(ValidationError, f.clean, "a", None)
def test_charfield_with_choices_cleans_valid_choice(self):
f = models.CharField(max_length=1,
choices=[('a', 'A'), ('b', 'B')])
self.assertEqual('a', f.clean('a', None))
def test_charfield_with_choices_raises_error_on_invalid_choice(self):
f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
self.assertRaises(ValidationError, f.clean, "not a", None)
def test_charfield_get_choices_with_blank_defined(self):
f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])
def test_charfield_get_choices_doesnt_evaluate_lazy_strings(self):
# Regression test for #23098
# Will raise ZeroDivisionError if lazy is evaluated
lazy_func = lazy(lambda x: 0 / 0, int)
f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))])
self.assertEqual(f.get_choices(True)[0], ('', '---------'))
def test_choices_validation_supports_named_groups(self):
f = models.IntegerField(
choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
self.assertEqual(10, f.clean(10, None))
def test_nullable_integerfield_raises_error_with_blank_false(self):
f = models.IntegerField(null=True, blank=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
f = models.IntegerField(null=True, blank=True)
self.assertEqual(None, f.clean(None, None))
def test_integerfield_raises_error_on_empty_input(self):
f = models.IntegerField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
self.assertRaises(ValidationError, f.clean, '', None)
def test_integerfield_validates_zero_against_choices(self):
f = models.IntegerField(choices=((1, 1),))
self.assertRaises(ValidationError, f.clean, '0', None)
def test_charfield_raises_error_on_empty_input(self):
f = models.CharField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_datefield_cleans_date(self):
f = models.DateField()
self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
def test_boolean_field_doesnt_accept_empty_input(self):
f = models.BooleanField()
self.assertRaises(ValidationError, f.clean, None, None)
class IntegerFieldTests(test.TestCase):
model = IntegerModel
documented_range = (-2147483648, 2147483647)
def test_documented_range(self):
"""
        Ensure that values within the documented safe range pass validation,
        and can be saved and retrieved without corruption.
"""
min_value, max_value = self.documented_range
instance = self.model(value=min_value)
instance.full_clean()
instance.save()
qs = self.model.objects.filter(value__lte=min_value)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, min_value)
instance = self.model(value=max_value)
instance.full_clean()
instance.save()
qs = self.model.objects.filter(value__gte=max_value)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, max_value)
def test_backend_range_validation(self):
"""
        Ensure that backend-specific ranges are enforced at the model
        validation level (refs #12030).
"""
field = self.model._meta.get_field('value')
internal_type = field.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
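        # integer_field_range() returns a (min, max) pair for the backend;
        # either bound may be None when the backend does not enforce a limit
        # for this column type, so each side is checked separately below.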
if min_value is not None:
instance = self.model(value=min_value - 1)
expected_message = validators.MinValueValidator.message % {
'limit_value': min_value
}
with self.assertRaisesMessage(ValidationError, expected_message):
instance.full_clean()
instance.value = min_value
instance.full_clean()
if max_value is not None:
instance = self.model(value=max_value + 1)
expected_message = validators.MaxValueValidator.message % {
'limit_value': max_value
}
with self.assertRaisesMessage(ValidationError, expected_message):
instance.full_clean()
instance.value = max_value
instance.full_clean()
def test_types(self):
instance = self.model(value=0)
self.assertIsInstance(instance.value, six.integer_types)
instance.save()
self.assertIsInstance(instance.value, six.integer_types)
instance = self.model.objects.get()
self.assertIsInstance(instance.value, six.integer_types)
def test_coercing(self):
self.model.objects.create(value='10')
instance = self.model.objects.get(value='10')
self.assertEqual(instance.value, 10)
class SmallIntegerFieldTests(IntegerFieldTests):
model = SmallIntegerModel
documented_range = (-32768, 32767)
class BigIntegerFieldTests(IntegerFieldTests):
model = BigIntegerModel
documented_range = (-9223372036854775808, 9223372036854775807)
class PositiveSmallIntegerFieldTests(IntegerFieldTests):
model = PositiveSmallIntegerModel
documented_range = (0, 32767)
class PositiveIntegerFieldTests(IntegerFieldTests):
model = PositiveIntegerModel
documented_range = (0, 2147483647)
class TypeCoercionTests(test.TestCase):
"""
    Test that database lookups can accept the wrong types and convert
    them with no error: especially on PostgreSQL 8.3+, which does not do
    automatic casting at the DB level. See #10015.
"""
def test_lookup_integer_in_charfield(self):
self.assertEqual(Post.objects.filter(title=9).count(), 0)
def test_lookup_integer_in_textfield(self):
self.assertEqual(Post.objects.filter(body=24).count(), 0)
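    # For example, filtering a CharField with the int 9 is prepared by the
    # field's get_prep_value(), which casts the parameter to text before it
    # reaches the database, so no server-side cast error can occur.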
class FileFieldTests(unittest.TestCase):
def test_clearable(self):
"""
Test that FileField.save_form_data will clear its instance attribute
value if passed False.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, False)
self.assertEqual(d.myfile, '')
def test_unchanged(self):
"""
Test that FileField.save_form_data considers None to mean "no change"
rather than "clear".
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, None)
self.assertEqual(d.myfile, 'something.txt')
def test_changed(self):
"""
Test that FileField.save_form_data, if passed a truthy value, updates
its instance attribute.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, 'else.txt')
self.assertEqual(d.myfile, 'else.txt')
def test_delete_when_file_unset(self):
"""
Calling delete on an unset FileField should not call the file deletion
process, but fail silently (#20660).
"""
d = Document()
try:
d.myfile.delete()
except OSError:
self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
binary_data = b'\x00\x46\xFE'
def test_set_and_retrieve(self):
data_set = (self.binary_data, six.memoryview(self.binary_data))
for bdata in data_set:
dm = DataModel(data=bdata)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Resave (=update)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Test default value
self.assertEqual(bytes(dm.short_data), b'\x08')
def test_max_length(self):
dm = DataModel(short_data=self.binary_data * 4)
self.assertRaises(ValidationError, dm.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
def test_genericipaddressfield_formfield_protocol(self):
"""
        Test that a GenericIPAddressField with a specified protocol generates
        a formfield that enforces the same protocol. See #20740.
"""
model_field = models.GenericIPAddressField(protocol='IPv4')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '::1')
model_field = models.GenericIPAddressField(protocol='IPv6')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '127.0.0.1')
def test_null_value(self):
"""
Null values should be resolved to None in Python (#24078).
"""
GenericIPAddress.objects.create()
o = GenericIPAddress.objects.get()
self.assertIsNone(o.ip)
def test_save_load(self):
instance = GenericIPAddress.objects.create(ip='::1')
loaded = GenericIPAddress.objects.get()
self.assertEqual(loaded.ip, instance.ip)
class PromiseTest(test.TestCase):
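    """
    Check that get_prep_value() resolves lazy/promise objects to the field's
    concrete Python type for every built-in field type.
    """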
def test_AutoField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
AutoField(primary_key=True).get_prep_value(lazy_func()),
int)
@unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
def test_BigIntegerField(self):
lazy_func = lazy(lambda: long(9999999999999999999), long)
self.assertIsInstance(
BigIntegerField().get_prep_value(lazy_func()),
long)
def test_BinaryField(self):
lazy_func = lazy(lambda: b'', bytes)
self.assertIsInstance(
BinaryField().get_prep_value(lazy_func()),
bytes)
def test_BooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
BooleanField().get_prep_value(lazy_func()),
bool)
def test_CharField(self):
lazy_func = lazy(lambda: '', six.text_type)
self.assertIsInstance(
CharField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
CharField().get_prep_value(lazy_func()),
six.text_type)
def test_CommaSeparatedIntegerField(self):
lazy_func = lazy(lambda: '1,2', six.text_type)
self.assertIsInstance(
CommaSeparatedIntegerField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
CommaSeparatedIntegerField().get_prep_value(lazy_func()),
six.text_type)
def test_DateField(self):
lazy_func = lazy(lambda: datetime.date.today(), datetime.date)
self.assertIsInstance(
DateField().get_prep_value(lazy_func()),
datetime.date)
def test_DateTimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now(), datetime.datetime)
self.assertIsInstance(
DateTimeField().get_prep_value(lazy_func()),
datetime.datetime)
def test_DecimalField(self):
lazy_func = lazy(lambda: Decimal('1.2'), Decimal)
self.assertIsInstance(
DecimalField().get_prep_value(lazy_func()),
Decimal)
def test_EmailField(self):
lazy_func = lazy(lambda: 'mailbox@domain.com', six.text_type)
self.assertIsInstance(
EmailField().get_prep_value(lazy_func()),
six.text_type)
def test_FileField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
FileField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
FileField().get_prep_value(lazy_func()),
six.text_type)
def test_FilePathField(self):
lazy_func = lazy(lambda: 'tests.py', six.text_type)
self.assertIsInstance(
FilePathField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
FilePathField().get_prep_value(lazy_func()),
six.text_type)
def test_FloatField(self):
lazy_func = lazy(lambda: 1.2, float)
self.assertIsInstance(
FloatField().get_prep_value(lazy_func()),
float)
def test_ImageField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
ImageField().get_prep_value(lazy_func()),
six.text_type)
def test_IntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
IntegerField().get_prep_value(lazy_func()),
int)
def test_IPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
self.assertIsInstance(
IPAddressField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
IPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_GenericIPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
self.assertIsInstance(
GenericIPAddressField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
GenericIPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_NullBooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
NullBooleanField().get_prep_value(lazy_func()),
bool)
def test_PositiveIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveIntegerField().get_prep_value(lazy_func()),
int)
def test_PositiveSmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveSmallIntegerField().get_prep_value(lazy_func()),
int)
def test_SlugField(self):
lazy_func = lazy(lambda: 'slug', six.text_type)
self.assertIsInstance(
SlugField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
SlugField().get_prep_value(lazy_func()),
six.text_type)
def test_SmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
SmallIntegerField().get_prep_value(lazy_func()),
int)
def test_TextField(self):
lazy_func = lazy(lambda: 'Abc', six.text_type)
self.assertIsInstance(
TextField().get_prep_value(lazy_func()),
six.text_type)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(
TextField().get_prep_value(lazy_func()),
six.text_type)
def test_TimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now().time(), datetime.time)
self.assertIsInstance(
TimeField().get_prep_value(lazy_func()),
datetime.time)
def test_URLField(self):
lazy_func = lazy(lambda: 'http://domain.com', six.text_type)
self.assertIsInstance(
URLField().get_prep_value(lazy_func()),
six.text_type)
class CustomFieldTests(unittest.TestCase):
def test_14786(self):
"""
Regression test for #14786 -- Test that field values are not prepared
twice in get_db_prep_lookup().
"""
class NoopField(models.TextField):
def __init__(self, *args, **kwargs):
self.prep_value_count = 0
super(NoopField, self).__init__(*args, **kwargs)
def get_prep_value(self, value):
self.prep_value_count += 1
return super(NoopField, self).get_prep_value(value)
field = NoopField()
field.get_db_prep_lookup(
'exact', 'TEST', connection=connection, prepared=False
)
self.assertEqual(field.prep_value_count, 1)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A dictionary-like object with SQLite backend
============================================
Python dictionaries are very efficient objects for fast data access. But when
data is too large to fit in memory, you want to keep data on disk but available
for fast random access.
Here's a dictionary-like object which uses a SQLite database backend for random
access to the dictionary's key-value pairs.
Use it like a standard dictionary, except that you give it a name
(e.g. 'tempdict'):
import dbdict
d = dbdict.open('tempdict')
d['foo'] = 'bar'
# At this point, the key value pair foo and bar is written to disk.
d['John'] = 'doh!'
d['pi'] = 3.999
d['pi'] = 3.14159 # replaces the previous version of pi
d['pi'] += 1
d.close() # close the database file
You can access your dictionary later on:
d = dbdict.open('tempdict')
del d['foo']
if 'John' in d:
    print('John is in there!')
print(d.items())
For efficient inserting/updating a list of key-value pairs, use the update()
method:
d.update([('f1', 'test'), ('f2', 'example')])
d.update({'f1':'test', 'f2':'example'})
d.update(f1='test', f2='example')
Use the get() method to efficiently fetch a number of items as specified by a
list of keys:
d.get(['f1', 'f2'])
Use the remove() method to efficiently remove a number of items as specified
by a list of keys:
d.remove(['f1', 'f2'])
There is also an alternative (fully equivalent) way to instantiate a dbdict
object by the call:
from dbdict import dbdict
d = dbdict('tempdict')
Make a memory-based (i.e. not file-based) SQLite database by the call:
dbdict(':memory:')
Other special functionality as compared to dict:
d.clear() Clear all items (and free up unused disk space)
d.reindex() Delete and recreate the key index
d.vacuum() Free up unused disk space
d.con Access to the underlying SQLite connection (for advanced use)
Some things to note:
- You can't directly store Python objects. Only numbers, strings and binary
data. Objects need to be serialized first in order to be stored. Use e.g.
pickle, json (or simplejson) or yaml for that purpose.
- Explicit database connection closing using the close() method is not
required. Changes are written on key-value assignment to the dictionary.
The file stays open until the object is destroyed or the close() method is
called.
Original code by Jacob Sondergaard
hg clone ssh://hg@bitbucket.org/nephics/dbdict
Modified to pickle automatically
'''
__version__ = '1.3.1'
import sqlite3
try:
from collections.abc import MutableMapping
except ImportError:
    # collections.abc is Python 3.3+; fall back for older interpreters
from collections import MutableMapping
from os import path
try:
import cPickle as pickle
except ImportError:
import pickle
class DbDict(MutableMapping):
''' DbDict, a dictionary-like object with SQLite back-end '''
def __init__(self, filename, picklevalues=False):
self.picklevalues = picklevalues
if filename == ':memory:' or not path.isfile(filename):
self.con = sqlite3.connect(filename)
self._create_table()
else:
self.con = sqlite3.connect(filename)
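            # An existing file is assumed to already contain the 'data'
            # table; only ':memory:' or a brand-new file gets the schema.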
# _____________________________________________________________________________________
# Add automatic pickling and unpickling
def pickle_loads(self, value):
"""
        Unpickle the value with pickle.loads if picklevalues is set.
"""
if self.picklevalues:
value = pickle.loads(bytes(value))
return value
def pickle_dumps(self, value):
"""
        Pickle the value with pickle.dumps if picklevalues is set.
"""
if self.picklevalues:
#
            # Protocol = 0 generates printable ASCII strings and is less
            # efficient. Protocol = -1 selects the highest available protocol,
            # which produces binary (8 bit) data that needs to be handled as
            # blobs by sqlite3.
#
# Unfortunately, sqlite3 only understands memoryview objects in python3 and
# buffer objects in python2
#
# http://bugs.python.org/issue7723 suggests there is no portable
# python2/3 way to write blobs to Sqlite
#
# However, sqlite3.Binary seems to do the trick
#
# Otherwise, to use protocol -1, we need to use the following code:
#
# if sys.hexversion >= 0x03000000:
# value = memoryview(pickle.dumps(value, protocol = -1))
# else:
# value = buffer(pickle.dumps(value, protocol = -1))
#
value = sqlite3.Binary(pickle.dumps(value, protocol=-1))
return value
# _____________________________________________________________________________________
def _create_table(self):
        '''Creates an SQLite table 'data' with the columns 'key' and 'value'
        where column 'key' is the table's primary key.
        Note: SQLite automatically creates a unique index for the 'key' column.
        The index may get fragmented with lots of insertions/updates/deletions,
        therefore it is recommended to use reindex() when searches become
        gradually slower.
        '''
self.con.execute('create table data (key PRIMARY KEY,value)')
self.con.commit()
def __getitem__(self, key):
'''Return value for specified key'''
row = self.con.execute('select value from data where key=?',
(key, )).fetchone()
if not row:
raise KeyError(key)
return self.pickle_loads(row[0])
def __setitem__(self, key, value):
'''Set value at specified key'''
value = self.pickle_dumps(value)
self.con.execute('insert or replace into data (key, value) '
'values (?,?)', (key, value))
self.con.commit()
def __delitem__(self, key):
'''Delete item (key-value pair) at specified key'''
if key in self:
self.con.execute('delete from data where key=?', (key, ))
self.con.commit()
else:
            raise KeyError(key)
def __iter__(self):
'''Return iterator over keys'''
return self._iterquery(self.con.execute('select key from data'),
single_value=True)
def __len__(self):
'''Return the number of stored items'''
cursor = self.con.execute('select count() from data')
return cursor.fetchone()[0]
@staticmethod
def _iterquery(cursor, single_value=False):
'''Return iterator over query result with pre-fetching of items in
set sizes determined by SQLite backend'''
rows = True
while rows:
rows = cursor.fetchmany()
for row in rows:
if single_value:
yield row[0]
else:
yield row
def iterkeys(self):
'''Return iterator of all keys in the database'''
return self.__iter__()
def itervalues(self):
'''Return iterator of all values in the database'''
it = self._iterquery(self.con.execute('select value from data'),
single_value=True)
return iter(self.pickle_loads(x) for x in it)
def iteritems(self):
'''Return iterator of all key-value pairs in the database'''
it = self._iterquery(self.con.execute('select key, value from data'))
return iter((x[0], self.pickle_loads(x[1])) for x in it)
def keys(self):
'''Return all keys in the database'''
return [row[0]
for row in self.con.execute('select key from data').fetchall()]
def items(self):
'''Return all key-value pairs in the database'''
values = self.con.execute('select key, value from data').fetchall()
return [(x[0], self.pickle_loads(x[1])) for x in values]
def clear(self):
        '''Clear the database of all key-value pairs, and free up unused
        disk space.
'''
self.con.execute('drop table data')
self.vacuum()
self._create_table()
def _update(self, items):
'''Perform the SQL query of updating items (list of key-value pairs)'''
items = [(k, self.pickle_dumps(v)) for k, v in items]
self.con.executemany('insert or replace into data (key, value)'
' values (?, ?)', items)
self.con.commit()
def update(self, items=None, **kwds):
        '''Updates key-value pairs in the database.
        Items (key-value pairs) may be given as keyword assignments, or via
        the parameter 'items' as a dict or a list/tuple of pairs.
'''
if isinstance(items, dict):
self._update(list(items.items()))
elif isinstance(items, list) or isinstance(items, tuple):
self._update(items)
elif items:
# probably a generator
try:
self._update(list(items))
except TypeError:
raise ValueError(
'Could not interpret value of parameter `items` as a dict, list/tuple or iterator.')
if kwds:
self._update(list(kwds.items()))
def popitem(self):
        '''Pop a key-value pair from the database. Returns the next key-value
        pair, which is then removed from the database. Note: unlike
        dict.popitem(), raises StopIteration when the database is empty.'''
res = self.con.execute('select key, value from data').fetchone()
if res:
key, value = res
else:
raise StopIteration
del self[key]
value = self.pickle_loads(value)
return key, value
def close(self):
'''Close database connection'''
self.con.close()
def vacuum(self):
'''Free unused disk space from the database file.
        The operation has no effect if the database is in memory.
Note: The operation can take some time to run (around a half second per
megabyte on the Linux box where SQLite is developed) and it can use up
to twice as much temporary disk space as the original file while it is
running.
'''
self.con.execute('vacuum')
self.con.commit()
def get(self, keys):
'''Get item(s) for the specified key or list of keys.
Items will be returned only for those keys that are defined. The
function will pass silently (i.e. not raise an error) if one or more of
the keys is not defined.'''
        try:
            keys = tuple(keys)
        except TypeError:
            # probably a single key (i.e. not an iterable)
            keys = (keys,)
        # Use '?' placeholders rather than interpolating the tuple's repr,
        # which produces invalid SQL for one-element tuples (trailing comma)
        # and breaks on keys containing quotes.
        qmarks = ','.join('?' * len(keys))
        values = self.con.execute('select key, value from data where key in '
                                  '(%s)' % qmarks, keys).fetchall()
        return [(k, self.pickle_loads(v)) for k, v in values]
def remove(self, keys):
'''Removes item(s) for the specified key or list of keys.
The function will pass silently (i.e. not raise an error) if one or more
of the keys is not defined.'''
        try:
            keys = tuple(keys)
        except TypeError:
            # probably a single key (i.e. not an iterable)
            keys = (keys,)
        # Same placeholder technique as in get() to build valid, safe SQL.
        qmarks = ','.join('?' * len(keys))
        self.con.execute('delete from data where key in (%s)' % qmarks, keys)
        self.con.commit()
def reindex(self):
        '''Delete and recreate the key index.
        Use this function if key lookup time becomes slower. This may happen
        as the index becomes fragmented after many
        insertions/updates/deletions.'''
self.con.execute('reindex sqlite_autoindex_data_1')
self.con.commit()
def dbdict(filename, picklevalues=False):
'''Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. If filename is ':memory:' the database is created in
memory.
See the module's __doc__ string for an overview of the interface.
'''
return DbDict(filename, picklevalues)
def open(filename, picklevalues=False):
'''Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. If filename is ':memory:' the database is created in
memory.
See the module's __doc__ string for an overview of the interface.
'''
return DbDict(filename, picklevalues)
if __name__ == '__main__':
# Perform some tests
d = open(':memory:')
d[1] = 'test'
assert d[1] == 'test'
d[1] += '1'
assert d[1] == 'test1'
    try:
        d[2]
        raise AssertionError('Lookup did not fail on non-existent key')
    except KeyError:
        pass
# test len
assert len(d) == 1, 'Failed to count number of items'
# test clear
d.clear()
assert len(d) == 0, 'Database not cleared as expected'
# test with list of items as (key, value) pairs
range10 = list(range(10))
items = [(i, i) for i in range10]
d.update(items)
assert list(d.items()) == items, 'Failed to update using list'
d.clear()
# test with tuple of items as (key, value) pairs
d.update(tuple(items))
assert list(d.items()) == items, 'Failed to update using tuple'
d.clear()
# test with dict
d.update(dict(items))
assert list(d.items()) == items
d.clear()
# test with generator
d.update((i, i) for i in range10)
assert list(d.items()) == items, 'Failed to update using generator'
# check the std. dict methods
assert list(d.keys()) == range10
assert list(d.values()) == range10
assert list(d.items()) == items
#assert list(d.iterkeys()) == range10
#assert list(d.itervalues()) == range10
#assert list(d.iteritems()) == items
# test get
assert d.get(list(range(8, 12))) == items[-2:]
# test remove
d.remove(list(range(8, 10)))
assert len(d.get(list(range(8, 10)))
) == 0, 'Items not removed successfully'
d.clear()
# test with key,value pairs as parameters
d.update(foo=1, bar=2)
assert list(d.items()) == [('foo', 1), ('bar', 2)], \
'keyword assignment not successful'
# test popitem
while True:
try:
value = d.popitem()
assert value in [('foo', 1), ('bar', 2)], \
'Popitem not in expected result set'
except StopIteration:
break
# test setdefault
d.setdefault(10, 10)
assert d[10] == 10, 'Failed to set default value'
    # test reindex call (no assert)
    d.reindex()
# test vacuum call (no assert, and call has no effect on an in memory db)
d.vacuum()
    # test close call (writing to the closed database must raise)
d.close()
    # try writing to the closed database
try:
d[1] = 1
raise AssertionError('Database not closed')
except sqlite3.ProgrammingError:
pass
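    # A short extra demo (an addition, assuming the same in-memory usage as
    # above): with picklevalues=True, arbitrary picklable objects round-trip,
    # since values are pickled to BLOBs on write and unpickled on read.
    p = dbdict(':memory:', picklevalues=True)
    p['config'] = {'retries': 3, 'hosts': ['a', 'b']}
    assert p['config'] == {'retries': 3, 'hosts': ['a', 'b']}, \
        'pickled object did not round-trip'
    p.close()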
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from workspace_tools.paths import *
from workspace_tools.data.support import *
TEST_CMSIS_LIB = join(TEST_DIR, "cmsis", "lib")
TEST_MBED_LIB = join(TEST_DIR, "mbed", "env")
PERIPHERALS = join(TEST_DIR, "peripherals")
BENCHMARKS_DIR = join(TEST_DIR, "benchmarks")
SD = join(TEST_DIR, "sd")
TMP102 = join(PERIPHERALS, 'TMP102')
AT30TSE75X = join(PERIPHERALS, 'AT30TSE75X')
"""
Wiring:
* Ground:
* LPC1*: p1
* KL25Z: GND
* Vout
* LPC1*: p40
* KL25Z: P3V3
* TMP102 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTC9, SCL=PTC8)
* MAXWSNENV: (SDA=TP6, SCL=TP5)
* digital_loop (Digital(In|Out|InOut), InterruptIn):
* Arduino headers: (D0 <-> D7)
* LPC1549: (D2 <-> D7)
* LPC1*: (p5 <-> p25 )
* KL25Z: (PTA5<-> PTC6)
* NUCLEO_F103RB: (PC_6 <-> PB_8)
* MAXWSNENV: (TP3 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7)
* VK_RZ_A1H: (P3_2 <-> P5_6)
* port_loop (Port(In|Out|InOut)):
* Arduino headers: (D0 <-> D7), (D1 <-> D6)
* LPC1*: (p5 <-> p25), (p6 <-> p26)
* KL25Z: (PTA5 <-> PTC6), (PTA4 <-> PTC5)
* NUCLEO_F103RB: (PC_6 <-> PB_8), (PC_5 <-> PB_9)
* MAXWSNENV: (TP1 <-> TP3), (TP2 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7), (P1_1 <-> P4_6)
* VK_RZ_A1H: (P3_2 <-> P5_6), (P3_7 <-> P5_1)
* analog_loop (AnalogIn, AnalogOut):
* Arduino headers: (A0 <-> A5)
* LPC1549: (A0 <-> D12)
* LPC1*: (p17 <-> p18 )
* KL25Z: (PTE30 <-> PTC2)
* analog_pot (AnalogIn):
* Arduino headers: (A0, A1)
* VK_RZ_A1H: (AN0, AN1)
* SD (SPI):
* LPC1*: (mosi=p11 , miso=p12 , sclk=p13 , cs=p14 )
* KL25Z: (mosi=PTD2, miso=PTD3, sclk=PTD1, cs=PTD0)
* MMA7660 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* i2c_loop:
* LPC1768: (p28 <-> p9), (p27 <-> p10)
* i2c_eeprom:
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTE0, SCL=PTE1)
* VK_RZ_A1H:(SDA=P1_1, SCL=P1_0)
* can_transceiver:
* LPC1768: (RX=p9, TX=p10)
* LPC1549: (RX=D9, TX=D8)
* LPC4088: (RX=p9, TX=p10)
* VK_RZ_A1H:(RX=P5_9, TX=P5_10)
"""
TESTS = [
# Automated MBED tests
{
"id": "MBED_A1", "description": "Basic",
"source_dir": join(TEST_DIR, "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "mbed", "file"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_A3", "description": "C++ STL",
"source_dir": join(TEST_DIR, "mbed", "stl"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_A4", "description": "I2C TMP102",
"source_dir": join(TEST_DIR, "mbed", "i2c_TMP102"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, TMP102],
"automated": True,
"peripherals": ["TMP102"]
},
{
"id": "MBED_AT30TSE75X", "description": "I2C Temperature Sensor / EEPROM",
"source_dir": join(TEST_DIR, "mbed", "i2c_at30tse75x"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, AT30TSE75X],
"automated": False,
"peripherals": ["AT30TSE75X"]
},
{
"id": "MBED_A5", "description": "DigitalIn DigitalOut",
"source_dir": join(TEST_DIR, "mbed", "digitalin_digitalout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A6", "description": "DigitalInOut",
"source_dir": join(TEST_DIR, "mbed", "digitalinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A7", "description": "InterruptIn",
"source_dir": join(TEST_DIR, "mbed", "interruptin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A8", "description": "Analog",
"source_dir": join(TEST_DIR, "mbed", "analog"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["analog_loop"],
"mcu": ["LPC1768", "LPC2368", "LPC2460", "KL25Z", "K64F", "K22F", "LPC4088", "LPC1549",
"NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_F302R8", "NUCLEO_F303K8", "NUCLEO_F303RE",
"NUCLEO_F334R8", "NUCLEO_L053R8", "NUCLEO_L073RZ", "NUCLEO_L152RE",
"NUCLEO_F410RB", "NUCLEO_F411RE", "NUCLEO_F446RE", "DISCO_F407VG", "DISCO_F746NG",
"ARCH_MAX", "MAX32600MBED", "MOTE_L152RC", "B96B_F446VE"]
},
{
"id": "MBED_A9", "description": "Serial Echo at 115200",
"source_dir": join(TEST_DIR, "mbed", "echo"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "echo"
},
{
"id": "MBED_A10", "description": "PortOut PortIn",
"source_dir": join(TEST_DIR, "mbed", "portout_portin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A11", "description": "PortInOut",
"source_dir": join(TEST_DIR, "mbed", "portinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A12", "description": "SD File System",
"source_dir": join(TEST_DIR, "mbed", "sd"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "MBED_A13", "description": "I2C MMA7660 accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA7660"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA7660')],
"automated": True,
"peripherals": ["MMA7660"]
},
{
"id": "MBED_A14", "description": "I2C Master",
"source_dir": join(TEST_DIR, "mbed", "i2c_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A15", "description": "I2C Slave",
"source_dir": join(TEST_DIR, "mbed", "i2c_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A16", "description": "SPI Master",
"source_dir": join(TEST_DIR, "mbed", "spi_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A17", "description": "SPI Slave",
"source_dir": join(TEST_DIR, "mbed", "spi_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A18", "description": "Interrupt vector relocation",
"source_dir": join(TEST_DIR, "mbed", "vtor_reloc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768"],
"automated": True,
},
{
"id": "MBED_A19", "description": "I2C EEPROM read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 15,
},
{
"id": "MBED_A20", "description": "I2C master/slave test",
"source_dir": join(TEST_DIR, "mbed", "i2c_master_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768", "RZ_A1H"],
"peripherals": ["i2c_loop"]
},
{
"id": "MBED_A21", "description": "Call function before main (mbed_main)",
"source_dir": join(TEST_DIR, "mbed", "call_before_main"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A22", "description": "SPIFI for LPC4088 (test 1)",
"source_dir": join(TEST_DIR, "mbed", "spifi1"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A23", "description": "SPIFI for LPC4088 (test 2)",
"source_dir": join(TEST_DIR, "mbed", "spifi2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A24", "description": "Serial echo with RTS/CTS flow control",
"source_dir": join(TEST_DIR, "mbed", "echo_flow_control"),
"dependencies": [MBED_LIBRARIES],
"automated": "True",
"host_test": "echo_flow_control",
"mcu": ["LPC1768"],
"peripherals": ["extra_serial"]
},
{
"id": "MBED_A25", "description": "I2C EEPROM line read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom_line"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A26", "description": "AnalogIn potentiometer test",
"source_dir": join(TEST_DIR, "mbed", "analog_pot"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["analog_pot"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A27", "description": "CAN loopback test",
"source_dir": join(TEST_DIR, "mbed", "can_loopback"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 20,
"peripherals": ["can_transceiver"],
"mcu": ["LPC1549", "LPC1768","B96B_F446VE", "VK_RZ_A1H"],
},
{
"id": "MBED_BLINKY", "description": "Blinky",
"source_dir": join(TEST_DIR, "mbed", "blinky"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_BUS", "description": "Blinky BUS",
"source_dir": join(TEST_DIR, "mbed", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
"duration": 15,
},
{
"id": "MBED_BUSOUT", "description": "BusOut",
"source_dir": join(TEST_DIR, "mbed", "bus_out"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 15,
},
# Size benchmarks
{
"id": "BENCHMARK_1", "description": "Size (c environment)",
"source_dir": join(BENCHMARKS_DIR, "cenv"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_2", "description": "Size (float math)",
"source_dir": join(BENCHMARKS_DIR, "float_math"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_3", "description": "Size (printf)",
"source_dir": join(BENCHMARKS_DIR, "printf"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_4", "description": "Size (mbed libs)",
"source_dir": join(BENCHMARKS_DIR, "mbed"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_5", "description": "Size (all)",
"source_dir": join(BENCHMARKS_DIR, "all"),
"dependencies": [MBED_LIBRARIES]
},
# performance related tests
{
"id": "PERF_1", "description": "SD Stdio R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_2", "description": "SD FileHandle R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fhandle"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_3", "description": "SD FatFS R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fatfs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
# Not automated MBED tests
{
"id": "MBED_1", "description": "I2C SRF08",
"source_dir": join(TEST_DIR, "mbed", "i2c_SRF08"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'SRF08')],
"peripherals": ["SRF08"]
},
{
"id": "MBED_2", "description": "stdio",
"source_dir": join(TEST_DIR, "mbed", "stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
#"host_test": "stdio_auto"
},
{
"id": "MBED_3", "description": "PortOut",
"source_dir": join(TEST_DIR, "mbed", "portout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_4", "description": "Sleep",
"source_dir": join(TEST_DIR, "mbed", "sleep"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 30,
"mcu": ["LPC1768", "LPC11U24", "LPC4088","LPC4088_DM","NRF51822", "LPC11U68"]
},
{
"id": "MBED_5", "description": "PWM",
"source_dir": join(TEST_DIR, "mbed", "pwm"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB]
},
{
"id": "MBED_6", "description": "SW Reset",
"source_dir": join(TEST_DIR, "mbed", "reset"),
"dependencies": [MBED_LIBRARIES],
"duration": 15
},
{
"id": "MBED_7", "description": "stdio benchmark",
"source_dir": join(TEST_DIR, "mbed", "stdio_benchmark"),
"dependencies": [MBED_LIBRARIES],
"duration": 40
},
{
"id": "MBED_8", "description": "SPI",
"source_dir": join(TEST_DIR, "mbed", "spi"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_9", "description": "Sleep Timeout",
"source_dir": join(TEST_DIR, "mbed", "sleep_timeout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_10", "description": "Hello World",
"source_dir": join(TEST_DIR, "mbed", "hello"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "hello_auto",
},
{
"id": "MBED_11", "description": "Ticker Int",
"source_dir": join(TEST_DIR, "mbed", "ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
"duration": 20,
},
{
"id": "MBED_12", "description": "C++",
"source_dir": join(TEST_DIR, "mbed", "cpp"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_13", "description": "Heap & Stack",
"source_dir": join(TEST_DIR, "mbed", "heap_and_stack"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_14", "description": "Serial Interrupt",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_15", "description": "RPC",
"source_dir": join(TEST_DIR, "mbed", "rpc"),
"dependencies": [MBED_LIBRARIES, join(LIB_DIR, "rpc"), TEST_MBED_LIB],
"automated": False,
"mcu": ["LPC1768"]
},
{
"id": "MBED_16", "description": "RTC",
"source_dir": join(TEST_DIR, "mbed", "rtc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"exclude_mcu": ["NRF51822", "NRF51822_BOOT", "NRF51822_OTA", "NRF51822_Y5_MBUG",
"NRF51_DK", "NRF51_DK_BOOT", "NRF51_DK_OTA",
"NRF51_MICROBIT", "NRF51_MICROBIT_B", "NRF51_MICROBIT_BOOT",
"NRF51_MICROBIT_B_BOOT", "NRF51_MICROBIT_B_OTA", "NRF51_MICROBIT_OTA",
"HRM1017", "HRM1017_BOOT", "HRM1701_OTA",
"TY51822R3", "TY51822R3_BOOT", "TY51822R3_OTA",
"NRF15_DONGLE", "NRF15_DONGLE_BOOT", "NRF15_DONGLE_OTA",
"ARCH_BLE", "ARCH_BLE_BOOT", "ARCH_BLE_OTA",
"ARCH_LINK", "ARCH_LINK_BOOT", "ARCH_LINK_OTA",
"RBLAB_BLENANO", "RBLAB_BLENANO_BOOT", "RBLAB_BLENANO_OTA",
"RBLAB_NRF51822", "RBLAB_NRF51822_BOOT", "RBLAB_NRF51822_OTA",
"SEEED_TINY_BLE", "SEEED_TINY_BLE_BOOT", "SEEED_TINY_BLE_OTA",
"WALLBOT_BLE", "WALLBOT_BLE_BOOT", "WALLBOT_BLE_OTA",
"DELTA_DFCM_NNN40", "DELTA_DFCM_NNN40_BOOT", "DELTA_DFCM_NNN40_OTA",
"LPC1114"],
#"host_test": "rtc_auto",
"duration": 15
},
{
"id": "MBED_17", "description": "Serial Interrupt 2",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_18", "description": "Local FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_19", "description": "SD FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir_sd"),
"dependencies": [MBED_LIBRARIES, FS_LIBRARY],
"peripherals": ["SD"]
},
{
"id": "MBED_20", "description": "InterruptIn 2",
"source_dir": join(TEST_DIR, "mbed", "interruptin_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_21", "description": "freopen Stream",
"source_dir": join(TEST_DIR, "mbed", "freopen"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_22", "description": "Semihost",
"source_dir": join(TEST_DIR, "mbed", "semihost"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_23", "description": "Ticker Int us",
"source_dir": join(TEST_DIR, "mbed", "ticker_2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_24", "description": "Timeout Int us",
"source_dir": join(TEST_DIR, "mbed", "timeout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_25", "description": "Time us",
"source_dir": join(TEST_DIR, "mbed", "time_us"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_26", "description": "Integer constant division",
"source_dir": join(TEST_DIR, "mbed", "div"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_27", "description": "SPI ADXL345",
"source_dir": join(TEST_DIR, "mbed", "spi_ADXL345"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'ADXL345')],
"peripherals": ["ADXL345"]
},
{
"id": "MBED_28", "description": "Interrupt chaining (InterruptManager)",
"source_dir": join(TEST_DIR, "mbed", "interrupt_chaining"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_29", "description": "CAN network test",
"source_dir": join(TEST_DIR, "mbed", "can"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H", "B96B_F446VE"]
},
{
"id": "MBED_30", "description": "CAN network test using interrupts",
"source_dir": join(TEST_DIR, "mbed", "can_interrupt"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H", "B96B_F446VE"]
},
{
"id": "MBED_31", "description": "PWM LED test",
"source_dir": join(TEST_DIR, "mbed", "pwm_led"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_32", "description": "Pin toggling",
"source_dir": join(TEST_DIR, "mbed", "pin_toggling"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_33", "description": "C string operations",
"source_dir": join(TEST_DIR, "mbed", "cstring"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 10,
"automated": False,
},
{
"id": "MBED_34", "description": "Ticker Two callbacks",
"source_dir": join(TEST_DIR, "mbed", "ticker_3"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_35", "description": "SPI C12832 display",
"source_dir": join(TEST_DIR, "mbed", "spi_C12832"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'C12832')],
"peripherals": ["C12832"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_36", "description": "WFI correct behavior",
"source_dir": join(TEST_DIR, "mbed", "wfi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False
},
{
"id": "MBED_37", "description": "Serial NC RX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_rx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_38", "description": "Serial NC TX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_tx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
# CMSIS RTOS tests
{
"id": "CMSIS_RTOS_1", "description": "Basic",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_2", "description": "Mutex",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_3", "description": "Semaphore",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_4", "description": "Signals",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_5", "description": "Queue",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_6", "description": "Mail",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_8", "description": "ISR",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
# mbed RTOS tests
{
"id": "RTOS_1", "description": "Basic thread",
"source_dir": join(TEST_DIR, "rtos", "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_2", "description": "Mutex resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_3", "description": "Semaphore resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_4", "description": "Signals messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_5", "description": "Queue messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_6", "description": "Mail messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "mbed", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_8", "description": "ISR (Queue)",
"source_dir": join(TEST_DIR, "rtos", "mbed", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "MOTE_L152RC", "B96B_F446VE"],
},
{
"id": "RTOS_9", "description": "SD File write-read",
"source_dir": join(TEST_DIR, "rtos", "mbed", "file"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"],
"mcu": ["LPC1768", "LPC11U24", "LPC812", "KL25Z",
"KL05Z", "K64F", "KL46Z", "RZ_A1H",
"DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F401RE", "NUCLEO_F410RB", "DISCO_F469NI"],
},
# Networking Tests
{
"id": "NET_1", "description": "TCP client hello world",
"source_dir": join(TEST_DIR, "net", "helloworld", "tcpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_2", "description": "NIST Internet Time Service",
"source_dir": join(TEST_DIR, "net", "helloworld", "udpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_3", "description": "TCP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "tcpecho_server_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_4", "description": "TCP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_5", "description": "UDP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "udp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_server_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_6", "description": "UDP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "udp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_7", "description": "HTTP client hello world",
"source_dir": join(TEST_DIR, "net", "protocols", "HTTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
"peripherals": ["ethernet"],
},
{
"id": "NET_8", "description": "NTP client",
"source_dir": join(TEST_DIR, "net", "protocols", "NTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_9", "description": "Multicast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_10", "description": "Multicast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_11", "description": "Broadcast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_12", "description": "Broadcast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_13", "description": "TCP client echo loop",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client_loop"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_14", "description": "UDP PHY/Data link layer",
"source_dir": join(TEST_DIR, "net", "echo", "udp_link_layer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"automated": False,
"duration": 20,
"host_test": "udp_link_layer_auto",
"peripherals": ["ethernet"],
},
# u-blox tests
{
"id": "UB_1", "description": "u-blox USB modem: HTTP client",
"source_dir": [join(TEST_DIR, "net", "cellular", "http", "ubloxusb"), join(TEST_DIR, "net", "cellular", "http", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "UB_2", "description": "u-blox USB modem: SMS test",
"source_dir": [join(TEST_DIR, "net", "cellular", "sms", "ubloxusb"), join(TEST_DIR, "net", "cellular", "sms", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
# USB Tests
{
"id": "USB_1", "description": "Mouse",
"source_dir": join(TEST_DIR, "usb", "device", "basic"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_2", "description": "Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_3", "description": "Mouse_Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_4", "description": "Serial Port",
"source_dir": join(TEST_DIR, "usb", "device", "serial"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "USB_5", "description": "Generic HID",
"source_dir": join(TEST_DIR, "usb", "device", "raw_hid"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_6", "description": "MIDI",
"source_dir": join(TEST_DIR, "usb", "device", "midi"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_7", "description": "AUDIO",
"source_dir": join(TEST_DIR, "usb", "device", "audio"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
# CMSIS DSP
{
"id": "CMSIS_DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "cmsis", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# mbed DSP
{
"id": "DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "mbed", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# KL25Z
{
"id": "KL25Z_1", "description": "LPTMR",
"source_dir": join(TEST_DIR, "KL25Z", "lptmr"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_2", "description": "PIT",
"source_dir": join(TEST_DIR, "KL25Z", "pit"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_3", "description": "TSI Touch Sensor",
"source_dir": join(TEST_DIR, "mbed", "tsi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'TSI')],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_4", "description": "RTC",
"source_dir": join(TEST_DIR, "KL25Z", "rtc"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_5", "description": "MMA8451Q accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA8451Q"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA8451Q')],
"mcu": ["KL25Z", "KL05Z", "KL46Z", "K20D50M"],
"automated": True,
"duration": 15,
},
# Examples
{
"id": "EXAMPLE_1", "description": "/dev/null",
"source_dir": join(TEST_DIR, "mbed", "dev_null"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "dev_null_auto",
},
{
"id": "EXAMPLE_2", "description": "FS + RTOS",
"source_dir": join(TEST_DIR, "mbed", "fs"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
},
    # The CPPUTEST library provides a unit-testing framework.
    #
    # To write TESTs and TEST_GROUPs, add CPPUTEST_LIBRARY to 'dependencies'.
    #
    # This also pulls in:
    # 1. a test runner - a main function that calls CommandLineTestRunner::RunAllTests(ac, av)
    # 2. a Serial console object that prints test results on the serial port console
    #
    # Unit testing with the cpputest library
{
"id": "UT_1", "description": "Basic",
"source_dir": join(TEST_DIR, "utest", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "utest", "semihost_fs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "UT_3", "description": "General tests",
"source_dir": join(TEST_DIR, "utest", "general"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_BUSIO", "description": "BusIn BusOut",
"source_dir": join(TEST_DIR, "utest", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_I2C_EEPROM_ASYNCH", "description": "I2C Asynch eeprom",
"source_dir": join(TEST_DIR, "utest", "i2c_eeprom_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SERIAL_ASYNCH", "description": "Asynch serial test (req 2 serial peripherals)",
"source_dir": join(TEST_DIR, "utest", "serial_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SPI_ASYNCH", "description": "Asynch spi test",
"source_dir": join(TEST_DIR, "utest", "spi_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_LP_TICKER", "description": "Low power ticker test",
"source_dir": join(TEST_DIR, "utest", "lp_ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
# Tests used for target information purposes
{
"id": "DTCT_1", "description": "Simple detect test",
"source_dir": join(TEST_DIR, "mbed", "detect"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "detect_auto",
},
]
# Group tests with the same goals into categories
GROUPS = {
"core": ["MBED_A1", "MBED_A2", "MBED_A3", "MBED_A18"],
"digital_io": ["MBED_A5", "MBED_A6", "MBED_A7", "MBED_A10", "MBED_A11"],
"analog_io": ["MBED_A8"],
"i2c": ["MBED_A19", "MBED_A20"],
"spi": ["MBED_A12"],
}
GROUPS["rtos"] = [test["id"] for test in TESTS if test["id"].startswith("RTOS_")]
GROUPS["net"] = [test["id"] for test in TESTS if test["id"].startswith("NET_")]
GROUPS["automated"] = [test["id"] for test in TESTS if test.get("automated", False)]
# Look for 'TEST_GROUPS' in private_settings.py and update the GROUPS dictionary
# with the information in test_groups if found
try:
    from workspace_tools.private_settings import TEST_GROUPS
except ImportError:
    TEST_GROUPS = {}
GROUPS.update(TEST_GROUPS)
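# For illustration, a private_settings.py override could look like this
# (the group name "smoke" is hypothetical; the test ids exist in TESTS above):
#
#     TEST_GROUPS = {"smoke": ["MBED_23", "RTOS_1", "NET_1"]}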
class Test:
DEFAULTS = {
#'mcu': None,
'description': None,
'dependencies': None,
'duration': 10,
'host_test': 'host_test',
'automated': False,
'peripherals': None,
#'supported': None,
'source_dir': None,
'extra_files': None
}
def __init__(self, n):
self.n = n
self.__dict__.update(Test.DEFAULTS)
self.__dict__.update(TESTS[n])
    def is_supported(self, target, toolchain):
        # A test is runnable on a target/toolchain pair unless restricted by
        # the optional 'mcu', 'exclude_mcu' or 'supported' keys of its entry.
        if hasattr(self, 'mcu') and target not in self.mcu:
            return False
        if hasattr(self, 'exclude_mcu') and target in self.exclude_mcu:
            return False
        if not hasattr(self, 'supported'):
            return True
        return (target in self.supported) and (toolchain in self.supported[target])
def get_description(self):
if self.description:
return self.description
else:
return self.id
def __cmp__(self, other):
return cmp(self.n, other.n)
def __str__(self):
return "[%3d] %s: %s" % (self.n, self.id, self.get_description())
def __getitem__(self, key):
if key == "id": return self.id
elif key == "mcu": return self.mcu
elif key == "exclude_mcu": return self.exclude_mcu
elif key == "dependencies": return self.dependencies
elif key == "description": return self.description
elif key == "duration": return self.duration
elif key == "host_test": return self.host_test
elif key == "automated": return self.automated
elif key == "peripherals": return self.peripherals
elif key == "supported": return self.supported
elif key == "source_dir": return self.source_dir
elif key == "extra_files": return self.extra_files
else:
return None
TEST_MAP = dict([(test['id'], Test(i)) for i, test in enumerate(TESTS)])
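# Minimal usage sketch for the maps built above. The toolchain name "ARM" is
# illustrative; any toolchain id accepted by is_supported() works the same way.
if __name__ == '__main__':
    for test_id in GROUPS["automated"]:
        test = TEST_MAP[test_id]
        print test, "supported on K64F/ARM:", test.is_supported("K64F", "ARM")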
|
|
from collections import OrderedDict
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import call
from mock import Mock
from mock import patch
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.exceptions import ParseError
from rest_framework.exceptions import PermissionDenied
from rest_framework.request import Request
from rest_framework.test import APITestCase
from kolibri.core.auth.constants.morango_sync import PROFILE_FACILITY_DATA
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityDataset
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.test.test_api import FacilityUserFactory
from kolibri.core.device.models import DevicePermissions
from kolibri.core.device.models import DeviceSettings
from kolibri.core.discovery.utils.network.errors import NetworkLocationNotFound
from kolibri.core.tasks.api import prepare_peer_sync_job
from kolibri.core.tasks.api import prepare_sync_job
from kolibri.core.tasks.api import prepare_sync_task
from kolibri.core.tasks.api import ResourceGoneError
from kolibri.core.tasks.api import validate_facility
from kolibri.core.tasks.api import validate_peer_sync_job
from kolibri.core.tasks.api import validate_sync_task
from kolibri.core.tasks.exceptions import JobNotFound
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.job import State
DUMMY_PASSWORD = "password"
fake_job_defaults = dict(
job_id=None,
state=None,
exception="",
traceback="",
percentage_progress=0,
cancellable=False,
extra_metadata=dict(),
func=lambda: None,
)
def fake_job(**kwargs):
fake_data = fake_job_defaults.copy()
fake_data.update(kwargs)
return Mock(spec=Job, **fake_data)
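# e.g. fake_job(state=State.QUEUED, job_id=1) yields a Mock constrained to the
# Job spec, suitable as a queue.fetch_job return value in the tests below.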
class BaseAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
DeviceSettings.objects.create(is_provisioned=True)
cls.facility = Facility.objects.create(name="facility")
cls.superuser = FacilityUser.objects.create(
username="superuser", facility=cls.facility
)
cls.superuser.set_password(DUMMY_PASSWORD)
cls.superuser.save()
DevicePermissions.objects.create(user=cls.superuser, is_superuser=True)
def setUp(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
@patch("kolibri.core.tasks.api.priority_queue")
@patch("kolibri.core.tasks.api.queue")
class TaskAPITestCase(BaseAPITestCase):
def test_task_cancel(self, queue_mock, priority_queue_mock):
queue_mock.fetch_job.return_value = fake_job(state=State.CANCELED)
response = self.client.post(
reverse("kolibri:core:task-canceltask"), {"task_id": "1"}, format="json"
)
self.assertEqual(response.data, {})
def test_task_cancel_no_task(self, queue_mock, priority_queue_mock):
queue_mock.cancel.side_effect = JobNotFound()
response = self.client.post(
reverse("kolibri:core:task-canceltask"), {"task_id": "1"}, format="json"
)
self.assertEqual(response.status_code, 200)
def test_task_get_no_task(self, queue_mock, priority_queue_mock):
queue_mock.fetch_job.side_effect = JobNotFound()
priority_queue_mock.fetch_job.side_effect = JobNotFound()
response = self.client.get(
reverse("kolibri:core:task-detail", kwargs={"pk": "1"}),
{"task_id": "1"},
format="json",
)
self.assertEqual(response.status_code, 404)
def test_tasks_clearable_flag(self, queue_mock, priority_queue_mock):
queue_mock.jobs = [
fake_job(state=state)
for state in [
# not clearable
State.SCHEDULED,
State.QUEUED,
State.RUNNING,
State.CANCELING,
# clearable
State.FAILED,
State.CANCELED,
State.COMPLETED,
]
]
priority_queue_mock.jobs = []
response = self.client.get(reverse("kolibri:core:task-list"))
def assert_clearable(index, expected):
self.assertEqual(response.data[index]["clearable"], expected)
for i in [0, 1, 2, 3]:
assert_clearable(i, False)
for i in [4, 5, 6]:
assert_clearable(i, True)
def test_restart_task(self, queue_mock, priority_queue_mock):
queue_mock.restart_job.return_value = 1
queue_mock.fetch_job.return_value = fake_job(state=State.QUEUED, job_id=1)
response = self.client.post(
reverse("kolibri:core:task-restarttask"), {"task_id": "1"}, format="json"
)
expected_response = {
"status": "QUEUED",
"exception": "",
"traceback": "",
"percentage": 0,
"id": 1,
"cancellable": False,
"clearable": False,
}
self.assertDictEqual(response.data, expected_response)
@patch("kolibri.core.tasks.api.priority_queue")
@patch("kolibri.core.tasks.api.queue")
class TaskAPIPermissionsTestCase(APITestCase):
def setUp(self):
DeviceSettings.objects.create(is_provisioned=True)
self.facility = Facility.objects.create(name="facility")
admin = FacilityUserFactory(facility=self.facility)
self.facility.add_admin(admin)
self.client.login(username=admin.username, password=DUMMY_PASSWORD)
def test_exportlogs_permissions(self, queue_mock, priority_queue_mock):
with patch("kolibri.core.tasks.api._job_to_response", return_value={}):
response = self.client.post(
reverse("kolibri:core:task-startexportlogcsv"),
{"facility": self.facility.pk},
format="json",
)
self.assertEqual(response.status_code, 200)
def test_list_permissions(self, queue_mock, priority_queue_mock):
with patch("kolibri.core.tasks.api._job_to_response", return_value={}):
response = self.client.get(reverse("kolibri:core:task-list"), format="json")
self.assertEqual(response.status_code, 200)
@patch("kolibri.core.tasks.api.facility_queue")
class FacilityTaskAPITestCase(BaseAPITestCase):
def assertJobResponse(self, job_data, response):
id = job_data.get("job_id", fake_job_defaults.get("job_id"))
self.assertEqual(id, response.data.get("id"))
status = job_data.get("state", fake_job_defaults.get("state"))
self.assertEqual(status, response.data.get("status"))
exception = job_data.get("exception", fake_job_defaults.get("exception"))
self.assertEqual(exception, response.data.get("exception"))
traceback = job_data.get("traceback", fake_job_defaults.get("traceback"))
self.assertEqual(traceback, response.data.get("traceback"))
percentage = job_data.get(
"percentage_progress", fake_job_defaults.get("percentage_progress")
)
self.assertEqual(percentage, response.data.get("percentage"))
cancellable = job_data.get("cancellable", fake_job_defaults.get("cancellable"))
self.assertEqual(cancellable, response.data.get("cancellable"))
extra = job_data.get("extra_metadata", fake_job_defaults.get("extra_metadata"))
for key, value in extra.items():
self.assertEqual(
value,
response.data.get(key),
"Extra metadata key `{}` doesn't match".format(key),
)
def test_list_unprovisioned(self, facility_queue):
facility_queue.jobs.return_value = []
response = self.client.get(
reverse("kolibri:core:facilitytask-list"), format="json"
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [])
def test_list_provisioned(self, facility_queue):
response = self.client.get(
reverse("kolibri:core:facilitytask-list"), format="json"
)
self.assertEqual(response.status_code, 200)
def test_startdataportalsync(self, facility_queue):
user = self.superuser
facility_queue.enqueue.return_value = 123
fake_job_data = dict(
job_id=123,
state="testing",
percentage_progress=42,
cancellable=False,
extra_metadata=dict(this_is_extra=True),
)
facility_queue.fetch_job.return_value = fake_job(**fake_job_data)
response = self.client.post(
reverse("kolibri:core:facilitytask-startdataportalsync"),
{"facility": self.facility.id, "facility_name": "my facility name"},
format="json",
)
self.assertEqual(response.status_code, 200)
self.assertJobResponse(fake_job_data, response)
facility_queue.enqueue.assert_called_with(
call_command,
"sync",
facility=self.facility.id,
chunk_size=200,
noninteractive=True,
extra_metadata=dict(
facility=self.facility.id,
facility_name="my facility name",
started_by=user.pk,
started_by_username=user.username,
sync_state="PENDING",
bytes_sent=0,
bytes_received=0,
type="SYNCDATAPORTAL",
),
track_progress=True,
cancellable=False,
)
def test_startdataportalbulksync(self, facility_queue):
user = self.superuser
facility2 = Facility.objects.create(name="facility 2")
facility3 = Facility.objects.create(name="facility 3")
dataset_ids = [facility2.dataset_id, facility3.dataset_id]
FacilityDataset.objects.filter(pk__in=dataset_ids).update(registered=True)
fake_job_data = dict(
job_id=123,
state="testing",
percentage_progress=42,
cancellable=False,
extra_metadata=dict(this_is_extra=True),
)
facility_queue.fetch_job.return_value = fake_job(**fake_job_data)
response = self.client.post(
reverse("kolibri:core:facilitytask-startdataportalbulksync"), format="json"
)
self.assertEqual(response.status_code, 200)
self.assertEqual(2, facility_queue.enqueue.call_count)
facility_queue.enqueue.assert_has_calls(
[
call(
call_command,
"sync",
facility=facility2.id,
chunk_size=200,
noninteractive=True,
extra_metadata=dict(
facility=facility2.id,
facility_name="facility 2",
started_by=user.pk,
started_by_username=user.username,
sync_state="PENDING",
bytes_sent=0,
bytes_received=0,
type="SYNCDATAPORTAL",
),
track_progress=True,
cancellable=False,
),
call(
call_command,
"sync",
facility=facility3.id,
chunk_size=200,
noninteractive=True,
extra_metadata=dict(
facility=facility3.id,
facility_name="facility 3",
started_by=user.pk,
started_by_username=user.username,
sync_state="PENDING",
bytes_sent=0,
bytes_received=0,
type="SYNCDATAPORTAL",
),
track_progress=True,
cancellable=False,
),
],
any_order=True,
)
@patch("kolibri.core.tasks.api.validate_peer_sync_job")
@patch("kolibri.core.tasks.api.prepare_peer_sync_job")
@patch("kolibri.core.tasks.api.get_client_and_server_certs")
def test_startpeerfacilityimport(
self,
get_client_and_server_certs,
prepare_peer_sync_job,
validate_peer_sync_job,
facility_queue,
):
user = self.superuser
extra_metadata = dict(
facility=self.facility.id,
started_by=user.pk,
started_by_username=user.username,
sync_state="PENDING",
bytes_sent=0,
bytes_received=0,
type="SYNCPEER/PULL",
facility_name="",
device_name="",
device_id="",
baseurl="https://some.server.test/extra/stuff",
)
request_data = OrderedDict(baseurl="https://some.server.test/")
request_data["facility"] = self.facility.id
request_data["username"] = ""
request_data["password"] = ""
prepared_data = dict(
baseurl="https://some.server.test/",
facility=self.facility.id,
no_push=True,
chunk_size=200,
noninteractive=True,
extra_metadata=extra_metadata,
track_progress=True,
cancellable=False,
)
validate_peer_sync_job.return_value = request_data.copy()
prepare_peer_sync_job.return_value = prepared_data.copy()
facility_queue.enqueue.return_value = 123
fake_job_data = dict(
job_id=123,
state="testing",
percentage_progress=42,
cancellable=False,
extra_metadata=dict(this_is_extra=True),
)
fake_job_data["extra_metadata"].update(extra_metadata)
facility_queue.fetch_job.return_value = fake_job(**fake_job_data)
req_data = dict(
facility=self.facility.id, baseurl="https://some.server.test/extra/stuff"
)
response = self.client.post(
reverse("kolibri:core:facilitytask-startpeerfacilityimport"),
req_data,
format="json",
)
self.assertEqual(response.status_code, 200)
self.assertJobResponse(fake_job_data, response)
prepare_peer_sync_job.assert_has_calls(
[
call(
*request_data.keys(),
no_push=True,
no_provision=True,
extra_metadata=extra_metadata
)
]
)
facility_queue.enqueue.assert_called_with(call_command, "sync", **prepared_data)
@patch("kolibri.core.tasks.api.prepare_peer_sync_job")
@patch("kolibri.core.tasks.api.validate_peer_sync_job")
def test_startpeerfacilitysync(
self, validate_peer_sync_job, prepare_peer_sync_job, facility_queue
):
user = self.superuser
extra_metadata = dict(
facility=self.facility.id,
started_by=user.pk,
started_by_username=user.username,
sync_state="PENDING",
bytes_sent=0,
bytes_received=0,
type="SYNCPEER/FULL",
facility_name="",
device_name="",
device_id="",
baseurl="https://some.server.test/extra/stuff",
)
request_data = OrderedDict(baseurl="https://some.server.test/")
request_data["facility"] = self.facility.id
request_data["username"] = ""
request_data["password"] = ""
prepared_data = OrderedDict(
baseurl="https://some.server.test/",
facility=self.facility.id,
chunk_size=200,
noninteractive=True,
extra_metadata=extra_metadata,
track_progress=True,
cancellable=False,
)
validate_peer_sync_job.return_value = request_data.copy()
prepare_peer_sync_job.return_value = prepared_data.copy()
facility_queue.enqueue.return_value = 123
fake_job_data = dict(
job_id=123,
state="testing",
percentage_progress=42,
cancellable=False,
extra_metadata=dict(this_is_extra=True),
)
fake_job_data["extra_metadata"].update(extra_metadata)
facility_queue.fetch_job.return_value = fake_job(**fake_job_data)
req_data = dict(
facility=self.facility.id, baseurl="https://some.server.test/extra/stuff"
)
response = self.client.post(
reverse("kolibri:core:facilitytask-startpeerfacilitysync"),
req_data,
format="json",
)
self.assertEqual(response.status_code, 200)
self.assertJobResponse(fake_job_data, response)
prepare_peer_sync_job.assert_has_calls(
[call(*request_data.keys(), extra_metadata=extra_metadata)]
)
facility_queue.enqueue.assert_called_with(call_command, "sync", **prepared_data)
def test_startdeletefacility(self, facility_queue):
user = self.superuser
facility2 = Facility.objects.create(name="facility2")
extra_metadata = dict(
facility=facility2.id,
facility_name=facility2.name,
started_by=user.pk,
started_by_username=user.username,
type="DELETEFACILITY",
)
prepared_data = dict(
facility=facility2.id,
noninteractive=True,
extra_metadata=extra_metadata,
track_progress=True,
cancellable=False,
)
facility_queue.enqueue.return_value = 123
fake_job_data = dict(
job_id=123,
state="testing",
cancellable=False,
extra_metadata=dict(this_is_extra=True),
)
fake_job_data["extra_metadata"].update(extra_metadata)
facility_queue.fetch_job.return_value = fake_job(**fake_job_data)
response = self.client.post(
reverse("kolibri:core:facilitytask-startdeletefacility"),
dict(facility=facility2.id),
format="json",
)
self.assertEqual(response.status_code, 200)
self.assertJobResponse(fake_job_data, response)
facility_queue.enqueue.assert_called_with(
call_command, "deletefacility", **prepared_data
)
def test_startdeletefacility__sole_facility(self, facility_queue):
response = self.client.post(
reverse("kolibri:core:facilitytask-startdeletefacility"),
dict(facility=self.facility.id),
format="json",
)
self.assertEqual(response.status_code, 400)
self.assertEqual("SOLE_FACILITY", response.data.get("code"))
def test_startdeletefacility__not_superuser(self, facility_queue):
facility1 = Facility.objects.create(name="facility1")
Facility.objects.create(name="facility2")
user = FacilityUser.objects.create(username="notasuperuser", facility=facility1)
user.set_password(DUMMY_PASSWORD)
user.save()
DevicePermissions.objects.create(
user=user, is_superuser=False, can_manage_content=True
)
self.client.logout()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.post(
reverse("kolibri:core:facilitytask-startdeletefacility"),
dict(facility=facility1.id),
format="json",
)
self.assertEqual(response.status_code, 403)
def test_startdeletefacility__facility_member(self, facility_queue):
Facility.objects.create(name="facility2")
response = self.client.post(
reverse("kolibri:core:facilitytask-startdeletefacility"),
dict(facility=self.facility.id),
format="json",
)
self.assertEqual(response.status_code, 400)
self.assertEqual("FACILITY_MEMBER", response.data.get("code"))
class FacilityTaskHelperTestCase(TestCase):
def test_validate_sync_task(self):
user = Mock(spec=FacilityUser, pk=456, username="abc")
req = Mock(spec=Request, data=dict(facility=123), user=user)
expected = dict(
facility=123,
started_by=456,
started_by_username="abc",
sync_state="PENDING",
bytes_sent=0,
bytes_received=0,
other_kwarg="is test",
)
actual = prepare_sync_task(*validate_sync_task(req), other_kwarg="is test")
self.assertEqual(expected, actual)
def test__prepare_sync_job(self):
expected = dict(
facility=123,
chunk_size=200,
noninteractive=True,
track_progress=True,
cancellable=False,
extra_metadata=dict(type="test"),
)
actual = prepare_sync_job(123, extra_metadata=dict(type="test"))
self.assertEqual(expected, actual)
def test_validate_facility__parse_error(self):
req = Mock(spec="rest_framework.requests.Request", data=dict())
with self.assertRaises(ParseError):
validate_facility(req)
def test_validate_facility__parse_error__empty(self):
req = Mock(spec="rest_framework.requests.Request", data=dict(facility=""))
with self.assertRaises(ParseError):
validate_facility(req)
@patch("kolibri.core.tasks.api.MorangoProfileController")
@patch("kolibri.core.tasks.api.NetworkClient")
@patch("kolibri.core.tasks.api.get_client_and_server_certs")
@patch("kolibri.core.tasks.api.get_dataset_id")
def test_validate_peer_sync_job(
self,
get_dataset_id,
get_client_and_server_certs,
NetworkClient,
MorangoProfileController,
):
dataset_id = 456
req = Mock(
spec=Request,
data=dict(
facility=123,
baseurl="https://some.server.test/extra/stuff",
username="tester",
password="mypassword",
),
)
client = NetworkClient.return_value
client.base_url = "https://some.server.test/"
network_connection = Mock()
controller = MorangoProfileController.return_value
controller.create_network_connection.return_value = network_connection
get_dataset_id.return_value = dataset_id
get_client_and_server_certs.return_value = None
expected = dict(
baseurl="https://some.server.test/",
facility=123,
chunk_size=200,
noninteractive=True,
track_progress=True,
cancellable=False,
extra_metadata=dict(type="test"),
)
actual = prepare_peer_sync_job(
*validate_peer_sync_job(req), extra_metadata=dict(type="test")
)
self.assertEqual(expected, actual)
MorangoProfileController.assert_called_with(PROFILE_FACILITY_DATA)
controller.create_network_connection.assert_called_with(
"https://some.server.test/"
)
get_dataset_id.assert_called_with(
"https://some.server.test/", identifier=123, noninteractive=True
)
get_client_and_server_certs.assert_called_with(
"tester", "mypassword", dataset_id, network_connection, noninteractive=True
)
def test_validate_peer_sync_job__no_baseurl(self):
req = Mock(spec=Request, data=dict(facility=123))
with self.assertRaises(ParseError, msg="Missing `baseurl` parameter"):
validate_peer_sync_job(req)
def test_validate_peer_sync_job__bad_url(self):
req = Mock(
spec=Request, data=dict(facility=123, baseurl="/com.bad.url.www//:sptth")
)
with self.assertRaises(ParseError, msg="Invalid URL"):
validate_peer_sync_job(req)
@patch("kolibri.core.tasks.api.NetworkClient")
def test_validate_peer_sync_job__cannot_connect(self, NetworkClient):
req = Mock(
spec=Request, data=dict(facility=123, baseurl="https://www.notfound.never")
)
NetworkClient.side_effect = NetworkLocationNotFound()
with self.assertRaises(ResourceGoneError):
validate_peer_sync_job(req)
@patch("kolibri.core.tasks.api.MorangoProfileController")
@patch("kolibri.core.tasks.api.NetworkClient")
@patch("kolibri.core.tasks.api.get_dataset_id")
def test_validate_peer_sync_job__unknown_facility(
self, get_dataset_id, NetworkClient, MorangoProfileController
):
req = Mock(
spec=Request,
data=dict(
facility=123,
baseurl="https://some.server.test/extra/stuff",
username="tester",
password="mypassword",
),
)
client = NetworkClient.return_value
client.base_url = "https://some.server.test/"
network_connection = Mock()
controller = MorangoProfileController.return_value
controller.create_network_connection.return_value = network_connection
get_dataset_id.side_effect = CommandError()
with self.assertRaises(AuthenticationFailed):
prepare_peer_sync_job(
*validate_peer_sync_job(req), extra_metadata=dict(type="test")
)
@patch("kolibri.core.tasks.api.MorangoProfileController")
@patch("kolibri.core.tasks.api.NetworkClient")
@patch("kolibri.core.tasks.api.get_client_and_server_certs")
@patch("kolibri.core.tasks.api.get_dataset_id")
def test_validate_peer_sync_job__not_authenticated(
self,
get_dataset_id,
get_client_and_server_certs,
NetworkClient,
MorangoProfileController,
):
req = Mock(
spec=Request,
data=dict(facility=123, baseurl="https://some.server.test/extra/stuff"),
)
client = NetworkClient.return_value
client.base_url = "https://some.server.test/"
network_connection = Mock()
controller = MorangoProfileController.return_value
controller.create_network_connection.return_value = network_connection
get_dataset_id.return_value = 456
get_client_and_server_certs.side_effect = CommandError()
with self.assertRaises(PermissionDenied):
prepare_peer_sync_job(
*validate_peer_sync_job(req), extra_metadata=dict(type="test")
)
@patch("kolibri.core.tasks.api.MorangoProfileController")
@patch("kolibri.core.tasks.api.NetworkClient")
@patch("kolibri.core.tasks.api.get_client_and_server_certs")
@patch("kolibri.core.tasks.api.get_dataset_id")
def test_validate_peer_sync_job__authentication_failed(
self,
get_dataset_id,
get_client_and_server_certs,
NetworkClient,
MorangoProfileController,
):
req = Mock(
spec=Request,
data=dict(
facility=123,
baseurl="https://some.server.test/extra/stuff",
username="tester",
password="mypassword",
),
)
client = NetworkClient.return_value
client.base_url = "https://some.server.test/"
network_connection = Mock()
controller = MorangoProfileController.return_value
controller.create_network_connection.return_value = network_connection
get_dataset_id.return_value = 456
get_client_and_server_certs.side_effect = CommandError()
with self.assertRaises(AuthenticationFailed):
prepare_peer_sync_job(
*validate_peer_sync_job(req), extra_metadata=dict(type="test")
)
|
|
import pygame
import ocempgui.widgets as ow
import ocempgui.widgets.Constants as oc
import ocempgui.events as oe
from lobby_list_item import LobbyListItem
import events as be
class GUI():
"""
User Interface windows.
"""
def __init__(self, renderer):
self.renderer = renderer
self.lobby_visible = False
self.game_name = ""
self.boardsize = (10, 10)
self.num_players = (1, 3)
self.renderer.color = (255, 255, 255, 0)
def hide_all(self):
"""
Hide all widgets.
"""
self.lobby_visible = False
self.renderer.clear()
def show_lobby(self):
"""
Display the lobby window.
"""
        # Get a rectangle with a margin.
rect = self.renderer._get_rect()
rect = (rect[0] + 16, rect[1] + 16, rect[2] - 16, rect[3] - 16)
# Nickname textbox.
self.e_nickname = ow.Entry("Anon")
self.e_nickname.topleft = (rect[0] + 64, rect[3] - self.e_nickname.height)
self.l_nickname = ow.Label("Nickname: ")
self.l_nickname.topleft = (rect[0], self.e_nickname.topleft[1] + (self.e_nickname.height - self.l_nickname.height) / 2 )
# Create Game button.
self.b_create = ow.Button("Create Game")
self.b_create.topleft = (rect[2] - self.b_create.width - 100, rect[3] - self.b_create.height)
self.b_create.connect_signal(oc.SIG_CLICKED, self.do_create_game)
# Join Game button.
self.b_join = ow.Button("Join Game")
self.b_join.topleft = (rect[2] - self.b_join.width, rect[3] - self.b_join.height)
self.b_join.connect_signal(oc.SIG_CLICKED, self.do_join_game)
# List of servers.
self.l_servers = ow.Label("List of available servers:")
self.l_servers.topleft = (rect[0], rect[1])
self.li_servers = ow.ScrolledList(rect[2] - 16, rect[3] - 64)
self.li_servers.topleft = (rect[0], rect[1] + 16)
self.li_servers.set_selectionmode(oc.SELECTION_SINGLE)
# Add all the widgets.
self.renderer.add_widget(self.l_servers)
self.renderer.add_widget(self.li_servers)
self.renderer.add_widget(self.l_nickname)
self.renderer.add_widget(self.e_nickname)
self.renderer.add_widget(self.b_create)
self.renderer.add_widget(self.b_join)
self.lobby_visible = True
def show_create(self):
"""
Display the window for creating a new game.
"""
        # Get a rectangle with a margin.
rect = self.renderer._get_rect()
rect = (rect[0] + 16, rect[1] + 16, rect[2] - 16, rect[3] - 16)
self.f_tab = ow.Table(4, 2)
self.f_tab.topleft = (rect[0], rect[1])
# Name of the game textbox.
self.e_gamename = ow.Entry("Ship Wreckyard")
self.l_gamename = ow.Label("Name of the game: ")
self.f_tab.add_child(0, 0, self.l_gamename)
self.f_tab.add_child(0, 1, self.e_gamename)
# Number of players.
self.e_players = ow.Entry("2")
self.l_players = ow.Label("Number of players: ")
self.f_tab.add_child(1, 0, self.l_players)
self.f_tab.add_child(1, 1, self.e_players)
# Board size.
self.l_boardw = ow.Label("Board width: ")
self.e_boardw = ow.Entry("10")
self.l_boardh = ow.Label("Board height: ")
self.e_boardh = ow.Entry("10")
self.f_tab.add_child(2, 0, self.l_boardw)
self.f_tab.add_child(2, 1, self.e_boardw)
self.f_tab.add_child(3, 0, self.l_boardh)
self.f_tab.add_child(3, 1, self.e_boardh)
        # Cancel button.
self.b_cancel = ow.Button("Cancel")
self.b_cancel.topleft = (rect[2] - self.b_cancel.width - 100, rect[3] - self.b_cancel.height)
self.b_cancel.connect_signal(oc.SIG_CLICKED, self.do_lobby)
        # Start Game button.
self.b_create = ow.Button("Start Game")
self.b_create.topleft = (rect[2] - self.b_create.width, rect[3] - self.b_create.height)
self.b_create.connect_signal(oc.SIG_CLICKED, self.do_start_hosted)
# Add all the widgets.
self.renderer.add_widget(self.f_tab)
self.renderer.add_widget(self.b_create)
self.renderer.add_widget(self.b_cancel)
def do_lobby(self):
"""
        Return to the lobby.
"""
event = pygame.event.Event(be.E_STATE, {"state":be.S_LOBBY})
pygame.event.post(event)
self.hide_all()
self.show_lobby()
self.renderer.color = (255, 255, 255, 0)
def do_create_game(self):
"""
Display the game creation window.
"""
self.nickname = self.e_nickname.text
self.hide_all()
self.show_create()
self.renderer.color = (255, 255, 255, 0)
def do_start_hosted(self):
"""
Start the game in hosted mode.
"""
self.game_name = self.e_gamename.text
self.num_players = (1, int(self.e_players.text))
self.boardsize = (int(self.e_boardw.text), int(self.e_boardh.text))
d = {"state": be.S_GAME,
"hosting": True,
"uuid": None,
"name": self.game_name,
"nickname": self.nickname,
"num_players": self.num_players,
"boardsize": self.boardsize}
event = pygame.event.Event(be.E_STATE, d)
pygame.event.post(event)
self.hide_all()
self.renderer.color = (0, 0, 0, 0)
def do_join_game(self):
"""
Join the selected game.
"""
item = self.li_servers.get_selected()[0]
self.nickname = self.e_nickname.text
self.server_uuid = item.server.uuid
self.game_name = item.server.name
self.num_players = item.server.num_players
self.boardsize = item.server.boardsize
d = {"state": be.S_JOIN,
"uuid": self.server_uuid,
"name": self.game_name,
"nickname": self.nickname}
event = pygame.event.Event(be.E_STATE, d)
pygame.event.post(event)
self.hide_all()
self.renderer.color = (0, 0, 0, 0)
def do_start_joined(self):
"""
Start a joined game.
"""
d = {"state": be.S_GAME,
"hosting": False,
"uuid": None,
"name": self.game_name,
"nickname": self.nickname,
"num_players": self.num_players,
"boardsize": self.boardsize}
event = pygame.event.Event(be.E_STATE, d)
pygame.event.post(event)
self.hide_all()
self.renderer.color = (0, 0, 0, 0)
def process_serverlist(self, serverlist):
"""
Process the announcements collected from the message queue.
"""
        # Note that events may arrive late; we must not operate on
        # widgets that are being garbage collected.
if not self.lobby_visible:
return
num_servers = 0
for key, val in serverlist.iteritems():
# Either update an existing list item.
if len(self.li_servers.items) > 0 and num_servers < len(self.li_servers.items):
self.li_servers.items[num_servers].set_server(val)
# Or create a new one.
else:
self.li_servers.items.append(LobbyListItem(val))
num_servers += 1
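# Minimal wiring sketch (assumes the stock ocempgui Renderer API; event
# pumping and the E_STATE handling that drives state changes are elided):
#
#     renderer = ow.Renderer()
#     renderer.create_screen(640, 480)
#     gui = GUI(renderer)
#     gui.show_lobby()
#     renderer.start()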
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver
from tensorflow.python.training import training as train
__all__ = [
"load_checkpoint",
"load_variable",
"list_variables",
"init_from_checkpoint"]
def _get_checkpoint_filename(filepattern):
"""Returns checkpoint filename given directory or specific filepattern."""
if gfile.IsDirectory(filepattern):
return saver.latest_checkpoint(filepattern)
return filepattern
def load_checkpoint(filepattern):
"""Returns CheckpointReader for latest checkpoint.
Args:
filepattern: Directory with checkpoints file or path to checkpoint.
Returns:
`CheckpointReader` object.
Raises:
ValueError: if checkpoint_dir doesn't have 'checkpoint' file or checkpoints.
"""
filename = _get_checkpoint_filename(filepattern)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % filepattern)
return train.NewCheckpointReader(filename)
def load_variable(checkpoint_dir, name):
"""Returns a Tensor with the contents of the given variable in the checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
name: Name of the tensor to return.
Returns:
`Tensor` object.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(checkpoint_dir)
return reader.get_tensor(name)
def list_variables(checkpoint_dir):
"""Returns list of all variables in the latest checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(checkpoint_dir)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
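# For example (variable names are illustrative, not from a real checkpoint):
#   list_variables("/tmp/my_model") -> [("layer1/biases", [100]),
#                                       ("layer1/weights", [784, 100])]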
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
# TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
name="checkpoint_initializer"):
"""Sets variable initializer to assign op form value in checkpoint's tensor.
Args:
variable: `Variable` object.
file_pattern: string, where to load checkpoints from.
tensor_name: Name of the `Tensor` to load from checkpoint reader.
slice_spec: Slice specification for loading partitioned variables.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
restore_op = gen_io_ops._restore_slice(
file_pattern,
tensor_name,
slice_spec,
base_type,
preferred_shard=-1,
name=name)
variable._initializer_op = state_ops.assign(variable, restore_op)
def _set_variable_or_list_initializer(variable_or_list, file_pattern,
tensor_name):
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
if slice_name is None:
slice_name = v._save_slice_info.full_name
elif slice_name != v._save_slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, v._save_slice_info.full_name))
_set_checkpoint_initializer(v, file_pattern, tensor_name,
v._save_slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, file_pattern, tensor_name, "")
def init_from_checkpoint(checkpoint_dir, assignment_map):
"""Using assingment map initializes current variables with loaded tensors.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
`'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching variable
names.
`'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
  will initialize the `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
`'scope_variable_name': variable` - will initialize given `tf.Variable`
object with variable from the checkpoint.
`'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with variable from the checkpoint.
`'scope_name/': '/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
'<variable>/part_<part #>'.
Example:
```python
# Create variables.
with tf.variable_scope('test'):
m = tf.get_variable('my_var')
with tf.variable_scope('test2'):
var2 = tf.get_variable('my_var')
var3 = tf.get_variable(name="my1", shape=[100, 100],
partitioner=lambda shape, dtype: [5, 1])
...
  # Specify which variables to initialize from the checkpoint.
init_from_checkpoint(checkpoint_dir, {
'some_var': 'test/my_var',
'some_scope/': 'test2/'})
...
# Or use `Variable` objects to identify what to initialize.
init_from_checkpoint(checkpoint_dir, {
'some_scope/var2': var2,
})
# Initialize partitioned variables
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': 'part_var',
})
# Or specifying the list of `Variable` objects.
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': var3._get_variable_list(),
})
...
# Initialize variables as usual.
session.run(tf.get_all_variables())
```
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
filepattern = _get_checkpoint_filename(checkpoint_dir)
reader = load_checkpoint(checkpoint_dir)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in six.iteritems(assignment_map):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
is_var = lambda x: isinstance(x, variables.Variable)
if is_var(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(is_var(v) for v in current_var_or_name)):
var = current_var_or_name
else:
var_scope = vs._get_default_variable_store()
# Check if this variable is in var_store.
var = var_scope._vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
if current_var_or_name + "/part_0" in var_scope._vars:
var = []
i = 0
while current_var_or_name + "/part_%d" % i in var_scope._vars:
var.append(var_scope._vars[current_var_or_name + "/part_%d" % i])
i += 1
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint" % (
tensor_name_in_ckpt, checkpoint_dir
))
if is_var(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, filepattern, tensor_name_in_ckpt)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, checkpoint_dir, tensor_name_in_ckpt
))
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope.
for var_name in var_scope._vars:
if var_name.startswith(scopes):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + var_name[len(scopes) + 1:]
else:
full_tensor_name = var_name[len(scopes) + 1:]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, checkpoint_dir
))
var = var_scope._vars[var_name]
_set_variable_or_list_initializer(var, filepattern, full_tensor_name)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, checkpoint_dir, full_tensor_name
))
# pylint: enable=protected-access
if __name__ == "__main__":
  # Ad-hoc check: list the variables stored in a local checkpoint.
  # Guarded so importing this module has no side effects.
  lv = list_variables("tmp/testsancho2_model.ckpt")
  for v in lv:
    print(v)
|
|
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_bgp_config
short_description: Manage BGP routing configuration in EOS
description:
- The eos_bgp_config module provides resource management of the
global BGP routing process for Arista EOS nodes
version_added: 1.1.0
category: BGP
author: Arista EOS+
requirements:
  - Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.1 or later
notes:
  - All configuration is idempotent unless otherwise specified
  - Supports eos metaparameters for using the eAPI transport
  - Supports stateful resource configuration
options:
bgp_as:
description:
- The BGP autonomous system number to be configured for the
local BGP routing instance. The value must be in the valid
BGP AS range of 1 to 65535.
required: true
default: null
choices: []
aliases: []
version_added: 1.1.0
enable:
description:
- Configures the administrative state for the global BGP routing
process. If enable is True then the BGP routing process is
        administratively enabled and if enable is False then
the BGP routing process is administratively disabled.
default: true
required: false
choices: ['True', 'False']
aliases: []
version_added: 1.1.0
router_id:
description:
- Configures the BGP routing process router-id value. The router
id must be in the form of A.B.C.D
    default: null
required: false
choices: []
aliases: []
version_added: 1.1.0
maximum_paths:
description:
- Configures the maximum number of parallel routes. The EOS default for
this attribute is 1. This value should be less than or equal to
maximum_ecmp_paths.
default: null
required: false
choices: []
aliases: []
version_added: 1.2.0
maximum_ecmp_paths:
description:
- Configures the maximum number of ecmp paths for each route.
The EOS default for this attribute is the maximum value, which varies
by hardware platform. Check your Arista documentation for more
information. This value should be greater than or equal to
maximum_paths.
default: null
required: false
choices: []
aliases: []
version_added: 1.2.0
"""
EXAMPLES = """
- name: enable BGP routing with AS 65535
eos_bgp_config: bgp_as=65535 state=present enable=yes
- name: disable the BGP routing process
eos_bgp_config: bgp_as=65535 enable=no
- name: configure the BGP router-id
eos_bgp_config: bgp_as=65535 router_id=1.1.1.1
- name: configure the BGP with just max paths
eos_bgp_config: bgp_as=65535 router_id=1.1.1.1 maximum_paths=20
- name: configure the BGP with maximum_paths and maximum_ecmp_paths
eos_bgp_config: bgp_as=65535 router_id=1.1.1.1 maximum_paths=20 maximum_ecmp_paths=20
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
if self._node:
return self._node
self._node = self.connect()
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
        This method will map the argument_spec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
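    # Illustrative sketch (hypothetical values): with params
    # {'bgp_as': '65535', 'host': 'veos01', 'state': 'present'} and 'host'
    # defined in meta_args, map_argument_spec() returns
    # {'bgp_as': '65535', 'state': 'present'}.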
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
                self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
changed = self.invoke(self.instance.get('state'))
self.result['changed'] = changed or True
self.refresh()
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
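    # Illustrative example (hypothetical value): a changeset containing
    # ('router_id', '1.1.1.1') is recorded in changes and, outside of check
    # mode, dispatched to the module-level set_router_id(self) function.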
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
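    # Note on connect(): pyeapi.config_for(connection) provides the base
    # configuration, and any username/password/transport/port/host task
    # arguments override it before the eAPI connection is created.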
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
"""Returns the BGP routing instance configuration
"""
bgp_as = module.attributes['bgp_as']
result = module.node.api('bgp').get()
_instance = dict(bgp_as=bgp_as, state='absent')
if result and bgp_as == str(result['bgp_as']):
_instance['state'] = 'present'
_instance['router_id'] = result['router_id']
_instance['maximum_paths'] = str(result['maximum_paths'])
_instance['maximum_ecmp_paths'] = str(result['maximum_ecmp_paths'])
_instance['enable'] = not result['shutdown']
return _instance
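# Example of the dict returned by instance() (hypothetical values):
#   {'bgp_as': '65535', 'state': 'present', 'router_id': '1.1.1.1',
#    'maximum_paths': '1', 'maximum_ecmp_paths': '128', 'enable': True}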
def create(module):
"""Creates a new isntance of BGP routing on the node
"""
bgp_as = module.attributes['bgp_as']
module.log('Invoked create for eos_bgp_config[{}]'.format(bgp_as))
module.node.api('bgp').create(bgp_as)
def remove(module):
"""Removes the BGP routing instance from the node
"""
bgp_as = module.attributes['bgp_as']
module.log('Invoked remove for eos_bgp_config[{}]'.format(bgp_as))
module.node.api('bgp').delete()
def set_enable(module):
"""Globally enables or disables the BGP process
"""
value = not module.attributes['enable']
bgp_as = module.attributes['bgp_as']
module.log('Invoked set_enable for eos_bgp_config[{}] '
'with value {}'.format(bgp_as, value))
module.node.api('bgp').set_shutdown(value)
def set_router_id(module):
"""Configures the BGP router-id
"""
value = module.attributes['router_id']
bgp_as = module.attributes['bgp_as']
module.log('Invoked set_router_id for eos_bgp_config[{}] '
'with value {}'.format(bgp_as, value))
module.node.api('bgp').set_router_id(value)
def set_maximum_paths(module):
"""Configures the BGP maximum-paths
"""
module.log('Inside set_maximum_paths')
bgp_as = module.attributes['bgp_as']
try:
max_paths = int(module.attributes['maximum_paths'])
    except (TypeError, ValueError):
max_paths = None
try:
max_ecmp_paths = int(module.attributes['maximum_ecmp_paths'])
    except (TypeError, ValueError):
max_ecmp_paths = None
if max_ecmp_paths and max_paths > max_ecmp_paths:
module.fail('maximum_paths {} must be less than or equal to '
'maximum_ecmp_paths {}'.format(max_paths, max_ecmp_paths))
module.log('Invoked set_maximum_paths for eos_bgp_config[{}] '
'with value {}'.format(bgp_as, max_paths))
module.node.api('bgp').set_maximum_paths(max_paths)
def set_maximum_ecmp_paths(module):
"""Configures the BGP maximum-paths
"""
module.log('Inside set_maximum_ecmp_paths')
bgp_as = module.attributes['bgp_as']
try:
max_paths = int(module.attributes['maximum_paths'])
    except (TypeError, ValueError):
max_paths = None
try:
max_ecmp_paths = int(module.attributes['maximum_ecmp_paths'])
    except (TypeError, ValueError):
max_ecmp_paths = None
    if max_paths and max_ecmp_paths and max_paths > max_ecmp_paths:
module.fail('maximum_paths {} must be less than or equal to '
'maximum_ecmp_paths {}'.format(max_paths, max_ecmp_paths))
    module.log('Invoked set_maximum_ecmp_paths for eos_bgp_config[{}] '
               'with values {}/{}'.format(bgp_as, max_paths, max_ecmp_paths))
module.node.api('bgp').set_maximum_paths(max_paths, max_ecmp_paths)
def main():
"""The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
bgp_as=dict(required=True),
enable=dict(type='bool', default=True),
router_id=dict(),
maximum_paths=dict(),
maximum_ecmp_paths=dict()
)
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main()
|
|
import copy
import inspect
from django import forms
from django.forms.formsets import all_valid, DELETION_FIELD_NAME
from django.forms.models import inlineformset_factory, BaseInlineFormSet, modelform_defines_fields
from django.contrib.contenttypes.generic import BaseGenericInlineFormSet, generic_inlineformset_factory
from django.template import loader
from django.template.loader import render_to_string
from django.contrib.auth import get_permission_codename
from xadmin.layout import FormHelper, Layout, flatatt, Container, Column, Field, Fieldset
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ModelFormAdminView, DetailAdminView, filter_hook
class ShowField(Field):
template = "xadmin/layout/field_value.html"
def __init__(self, admin_view, *args, **kwargs):
super(ShowField, self).__init__(*args, **kwargs)
self.admin_view = admin_view
if admin_view.style == 'table':
self.template = "xadmin/layout/field_value_td.html"
def render(self, form, form_style, context):
html = ''
detail = form.detail
for field in self.fields:
if not isinstance(form.fields[field].widget, forms.HiddenInput):
result = detail.get_field_result(field)
html += loader.render_to_string(
self.template, {'field': form[field], 'result': result})
return html
class DeleteField(Field):
def render(self, form, form_style, context):
if form.instance.pk:
self.attrs['type'] = 'hidden'
return super(DeleteField, self).render(form, form_style, context)
else:
return ""
class TDField(Field):
template = "xadmin/layout/td-field.html"
class InlineStyleManager(object):
inline_styles = {}
def register_style(self, name, style):
self.inline_styles[name] = style
def get_style(self, name='stacked'):
return self.inline_styles.get(name)
style_manager = InlineStyleManager()
class InlineStyle(object):
template = 'xadmin/edit_inline/stacked.html'
def __init__(self, view, formset):
self.view = view
self.formset = formset
def update_layout(self, helper):
pass
def get_attrs(self):
return {}
style_manager.register_style('stacked', InlineStyle)
class OneInlineStyle(InlineStyle):
template = 'xadmin/edit_inline/one.html'
style_manager.register_style("one", OneInlineStyle)
class AccInlineStyle(InlineStyle):
template = 'xadmin/edit_inline/accordion.html'
style_manager.register_style("accordion", AccInlineStyle)
class TabInlineStyle(InlineStyle):
template = 'xadmin/edit_inline/tab.html'
style_manager.register_style("tab", TabInlineStyle)
class TableInlineStyle(InlineStyle):
template = 'xadmin/edit_inline/tabular.html'
def update_layout(self, helper):
helper.add_layout(
Layout(*[TDField(f) for f in self.formset[0].fields.keys()]))
def get_attrs(self):
fields = []
readonly_fields = []
if len(self.formset):
fields = [f for k, f in self.formset[0].fields.items() if k != DELETION_FIELD_NAME]
readonly_fields = [f for f in getattr(self.formset[0], 'readonly_fields', [])]
return {
'fields': fields,
'readonly_fields': readonly_fields
}
style_manager.register_style("table", TableInlineStyle)
def replace_field_to_value(layout, av):
if layout:
for i, lo in enumerate(layout.fields):
if isinstance(lo, Field) or issubclass(lo.__class__, Field):
layout.fields[i] = ShowField(av, *lo.fields, **lo.attrs)
elif isinstance(lo, basestring):
layout.fields[i] = ShowField(av, lo)
elif hasattr(lo, 'get_field_names'):
replace_field_to_value(lo, av)
class InlineModelAdmin(ModelFormAdminView):
fk_name = None
formset = BaseInlineFormSet
extra = 3
max_num = None
can_delete = True
fields = []
admin_view = None
style = 'stacked'
def init(self, admin_view):
self.admin_view = admin_view
self.parent_model = admin_view.model
self.org_obj = getattr(admin_view, 'org_obj', None)
self.model_instance = self.org_obj or admin_view.model()
return self
@filter_hook
def get_formset(self, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields())
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we use None, since that's the actual
# default
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission()
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
'fields': forms.ALL_FIELDS,
"exclude": exclude,
"formfield_callback": self.formfield_for_dbfield,
"extra": self.extra,
"max_num": self.max_num,
"can_delete": can_delete,
}
defaults.update(kwargs)
return inlineformset_factory(self.parent_model, self.model, **defaults)
@filter_hook
def instance_form(self, **kwargs):
formset = self.get_formset(**kwargs)
attrs = {
'instance': self.model_instance,
'queryset': self.queryset()
}
if self.request_method == 'post':
attrs.update({
'data': self.request.POST, 'files': self.request.FILES,
'save_as_new': "_saveasnew" in self.request.POST
})
instance = formset(**attrs)
instance.view = self
helper = FormHelper()
helper.form_tag = False
# override form method to prevent render csrf_token in inline forms, see template 'bootstrap/whole_uni_form.html'
helper.form_method = 'get'
style = style_manager.get_style(
'one' if self.max_num == 1 else self.style)(self, instance)
style.name = self.style
if len(instance):
layout = copy.deepcopy(self.form_layout)
if layout is None:
layout = Layout(*instance[0].fields.keys())
elif type(layout) in (list, tuple) and len(layout) > 0:
layout = Layout(*layout)
rendered_fields = [i[1] for i in layout.get_field_names()]
layout.extend([f for f in instance[0]
.fields.keys() if f not in rendered_fields])
helper.add_layout(layout)
style.update_layout(helper)
        # wrap the delete field with DeleteField: it renders as a hidden input
        # for saved instances and is omitted for new (unsaved) ones.
helper[DELETION_FIELD_NAME].wrap(DeleteField)
instance.helper = helper
instance.style = style
readonly_fields = self.get_readonly_fields()
if readonly_fields:
for form in instance:
form.readonly_fields = []
inst = form.save(commit=False)
if inst:
for readonly_field in readonly_fields:
value = None
label = None
if readonly_field in inst._meta.get_all_field_names():
label = inst._meta.get_field_by_name(readonly_field)[0].verbose_name
value = unicode(getattr(inst, readonly_field))
elif inspect.ismethod(getattr(inst, readonly_field, None)):
value = getattr(inst, readonly_field)()
label = getattr(getattr(inst, readonly_field), 'short_description', readonly_field)
elif inspect.ismethod(getattr(self, readonly_field, None)):
value = getattr(self, readonly_field)(inst)
label = getattr(getattr(self, readonly_field), 'short_description', readonly_field)
if value:
form.readonly_fields.append({'label': label, 'contents': value})
return instance
def has_auto_field(self, form):
if form._meta.model._meta.has_auto_field:
return True
for parent in form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def queryset(self):
queryset = super(InlineModelAdmin, self).queryset()
if not self.has_change_permission() and not self.has_view_permission():
queryset = queryset.none()
return queryset
def has_add_permission(self):
if self.opts.auto_created:
return self.has_change_permission()
codename = get_permission_codename('add', self.opts)
return self.user.has_perm("%s.%s" % (self.opts.app_label, codename))
def has_change_permission(self):
opts = self.opts
if opts.auto_created:
for field in opts.fields:
if field.rel and field.rel.to != self.parent_model:
opts = field.rel.to._meta
break
codename = get_permission_codename('change', opts)
return self.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self):
if self.opts.auto_created:
return self.has_change_permission()
codename = get_permission_codename('delete', self.opts)
return self.user.has_perm("%s.%s" % (self.opts.app_label, codename))
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
def get_formset(self, **kwargs):
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields())
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# GenericInlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission()
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": self.formfield_for_dbfield,
"formset": self.formset,
"extra": self.extra,
"can_delete": can_delete,
"can_order": False,
"max_num": self.max_num,
"exclude": exclude,
'fields': forms.ALL_FIELDS
}
defaults.update(kwargs)
return generic_inlineformset_factory(self.model, **defaults)
class InlineFormset(Fieldset):
def __init__(self, formset, allow_blank=False, **kwargs):
self.fields = []
self.css_class = kwargs.pop('css_class', '')
self.css_id = "%s-group" % formset.prefix
self.template = formset.style.template
self.inline_style = formset.style.name
if allow_blank and len(formset) == 0:
self.template = 'xadmin/edit_inline/blank.html'
self.inline_style = 'blank'
self.formset = formset
self.model = formset.model
self.opts = formset.model._meta
self.flat_attrs = flatatt(kwargs)
self.extra_attrs = formset.style.get_attrs()
def render(self, form, form_style, context):
return render_to_string(
self.template, dict({'formset': self, 'prefix': self.formset.prefix, 'inline_style': self.inline_style}, **self.extra_attrs),
context_instance=context)
class Inline(Fieldset):
def __init__(self, rel_model):
self.model = rel_model
self.fields = []
def render(self, form, form_style, context):
return ""
def get_first_field(layout, clz):
for layout_object in layout.fields:
if issubclass(layout_object.__class__, clz):
return layout_object
elif hasattr(layout_object, 'get_field_names'):
gf = get_first_field(layout_object, clz)
if gf:
return gf
def replace_inline_objects(layout, fs):
if not fs:
return
for i, layout_object in enumerate(layout.fields):
if isinstance(layout_object, Inline) and layout_object.model in fs:
layout.fields[i] = fs.pop(layout_object.model)
elif hasattr(layout_object, 'get_field_names'):
replace_inline_objects(layout_object, fs)
class InlineFormsetPlugin(BaseAdminPlugin):
inlines = []
@property
def inline_instances(self):
if not hasattr(self, '_inline_instances'):
inline_instances = []
for inline_class in self.inlines:
inline = self.admin_view.get_view(
(getattr(inline_class, 'generic_inline', False) and GenericInlineModelAdmin or InlineModelAdmin),
inline_class).init(self.admin_view)
if not (inline.has_add_permission() or
inline.has_change_permission() or
inline.has_delete_permission() or
inline.has_view_permission()):
continue
if not inline.has_add_permission():
inline.max_num = 0
inline_instances.append(inline)
self._inline_instances = inline_instances
return self._inline_instances
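    # Note: an inline with none of the add/change/delete/view permissions is
    # dropped entirely, while one lacking only the add permission is kept
    # with max_num=0 so no extra (empty) forms are rendered.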
def instance_forms(self, ret):
self.formsets = []
for inline in self.inline_instances:
if inline.has_change_permission():
self.formsets.append(inline.instance_form())
else:
self.formsets.append(self._get_detail_formset_instance(inline))
self.admin_view.formsets = self.formsets
def valid_forms(self, result):
return all_valid(self.formsets) and result
def save_related(self):
for formset in self.formsets:
formset.instance = self.admin_view.new_obj
formset.save()
def get_context(self, context):
context['inline_formsets'] = self.formsets
return context
def get_error_list(self, errors):
for fs in self.formsets:
errors.extend(fs.non_form_errors())
for errors_in_inline_form in fs.errors:
errors.extend(errors_in_inline_form.values())
return errors
def get_form_layout(self, layout):
allow_blank = isinstance(self.admin_view, DetailAdminView)
# fixed #176 bug, change dict to list
fs = [(f.model, InlineFormset(f, allow_blank)) for f in self.formsets]
replace_inline_objects(layout, fs)
if fs:
container = get_first_field(layout, Column)
if not container:
container = get_first_field(layout, Container)
if not container:
container = layout
# fixed #176 bug, change dict to list
for key, value in fs:
container.append(value)
return layout
def get_media(self, media):
for fs in self.formsets:
media = media + fs.media
if self.formsets:
media = media + self.vendor(
'xadmin.plugin.formset.js', 'xadmin.plugin.formset.css')
return media
def _get_detail_formset_instance(self, inline):
formset = inline.instance_form(extra=0, max_num=0, can_delete=0)
formset.detail_page = True
        replace_field_to_value(formset.helper.layout, inline)
        model = inline.model
        opts = model._meta
        fake_admin_class = type(str('%s%sFakeAdmin' % (opts.app_label, opts.model_name)), (object, ), {'model': model})
        for form in formset.forms:
            instance = form.instance
            if instance.pk:
                form.detail = self.get_view(
                    DetailAdminUtil, fake_admin_class, instance)
return formset
class DetailAdminUtil(DetailAdminView):
def init_request(self, obj):
self.obj = obj
self.org_obj = obj
class DetailInlineFormsetPlugin(InlineFormsetPlugin):
def get_model_form(self, form, **kwargs):
self.formsets = [self._get_detail_formset_instance(
inline) for inline in self.inline_instances]
return form
site.register_plugin(InlineFormsetPlugin, ModelFormAdminView)
site.register_plugin(DetailInlineFormsetPlugin, DetailAdminView)
|
|
#########################################
# base.py
#
# Author zrong(zengrong.net)
# Creation 2014-12-04
# Modification 2015-11-22
#########################################
import os
import re
import platform
import shutil
from xmlrpc.client import Fault
from string import Template
from datetime import (datetime, timedelta)
import configparser
from wordpress_xmlrpc import (Client,
WordPressPost, WordPressPage, WordPressTerm, WordPressMedia)
from wordpress_xmlrpc.exceptions import InvalidCredentialsError
from wordpress_xmlrpc.methods.taxonomies import (GetTerms)
from pkg_resources import (resource_filename, resource_string)
from rookout import slog
from rookout.base import (list_dir, read_file, write_file)
from rookout.conf import PYConf
class Conf(object):
TPL_FILE = 'wpcmd.ini.tpl'
PRE_NAME = '_' if platform.system() == 'Windows' else '.'
INI_FILE = PRE_NAME+'wpcmd.ini'
CACHE_FILE = PRE_NAME+'wpcmd.cache.py'
ARTICLE_TYPES = ('post', 'page', 'draft')
def __init__(self, conffile):
self.conffile = conffile
self.ini = configparser.ConfigParser()
self.cache = None
def init(self, workdir):
if os.path.exists(self.conffile):
self.read_from_file()
return True
tplstr = read_file(resource_filename('wpcmd', Conf.TPL_FILE))
inistr = Template(tplstr).substitute({
'CACHEFILE':Conf.CACHE_FILE,
'WORK':workdir,
})
self.save_to_file(inistr)
self.read_from_file()
slog.info('Please modify %s !'%self.conffile)
return False
def init_cache(self, site_name):
self.__site_section = site_name
self.cache = TermCache(self.get_site('cachefile'))
self.cache.init()
def __missing__(self, key):
return None
def __getattr__(self, name):
return self.ini[name]
def get(self, section, option):
return self.ini.get(section, option, raw=True, fallback=None)
def get_site(self, option):
return self.get(self.__site_section, option)
def get_user(self):
return self.get_site('user')
def get_password(self):
return self.get_site('password')
def get_url(self, only_site=False):
url = self.get_site('url')
site = None
if url.endswith('/xmlrpc.php'):
site = url[:-11]
elif url.endswith('/'):
site = url[:-1]
url = url + 'xmlrpc.php'
else:
site = url
url = url + '/xmlrpc.php'
if only_site:
return site
return url
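    # Examples of the normalization in get_url() (hypothetical URLs):
    #   'http://example.com/xmlrpc.php' -> site 'http://example.com'
    #   'http://example.com/'           -> url  'http://example.com/xmlrpc.php'
    #   'http://example.com'            -> url  'http://example.com/xmlrpc.php'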
def save_to_file(self, inistr):
write_file(self.conffile, inistr)
def read_from_file(self):
self.ini.read(self.conffile)
def is_article(self, posttype):
return posttype in Conf.ARTICLE_TYPES
def get_draft(self, name):
"""
There are two kind of draft file in draft directory.
One has published to wordpress and in draft status;
One has not been published to wordpress yet.
"""
draftname = (self.get_site('draftfmt') % str(name))+self.get_site('ext')
return self.get_work_path('draft', draftname), draftname
def get_new_draft(self, name=None):
draftdir = self.get_work_path('draft')
draftnames = list(list_dir(draftdir))
draftfile, draftname = None, None
if name:
draftfile, draftname = self.get_draft(name)
if draftname in draftnames:
                raise WPError('The draft file "%s" already exists!' %
                              draftname)
else:
name = 1
draftfile, draftname = self.get_draft(name)
while os.path.exists(draftfile):
name += 1
draftfile, draftname = self.get_draft(name)
return draftfile, draftname
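    # E.g. with hypothetical settings draftfmt='%s' and ext='.md', names are
    # probed as 1.md, 2.md, ... in the draft directory until a file name
    # that does not exist yet is found.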
def get_article(self, name, posttype):
postname = name+self.get_site('ext')
if self.is_article(posttype):
return self.get_work_path(posttype, postname), postname
return None, None
def get_path(self, name, *path):
workdir = os.path.join(self.get_site('work'), name)
if path:
return os.path.abspath(os.path.join(workdir, *path))
return workdir
def get_work_path(self, dirname, *path):
workpath = self.get_path(self.get_site(dirname))
if not os.path.exists(workpath):
os.makedirs(workpath)
if path:
return os.path.join(workpath, *path)
return workpath
def get_mdfiles(self, posttype):
workpath = self.get_work_path(posttype)
for afile in os.listdir(workpath):
if afile.endswith(self.get_site('ext')):
name = afile.split('.')[0]
filepath = os.path.join(workpath, afile)
yield (posttype, name, filepath)
class Action(object):
def __init__(self, gconf, gtermcache, gargs, gparser):
self.conf = gconf
self.conf.site = gargs.site
self.cache = gtermcache
self.args = gargs
self.parser = gparser
self._wp = None
def get_postid(self, as_list=False):
if not self.args.query:
return None
if as_list:
postids = []
for postid in self.args.query:
match = re.match(r'^(\d+)-(\d+)$', postid)
if match:
a = int(match.group(1))
b = int(match.group(2))
for i in range(a,b+1):
postids.append(str(i))
else:
postids.append(postid)
return postids
return self.args.query[0]
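    # E.g. (hypothetical query) get_postid(as_list=True) expands
    # ['3-5', '9'] to ['3', '4', '5', '9'].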
def get_dict_from_query(self, query):
if query:
d = {}
for v in query:
value = v.split('=')
d[value[0]] = value[1]
return d
return None
def get_term_query(self):
typ = self.args.type
q = self.args.query
query = []
if typ == 'term':
query = q
else:
if typ == 'tag':
typ = 'post_tag'
query.append(typ)
if q and len(q)>0:
query.append(q[0])
return query
def get_terms_from_wp(self, query, force=False):
if not query or len(query)== 0:
slog.error('Please provide a taxonomy name! You can use '
'"show -t tax" to get one.')
return None
taxname = query[0]
slug = query[1] if len(query)>1 else None
terms = self.cache[taxname]
if not terms or force:
results = self.wpcall(GetTerms(taxname))
if results:
self.cache.save_terms(results, taxname)
if terms and slug:
return terms[slug]
return terms
def print_result(self, result):
if isinstance(result, WordPressTerm):
slog.info('id=%s, group=%s, '
'taxnomy_id=%s, name=%s, slug=%s, '
'parent=%s, count=%d',
result.id, result.group,
result.taxonomy_id, result.name, result.slug,
result.parent, result.count)
elif isinstance(result, WordPressPost):
slog.info('id=%s, date=%s, date_modified=%s, '
'slug=%s, title=%s, post_status=%s, post_type=%s',
result.id, str(result.date), str(result.date_modified),
result.slug, result.title,
result.post_status, result.post_type)
elif isinstance(result, WordPressMedia):
slog.info('id=%s, parent=%s, title=%s, '
'description=%s, caption=%s, date_created=%s, link=%s, '
'thumbnail=%s, metadata=%s',
result.id, result.parent, result.title,
result.description, result.caption, str(result.date_created),
result.link,
result.thumbnail, result.metadata)
else:
slog.info(result)
def print_results(self, results):
if isinstance(results, list):
for result in results:
self.print_result(result)
elif isinstance(results, dict):
for k,v in results.items():
slog.info('%s %s'%(k, str(v)))
else:
self.print_result(results)
def get_datetime(self, datestring):
dt = datetime.strptime(datestring, '%Y-%m-%d %H:%M:%S')
return dt - timedelta(hours=8)
def wpcall(self, method):
if not self._wp:
self._wp = Client(self.conf.get_url(),
self.conf.get_user(),
self.conf.get_password())
try:
results = self._wp.call(method)
except InvalidCredentialsError as e:
slog.error(e)
return None
except Fault as e:
slog.error(e)
return None
return results
def go(self):
pass
def build(self):
if self.args.type:
self.go()
elif self.parser:
self.parser.print_help()
class TermCache(PYConf):
""" A cache for terms.
"""
def __init__(self, filepath):
self.cachefile = filepath
def init(self):
if os.path.exists(self.cachefile):
super().read_from_file(self.cachefile)
def save_to_file(self):
super().save_to_file(self.cachefile)
def save_terms(self, terms, taxname):
termdict = PYConf()
for term in terms:
self.save_term(term, taxname, termdict)
self[taxname] = termdict
self.save_to_file()
def save_term(self, term, taxname, termdict=None):
        if termdict is None:
            termdict = self[taxname]
            if termdict is None:
                termdict = PYConf()
                self[taxname] = termdict
termdict[term.slug] = PYConf({
'id':term.id,
'group':term.group,
'taxonomy':term.taxonomy,
'taxonomy_id':term.taxonomy_id,
'name':term.name,
'slug':term.slug,
'description':term.description,
'parent':term.parent,
'count':term.count,
})
def get_term(self, taxname, slug):
if not self[taxname]:
return None
if not self[taxname][slug]:
return None
termdict = self[taxname][slug]
term = WordPressTerm()
term.id = termdict['id']
term.group = termdict['group']
term.taxonomy = termdict['taxonomy']
term.taxonomy_id = termdict['taxonomy_id']
term.name = termdict['name']
term.slug = termdict['slug']
term.description = termdict['description']
term.parent = termdict['parent']
term.count = termdict['count']
return term
def get_terms_from_meta(self, categories, tags):
terms = []
if categories:
for cat in categories:
term = self.get_term('category', cat)
if not term:
slog.error('The category "%s" is not in wordpress.'
' Please create it first.'%cat)
return None
terms.append(term)
if tags:
for tag in tags:
term = self.get_term('post_tag', tag)
if not term:
slog.error('The tag "%s" is not in wordpress.'
'Please create it first'%tag)
return None
terms.append(term)
return terms
|
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
  Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD Style.
from __future__ import print_function
from time import time
import numpy as np
import pylab as pl
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name,
train, test,
coverages, xgrid, ygrid):
"""
create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
points = dict(test=test, train=train)
for label, pts in points.iteritems():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
pl.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
pl.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
pl.xticks([])
pl.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
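        # At this point Z holds the decision function on land points and the
        # sentinel value -9999 on water, with the remaining cells pre-filled
        # with the minimum score, so the contour plot below only shades
        # meaningful (land) predictions.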
# plot contours of the prediction
pl.contourf(X, Y, Z, levels=levels, cmap=pl.cm.Reds)
pl.colorbar(format='%.2f')
# scatter training/testing points
pl.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
pl.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
pl.legend()
pl.title(species.name)
pl.axis('equal')
# Compute AUC w.r.t. background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
pl.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
pl.show()
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
)
from test_framework.messages import (
CBlockHeader,
CInv,
msg_block,
msg_headers,
msg_inv,
)
from test_framework.mininode import (
mininode_lock,
P2PInterface,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-noparkdeepreorg"],
["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
# Node2 will be used for non-whitelisted peers to test the interaction
# with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# min_work_node connects to node1 (whitelisted)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
[n.generatetoaddress(1, n.get_deterministic_priv_key().address)
for n in self.nodes]
tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(
tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info(
"First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(
int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk",
self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(
block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(
block_h2f.sha256, create_coinbase(3), block_h2f.nTime + 1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it is not missing any
# headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(
tip.sha256, create_coinbase(i + 4), tip.nTime + 1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted
# (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found",
self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found",
self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing
# header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because
# it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(
-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info(
"Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk",
self.nodes[0].getblock, all_blocks[287].hash)
self.log.info(
"Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(
all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime + 1)
block_289f.solve()
block_290f = create_block(
block_289f.sha256, create_coinbase(290), block_289f.nTime + 1)
block_290f.solve()
block_291 = create_block(
block_290f.sha256, create_coinbase(291), block_290f.nTime + 1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_tx_with_script(
block_290f.vtx[0], 0, script_sig=b"42", amount=1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(
block_291.sha256, create_coinbase(292), block_291.nTime + 1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger
# reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk",
self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# We should have failed reorg and switched back to 290 (but have block
# 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(
block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked
# off, and expect to get disconnected
block_293 = create_block(
block_292.sha256, create_coinbase(293), block_292.nTime + 1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], self.nodes[1])
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
|
"""
tabimport is a Python utility to ease imports of tabular data from CSV, ODF,
XLS or XLSX files.
Usage:
>>> smart_file = FileFactory(file_path)
>>> for data_line in smart_file:
>>> do_something_with(data_line['header'])
"""
import csv
import logging
import tempfile
from collections import OrderedDict
from datetime import datetime, time
try:
# XLS format
# http://www.lexicon.net/sjmachin/xlrd.html
import xlrd
has_xlrd = True
except ImportError:
has_xlrd = False
try:
# XLSX format
import openpyxl
has_openpyxl = True
except ImportError:
has_openpyxl = False
try:
# ODF format
import ooolib
has_ooolib = True
except ImportError:
has_ooolib = False
from django.utils.translation import gettext as _
class UnsupportedFileFormat(Exception):
pass
class HeaderError(Exception):
pass
class FileFactory:
""" Returns a file object depending on the file format """
def __new__(cls, datafile, **imp_params):
format = cls._sniff_format(datafile)
if format == 'ods':
return ODSImportedFile(datafile, **imp_params)
elif format == 'xls':
return XLSImportedFile(datafile, **imp_params)
elif format == 'xlsx':
return XLSXImportedFile(datafile, **imp_params)
elif format == 'csv':
return CSVImportedFile(datafile, **imp_params)
else:
raise UnsupportedFileFormat(_("Unknown file extension '%s'") % format)
@classmethod
def _sniff_format(cls, dfile):
""" dfile may be a file path or a django (Uploaded)File object """
if isinstance(dfile, str):
format = dfile.rsplit('.', 1)[-1]
else:
if "opendocument.spreadsheet" in dfile.content_type or dfile.name.endswith(".ods"):
format = "ods"
elif dfile.name.endswith(".xlsx"):
format = "xlsx"
elif "excel" in dfile.content_type or dfile.name.endswith(".xls"):
format = "xls"
elif "text/csv" in dfile.content_type or dfile.name.endswith(".csv"):
format = "csv"
else:
raise UnsupportedFileFormat(_("This file is not in an accepted format (ods, xls, csv)"))
return format
class ImportedFile:
""" Abstract class to get file object in different formats """
# Set to True if the external lib does not support file-like objects
force_file_to_disk = False
def __init__(self, datafile, sheet_index=0, skip_lines=None):
""" datafile can be either a path (string) or a file object
sheet_index is the spreadsheet index, if applicable
skip_lines is a list of row indexes to skip """
        # Some internal variables are initialized in activate_sheet so that
        # they are reset whenever a new sheet is activated
self._headers = {} # dict of lists
self._ignored_headers_idx = {} # dict of lists
self.data_sheet_indexes = [sheet_index]
self.skip_lines = skip_lines
self.file_content = None
if isinstance(datafile, str):
self.file_path = datafile
else:
try:
self.file_path = datafile.temporary_file_path()
except AttributeError:
try:
self.file_path = datafile.file.name
except AttributeError:
if self.force_file_to_disk:
# set to self, so it is not removed before the instance is deleted
self.temp_f = tempfile.NamedTemporaryFile()
for data in datafile.chunks():
self.temp_f.write(data)
assert self.temp_f.tell() == datafile.size
self.file_path = self.temp_f.name
else:
self.file_path = None
self.file_content = datafile.file.read()
def __iter__(self):
return self
def __next__(self):
raise NotImplementedError("Abstract class")
def next(self):
# Python 2 compatibility
return type(self).__next__(self)
def get_headers(self):
raise NotImplementedError("Abstract class")
def activate_sheet(self, idx):
self.current_index = idx
self._row_index = 1 # skip first line
def current_sheet_name(self):
raise NotImplementedError("Abstract class")
def check_header_validity(self, possible_headers, mandatory_headers, case_sensitive=False): # TODO: ignore_pattern
""" This method has the side effect of swallowing the first line (headers) of the file """
errors = []
warnings = [] # List of tuples: (sheet name, header name, motive)
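        # Maps a 0-based column index to a spreadsheet-style letter, e.g.
        # 0 -> 'A', 25 -> 'Z', 26 -> 'AA' (valid up to index 701, i.e. 'ZZ').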
def idx_to_header(idx):
            return (idx > 25 and chr(64 + idx // 26) or '') + chr(65 + idx % 26)
for sheet_idx in self.data_sheet_indexes:
self.activate_sheet(sheet_idx)
good_headers = set()
if not case_sensitive:
possible_headers = [h.lower() for h in possible_headers]
for i, h in enumerate(self.get_headers()):
h_norm = case_sensitive and h.strip() or h.strip().lower()
if h_norm == "":
warnings.append(
(self.current_sheet_name(),
"(%s header empty)" % idx_to_header(i),
_("Empty header")))
self._ignored_headers_idx[self.current_index].append(i)
elif h_norm not in possible_headers:
warnings.append((self.current_sheet_name(), h, _("Unknown header")))
self._ignored_headers_idx[self.current_index].append(i)
elif h in good_headers:
errors.append(_("The column '%s' is twice in your file's headers (sheet '%s')") % (h, self.current_sheet_name()))
else:
good_headers.add(h)
for h in mandatory_headers:
if h not in good_headers:
errors.append(_("The header '%s' is mandatory and is missing in sheet '%s' of your file") % (h, self.current_sheet_name()))
self.activate_sheet(0)
if errors:
raise HeaderError("\n".join(errors))
return warnings
class CSVImportedFile(ImportedFile):
encoding = 'latin-1'
def __init__(self, datafile, sheet_index=0, skip_lines=None, **kwds):
        super().__init__(datafile, sheet_index, skip_lines)
if isinstance(datafile, str):
# if datafile is a path, try to open the file
datafile = open(datafile, 'r')
try:
dialect = csv.Sniffer().sniff(datafile.read(2048))
            # The sniffer sometimes returns an empty or bogus delimiter
            # (e.g. '\r'); fall back to ';' in that case
if not dialect.delimiter or dialect.delimiter == '\r':
dialect.delimiter = ";"
except Exception:
dialect = csv.excel
dialect.delimiter = kwds.get('delimiter', ';')
self.delimiter = dialect.delimiter
datafile.seek(0)
self.reader = csv.DictReader(datafile, dialect=dialect, **kwds)
# It may happen that fieldnames are not filled before first line has been read
self._first_line_read = False
self._first_line = None
self.current_index = 0
def get_headers(self):
if not self.current_index in self._headers:
self._ignored_headers_idx[self.current_index] = []
if not self._first_line_read:
self._first_line = next(self.reader)
self._first_line_read = True
self._headers[self.current_index] = self.reader.fieldnames
if not isinstance(self._headers[self.current_index], list):
self._headers[self.current_index] = self._headers[self.current_index].split(self.delimiter)
return self._headers[self.current_index]
def __next__(self):
""" Returns an OrderedDict : {'DESCRIPTOR': value, ...} """
if self._first_line:
row = self._first_line
self._first_line = None
else:
row = next(self.reader)
for key, val in row.items():
row[key] = '' if val is None else val
return row
def current_sheet_name(self):
return "1"
class XLSImportedFile(ImportedFile):
""" XLS reader based on xlrd (multiple sheets not yet implemented)"""
def __init__(self, datafile, sheet_index=0, skip_lines=None):
if not has_xlrd:
raise NotImplementedError("The xlrd library is not available")
super().__init__(datafile, sheet_index, skip_lines)
try:
self.book = xlrd.open_workbook(filename=self.file_path, file_contents=self.file_content)
except xlrd.XLRDError as e:
logging.warn("XLS import error: %s" % str(e))
raise UnsupportedFileFormat(_("Unable to read the file. Are you sure it is an XLS file?"))
self.data_sheet_indexes = [i for i, ws in enumerate(self.book.sheets()) if (ws.nrows > 0 and ws.ncols > 0)]
self.activate_sheet(self.data_sheet_indexes[0])
def get_headers(self):
if not self.current_index in self._headers:
self._headers[self.current_index] = []
self._ignored_headers_idx[self.current_index] = []
row = self.current_sheet.row(0)
for i, cell in enumerate(row):
self._headers[self.current_index].append(str(cell.value).strip())
return self._headers[self.current_index]
def __next__(self):
""" Returns an OrderedDict : {'DESCRIPTOR': value, ...} """
while self.skip_lines and self._row_index in self.skip_lines:
self._row_index += 1
if self._row_index >= self._nrows:
# Increment current_index and skip to next sheet, if any
try:
new_index = self.data_sheet_indexes[self.data_sheet_indexes.index(self.current_index)+1]
except IndexError:
pass
else:
self.activate_sheet(new_index)
return next(self)
raise StopIteration
row_dict = OrderedDict()
row = self.current_sheet.row(self._row_index)
headers = self.get_headers()
for i, cell in enumerate(row):
if i in self._ignored_headers_idx[self.current_index] or i >= len(headers):
continue
if cell.ctype == xlrd.XL_CELL_DATE:
date_tuple = xlrd.xldate_as_tuple(cell.value, self.book.datemode)
if date_tuple[0] == 0:
# No year, probably a time value
value = time(*date_tuple[3:])
else:
value = datetime(*date_tuple)
else:
value = cell.value
row_dict[headers[i]] = value
self._row_index += 1
return row_dict
def activate_sheet(self, idx):
super().activate_sheet(idx)
self.current_sheet = self.book.sheet_by_index(idx)
self._nrows = self.current_sheet.nrows
self._ncols = self.current_sheet.ncols
def current_sheet_name(self):
return self.book.sheet_by_index(self.current_index).name
class XLSXImportedFile(ImportedFile):
""" XLS reader based on xlrd (multiple sheets not yet implemented)"""
def __init__(self, datafile, sheet_index=0, skip_lines=()):
if not has_openpyxl:
raise NotImplementedError("The openpyxl library is not available")
super().__init__(datafile, sheet_index=sheet_index, skip_lines=skip_lines)
try:
self.book = openpyxl.load_workbook(filename=self.file_path)
except Exception as e:
logging.warn("XLSX import error: %s" % str(e))
raise UnsupportedFileFormat(_("Unable to read the file. Are you sure it is an XLSX file?"))
self.data_sheet_indexes = [
idx for idx, ws in enumerate(self.book.worksheets)
if (ws.max_row > 1 and ws.max_column > 1)
]
self.activate_sheet(self.data_sheet_indexes[0])
def get_headers(self):
if not self.current_index in self._headers:
self._headers[self.current_index] = []
self._ignored_headers_idx[self.current_index] = []
row = next(self.current_sheet.rows)
for i, cell in enumerate(row):
self._headers[self.current_index].append(str(cell.value).strip())
return self._headers[self.current_index]
def __next__(self):
""" Returns an OrderedDict : {'DESCRIPTOR': value, ...} """
while self.skip_lines and self._row_index in self.skip_lines:
self._row_index += 1
try:
row = next(self._row_iterator)
except StopIteration:
# Increment current_index and skip to next sheet, if any
try:
new_index = self.data_sheet_indexes[self.data_sheet_indexes.index(self.current_index)+1]
except (IndexError, ValueError):
pass
else:
self.activate_sheet(new_index)
return next(self)
raise
row_dict = OrderedDict()
headers = self.get_headers()
for i, cell in enumerate(row):
if i in self._ignored_headers_idx[self.current_index] or i >= len(headers):
continue
row_dict[headers[i]] = cell.value
self._row_index += 1
return row_dict
def activate_sheet(self, idx):
super().activate_sheet(idx)
self.current_sheet = self.book.worksheets[idx]
self._row_iterator = self.current_sheet.iter_rows(min_row=2)
def current_sheet_name(self):
return self.book.sheetnames[self.current_index]
class ODSImportedFile(ImportedFile):
""" OO Calc reader based on ooolib (multiple sheets not yet implemented)"""
# FIXME: missing multi-sheet import
def __init__(self, datafile, sheet_index=0, skip_lines=None):
if not has_ooolib:
raise NotImplementedError("The ooolib library is not available")
super().__init__(datafile, sheet_index, skip_lines)
book = ooolib.Calc(opendoc=self.file_path)
book.set_sheet_index(sheet_index)
self.current_sheet = book
(self._ncols, self._nrows) = self.current_sheet.get_sheet_dimensions()
def get_headers(self):
if not self.current_index in self._headers:
self._headers[self.current_index] = []
self._ignored_headers_idx[self.current_index] = []
for i in range(self._ncols):
cell_value = self.current_sheet.get_cell_value(i+1, 1)
if cell_value:
self._headers[self.current_index].append(self.current_sheet.get_cell_value(i+1, 1)[1])
else:
logging.warn("Empty header in %s" % self.file_path)
self._headers[self.current_index].append("--empty--")
return self._headers[self.current_index]
def __next__(self):
""" Returns an OrderedDict : {'DESCRIPTOR': value, ...} """
while self.skip_lines and self._row_index in self.skip_lines:
self._row_index += 1
if self._row_index >= self._nrows:
raise StopIteration
row_dict = OrderedDict()
for i in range(self._ncols):
if i in self._ignored_headers_idx[self.current_index]:
continue
cell_value = self.current_sheet.get_cell_value(i+1, self._row_index+1)
if cell_value and cell_value[0] == 'formula' and cell_value[1]:
raise ValueError(_("The ODS file contains formula. Please convert them to raw values before importing the file."))
row_dict[self.get_headers()[i]] = cell_value and cell_value[1] or ""
self._row_index += 1
return row_dict
def current_sheet_name(self):
return "??" # FIXME: self.current_sheet.title
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import deque
from redhat_support_lib.infrastructure.errors import RequestError, \
ConnectionError
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.helpers.confighelper import EmptyValueError
from redhat_support_tool.plugins import InteractivePlugin, DisplayOption, \
ObjectDisplayOption
from redhat_support_tool.helpers import common
from redhat_support_tool.helpers.launchhelper import LaunchHelper
from redhat_support_tool.helpers.genericinteractiveprompt import GenericPrompt
from redhat_support_tool.plugins.list_attachments import ListAttachments
from redhat_support_tool.plugins.add_attachment import AddAttachment
from redhat_support_tool.plugins.add_comment import AddComment
from redhat_support_tool.plugins.kb import Kb
from redhat_support_tool.plugins.modify_case import ModifyCase
from redhat_support_tool.helpers.constants import Constants
import redhat_support_tool.helpers.recommendationprompter as \
recommendationprompter
import pydoc
import redhat_support_tool.helpers.apihelper as apihelper
import logging
import textwrap
__author__ = 'Keith Robertson <kroberts@redhat.com>'
__author__ = 'Spenser Shumaker <sshumake@redhat.com>'
logger = logging.getLogger("redhat_support_tool.plugins.case")
class GetCase(InteractivePlugin):
plugin_name = 'getcase'
ALL = _("Display all cases")
_submenu_opts = None
_sections = None
case = None
case_obj = None
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is an OptionParser built-in. Use it!
'''
return _('%prog CASENUMBER')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
        return _('Use the \'%s\' command to find a specific case '
                 'by number.') % cls.plugin_name
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
return _('Example:\n'
' - %s <case number here>') % (cls.plugin_name)
def get_intro_text(self):
return _('\nType the number of the section to view or \'e\' '
'to return to the previous menu.')
def get_prompt_text(self):
return _('Option: ')
def get_sub_menu_options(self):
return self._submenu_opts
def _check_case_number(self):
msg = _("ERROR: %s requires a case number.")\
% self.plugin_name
self.case = ''
if self._args:
self.case = ' '.join(self._args)
elif common.is_interactive():
line = raw_input(_('Please provide a case number (or \'q\' '
'to exit): '))
line = str(line).strip()
if line == 'q':
raise Exception()
if str(line).strip():
self.case = line
else:
print msg
raise Exception(msg)
else:
print msg
raise Exception(msg)
def validate_args(self):
# Check for required arguments.
self._check_case_number()
def postinit(self):
self._submenu_opts = deque()
self._sections = {}
api = None
try:
api = apihelper.get_api()
self.case_obj = api.cases.get(self.case)
# add the case group info (if it exists) to the case object
self.case_obj.group = None
case_group = self.case_obj.get_folderNumber()
if case_group:
self.case_obj.group = api.groups.get(case_group)
if not self._parse_sections(self.case_obj):
raise Exception()
except EmptyValueError, eve:
msg = _('ERROR: %s') % str(eve)
print msg
logger.log(logging.WARNING, msg)
raise
except RequestError, re:
msg = _('Unable to connect to support services API. '
'Reason: %s') % re.reason
print msg
logger.log(logging.WARNING, msg)
raise
except ConnectionError:
msg = _('Problem connecting to the support services '
'API. Is the service accessible from this host?')
print msg
logger.log(logging.WARNING, msg)
raise
except Exception:
msg = _("Unable to find case")
print msg
logger.log(logging.WARNING, msg)
raise
def non_interactive_action(self):
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
try:
print doc.encode("UTF-8", 'replace')
except Exception, e:
            # There are some truly bizarre errors when you pipe
            # the output from python's 'print' function with sys encoding
            # set to ascii. These errors seem to manifest when you pipe
            # to something like 'more' or 'less'; you'll get encoding errors.
            # Curiously, you don't see them with 'grep' or even when simply
            # piping to the terminal.
logger.log(logging.WARNING, e)
import sys
print doc.encode(sys.getdefaultencoding(),
'replace')
def interactive_action(self, display_option=None):
if display_option.display_text == self.ALL:
doc = u''
for opt in self._submenu_opts:
if opt.display_text != self.ALL:
doc += self._sections[opt]
pydoc.pipepager(doc.encode("UTF-8", 'replace'),
cmd='less -R')
else:
if display_option.display_text == Constants.CASE_GET_ATTACH:
lh = LaunchHelper(ListAttachments)
lh.run('%s' % self.case)
elif display_option.display_text == Constants.CASE_ADD_ATTACH:
lh = LaunchHelper(AddAttachment)
lh.run('-c %s' % self.case)
elif display_option.display_text == Constants.CASE_ADD_COMMENT:
lh = LaunchHelper(AddComment)
lh.run('-c %s' % self.case)
# Check if we need to reload the case as adding comments may
# result in new options for case view.
comments = self.case_obj.get_comments()
if comments is None or len(comments) == 0:
self.postinit()
self.opts_updated = True
elif (display_option.display_text == Constants.CASE_RECOMMENDATIONS
and common.is_interactive()):
lh = LaunchHelper(GenericPrompt)
lh.run('', display_option)
elif (display_option.display_text == Constants.CASE_MODIFY
and common.is_interactive()):
lh = LaunchHelper(ModifyCase)
lh.run('%s' % self.case)
else:
doc = self._sections[display_option]
pydoc.pipepager(doc.encode("UTF-8", 'replace'), cmd='less -R')
def _parse_sections(self, case):
'''
Find available sections, format, and put in dictionary.
'''
try:
# Info (all cases should have this):
doc = u''
doc += '\n%s%s%s\n' % (Constants.BOLD,
Constants.CASE_DETAILS,
Constants.END)
doc += '%s%s%s\n' % (Constants.BOLD,
str(self.ruler * Constants.MAX_RULE),
Constants.END)
doc += '%-20s %-40s\n' % (Constants.CASE_NUMBER,
case.get_caseNumber())
doc += '%-20s %-40s\n' % (Constants.CASE_TYPE,
case.get_type())
doc += '%-20s %-40s\n' % (Constants.CASE_SEVERITY,
case.get_severity())
doc += '%-20s %-40s\n' % (Constants.CASE_STATUS,
case.get_status())
doc += '%-20s %-40s\n\n' % (Constants.CASE_AID,
case.get_alternateId())
doc += '%-20s %-40s\n' % (Constants.CASE_PROD,
case.get_product())
doc += '%-20s %-40s\n' % (Constants.CASE_VER,
case.get_version())
if case.get_entitlement() is None:
doc += '%-20s %-40s\n' % (Constants.CASE_SLA, ' ')
else:
doc += '%-20s %-40s\n' % (Constants.CASE_SLA,
case.get_entitlement().get_sla())
doc += '%-20s %-40s\n' % (Constants.CASE_OWNER,
case.get_contactName())
doc += '%-20s %-40s\n\n' % (Constants.CASE_RHOWN,
case.get_owner())
if case.group:
doc += '%-20s %-40s\n' % (Constants.CASE_GRP,
case.group.get_name())
else:
doc += '%-20s %-40s\n' % (Constants.CASE_GRP, 'None')
doc += '%-20s %-40s\n' % (Constants.CASE_OPENED,
common.iso8601tolocal(case.get_createdDate()))
doc += '%-20s %-40s\n' % (Constants.CASE_OPENEDBY,
case.get_createdBy())
doc += '%-20s %-40s\n' % (Constants.CASE_UPDATED,
common.iso8601tolocal(case.get_lastModifiedDate()))
doc += '%-20s %-40s\n\n' % (Constants.CASE_UPDATEDBY,
case.get_lastModifiedBy())
doc += '%-20s %-40s\n\n' % (Constants.CASE_SUMMARY,
case.get_summary())
disp_opt = DisplayOption(Constants.CASE_DETAILS,
'interactive_action')
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
if common.is_interactive():
disp_opt = DisplayOption(Constants.CASE_MODIFY,
'interactive_action')
self._submenu_opts.append(disp_opt)
# Description
des = case.get_description()
if des is not None:
doc = u''
doc += '\n%s%s%s\n' % (Constants.BOLD,
Constants.CASE_DESCRIPTION,
Constants.END)
doc += '%s%s%s\n' % (Constants.BOLD,
str(self.ruler * Constants.MAX_RULE),
Constants.END)
doc += '%s\n' % des
disp_opt = DisplayOption(Constants.CASE_DESCRIPTION,
'interactive_action')
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
# Comments
            commentAry = case.get_comments()
            if commentAry is not None and len(commentAry) > 0:
                num_comments = len(commentAry)
doc = u''
doc += '\n%s%s%s\n' % (Constants.BOLD,
Constants.CASE_DISCUSSION,
Constants.END)
doc += '%s%s%s\n' % (Constants.BOLD,
str(self.ruler * Constants.MAX_RULE),
Constants.END)
for i, cmt in enumerate(commentAry):
cmt_type = 'private'
if cmt.get_public():
cmt_type = 'public'
doc += '%-20s #%s %s(%s)%s\n' % \
(Constants.COMMENT, num_comments-i,
Constants.BOLD if cmt_type == 'private' else
Constants.END, cmt_type, Constants.END)
doc += '%-20s %-40s\n' % (Constants.CASE_CMT_AUTHOR,
cmt.get_lastModifiedBy())
doc += '%-20s %-40s\n\n' % (Constants.CASE_CMT_DATE,
common.iso8601tolocal(cmt.get_lastModifiedDate()))
doc += cmt.get_text()
doc += '\n\n%s%s%s\n\n' % (Constants.BOLD,
str('-' * Constants.MAX_RULE),
Constants.END)
disp_opt = DisplayOption(Constants.CASE_DISCUSSION,
'interactive_action')
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
recommendAry = case.get_recommendations()
if recommendAry is not None and len(recommendAry) > 0:
doc = u''
doc += '\n%s%s%s\n' % (Constants.BOLD,
Constants.CASE_RECOMMENDATIONS,
Constants.END)
doc += '%s%s%s\n' % (Constants.BOLD,
str(self.ruler * Constants.MAX_RULE),
Constants.END)
# For de-duplication this is now in a helper module,
# generate_metadata will return the formatted doc for non-
# interactive prompts, plus the prompt for interactive users.
disp_opt, recdoc = recommendationprompter.generate_metadata(
recommendAry)
doc += recdoc
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = doc
# Get Attachments
disp_opt = DisplayOption(Constants.CASE_GET_ATTACH,
'interactive_action')
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = Constants.CASE_GET_ATTACH
# Add Attachment
disp_opt = DisplayOption(Constants.CASE_ADD_ATTACH,
'interactive_action')
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = Constants.CASE_ADD_ATTACH
# Comment
disp_opt = DisplayOption(Constants.CASE_ADD_COMMENT,
'interactive_action')
self._submenu_opts.append(disp_opt)
self._sections[disp_opt] = Constants.CASE_ADD_COMMENT
except Exception:
msg = _('ERROR: problem parsing the cases.')
print msg
logger.log(logging.WARNING, msg)
return False
return True
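# Editor's note: like the other plugins imported above, this one is driven
# through LaunchHelper, e.g. LaunchHelper(GetCase).run('0123456') from another
# plugin, or interactively via the 'getcase CASENUMBER' command (see
# get_usage above). The case number shown here is illustrative.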
|
|
import os
import time
import random
import threading
import socket
from TCAction import TCActionBase
from NativeLog import NativeLog
from NativeLog import ThroughputResult
from Utility import RSSICalibrator
LOG_FOLDER = os.path.join("Performance", "Throughput")
AP_PROP_KEY = ("ssid", "password", "apc")
class SendThread(threading.Thread):
def __init__(self, sock, send_len):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
self.send_len = send_len
self.exit_event = threading.Event()
self.calc_event = threading.Event()
self.bytes_sent = 0
pass
def start_calc(self):
self.calc_event.set()
def stop_calc(self):
self.calc_event.clear()
self.exit_event.set()
def run(self):
data = "A" * self.send_len
if self.sock is None:
return
while True:
if self.exit_event.isSet() is True:
break
try:
self.sock.send(data)
except StandardError:
break
if self.calc_event.isSet() is True:
self.bytes_sent += self.send_len
def get_bytes_sent(self):
return self.bytes_sent
pass
class RecvThread(threading.Thread):
def __init__(self, sock):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
self.exit_event = threading.Event()
self.calc_event = threading.Event()
self.bytes_recv = 0
def start_calc(self):
self.calc_event.set()
def stop_calc(self):
self.calc_event.clear()
self.exit_event.set()
def run(self):
if self.sock is None:
return
while True:
if self.exit_event.isSet() is True:
break
try:
data = self.sock.recv(8 * 1024)
except StandardError:
break
if self.calc_event.isSet() is True:
self.bytes_recv += len(data)
def get_bytes_recv(self):
return self.bytes_recv
pass
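# Editor's sketch (not part of the original test): pairing SendThread and
# RecvThread over a local socketpair to show the measurement pattern used in
# TestCase below; POSIX-only, and the 1460-byte payload is illustrative.
def _throughput_selfcheck(send_len=1460, period=1.0):
    sock_a, sock_b = socket.socketpair()
    # timeouts make both threads fall out of their send/recv loops once the
    # measurement window is over
    sock_a.settimeout(period)
    sock_b.settimeout(period)
    tx = SendThread(sock_a, send_len)
    rx = RecvThread(sock_b)
    tx.start()
    rx.start()
    tx.start_calc()
    rx.start_calc()
    time.sleep(period)
    tx.stop_calc()
    rx.stop_calc()
    tx.join()
    rx.join()
    sock_a.close()
    sock_b.close()
    # bytes -> megabits per second, same conversion as in TestCase.execute
    return float(rx.get_bytes_recv() * 8) / (period * 1000000)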
class TestCase(TCActionBase.CommonTCActionBase):
def __init__(self, test_case, test_env, timeout=30, log_path=TCActionBase.LOG_PATH):
TCActionBase.CommonTCActionBase.__init__(self, test_case, test_env, timeout=timeout, log_path=log_path)
self.performance_folder_path = log_path
self.att_test_list = range(60)
cmd_set = test_case["cmd set"]
# load param from excel
for i in range(1, len(cmd_set)):
if cmd_set[i][0] != "dummy":
cmd_string = "self." + cmd_set[i][0]
exec cmd_string
self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
pass
def execute(self):
TCActionBase.TCActionBase.execute(self)
self.result_cntx.start()
try:
# configurable params
ap_list = self.get_parameter("shield_box_ap_list")
pc_ip = self.get_parameter("pc_ip")
send_len = self.send_len
att_test_list = self.att_test_list
tx_enable = self.tx_enable
rx_enable = self.rx_enable
measure_period = self.measure_period
# configurable params
except StandardError, e:
NativeLog.add_trace_critical("Error configuration for TCPThroughput script, error is %s" % e)
raise StandardError("Error configuration")
tcp_port = random.randint(40000, 50000)
# init throughput result data
test_item = ""
if tx_enable is True:
test_item += "Tx"
if rx_enable is True:
test_item += "Rx"
if test_item == "":
raise StandardError("no throughput test item")
folder_path = os.path.join(self.performance_folder_path, LOG_FOLDER)
file_name = os.path.join(folder_path,
"TCPThroughput_%s_%s" % (test_item, time.strftime("%d%H%M%S", time.localtime())))
result = ThroughputResult.ThroughputResult(file_name, standard_required=True)
# restart before executing throughput
checker_stings = ["R SSC1 C !!!ready!!!"]
test_action_string = ["SSC SSC1 reboot"]
fail_string = "Fail, Fail to reboot"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
self.result_cntx.set_result("Fail")
return
# disable recv print during throughput test
checker_stings = ["R SSC1 C +RECVPRINT"]
test_action_string = ["SSC SSC1 soc -R -o 0"]
fail_string = "Fail, Fail to disable recv print"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
self.result_cntx.set_result("Fail")
return
ret = True
for ap_prop in ap_list:
ssid = ap_prop[0]
password = ap_prop[1]
apc = ap_prop[2]
if ap_prop[1] == "":
# set a default string for open ap
password = "1"
# switch off all outlet, switch on AP outlet
outlet_config_dict = dict.fromkeys(range(1, 9), "OFF")
outlet_config_dict[apc] = "ON"
apc_cmd = "APC <APC1>"
for outlet in outlet_config_dict:
apc_cmd += " %s %s" % (outlet_config_dict[outlet], outlet)
checker_stings = ["P PC_COM L OK"]
fail_string = "Fail, Fail to switch apc"
if self.load_and_exe_one_step(checker_stings, [apc_cmd], fail_string) is False:
ret = False
break
# wait AP ready
time.sleep(20)
# create server
server_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
server_sock.bind((pc_ip, tcp_port))
server_sock.settimeout(5)
server_sock.listen(5)
if tx_enable is True:
result.add_test_item(ssid + "_tx")
if rx_enable is True:
result.add_test_item(ssid + "_rx")
# create RSSI Calibrator
calibrator = RSSICalibrator.Calibrator()
for att_value in att_test_list:
# step 0 set att value
checker_stings = ["R PC_COM L OK"]
test_action_string = ["ATT <att_port> %s" % att_value]
fail_string = "Fail, Fail to set att value"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
# step 1 get AP RSSI
checker_stings = ["R SSC1 A <rssi>:\+SCAN:%s,[:\d\w]+,\d+,\d+,([-\d]+)" % ssid]
test_action_string = ["SSC SSC1 sta -S -s %s" % ssid]
fail_string = "Fail, Fail to scan"
rssi = scan_count = 0
for i in range(3):
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
continue
rssi += int(self.test_env.get_variable_by_name("rssi")[1])
scan_count += 1
rssi = calibrator.calibrate_rssi(float(rssi) / scan_count if scan_count > 0 else 0, att_value)
# step 2 connect to AP
checker_stings = ["R SSC1 C +JAP:CONNECTED"]
test_action_string = ["SSC SSC1 sta -C -s %s -p %s" % (ssid, password)]
fail_string = "Fail, Fail to JAP"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string,
check_freq=1, check_time=30) is False:
if rssi < -89:
continue
else:
ret = False
break
# step 3 close all connections
checker_stings = ["R SSC1 C +CLOSEALL"]
test_action_string = ["SSC SSC1 soc -T"]
fail_string = "Fail, Fail to close socket"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
# step 4 create tcp connection
checker_stings = ["R SSC1 A <client_sock>:\+BIND:(\d+),OK"]
test_action_string = ["SSC SSC1 soc -B -t TCP"]
fail_string = "Fail, Fail to bind"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
checker_stings = ["P SSC1 RE \+CONNECT:\d+,OK"]
test_action_string = ["SSC SSC1 soc -C -s <client_sock> -i %s -p %s" % (pc_ip, tcp_port)]
fail_string = "Fail, Fail to connect socket"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
ret = False
break
# continue
try:
sock, addr = server_sock.accept()
except socket.error, e:
NativeLog.add_trace_critical("%s" % e)
continue
sock.settimeout(measure_period)
# step 5 do throughput test
send_thread = SendThread(sock if rx_enable is True else None, send_len)
send_thread.start()
recv_thread = RecvThread(sock if tx_enable is True else None)
recv_thread.start()
if tx_enable is True:
# do send from target
test_action_string = ["SSC SSC1 soc -S -s <client_sock> -l %s -n 10000000" % send_len]
fail_string = "Fail, Fail to send"
if self.load_and_exe_one_step([], test_action_string, fail_string) is False:
pass
# start throughput calculate
send_thread.start_calc()
recv_thread.start_calc()
# sleep for measure period
time.sleep(measure_period)
# stop throughput calculate
send_thread.stop_calc()
recv_thread.stop_calc()
send_thread.join()
recv_thread.join()
sock.close()
# output throughput result
# in Mbps
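                # e.g. 1,250,000 bytes sent during a 1-second measure_period
                # -> 1250000 * 8 / (1 * 1000000) = 10 Mbps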
if send_thread.get_bytes_sent() > 0:
result.log_throughput(ssid + "_rx", rssi, att_value,
float(send_thread.get_bytes_sent() * 8) / (measure_period * 1000000))
if recv_thread.get_bytes_recv() > 0:
result.log_throughput(ssid + "_tx", rssi, att_value,
float(recv_thread.get_bytes_recv() * 8) / (measure_period * 1000000))
result.output_to_file()
pass
server_sock.close()
            if ret:
                NativeLog.add_trace_critical("Test SUC for %s" % ssid)
            else:
                NativeLog.add_trace_critical("Test FAIL for %s!!!" % ssid)
if ret:
self.result_cntx.set_result("Succeed")
else:
self.result_cntx.set_result("Fail")
# finally, execute done
def result_check(self, port_name, data):
TCActionBase.CommonTCActionBase.result_check(self, port_name, data)
self.result_cntx.append_data(port_name, data)
def main():
pass
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import time
import generate_data
import numpy as np
import captcha_input as cap_in
import cnn_model as model
import tensorflow as tf
from tensorflow.contrib import slim
ALPHABET = generate_data.ALPHABET
CAPTCHA_SIZE = generate_data.CAPTCHA_SIZE
def get_model_params(app_args):
"""
Creating ModelParams object.
"""
if app_args.data_format == "NCHW":
data_format = "channels_first"
else:
data_format = "channels_last"
model_params = model.ModelParams(
filters_counts=app_args.filters_counts,
conv_ksizes=app_args.conv_ksizes,
conv_strides=app_args.conv_strides,
pool_ksizes=app_args.pool_ksizes,
pool_strides=app_args.pool_strides,
fc_sizes=app_args.fc_sizes,
drop_rates=app_args.drop_rates,
data_format=data_format)
return model_params
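# Editor's note: "NCHW" ([batch, channels, height, width], a.k.a.
# channels_first) tends to be faster on GPUs with cuDNN, while "NHWC"
# ([batch, height, width, channels], channels_last) is the TensorFlow CPU
# default; get_model_params above only translates between the two namings.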
def train(app_args):
"""
Train model for a number of steps.
"""
with tf.Graph().as_default() as graph:
coordinator = tf.train.Coordinator()
manager = cap_in.CaptchaDataManager(app_args.batch_size,
CAPTCHA_SIZE, app_args.hdf5_dir,
coordinator, app_args.data_format)
# Build a Graph that computes the logits predictions
model_params = get_model_params(app_args)
with tf.device("/CPU:0"):
images, labels = manager.dequeue()
im_shape = images.get_shape().as_list()[1:4]
images = tf.placeholder_with_default(images, ([None] + im_shape),
name="images")
labels = tf.placeholder_with_default(labels,
[None, CAPTCHA_SIZE,
len(ALPHABET)],
name="labels")
logits = model.cnn_model(images, model_params)
logits = tf.reshape(logits, tf.shape(labels))
tf.add_to_collection('inputs', images)
tf.add_to_collection('inputs', labels)
tf.add_to_collection("logits", logits)
# Calculate loss.
loss = tf.losses.softmax_cross_entropy(labels, logits)
loss = tf.losses.get_total_loss()
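        # (softmax_cross_entropy registers its loss with the tf.losses
        # collection; get_total_loss then sums it with any regularization
        # losses registered elsewhere in the graph)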
# Set learning rate and optimizer
opt = tf.train.AdamOptimizer(app_args.init_lr)
# Define ops
train_op = slim.learning.create_train_op(loss, opt)
prediction = tf.argmax(logits, 2)
actual = tf.argmax(labels, 2)
equal = tf.equal(tf.cast(prediction, tf.int32),
tf.cast(actual, tf.int32))
accuracy = tf.reduce_sum(tf.cast(equal, tf.int32), 1)
accuracy = tf.divide(accuracy, CAPTCHA_SIZE, name="accuracy")
        init_op = tf.global_variables_initializer()
tf.summary.scalar("Loss", loss)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(app_args.log_dir, graph)
saver = tf.train.Saver()
with tf.Session() as session:
session.run(init_op)
start_time = time.time()
threads = manager.start_threads(session)
for step in xrange(1, app_args.max_steps + 1):
if step % app_args.log_frequency != 0:
session.run(train_op)
else:
run_options = tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
                    _, loss_value, precision, summary = session.run(
                        [train_op, loss, accuracy, summary_op],
                        options=run_options,
                        run_metadata=run_metadata)
if (step % app_args.save_summary_steps == 0):
summary_writer.add_run_metadata(run_metadata,
"step%d" % step)
summary_writer.add_summary(summary, step)
current_time = time.time()
duration = current_time - start_time
start_time = current_time
examples_per_sec = int(app_args.log_frequency *
app_args.batch_size / duration)
sec_per_batch = float(duration /
app_args.log_frequency)
                    print(
                        "Step = %d Loss = %f Precision = %f"
                        " Samples per sec = %d"
                        " Sec per batch = %f" %
                        (step, loss_value,
                         (np.sum(precision) / float(app_args.batch_size)),
                         examples_per_sec, sec_per_batch))
if (step % app_args.save_checkpoint_steps == 0 or
step == app_args.max_steps):
checkpoint_file = os.path.join(app_args.log_dir,
"model.ckpt")
saver.save(session, checkpoint_file, step)
print("Checkpoint saved")
session.run(manager.queue.close(cancel_pending_enqueues=True))
coordinator.request_stop()
coordinator.join(threads)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--hdf5-dir",
help="Path to the train directory",
default="train_data")
parser.add_argument("--log-dir",
help="Path to the directory, where log will write",
default="cnn_train")
parser.add_argument("--max-steps", type=int,
help="Number of batches to run",
default=100000)
parser.add_argument("--batch-size", type=int,
help="Number of images to process in a batch",
default=128)
parser.add_argument("--init-lr", type=float,
help="Start value for learning rate",
default=1e-3)
parser.add_argument("--log-frequency", type=int,
help="How often to log results to the console",
default=10)
parser.add_argument("--save-checkpoint-steps", type=int,
help="How often to save checkpoint",
default=1000)
parser.add_argument("--save-summary-steps", type=int,
help="How often to save summary",
default=100)
parser.add_argument("--filters-counts", nargs="+", type=int,
help="List of filter counts for each conv layer",
default=[64, 96, 384])
parser.add_argument("--conv-ksizes", nargs="+", type=int,
help="List of kernel sizes for each conv layer",
default=[5, 5, 5])
parser.add_argument("--conv-strides", nargs="+", type=int,
help="List of strides for each conv layer",
default=[])
parser.add_argument("--pool-ksizes", nargs="+", type=int,
help="List of kernel sizes for each pool layer",
default=[2])
parser.add_argument("--pool-strides", nargs="+", type=int,
help="List of strides for each pool layer",
default=[2])
parser.add_argument("--fc-sizes", nargs="+", type=int,
help="List of sizes for each fc layer",
default=[2048, 1024, CAPTCHA_SIZE * len(ALPHABET)])
parser.add_argument("--drop-rates", nargs="+", type=int,
help="List of probs for each conv and fc layer",
default=[])
parser.add_argument("--data-format",
help="Data format: NCHW or NHWC",
default="NHWC")
app_args = parser.parse_args()
if tf.gfile.Exists(app_args.log_dir):
tf.gfile.DeleteRecursively(app_args.log_dir)
tf.gfile.MakeDirs(app_args.log_dir)
tf.logging.set_verbosity("DEBUG")
train(app_args)
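# Example invocation (editor's note; the script filename is hypothetical):
#   python train_captcha.py --hdf5-dir train_data --log-dir cnn_train \
#       --batch-size 128 --max-steps 100000 --data-format NHWC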
|
|
# -*- coding: utf-8 -*-
"""
Models
======
Creates a series of relevant Peewee models.
Ex: Game events
http://www.nhl.com/scores/htmlreports/20132014/PL021195.HTM
"""
import logging
from datetime import datetime, timedelta
from .version import __version__
from peewee import BooleanField, CharField, DateField, DateTimeField, \
ForeignKeyField, IntegerField, TextField, Model, Proxy
logger = logging.getLogger(__name__)
logger.debug('Loading {} ver {}'.format(__name__, __version__))
# Order should be the order these tables are created.
MODELS = [
'Arena',
'League',
'SeasonType',
'Season',
'Conference',
'Division',
'Team',
'Schedule',
'Player',
'PlayerSkaterStat',
'PlayerGoalieStat',
'Roster',
'Coach',
'Game',
'Lineup',
'Event',
'EventPlayer'
]
db_proxy = Proxy()
class BaseModel(Model):
class Meta:
database = db_proxy
class Arena(BaseModel):
"""
Represents a venue in which a game is played.
"""
name = CharField()
street = CharField()
city = CharField()
state = CharField()
country = CharField()
postal_code = CharField()
# TODO: find out how we can get capacity data
capacity = IntegerField(null=True)
class Meta:
db_table = 'arenas'
order_by = ('name',)
def __unicode__(self):
return self.name
class League(BaseModel):
name = CharField()
abbreviation = CharField()
class Meta:
db_table = 'leagues'
order_by = ('name',)
def __unicode__(self):
return self.name
class SeasonType(BaseModel):
"""
Represents a season type within a league. Such as preseason, regular or
playoffs. These are in relation to a league because each league can have
    arbitrary identifiers (`external_id`) for each season type.
"""
league = ForeignKeyField(League, related_name='season_types')
name = CharField()
external_id = CharField(null=True)
class Meta:
db_table = 'season_types'
def __unicode__(self):
return self.name
class Season(BaseModel):
league = ForeignKeyField(League, related_name='seasons',
on_delete='CASCADE', on_update='CASCADE')
year = CharField()
type = ForeignKeyField(SeasonType, related_name='seasons')
class Meta:
db_table = 'seasons'
order_by = ('league', 'year')
indexes = (
# create a unique on league/year/type
(('league', 'year', 'type'), True),
)
def __unicode__(self):
return self.year
class Conference(BaseModel):
league = ForeignKeyField(League, related_name='conferences',
on_delete='CASCADE', on_update='CASCADE')
name = CharField(unique=True)
class Meta:
db_table = 'conferences'
order_by = ('name',)
def __unicode__(self):
return self.name
class Division(BaseModel):
conference = ForeignKeyField(Conference, related_name='divisions',
on_delete='CASCADE', on_update='CASCADE')
name = CharField(unique=True)
class Meta:
db_table = 'divisions'
order_by = ('name',)
def __unicode__(self):
return self.name
class Team(BaseModel):
division = ForeignKeyField(Division, related_name='teams')
city = CharField()
name = CharField()
code = CharField()
url = CharField()
class Meta:
db_table = 'teams'
order_by = ('city', 'name')
def __unicode__(self):
return '{} {}'.format(self.city, self.name)
class Schedule(BaseModel):
season = ForeignKeyField(Season, related_name='scheduled_games',
on_delete='CASCADE', on_update='CASCADE')
day = IntegerField()
game = IntegerField()
date = DateField()
home = ForeignKeyField(Team, related_name='scheduled_home_games',
on_delete='CASCADE', on_update='CASCADE')
road = ForeignKeyField(Team, related_name='scheduled_road_games',
on_delete='CASCADE', on_update='CASCADE')
class Meta:
db_table = 'schedules'
order_by = ['season', 'game']
def __unicode__(self):
# pylint: disable=no-member
return '{} {} Day {} Game {}'.format(self.season.league.name,
self.season.type.name,
self.day,
self.game)
# pylint: enable=no-member
class Player(BaseModel):
SHOOTS = [('L', 'Left'),
('R', 'Right')]
name = CharField()
no = IntegerField()
pos = CharField()
shoots = CharField(choices=SHOOTS, verbose_name='Shoots/Catches')
dob = DateField(verbose_name='Date of Birth')
pob = CharField(verbose_name='Place of Birth')
height = IntegerField()
weight = IntegerField()
salary = IntegerField(null=True)
seasons = IntegerField(default=0)
drafted = CharField(null=True)
signed = CharField(null=True)
assets = TextField(null=True)
flaws = TextField(null=True)
potential = CharField(null=True)
status = CharField()
class Meta:
db_table = 'players'
order_by = ('name',)
def __unicode__(self):
return self.name
@property
def height_imperial(self):
if self.height:
            return '{}\'{}"'.format(self.height // 12, self.height % 12)
return 'N/A'
@property
def height_metric(self):
if self.height:
return '{}cm'.format(round(self.height * 2.54))
return 'N/A'
@property
def weight_imperial(self):
if self.weight:
return '{}lbs'.format(self.weight)
return 'N/A'
@property
def weight_metric(self):
if self.weight:
return '{}kg'.format(round(self.weight * 0.453592))
return 'N/A'
class PlayerSkaterStat(BaseModel):
player = ForeignKeyField(Player, related_name='skater_stats')
season = ForeignKeyField(Season, related_name='skater_stats')
team = ForeignKeyField(Team, related_name='skater_stats')
gp = IntegerField(null=True, verbose_name='GP')
g = IntegerField(null=True)
a = IntegerField(null=True)
pts = IntegerField(null=True, verbose_name='PTS')
pm = IntegerField(null=True, verbose_name='+/-')
pim = IntegerField(null=True, verbose_name='PIM')
ppg = IntegerField(null=True, verbose_name='PPG')
shg = IntegerField(null=True, verbose_name='SHG')
gwg = IntegerField(null=True, verbose_name='GWG')
shots = IntegerField(null=True)
class Meta:
db_table = 'player_skater_stats'
order_by = ('season', 'team', 'pts')
@property
def ptspgp(self):
"""Points per game played"""
if self.gp and self.pts:
return '{:.2f}'.format(float(self.pts) / self.gp)
return None
@property
def shotpct(self):
"""Shooting percentage"""
if self.shots and self.g:
return '{:.1f}'.format((float(self.g) / self.shots) * 100)
return None
class PlayerGoalieStat(BaseModel):
player = ForeignKeyField(Player, related_name='goalie_stats')
season = ForeignKeyField(Season, related_name='goalie_stats')
team = ForeignKeyField(Team, related_name='goalie_stats')
gpi = IntegerField(null=True, verbose_name='GPI')
w = IntegerField(null=True)
l = IntegerField(null=True)
t = IntegerField(null=True)
otl = IntegerField(null=True, verbose_name='OTL')
min = IntegerField(null=True)
so = IntegerField(null=True, verbose_name='SO')
ga = IntegerField(null=True, verbose_name='GA')
sha = IntegerField(null=True, verbose_name='SHA')
class Meta:
db_table = 'player_goalie_stats'
order_by = ('-season', 'team', 'gpi')
@property
def gaa(self):
"""Goals against average"""
if self.ga and self.min:
return '{:.2f}'.format(self.ga / (self.min / 60.0))
return None
@property
def svpct(self):
"""save percentage"""
if self.ga and self.sha:
return '{:.3f}'.format(1 - self.ga / float(self.sha))
        return None
class Roster(BaseModel):
"""
Represents a team's roster for a specific season. The relationship between
a team and a player.
"""
season = ForeignKeyField(Season, related_name='roster')
team = ForeignKeyField(Team, related_name='roster')
player = ForeignKeyField(Player, related_name='rosters')
no = IntegerField()
class Meta:
db_table = 'rosters'
class Coach(BaseModel):
name = CharField()
class Meta:
db_table = 'coaches'
order_by = ('name',)
def __unicode__(self):
return self.name
class Game(BaseModel):
season = ForeignKeyField(Season, related_name='games')
arena = ForeignKeyField(Arena, related_name='games', null=True)
attendence = IntegerField(null=True)
home = ForeignKeyField(Team, related_name='home_games')
road = ForeignKeyField(Team, related_name='road_games')
report_id = CharField(null=True)
start = DateTimeField()
end = DateTimeField(null=True)
class Meta:
db_table = 'games'
order_by = ('start',)
def __repr__(self):
# pylint: disable=no-member
return '{} at {} - {}(Game ID: {})'.format(
self.road.name.encode('utf-8'),
self.home.name.encode('utf-8'),
self.start.strftime('%A, %d. %B %Y %H:%M %Z'),
self.report_id if self.report_id else 'Unknown'
)
# pylint: enable=no-member
@classmethod
def get_active_games(cls):
"""
Returns only games that are currently being played.
- The start is before now
        - The difference between now and start is <= 24 hours
- There is no end marked for the game.
"""
return cls.select().where(
(Game.start <= datetime.now()) &
(Game.start >= (datetime.now() - timedelta(days=1))) &
Game.end.is_null(True)
)
@classmethod
def get_orphaned_games(cls):
"""
        Gets games that should be, but presumably never will be,
        marked finished. Same as get_active_games, except:
- The difference between now and start is > 24 hours
"""
return cls.select().where(
(Game.start <= datetime.now()) &
(Game.start < (datetime.now() - timedelta(days=1))) &
Game.end.is_null(True)
)
@classmethod
def get_games_in_date_range(cls, start=None, end=None):
"""
Returns only games that start between the start and
end dates, or either can be None to make the query
open-ended on one side.
"""
return cls.select().where((Game.start >= start) & (Game.start <= end))
class Lineup(BaseModel):
"""
    Represents a team's lineup for a specific game, including scratched
    players.
"""
game = ForeignKeyField(Game)
team = ForeignKeyField(Team)
    player = ForeignKeyField(Player)
scratched = BooleanField(default=False)
class Meta:
db_table = 'lineups'
class Event(BaseModel):
"""
Represents an event that occured within a game.
:param game: Game in which event occured.
    :type game: Game
:param team: Team responsible for event.
:type team: Team
:param number: Number associated with event.
:type number: integer
:param period: Period in which the event occured.
:type period: integer
:param strength: Strength the current team had during event.
:type strength: string
    :param elapsed: Amount of time elapsed in the period (in seconds).
:type elapsed: integer
:param remaining: Amount of time remaining in period (in seconds).
:type remaining: integer
:param type: The type of event that occured.
:type type: string
:param zone: The zone in which the event occured.
:type zone: string or None
:param description: A description of the event.
:type description: string or None
:param player1: Primary player involved in event (ex. goal scorer).
:type player1: Player or None
:param player2: Secondary player involved in event (ex. primary assist).
:type player2: Player or None
:param player3: Third player involved in an event (ex. secondary assist).
:type player3: Player or None
:param shot_type: The type of shot taken (if any).
:type shot_type: string or None
:param distance: Distance from opponents net.
:type distance: integer or None
:param penalty: Type of penalty taken (if any).
:type penalty: string or None
:param penalty_minutes: Amount of penalty minutes given for penalty.
:type penalty_minutes: integer or None
"""
STRENGTHS = [('ev', 'Even Strength'),
('pp', 'Power Play'),
('sh', 'Short Handed')]
EVENT_TYPES = [('block', 'Blocked Shot'),
('end', 'End of Period'),
('face', 'Faceoff'),
('give', 'Giveaway'),
('goal', 'Goal'),
('hit', 'Hit'),
('miss', 'Missed Shot'),
('penalty', 'Penalty'),
('shot', 'Shot on Net'),
('start', 'Start of Period'),
('stop', 'Stoppage'),
                   ('take', 'Takeaway')]
ZONES = [('home', 'Home'),
('neutral', 'Neutral'),
('road', 'Road')]
SHOT_TYPES = [('slap', 'Slap Shot'),
('snap', 'Snap Shot'),
('wrist', 'Wrist Shot')]
game = ForeignKeyField(Game, related_name='events')
team = ForeignKeyField(Team, null=True)
number = IntegerField(verbose_name='#')
period = IntegerField()
    strength = CharField(choices=STRENGTHS, default='ev')
elapsed = IntegerField()
remaining = IntegerField()
type = CharField(choices=EVENT_TYPES)
zone = CharField(choices=ZONES, null=True)
description = CharField(null=True)
player1 = ForeignKeyField(Player, null=True, related_name='player1_events')
player2 = ForeignKeyField(Player, null=True, related_name='player2_events')
player3 = ForeignKeyField(Player, null=True, related_name='player3_events')
shot_type = CharField(choices=SHOT_TYPES, null=True)
distance = IntegerField(null=True)
penalty = CharField(null=True)
penalty_minutes = IntegerField(null=True)
class Meta:
db_table = 'events'
order_by = ('game', 'number')
class EventPlayer(BaseModel):
"""
Represents a player who was on the ice at the time of the event.
:param event: Event in which the player was on the ice.
:type event: Event
:param team: The team the player was playing for (denormalization?).
:type team: Team
    :param player: The player on the ice during the event.
:type player: Player
"""
event = ForeignKeyField(Event, related_name='players')
team = ForeignKeyField(Team)
player = ForeignKeyField(Player)
class Meta:
db_table = 'event_players'
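# Editor's sketch: before using these models, bind the Proxy to a concrete
# database and create the tables in the declared dependency order (peewee 2.x
# API assumed; the in-memory SQLite target and package path are illustrative):
#
#     from peewee import SqliteDatabase
#     from mypackage import models
#
#     db = SqliteDatabase(':memory:')
#     models.db_proxy.initialize(db)
#     db.create_tables([getattr(models, name) for name in models.MODELS],
#                      safe=True)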
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
from monty.serialization import loadfn, dumpfn
from monty.os.path import which
from pymatgen.core.structure import Molecule
from pymatgen.io.qchem.outputs import QCOutput, check_for_structure_changes
from pymatgen.util.testing import PymatgenTest
try:
from openbabel import openbabel
have_babel = True
except ImportError:
have_babel = False
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
single_job_dict = loadfn(os.path.join(
os.path.dirname(__file__), "single_job.json"))
multi_job_dict = loadfn(os.path.join(
os.path.dirname(__file__), "multi_job.json"))
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "molecules")
property_list = {"errors",
"multiple_outputs",
"completion",
"unrestricted",
"using_GEN_SCFMAN",
"final_energy",
"S2",
"optimization",
"energy_trajectory",
"opt_constraint",
"frequency_job",
"charge",
"multiplicity",
"species",
"initial_geometry",
"initial_molecule",
"SCF",
"Mulliken",
"optimized_geometry",
"optimized_zmat",
"molecule_from_optimized_geometry",
"last_geometry",
"molecule_from_last_geometry",
"geometries",
"gradients",
"frequency_mode_vectors",
"walltime",
"cputime",
"point_group",
"frequencies",
"IR_intens",
"IR_active",
"g_electrostatic",
"g_cavitation",
"g_dispersion",
"g_repulsion",
"total_contribution_pcm",
"ZPE",
"trans_enthalpy",
"vib_enthalpy",
"rot_enthalpy",
"gas_constant",
"trans_entropy",
"vib_entropy",
"rot_entropy",
"total_entropy",
"total_enthalpy",
"warnings",
"SCF_energy_in_the_final_basis_set",
"Total_energy_in_the_final_basis_set",
"solvent_method",
"solvent_data",
"using_dft_d3",
"single_point_job",
"force_job",
"pcm_gradients",
"CDS_gradients",
"RESP",
"trans_dip"}
if have_babel:
property_list.add("structure_change")
single_job_out_names = {"unable_to_determine_lambda_in_geom_opt.qcout",
"thiophene_wfs_5_carboxyl.qcout",
"hf.qcout",
"hf_opt_failed.qcout",
"no_reading.qcout",
"exit_code_134.qcout",
"negative_eigen.qcout",
"insufficient_memory.qcout",
"freq_seg_too_small.qcout",
"crowd_gradient_number.qcout",
"quinoxaline_anion.qcout",
"tfsi_nbo.qcout",
"crowd_nbo_charges.qcout",
"h2o_aimd.qcout",
"quinoxaline_anion.qcout",
"crowd_gradient_number.qcout",
"bsse.qcout",
"thiophene_wfs_5_carboxyl.qcout",
"time_nan_values.qcout",
"pt_dft_180.0.qcout",
"qchem_energies/hf-rimp2.qcout",
"qchem_energies/hf_b3lyp.qcout",
"qchem_energies/hf_ccsd(t).qcout",
"qchem_energies/hf_cosmo.qcout",
"qchem_energies/hf_hf.qcout",
"qchem_energies/hf_lxygjos.qcout",
"qchem_energies/hf_mosmp2.qcout",
"qchem_energies/hf_mp2.qcout",
"qchem_energies/hf_qcisd(t).qcout",
"qchem_energies/hf_riccsd(t).qcout",
"qchem_energies/hf_tpssh.qcout",
"qchem_energies/hf_xyg3.qcout",
"qchem_energies/hf_xygjos.qcout",
"qchem_energies/hf_wb97xd_gen_scfman.qcout",
"new_qchem_files/pt_n2_n_wb_180.0.qcout",
"new_qchem_files/pt_n2_trip_wb_90.0.qcout",
"new_qchem_files/pt_n2_gs_rimp2_pvqz_90.0.qcout",
"new_qchem_files/VC_solv_eps10.2.qcout",
"crazy_scf_values.qcout",
"new_qchem_files/N2.qcout",
"new_qchem_files/julian.qcout",
"new_qchem_files/Frequency_no_equal.qout",
"new_qchem_files/gdm.qout",
"new_qchem_files/DinfH.qout",
"new_qchem_files/mpi_error.qout",
"new_qchem_files/molecule_read_error.qout",
"new_qchem_files/Optimization_no_equal.qout",
"new_qchem_files/2068.qout",
"new_qchem_files/2620.qout",
"new_qchem_files/1746.qout",
"new_qchem_files/1570.qout",
"new_qchem_files/1570_2.qout",
"new_qchem_files/single_point.qout",
"new_qchem_files/roothaan_diis_gdm.qout"}
multi_job_out_names = {"not_enough_total_memory.qcout",
"new_qchem_files/VC_solv_eps10.qcout",
"new_qchem_files/MECLi_solv_eps10.qcout",
"pcm_solvent_deprecated.qcout",
"qchem43_batch_job.qcout",
"ferrocenium_1pos.qcout",
"CdBr2.qcout",
"killed.qcout",
"aux_mpi_time_mol.qcout",
"new_qchem_files/VCLi_solv_eps10.qcout"}
class TestQCOutput(PymatgenTest):
@staticmethod
def generate_single_job_dict():
"""
Used to generate test dictionary for single jobs.
"""
single_job_dict = {}
for file in single_job_out_names:
single_job_dict[file] = QCOutput(os.path.join(test_dir, file)).data
dumpfn(single_job_dict, "single_job.json")
@staticmethod
def generate_multi_job_dict():
"""
Used to generate test dictionary for multiple jobs.
"""
multi_job_dict = {}
for file in multi_job_out_names:
outputs = QCOutput.multiple_outputs_from_file(
QCOutput, os.path.join(test_dir, file), keep_sub_files=False)
data = []
for sub_output in outputs:
data.append(sub_output.data)
multi_job_dict[file] = data
dumpfn(multi_job_dict, "multi_job.json")
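    # Editor's note: the two generators above are intended to be run manually
    # (e.g. from a REPL) after parser changes, to refresh the reference JSON
    # files that test_all compares against.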
def _test_property(self, key, single_outs, multi_outs):
for name, outdata in single_outs.items():
try:
self.assertEqual(outdata.get(key), single_job_dict[name].get(key))
except ValueError:
self.assertArrayEqual(outdata.get(key), single_job_dict[name].get(key))
for name, outputs in multi_outs.items():
for ii, sub_output in enumerate(outputs):
try:
self.assertEqual(sub_output.data.get(key), multi_job_dict[name][ii].get(key))
except ValueError:
self.assertArrayEqual(sub_output.data.get(key), multi_job_dict[name][ii].get(key))
def test_all(self):
single_outs = dict()
for file in single_job_out_names:
single_outs[file] = QCOutput(os.path.join(test_dir, file)).data
multi_outs = dict()
for file in multi_job_out_names:
multi_outs[file] = QCOutput.multiple_outputs_from_file(QCOutput,
os.path.join(test_dir, file),
keep_sub_files=False)
for key in property_list:
print('Testing ', key)
self._test_property(key, single_outs, multi_outs)
@unittest.skipIf((not (have_babel)) or (not which("babel")),
"OpenBabel not installed.")
def test_structural_change(self):
t1 = Molecule.from_file(os.path.join(test_dir, "structural_change",
"t1.xyz"))
t2 = Molecule.from_file(os.path.join(test_dir, "structural_change",
"t2.xyz"))
t3 = Molecule.from_file(os.path.join(test_dir, "structural_change",
"t3.xyz"))
thio_1 = Molecule.from_file(os.path.join(test_dir, "structural_change",
"thiophene1.xyz"))
thio_2 = Molecule.from_file(os.path.join(test_dir, "structural_change",
"thiophene2.xyz"))
frag_1 = Molecule.from_file(os.path.join(test_dir, "new_qchem_files",
"test_structure_change",
"frag_1.xyz"))
frag_2 = Molecule.from_file(os.path.join(test_dir, "new_qchem_files",
"test_structure_change",
"frag_2.xyz"))
self.assertEqual(check_for_structure_changes(t1, t1), "no_change")
self.assertEqual(check_for_structure_changes(t2, t3), "no_change")
self.assertEqual(check_for_structure_changes(t1, t2), "fewer_bonds")
self.assertEqual(check_for_structure_changes(t2, t1), "more_bonds")
self.assertEqual(check_for_structure_changes(thio_1, thio_2),
"unconnected_fragments")
self.assertEqual(check_for_structure_changes(frag_1, frag_2),
"bond_change")
if __name__ == "__main__":
unittest.main()
|
|
from functools import partial
import os
from datetime import datetime, timedelta
import numpy as np
import torch
import neptune
from torch.autograd import Variable
from torch.optim.lr_scheduler import ExponentialLR
from tempfile import TemporaryDirectory
from steppy.base import Step, IdentityOperation
from steppy.adapter import Adapter, E
from toolkit.pytorch_transformers.utils import Averager, persist_torch_model
from toolkit.pytorch_transformers.validation import score_model
from .utils import (
get_logger,
sigmoid,
softmax,
make_apply_transformer,
read_masks,
get_list_of_image_predictions,
)
from .metrics import intersection_over_union, intersection_over_union_thresholds
from .postprocessing import crop_image, resize_image, binarize
logger = get_logger()
Y_COLUMN = "file_path_mask"
ORIGINAL_SIZE = (101, 101)
THRESHOLD = 0.5
class Callback:
def __init__(self):
self.epoch_id = None
self.batch_id = None
self.model = None
self.optimizer = None
self.loss_function = None
self.output_names = None
self.validation_datagen = None
self.lr_scheduler = None
def set_params(self, transformer, validation_datagen, *args, **kwargs):
self.model = transformer.model
self.optimizer = transformer.optimizer
self.loss_function = transformer.loss_function
self.output_names = transformer.output_names
self.validation_datagen = validation_datagen
self.transformer = transformer
def on_train_begin(self, *args, **kwargs):
self.epoch_id = 0
self.batch_id = 0
def on_train_end(self, *args, **kwargs):
pass
def on_epoch_begin(self, *args, **kwargs):
pass
def on_epoch_end(self, *args, **kwargs):
self.epoch_id += 1
def training_break(self, *args, **kwargs):
return False
def on_batch_begin(self, *args, **kwargs):
pass
def on_batch_end(self, *args, **kwargs):
self.batch_id += 1
def get_validation_loss(self):
if self.epoch_id not in self.transformer.validation_loss.keys():
self.transformer.validation_loss[self.epoch_id] = score_model(
self.model, self.loss_function, self.validation_datagen
)
return self.transformer.validation_loss[self.epoch_id]
class CallbackList:
def __init__(self, callbacks=None):
if callbacks is None:
self.callbacks = []
elif isinstance(callbacks, Callback):
self.callbacks = [callbacks]
else:
self.callbacks = callbacks
def __len__(self):
return len(self.callbacks)
def set_params(self, *args, **kwargs):
for callback in self.callbacks:
callback.set_params(*args, **kwargs)
def on_train_begin(self, *args, **kwargs):
for callback in self.callbacks:
callback.on_train_begin(*args, **kwargs)
def on_train_end(self, *args, **kwargs):
for callback in self.callbacks:
callback.on_train_end(*args, **kwargs)
def on_epoch_begin(self, *args, **kwargs):
for callback in self.callbacks:
callback.on_epoch_begin(*args, **kwargs)
def on_epoch_end(self, *args, **kwargs):
for callback in self.callbacks:
callback.on_epoch_end(*args, **kwargs)
def training_break(self, *args, **kwargs):
callback_out = [
callback.training_break(*args, **kwargs) for callback in self.callbacks
]
return any(callback_out)
def on_batch_begin(self, *args, **kwargs):
for callback in self.callbacks:
callback.on_batch_begin(*args, **kwargs)
def on_batch_end(self, *args, **kwargs):
for callback in self.callbacks:
callback.on_batch_end(*args, **kwargs)
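# Usage sketch (illustrative only): callbacks are grouped into a CallbackList
# and driven by the owning training loop. `transformer` and `validation_datagen`
# are hypothetical stand-ins for the steppy transformer and data generator that
# normally supply these hooks via set_params().
def _example_callback_wiring(transformer, validation_datagen):
    callbacks = CallbackList([TrainingMonitor(epoch_every=1, batch_every=10)])
    callbacks.set_params(transformer, validation_datagen)
    callbacks.on_train_begin()
    for batch_metrics in []:  # per-batch loss dicts would be produced here
        callbacks.on_batch_end(metrics=batch_metrics)
    callbacks.on_epoch_end()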
class TrainingMonitor(Callback):
def __init__(self, epoch_every=None, batch_every=None):
super().__init__()
self.epoch_loss_averagers = {}
if epoch_every == 0:
self.epoch_every = False
else:
self.epoch_every = epoch_every
if batch_every == 0:
self.batch_every = False
else:
self.batch_every = batch_every
def on_train_begin(self, *args, **kwargs):
self.epoch_loss_averagers = {}
self.epoch_id = 0
self.batch_id = 0
def on_epoch_end(self, *args, **kwargs):
for name, averager in self.epoch_loss_averagers.items():
epoch_avg_loss = averager.value
averager.reset()
if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):
logger.info(
"epoch {0} {1}: {2:.5f}".format(
self.epoch_id, name, epoch_avg_loss
)
)
self.epoch_id += 1
def on_batch_end(self, metrics, *args, **kwargs):
for name, loss in metrics.items():
loss = loss.data.cpu().numpy()[0]
if name in self.epoch_loss_averagers.keys():
self.epoch_loss_averagers[name].send(loss)
else:
self.epoch_loss_averagers[name] = Averager()
self.epoch_loss_averagers[name].send(loss)
if self.batch_every and ((self.batch_id % self.batch_every) == 0):
logger.info(
"epoch {0} batch {1} {2}: {3:.5f}".format(
self.epoch_id, self.batch_id, name, loss
)
)
self.batch_id += 1
class ExponentialLRScheduler(Callback):
def __init__(self, gamma, epoch_every=1, batch_every=None):
super().__init__()
self.gamma = gamma
if epoch_every == 0:
self.epoch_every = False
else:
self.epoch_every = epoch_every
if batch_every == 0:
self.batch_every = False
else:
self.batch_every = batch_every
def set_params(self, transformer, validation_datagen, *args, **kwargs):
self.validation_datagen = validation_datagen
self.model = transformer.model
self.optimizer = transformer.optimizer
self.loss_function = transformer.loss_function
self.lr_scheduler = ExponentialLR(self.optimizer, self.gamma, last_epoch=-1)
def on_train_begin(self, *args, **kwargs):
self.epoch_id = 0
self.batch_id = 0
logger.info(
"initial lr: {0}".format(
self.optimizer.state_dict()["param_groups"][0]["initial_lr"]
)
)
def on_epoch_end(self, *args, **kwargs):
if self.epoch_every and (((self.epoch_id + 1) % self.epoch_every) == 0):
self.lr_scheduler.step()
logger.info(
"epoch {0} current lr: {1}".format(
self.epoch_id + 1,
self.optimizer.state_dict()["param_groups"][0]["lr"],
)
)
self.epoch_id += 1
def on_batch_end(self, *args, **kwargs):
if self.batch_every and ((self.batch_id % self.batch_every) == 0):
self.lr_scheduler.step()
logger.info(
"epoch {0} batch {1} current lr: {2}".format(
self.epoch_id + 1,
self.batch_id + 1,
self.optimizer.state_dict()["param_groups"][0]["lr"],
)
)
self.batch_id += 1
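def _expected_lr(initial_lr, gamma, n_steps):
    """Sketch of the decay ExponentialLRScheduler applies: torch's ExponentialLR
    multiplies the rate by `gamma` on every step(), so after `n_steps` steps the
    rate is initial_lr * gamma ** n_steps (e.g. gamma=0.95 roughly halves it in
    14 steps)."""
    return initial_lr * gamma ** n_steps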
class ExperimentTiming(Callback):
def __init__(self, epoch_every=None, batch_every=None):
super().__init__()
if epoch_every == 0:
self.epoch_every = False
else:
self.epoch_every = epoch_every
if batch_every == 0:
self.batch_every = False
else:
self.batch_every = batch_every
self.batch_start = None
self.epoch_start = None
self.current_sum = None
self.current_mean = None
def on_train_begin(self, *args, **kwargs):
self.epoch_id = 0
self.batch_id = 0
logger.info("starting training...")
def on_train_end(self, *args, **kwargs):
logger.info("training finished")
def on_epoch_begin(self, *args, **kwargs):
if self.epoch_id > 0:
epoch_time = datetime.now() - self.epoch_start
if self.epoch_every:
if (self.epoch_id % self.epoch_every) == 0:
logger.info(
"epoch {0} time {1}".format(
self.epoch_id - 1, str(epoch_time)[:-7]
)
)
self.epoch_start = datetime.now()
self.current_sum = timedelta()
self.current_mean = timedelta()
logger.info("epoch {0} ...".format(self.epoch_id))
def on_batch_begin(self, *args, **kwargs):
if self.batch_id > 0:
current_delta = datetime.now() - self.batch_start
self.current_sum += current_delta
self.current_mean = self.current_sum / self.batch_id
if self.batch_every:
if self.batch_id > 0 and (((self.batch_id - 1) % self.batch_every) == 0):
logger.info(
"epoch {0} average batch time: {1}".format(
self.epoch_id, str(self.current_mean)[:-5]
)
)
if self.batch_every:
if self.batch_id == 0 or self.batch_id % self.batch_every == 0:
logger.info(
"epoch {0} batch {1} ...".format(self.epoch_id, self.batch_id)
)
self.batch_start = datetime.now()
class NeptuneMonitor(Callback):
def __init__(self, image_nr, image_resize, model_name):
super().__init__()
self.model_name = model_name
self.ctx = neptune.Context()
        self.epoch_loss_averagers = {}
self.image_nr = image_nr
self.image_resize = image_resize
def on_train_begin(self, *args, **kwargs):
self.epoch_loss_averagers = {}
self.epoch_id = 0
self.batch_id = 0
def on_batch_end(self, metrics, *args, **kwargs):
for name, loss in metrics.items():
loss = loss.data.cpu().numpy()[0]
if name in self.epoch_loss_averagers.keys():
self.epoch_loss_averagers[name].send(loss)
else:
self.epoch_loss_averagers[name] = Averager()
self.epoch_loss_averagers[name].send(loss)
self.ctx.channel_send(
"{} batch {} loss".format(self.model_name, name),
x=self.batch_id,
y=loss,
)
self.batch_id += 1
def on_epoch_end(self, *args, **kwargs):
self._send_numeric_channels()
self.epoch_id += 1
def _send_numeric_channels(self, *args, **kwargs):
for name, averager in self.epoch_loss_averagers.items():
epoch_avg_loss = averager.value
averager.reset()
self.ctx.channel_send(
"{} epoch {} loss".format(self.model_name, name),
x=self.epoch_id,
y=epoch_avg_loss,
)
self.model.eval()
val_loss = self.get_validation_loss()
self.model.train()
for name, loss in val_loss.items():
loss = loss.data.cpu().numpy()[0]
self.ctx.channel_send(
"{} epoch_val {} loss".format(self.model_name, name),
x=self.epoch_id,
y=loss,
)
class ValidationMonitor(Callback):
def __init__(self, data_dir, loader_mode, epoch_every=None, batch_every=None):
super().__init__()
if epoch_every == 0:
self.epoch_every = False
else:
self.epoch_every = epoch_every
if batch_every == 0:
self.batch_every = False
else:
self.batch_every = batch_every
self.data_dir = data_dir
self.validation_pipeline = postprocessing_pipeline_simplified
self.loader_mode = loader_mode
self.meta_valid = None
self.y_true = None
self.activation_func = None
def set_params(
self, transformer, validation_datagen, meta_valid=None, *args, **kwargs
):
self.model = transformer.model
self.optimizer = transformer.optimizer
self.loss_function = transformer.loss_function
self.output_names = transformer.output_names
self.validation_datagen = validation_datagen
self.meta_valid = meta_valid
self.y_true = read_masks(self.meta_valid[Y_COLUMN].values)
self.activation_func = transformer.activation_func
self.transformer = transformer
def get_validation_loss(self):
return self._get_validation_loss()
def on_epoch_end(self, *args, **kwargs):
if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):
self.model.eval()
val_loss = self.get_validation_loss()
self.model.train()
for name, loss in val_loss.items():
loss = loss.data.cpu().numpy()[0]
logger.info(
"epoch {0} validation {1}: {2:.5f}".format(
self.epoch_id, name, loss
)
)
self.epoch_id += 1
def _get_validation_loss(self):
output, epoch_loss = self._transform()
y_pred = self._generate_prediction(output)
logger.info("Calculating IOU and IOUT Scores")
iou_score = intersection_over_union(self.y_true, y_pred)
iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
logger.info("IOU score on validation is {}".format(iou_score))
logger.info("IOUT score on validation is {}".format(iout_score))
if not self.transformer.validation_loss:
self.transformer.validation_loss = {}
self.transformer.validation_loss.setdefault(
self.epoch_id,
{
"sum": epoch_loss,
"iou": Variable(torch.Tensor([iou_score])),
"iout": Variable(torch.Tensor([iout_score])),
},
)
return self.transformer.validation_loss[self.epoch_id]
def _transform(self):
self.model.eval()
batch_gen, steps = self.validation_datagen
partial_batch_losses = []
outputs = {}
for batch_id, data in enumerate(batch_gen):
X = data[0]
targets_tensors = data[1:]
if torch.cuda.is_available():
X = Variable(X, volatile=True).cuda()
targets_var = []
for target_tensor in targets_tensors:
targets_var.append(Variable(target_tensor, volatile=True).cuda())
else:
X = Variable(X, volatile=True)
targets_var = []
for target_tensor in targets_tensors:
targets_var.append(Variable(target_tensor, volatile=True))
outputs_batch = self.model(X)
            if len(self.output_names) == 1:
                for (name, loss_function_one, weight), target in zip(
                    self.loss_function, targets_var
                ):
                    loss_sum = loss_function_one(outputs_batch, target) * weight
                outputs.setdefault(self.output_names[0], []).append(
                    outputs_batch.data.cpu().numpy()
                )
            else:
                batch_losses = []
                for (name, loss_function_one, weight), output, target in zip(
                    self.loss_function, outputs_batch, targets_var
                ):
                    loss = loss_function_one(output, target) * weight
                    batch_losses.append(loss)
                    output_ = output.data.cpu().numpy()
                    outputs.setdefault(name, []).append(output_)
                loss_sum = sum(batch_losses)
            # partial_batch_losses is a plain list of per-batch loss sums; the
            # previous setdefault call treated it as a dict and would raise
            # AttributeError on the multi-output path.
            partial_batch_losses.append(loss_sum)
if batch_id == steps:
break
self.model.train()
average_losses = sum(partial_batch_losses) / steps
outputs = {
"{}_prediction".format(name): get_list_of_image_predictions(outputs_)
for name, outputs_ in outputs.items()
}
for name, prediction in outputs.items():
if self.activation_func == "softmax":
outputs[name] = [
softmax(single_prediction, axis=0)
for single_prediction in prediction
]
elif self.activation_func == "sigmoid":
outputs[name] = [sigmoid(np.squeeze(mask)) for mask in prediction]
else:
raise Exception("Only softmax and sigmoid activations are allowed")
return outputs, average_losses
def _generate_prediction(self, outputs):
data = {
"callback_input": {"meta": self.meta_valid, "meta_valid": None},
"unet_output": {**outputs},
}
with TemporaryDirectory() as cache_dirpath:
pipeline = self.validation_pipeline(cache_dirpath, self.loader_mode)
output = pipeline.transform(data)
y_pred = output["y_pred"]
return y_pred
class ModelCheckpoint(Callback):
    def __init__(self, filepath, metric_name="sum", epoch_every=1, minimize=True):
        super().__init__()
        self.filepath = filepath
self.minimize = minimize
self.best_score = None
if epoch_every == 0:
self.epoch_every = False
else:
self.epoch_every = epoch_every
self.metric_name = metric_name
def on_train_begin(self, *args, **kwargs):
self.epoch_id = 0
self.batch_id = 0
os.makedirs(os.path.dirname(self.filepath), exist_ok=True)
def on_epoch_end(self, *args, **kwargs):
if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):
self.model.eval()
val_loss = self.get_validation_loss()
loss_sum = val_loss[self.metric_name]
loss_sum = loss_sum.data.cpu().numpy()[0]
self.model.train()
if self.best_score is None:
self.best_score = loss_sum
if (
(self.minimize and loss_sum < self.best_score)
or (not self.minimize and loss_sum > self.best_score)
or (self.epoch_id == 0)
):
self.best_score = loss_sum
persist_torch_model(self.model, self.filepath)
logger.info(
"epoch {0} model saved to {1}".format(self.epoch_id, self.filepath)
)
self.epoch_id += 1
class EarlyStopping(Callback):
def __init__(self, metric_name="sum", patience=1000, minimize=True):
super().__init__()
self.patience = patience
self.minimize = minimize
self.best_score = None
self.epoch_since_best = 0
self._training_break = False
self.metric_name = metric_name
def training_break(self, *args, **kwargs):
return self._training_break
def on_epoch_end(self, *args, **kwargs):
self.model.eval()
val_loss = self.get_validation_loss()
loss_sum = val_loss[self.metric_name]
loss_sum = loss_sum.data.cpu().numpy()[0]
self.model.train()
if not self.best_score:
self.best_score = loss_sum
if (self.minimize and loss_sum < self.best_score) or (
not self.minimize and loss_sum > self.best_score
):
self.best_score = loss_sum
self.epoch_since_best = 0
else:
self.epoch_since_best += 1
if self.epoch_since_best > self.patience:
self._training_break = True
self.epoch_id += 1
def postprocessing_pipeline_simplified(cache_dirpath, loader_mode):
if loader_mode == "resize_and_pad":
size_adjustment_function = partial(crop_image, target_size=ORIGINAL_SIZE)
elif loader_mode == "resize":
size_adjustment_function = partial(resize_image, target_size=ORIGINAL_SIZE)
else:
raise NotImplementedError
mask_resize = Step(
name="mask_resize",
transformer=make_apply_transformer(
size_adjustment_function, output_name="resized_images", apply_on=["images"]
),
input_data=["unet_output"],
adapter=Adapter({"images": E("unet_output", "mask_prediction")}),
experiment_directory=cache_dirpath,
)
binarizer = Step(
name="binarizer",
transformer=make_apply_transformer(
partial(binarize, threshold=THRESHOLD),
output_name="binarized_images",
apply_on=["images"],
),
input_steps=[mask_resize],
adapter=Adapter({"images": E(mask_resize.name, "resized_images")}),
experiment_directory=cache_dirpath,
)
output = Step(
name="output",
transformer=IdentityOperation(),
input_steps=[binarizer],
adapter=Adapter({"y_pred": E(binarizer.name, "binarized_images")}),
experiment_directory=cache_dirpath,
)
return output
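# Usage sketch (illustrative): the pipeline is materialized with a cache
# directory and fed the dict built in _generate_prediction above, e.g.
#   pipeline = postprocessing_pipeline_simplified(cache_dirpath, "resize")
#   y_pred = pipeline.transform(data)["y_pred"]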
|
|
# encoding=utf8
from gensim.models import Word2Vec
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
import sys
from sklearn_pandas import DataFrameMapper
import util
import pandas as pd
reload(sys)
sys.setdefaultencoding('utf-8')
features = \
[
"id", "major", "age", "gender",
"isenglish", "isjunior", "isbachelor", "ismaster", "isintern",
"total_previous_job",
"last_salary", "last_size", "last_position_name", "last_industry", "last_type", "last_type1", "last_department",
"last_start_year", "last_start_month", "last_end_year", "last_end_month", "last_interval_month",
"third_salary", "third_size", "third_position_name", "third_industry", "third_type", "third_type1",
"third_department",
"third_start_year", "third_start_month", "third_end_year", "third_end_month", "third_interval_month",
"first_salary", "first_size", "first_position_name", "first_industry", "first_type", "first_type1",
"first_department",
"first_start_year", "first_start_month", "first_end_year", "first_end_month", "first_interval_month",
"last3_interval_month", "diff_last3_size", "diff_last3_salary", "diff_last3_industry",
"diff_last3_position_name",
"total_interval_month", "diff_salary", "diff_size", "diff_industry", "diff_position_name",
"major_1",
"last_position_name_1", "last_department_1",
"third_position_name_1", "third_department_1",
"first_position_name_1", "first_department_1",
"major_2",
"last_position_name_2", "last_department_2",
"third_position_name_2", "third_department_2",
"first_position_name_2", "first_department_2",
"start_working_age", "rev_working_age", "pre_working_month", "pre_interval_month",
"pre_largest_size", "pre_largest_salary",
"pre_least_size",
"pre_least_salary",
"pre_size1",
"pre_size2",
"pre_size3",
"pre_size4",
"pre_size5",
"pre_size6",
"pre_size7",
"pre_salary1",
"pre_salary2",
"pre_salary3",
"pre_salary4",
"pre_salary5",
"pre_salary6",
"pre_salary7",
"promotion_size",
"promotion_salary",
"decrease_size",
"decrease_salar"
]
all_features = features + ["predict_degree", "predict_salary", "predict_size", "predict_position_name"]
train = pd.read_pickle(util.features_prefix + "manual_feature.pkl")
print len(train), len(features), len(all_features)
train = train[all_features]
train = train[train["predict_position_name"].isin(util.position_name_list)]
data_all = pd.concat([train[features]])
def get_mapper(data_all):
param_list = [
('id', None),
('major', LabelEncoder()),
('age', None),
('gender', LabelEncoder()),
('isenglish', None),
('isjunior', None),
('isbachelor', None),
('ismaster', None),
('isintern', None),
('total_previous_job', None),
('last_type', LabelEncoder()),
('last_type1', LabelEncoder()),
('last_department', LabelEncoder()),
('last_size', None),
('last_salary', None),
('last_industry', LabelEncoder()),
('last_position_name', LabelEncoder()),
('last_start_year', None),
('last_start_month', None),
('last_end_year', None),
('last_end_month', None),
('last_interval_month', None),
('third_type', LabelEncoder()),
('third_type1', LabelEncoder()),
('third_department', LabelEncoder()),
('third_size', None),
('third_salary', None),
('third_industry', LabelEncoder()),
('third_position_name', LabelEncoder()),
('third_start_year', None),
('third_start_month', None),
('third_end_year', None),
('third_end_month', None),
('third_interval_month', None),
('first_type', LabelEncoder()),
('first_type1', LabelEncoder()),
('first_department', LabelEncoder()),
('first_size', None),
('first_salary', None),
('first_industry', LabelEncoder()),
('first_position_name', LabelEncoder()),
('first_start_year', None),
('first_start_month', None),
('first_end_year', None),
('first_end_month', None),
('first_interval_month', None),
('last3_interval_month', None),
('diff_last3_salary', LabelEncoder()),
('diff_last3_size', LabelEncoder()),
('diff_last3_industry', LabelEncoder()),
('diff_last3_position_name', LabelEncoder()),
('total_interval_month', None),
('diff_salary', LabelEncoder()),
('diff_size', LabelEncoder()),
('diff_industry', LabelEncoder()),
('diff_position_name', LabelEncoder()),
('major_1', LabelEncoder()),
('last_position_name_1', LabelEncoder()),
('last_department_1', LabelEncoder()),
('third_position_name_1', LabelEncoder()),
('third_department_1', LabelEncoder()),
('first_position_name_1', LabelEncoder()),
('first_department_1', LabelEncoder()),
('major_2', LabelEncoder()),
('last_position_name_2', LabelEncoder()),
('last_department_2', LabelEncoder()),
('third_position_name_2', LabelEncoder()),
('third_department_2', LabelEncoder()),
('first_position_name_2', LabelEncoder()),
('first_department_2', LabelEncoder()),
('start_working_age', None),
('rev_working_age', None),
('pre_working_month', None),
('pre_interval_month', None),
("pre_largest_size", None),
("pre_largest_salary", None),
("pre_least_size", None),
("pre_least_salary", None),
("pre_size1", None),
("pre_size2", None),
("pre_size3", None),
("pre_size4", None),
("pre_size5", None),
("pre_size6", None),
("pre_size7", None),
("pre_salary1", None),
("pre_salary2", None),
("pre_salary3", None),
("pre_salary4", None),
("pre_salary5", None),
("pre_salary6", None),
("pre_salary7", None),
("promotion_size", None),
("promotion_salary", None),
("decrease_size", None),
("decrease_salar", None)
]
print "the mapper's param list is %s" % (len(param_list))
mapper = DataFrameMapper(param_list)
mapper.fit(data_all)
return mapper
mapper = get_mapper(data_all)
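# Minimal sketch of what the mapper does (toy frame, hypothetical values):
# columns paired with LabelEncoder are integer-encoded, columns paired with
# None pass through unchanged, and fit/transform yields one numpy matrix.
#   toy = pd.DataFrame({"gender": ["m", "f"], "age": [30, 25]})
#   DataFrameMapper([("gender", LabelEncoder()), ("age", None)]).fit_transform(toy)
#   # -> array([[1, 30], [0, 25]])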
def getPrecision(multiclf, train_X, train_Y, label_dict):
pred_Y = multiclf.predict(train_X)
pred_Y = [int(p) for p in pred_Y]
print "total accuracy_score%s" % (accuracy_score(train_Y, pred_Y))
diff_num = len(label_dict.classes_)
for i in xrange(diff_num):
hit, test_cnt, pred_cnt = 0, 0, 0
for k in xrange(len(train_Y)):
if train_Y[k] == i:
test_cnt += 1
if pred_Y[k] == i:
pred_cnt += 1
if train_Y[k] == i and pred_Y[k] == i:
hit += 1
print "\t\t%s %d %d %d\tprecision_score %s\trecall_score %s" % (
label_dict.inverse_transform([i])[0], hit, test_cnt, pred_cnt, hit * 1.0 / (pred_cnt + 0.01),
hit * 1.0 / (test_cnt + 0.01))
def get_feature_by_experienceList(workExperienceList, c_k_64_dic):
level_two = [u'industry', u'department', u'type', u'position_name']
feature_list = []
for k in [0, -1]:
for i in level_two:
try:
feature_list.append(c_k_64_dic[workExperienceList[k][i]])
except Exception, e:
feature_list.append(-1)
return feature_list
level_one = [u'major', u'degree', u'gender', u'age', u'workExperienceList', u'_id', u'id']
level_two = [u'salary', u'end_date', u'industry', u'position_name', u'department', u'type', u'start_date', u'size']
def sentence_to_matrix_vec(sentence, model, featuresNum, k_mean_dict_1, k_mean_dict_2):
temp = np.zeros((featuresNum * (7 * 5 + 3) + 7 * 5 * 2))
    if sentence is None:
        return temp
    num = min((len(sentence) - 3) / 7, 5)
for i in range(num * 7):
temp[featuresNum * i:featuresNum * (i + 1)] = model[sentence[i]]
try:
temp[38 * featuresNum + num * 2] = k_mean_dict_1[sentence[i]]
temp[38 * featuresNum + num * 2 + 1] = k_mean_dict_2[sentence[i]]
except Exception, e:
continue
for i in range(3):
temp[(5 * 7 + i) * featuresNum:(5 * 7 + i + 1) * featuresNum] = model[sentence[-1 * (i + 1)]]
return temp
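# Layout sketch of the vector built above (featuresNum is set to w2c_f = 10 in
# getAllFeatures below): slots [0, 35*featuresNum) hold word2vec embeddings for
# up to 5 jobs x 7 tokens, [35*featuresNum, 38*featuresNum) hold the 3 trailing
# summary tokens, and the final 7*5*2 = 70 slots hold the two k-means cluster
# ids written near offset 38*featuresNum.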
def getAllFeatures(train, mapper):
print "this is getAllFeatures"
# every record has a cluster value calculated by lda
w2c_f, w2c_w = 10, 14
lda_dict_1 = util.read_dict(util.features_prefix + 'id_lda_256.pkl')
lda_dict_2 = util.read_dict(util.features_prefix + 'id_lda_512.pkl')
k_mean_dict_1 = util.read_dict(util.features_prefix + 'c_k_all_64.pkl')
k_mean_dict_2 = util.read_dict(util.features_prefix + 'c_k_all_128.pkl')
sentence_dict_path = util.txt_prefix + 'id_sentences.pkl'
word2vec_path = util.txt_prefix + str(w2c_f) + 'features_1minwords_' + str(w2c_w) + 'context.pkl'
sentence_dic = util.read_dict(sentence_dict_path)
model = Word2Vec.load(word2vec_path)
train_X = train[features]
train_X = mapper.transform(train_X) # .values
new_train_X = []
for i in xrange(len(train_X)):
id = train_X[i][0]
lda_1 = lda_dict_1[id]
lda_2 = lda_dict_2[id]
s = sentence_dic.get(id)
f = np.concatenate(([train_X[i][1:].astype(np.float32)],
[sentence_to_matrix_vec(s, model, w2c_f, k_mean_dict_1, k_mean_dict_2)]), axis=1)[0]
f = np.concatenate(([f], [[lda_1, lda_2]]), axis=1)[0]
new_train_X.append(f)
new_train_X = np.array(new_train_X)
return new_train_X
if __name__ == "__main__":
train_Y = []
train_X = []
test_X = []
import os
train_X = getAllFeatures(train, mapper)
if os.path.exists(util.features_prefix + "/position_XY.pkl") is False:
train_Y = list(train["predict_position_name"].values)
label_dict = LabelEncoder().fit(train_Y)
label_dict_classes = len(label_dict.classes_)
train_Y = label_dict.transform(train_Y)
pd.to_pickle([train_X, train_Y], util.features_prefix + "/position_XY.pkl")
else:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + "/position_XY.pkl")
print len(train_X[0]), len(train_Y)
print 95 + 380 + 7 * 5 * 2 + 2
print train_X[0]
if os.path.exists(util.features_prefix + "/degree_XY.pkl") is False:
train_Y = list(train["predict_degree"].values)
label_dict = LabelEncoder().fit(train_Y)
label_dict_classes = len(label_dict.classes_)
train_Y = label_dict.transform(train_Y)
pd.to_pickle([train_X, train_Y], util.features_prefix + "/degree_XY.pkl")
else:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + "/degree_XY.pkl")
print len(train_X[0]), len(train_Y)
if os.path.exists(util.features_prefix + "/size_XY.pkl") is False:
train_Y = list(train["predict_size"].values)
label_dict = LabelEncoder().fit(train_Y)
label_dict_classes = len(label_dict.classes_)
train_Y = label_dict.transform(train_Y)
pd.to_pickle([train_X, train_Y], util.features_prefix + "/size_XY.pkl")
else:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + "/size_XY.pkl")
# 99 + 380 + 7*5*2 + 2
print len(train_X[0]), len(train_Y)
if os.path.exists(util.features_prefix + "/salary_XY.pkl") is False:
train_Y = list(train["predict_salary"].values)
label_dict = LabelEncoder().fit(train_Y)
label_dict_classes = len(label_dict.classes_)
train_Y = label_dict.transform(train_Y)
pd.to_pickle([train_X, train_Y], util.features_prefix + "/salary_XY.pkl")
else:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + "/salary_XY.pkl")
    # 99 + 380 + 7*5*2 + 2
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(np.array(train_X[:100]), np.array(train_Y[:100]))
print clf.predict(np.array(train_X[100:200]))
print train_Y[100:200]
from sklearn.feature_selection import SelectFromModel
model = SelectFromModel(clf, prefit=True)
list_1 = model.get_support()
for i in range(len(list_1)):
        if list_1[i]:
print i
print 'pickle end'
|
|
"""
course_catalog models
"""
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models
from django.db.models import Exists, ExpressionWrapper, OuterRef, Prefetch, Value
from course_catalog.constants import (
CONTENT_TYPE_FILE,
OCW_DEPARTMENTS,
VALID_COURSE_CONTENT_CHOICES,
AvailabilityType,
OfferedBy,
PlatformType,
PrivacyLevel,
ResourceType,
)
from course_catalog.utils import user_list_image_upload_uri
from open_discussions.models import TimestampedModel, TimestampedModelQuerySet
OPEN = "Open Content"
PROFESSIONAL = "Professional Offerings"
CERTIFICATE = "Certificates"
PROFESSIONAL_COURSE_PLATFORMS = [
PlatformType.bootcamps.value,
PlatformType.xpro.value,
PlatformType.see.value,
PlatformType.mitpe.value,
PlatformType.csail.value,
]
class LearningResourceQuerySet(TimestampedModelQuerySet):
"""QuerySet for resource models that can be favorited"""
def prefetch_list_items_for_user(self, user=None):
"""Prefetch list_items based on the current user"""
if user and user.is_authenticated:
return self.prefetch_related(
Prefetch(
"list_items",
queryset=UserListItem.objects.filter(
user_list__author=user
).prefetch_related("content_type"),
)
)
# force list_items to be an empty query for anonymous users
return self.prefetch_related(
Prefetch("list_items", queryset=UserListItem.objects.none())
)
    def annotate_is_favorite_for_user(self, user=None):
        """Annotate the query with an is_favorite subquery for the given user"""
        return self.annotate(
            is_favorite=ExpressionWrapper(
                Exists(
                    FavoriteItem.objects.filter(
                        # restrict the Exists() to the requesting user's
                        # favorites; without this, anything favorited by any
                        # user would be flagged
                        user=user,
                        content_type=ContentType.objects.get_for_model(self.model),
                        object_id=OuterRef("id"),
                    )
                )
                if user and user.is_authenticated
                else Value(False),
                output_field=models.BooleanField(),
            )
        )
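# Usage sketch (illustrative): a view can build a user-aware queryset with
#   Course.objects.prefetch_list_items_for_user(request.user)
#                 .annotate_is_favorite_for_user(request.user)
# where `request.user` is the usual Django request attribute.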
class LearningResourceGenericRelationsMixin(models.Model):
"""
    Model mixin for resource models that can be favorited.
    This is intended to be used only for models that have a
    GenericRelation referenced by FavoriteItem.
"""
favorite_items = GenericRelation("course_catalog.FavoriteItem")
list_items = GenericRelation("course_catalog.UserListItem")
class Meta:
abstract = True
class CourseInstructor(TimestampedModel):
"""
Instructors for all courses
"""
first_name = models.CharField(max_length=128, null=True, blank=True)
last_name = models.CharField(max_length=128, null=True, blank=True)
full_name = models.CharField(max_length=256, null=True, blank=True)
class Meta:
ordering = ["last_name"]
    def __str__(self):
        # first_name/last_name may be null, so drop missing parts when joining
        return self.full_name or " ".join(
            part for part in (self.first_name, self.last_name) if part
        )
class CourseTopic(TimestampedModel):
"""
Topics for all courses (e.g. "History")
"""
name = models.CharField(max_length=128, unique=True)
def __str__(self):
return self.name
class CoursePrice(TimestampedModel):
"""
Price model for all courses (e.g. "price": 0.00, "mode": "audit")
"""
price = models.DecimalField(decimal_places=2, max_digits=12)
mode = models.CharField(max_length=128)
upgrade_deadline = models.DateTimeField(null=True)
def __str__(self):
return "${:,.2f}".format(self.price)
class LearningResourceOfferor(TimestampedModel):
"""Data model for who is offering a learning resource"""
name = models.CharField(max_length=256, unique=True)
def __str__(self):
return self.name
class LearningResource(TimestampedModel):
"""
Base class for all learning resource models under course_catalog app.
"""
title = models.CharField(max_length=256)
short_description = models.TextField(null=True, blank=True)
topics = models.ManyToManyField(CourseTopic, blank=True)
offered_by = models.ManyToManyField(LearningResourceOfferor, blank=True)
class Meta:
abstract = True
class AbstractCourse(LearningResource):
"""
Abstract data model for course models
"""
full_description = models.TextField(null=True, blank=True)
image_src = models.TextField(max_length=2048, null=True, blank=True)
image_description = models.CharField(max_length=1024, null=True, blank=True)
last_modified = models.DateTimeField(null=True, blank=True)
featured = models.BooleanField(default=False)
published = models.BooleanField(default=True)
url = models.URLField(null=True, max_length=2048)
learning_resource_type = models.CharField(
max_length=20, default=ResourceType.course.value
)
raw_json = JSONField(null=True, blank=True)
class Meta:
abstract = True
index_together = (("id", "published"),)
class LearningResourceRun(AbstractCourse):
"""
Model for course runs
"""
run_id = models.CharField(max_length=128)
platform = models.CharField(max_length=128)
year = models.IntegerField(null=True, blank=True)
start_date = models.DateTimeField(null=True, blank=True)
end_date = models.DateTimeField(null=True, blank=True)
enrollment_start = models.DateTimeField(null=True, blank=True)
enrollment_end = models.DateTimeField(null=True, blank=True)
best_start_date = models.DateTimeField(null=True, blank=True)
best_end_date = models.DateTimeField(null=True, blank=True)
level = models.CharField(max_length=128, null=True, blank=True)
semester = models.CharField(max_length=20, null=True, blank=True)
availability = models.CharField(max_length=128, null=True, blank=True)
language = models.CharField(max_length=128, null=True, blank=True)
slug = models.CharField(max_length=1024, null=True, blank=True)
instructors = models.ManyToManyField(
CourseInstructor, blank=True, related_name="course_instructors"
)
prices = models.ManyToManyField(CoursePrice, blank=True)
course = models.ForeignKey(
"Course",
null=True,
blank=True,
related_name="deprecated_runs",
on_delete=models.CASCADE,
)
content_type = models.ForeignKey(
ContentType,
null=True,
limit_choices_to={"model__in": ("course", "bootcamp", "program")},
on_delete=models.CASCADE,
)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey("content_type", "object_id")
def __str__(self):
return f"LearningResourceRun platform={self.platform} run_id={self.run_id}"
class Meta:
unique_together = (("platform", "run_id"),)
index_together = (
(
"content_type",
"start_date",
), # index for sorting course runs by start date
("content_type", "object_id"),
)
class ContentFile(TimestampedModel):
"""
ContentFile model for courserun files
"""
uid = models.CharField(max_length=36, null=True, blank=True)
key = models.CharField(max_length=1024, null=True, blank=True)
run = models.ForeignKey(
LearningResourceRun, related_name="content_files", on_delete=models.CASCADE
)
title = models.CharField(max_length=1024, null=True, blank=True)
description = models.TextField(null=True, blank=True)
image_src = models.URLField(null=True, blank=True)
url = models.TextField(null=True, blank=True)
short_url = models.TextField(null=True, blank=True)
file_type = models.CharField(max_length=128, null=True, blank=True)
section = models.CharField(max_length=512, null=True, blank=True)
section_slug = models.CharField(max_length=512, null=True, blank=True)
content = models.TextField(null=True, blank=True)
content_title = models.CharField(max_length=1024, null=True, blank=True)
content_author = models.CharField(max_length=1024, null=True, blank=True)
content_language = models.CharField(max_length=24, null=True, blank=True)
content_type = models.CharField(
choices=VALID_COURSE_CONTENT_CHOICES, default=CONTENT_TYPE_FILE, max_length=10
)
learning_resource_types = ArrayField(
models.CharField(max_length=256, null=False, blank=False), null=True, blank=True
)
published = models.BooleanField(default=True)
class Meta:
unique_together = (("key", "run"),)
verbose_name = "contentfile"
def get_max_length(field):
"""
Get the max length of a ContentFile field
Args:
field (str): the name of the field
Returns:
int: the max_length of the field
"""
return ContentFile._meta.get_field(field).max_length
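# e.g. get_max_length("title") would return 1024, per the CharField above.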
class Course(AbstractCourse, LearningResourceGenericRelationsMixin):
"""
Course model for courses on all platforms
"""
objects = LearningResourceQuerySet.as_manager()
course_id = models.CharField(max_length=128)
platform = models.CharField(max_length=128)
location = models.CharField(max_length=128, null=True, blank=True)
program_type = models.CharField(max_length=32, null=True, blank=True)
program_name = models.CharField(max_length=256, null=True, blank=True)
department = ArrayField(
models.CharField(max_length=256, null=False, blank=False), null=True, blank=True
)
runs = GenericRelation(LearningResourceRun)
course_feature_tags = JSONField(null=True, blank=True)
extra_course_numbers = ArrayField(
models.CharField(max_length=128), null=True, blank=True
)
ocw_next_course = models.BooleanField(default=False)
@property
def audience(self):
"""Returns the audience for the course"""
if self.platform in PROFESSIONAL_COURSE_PLATFORMS:
return [PROFESSIONAL]
else:
return [OPEN]
@property
def certification(self):
"""Returns the certification for the course"""
if self.platform in PROFESSIONAL_COURSE_PLATFORMS or (
self.platform == PlatformType.mitx.value
and any(
availability != AvailabilityType.archived.value
for availability in self.runs.values_list("availability", flat=True)
)
):
return [CERTIFICATE]
else:
return []
@property
def department_name(self):
"""Returns the names of the departments"""
names = list(
map(
lambda department_num: OCW_DEPARTMENTS.get(department_num, {}).get(
"name"
),
self.department or [],
)
)
return [name for name in names if name]
@property
def department_slug(self):
"""Returns the department slug"""
if self.department:
first_department = self.department[0]
else:
first_department = None
return OCW_DEPARTMENTS.get(first_department, {}).get("slug")
@property
def coursenum(self):
""" Returns the course number from the course_id"""
return self.course_id.split("+")[-1]
def __str__(self):
return self.title
class Meta:
unique_together = ("platform", "course_id")
class List(LearningResource):
"""
List model tracks an ordered list of other LearningResources.
"""
image_description = models.CharField(max_length=1024, null=True, blank=True)
class Meta:
abstract = True
class ListItem(TimestampedModel):
"""
ListItem model tracks associated metadata and LearningResource.
`content_type` is restricted to the learning resources we want.
Lists should not contain other Lists such as Programs and UserLists (such as learning paths).
"""
position = models.PositiveIntegerField()
content_type = models.ForeignKey(
ContentType,
limit_choices_to={"model__in": ("course",)},
on_delete=models.CASCADE,
)
object_id = models.PositiveIntegerField()
item = GenericForeignKey("content_type", "object_id")
class Meta:
abstract = True
class UserList(List, LearningResourceGenericRelationsMixin):
"""
UserList is a user-created model tracking a restricted list of LearningResources.
"""
objects = LearningResourceQuerySet.as_manager()
author = models.ForeignKey(User, on_delete=models.PROTECT)
privacy_level = models.CharField(max_length=32, default=PrivacyLevel.private.value)
image_src = models.ImageField(
null=True, blank=True, max_length=2083, upload_to=user_list_image_upload_uri
)
list_type = models.CharField(max_length=128)
@property
def audience(self):
"""Returns the audience for the user list"""
for list_item in self.items.all():
if OPEN not in list_item.item.audience:
return []
return [OPEN]
@property
def certification(self):
"""Returns the certification for the user list"""
return []
class Meta:
verbose_name = "userlist"
class UserListItem(ListItem):
"""
ListItem model for UserLists
"""
content_type = models.ForeignKey(
ContentType,
limit_choices_to={
"model__in": ("course", "program", "video", "podcast", "podcastepisode")
},
on_delete=models.CASCADE,
)
user_list = models.ForeignKey(
UserList, related_name="items", on_delete=models.CASCADE
)
class Program(List, LearningResourceGenericRelationsMixin):
"""
Program model for MIT programs. Consists of specified list of LearningResources.
"""
objects = LearningResourceQuerySet.as_manager()
program_id = models.CharField(max_length=80, null=True)
image_src = models.URLField(max_length=2048, null=True, blank=True)
url = models.URLField(null=True, max_length=2048)
published = models.BooleanField(default=True)
runs = GenericRelation(LearningResourceRun)
@property
def audience(self):
"""Returns the audience for the program"""
if OfferedBy.micromasters.value in self.offered_by.values_list(
"name", flat=True
):
return [OPEN, PROFESSIONAL]
else:
return [PROFESSIONAL]
@property
def certification(self):
"""Returns the certification for the program"""
return [CERTIFICATE]
class ProgramItem(ListItem):
"""
ListItem model for Programs
"""
program = models.ForeignKey(Program, related_name="items", on_delete=models.CASCADE)
class FavoriteItem(TimestampedModel):
"""
    FavoriteItem model tracks LearningResources that a user has marked as a favorite.
    Favorites don't need to track a user-specified order, although they can by
default be displayed ordered by timestamp. Users should be able to favorite any
LearningResource, including Lists like Programs and UserLists.
"""
user = models.ForeignKey(User, on_delete=models.PROTECT)
content_type = models.ForeignKey(
ContentType,
limit_choices_to={
"model__in": (
"course",
"userlist",
"program",
"video",
"podcast",
"podcastepisode",
)
},
on_delete=models.CASCADE,
)
object_id = models.PositiveIntegerField()
item = GenericForeignKey("content_type", "object_id")
class Meta:
unique_together = ("user", "content_type", "object_id")
class VideoChannel(LearningResource, LearningResourceGenericRelationsMixin):
"""Data model for video channels"""
platform = models.CharField(max_length=40)
channel_id = models.CharField(max_length=80)
full_description = models.TextField(null=True, blank=True)
published = models.BooleanField(default=True)
class Video(LearningResource, LearningResourceGenericRelationsMixin):
"""Data model for video resources"""
objects = LearningResourceQuerySet.as_manager()
video_id = models.CharField(max_length=80)
platform = models.CharField(max_length=128)
full_description = models.TextField(null=True, blank=True)
image_src = models.URLField(max_length=400, null=True, blank=True)
last_modified = models.DateTimeField(null=True, blank=True)
duration = models.CharField(null=True, blank=True, max_length=11)
published = models.BooleanField(default=True)
url = models.URLField(null=True, max_length=2048)
transcript = models.TextField(blank=True, default="")
raw_data = models.TextField(blank=True, default="")
@property
def audience(self):
"""Returns the audience"""
return [OPEN]
@property
def certification(self):
"""Returns the certification"""
return []
class Meta:
unique_together = ("platform", "video_id")
class Playlist(List, LearningResourceGenericRelationsMixin):
"""
Video playlist model, contains videos
"""
objects = LearningResourceQuerySet.as_manager()
platform = models.CharField(max_length=40)
playlist_id = models.CharField(max_length=80)
channel = models.ForeignKey(
VideoChannel, on_delete=models.CASCADE, related_name="playlists"
)
image_src = models.URLField(max_length=400, null=True, blank=True)
url = models.URLField(null=True, max_length=2048)
published = models.BooleanField(default=True)
has_user_list = models.BooleanField(default=True)
user_list = models.OneToOneField(
UserList, on_delete=models.SET_NULL, null=True, related_name="playlist"
)
videos = models.ManyToManyField(
Video, through="PlaylistVideo", through_fields=("playlist", "video")
)
class PlaylistVideo(models.Model):
"""Join table for Playlist -> Video"""
video = models.ForeignKey(
Video, on_delete=models.CASCADE, related_name="playlist_videos"
)
playlist = models.ForeignKey(
Playlist, on_delete=models.CASCADE, related_name="playlist_videos"
)
position = models.PositiveIntegerField()
class Meta:
unique_together = ("playlist", "video")
class Podcast(LearningResource, LearningResourceGenericRelationsMixin):
"""Data model for podcasts"""
objects = LearningResourceQuerySet.as_manager()
podcast_id = models.CharField(max_length=80, unique=True)
full_description = models.TextField(null=True, blank=True)
image_src = models.URLField(max_length=400, null=True, blank=True)
published = models.BooleanField(default=True)
url = models.URLField(null=True, max_length=2048)
apple_podcasts_url = models.URLField(null=True, max_length=2048)
google_podcasts_url = models.URLField(null=True, max_length=2048)
searchable = models.BooleanField(default=True)
rss_url = models.URLField(null=True, max_length=2048)
def __str__(self):
return self.title
@property
def platform(self):
"""Platform for podcast"""
return PlatformType.podcast.value
@property
def audience(self):
"""Returns the audience"""
return [OPEN]
@property
def certification(self):
"""Returns the certification"""
return []
class Meta:
ordering = ("id",)
class PodcastEpisode(LearningResource, LearningResourceGenericRelationsMixin):
"""Data model for podcast episodes"""
objects = LearningResourceQuerySet.as_manager()
episode_id = models.CharField(max_length=80)
full_description = models.TextField(null=True, blank=True)
image_src = models.URLField(max_length=400, null=True, blank=True)
last_modified = models.DateTimeField(null=True, blank=True)
podcast = models.ForeignKey(
Podcast, related_name="episodes", on_delete=models.CASCADE
)
published = models.BooleanField(default=True)
transcript = models.TextField(blank=True, default="")
url = models.URLField(null=True, max_length=2048)
episode_link = models.URLField(null=True, max_length=2048)
searchable = models.BooleanField(default=True)
duration = models.CharField(null=True, blank=True, max_length=10)
rss = models.TextField(null=True, blank=True)
def __str__(self):
return self.title
@property
def platform(self):
"""Platform for podcast episode"""
return PlatformType.podcast.value
@property
def audience(self):
"""Returns the audience"""
return [OPEN]
@property
def certification(self):
"""Returns the certification"""
return []
class Meta:
verbose_name = "podcastepisode"
unique_together = ("podcast", "episode_id")
ordering = ("id",)
class Enrollment(TimestampedModel):
"""Data model for enrollments"""
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="enrollments")
run = models.ForeignKey(
LearningResourceRun,
on_delete=models.CASCADE,
related_name="enrollments",
null=True,
)
course = models.ForeignKey(
Course, on_delete=models.CASCADE, related_name="enrollments", null=True
)
enrollments_table_run_id = models.CharField(max_length=256, null=True, blank=True)
enrollment_timestamp = models.DateTimeField(null=True)
def __str__(self):
return f"Enrollment user_id={self.user_id} enrollments_table_run_id={self.enrollments_table_run_id}"
    class Meta:
        # both constraints are kept in one tuple; assigning unique_together
        # twice would let the second assignment silently replace the first
        unique_together = (
            ("user", "run"),
            ("user", "enrollments_table_run_id"),
        )
|
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Template for a multi-modal dual encoder model."""
from absl import logging
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import base_model
from lingvo.core import hyperparams
from lingvo.core import layers as lingvo_layers
from lingvo.core import metrics as metrics_lib
from lingvo.core import py_utils
from lingvo.tasks.milan import labels as label_lib
from lingvo.tasks.milan import score_functions
from lingvo.tasks.milan import tpu_utils
from lingvo.tasks.milan import utils
def _EncodeBatch(encoder: base_layer.BaseLayer, inputs,
batch_shape: tf.TensorShape):
"""Runs `encoder` on `inputs` with an arbitrary number of batch dimensions."""
# Flatten inputs so they have a single batch dimension, run the encoder,
# and restore the batch dims of the output.
batch_adapter = utils.BatchFlattener(batch_shape)
return batch_adapter.Unflatten(encoder(*batch_adapter.Flatten(inputs)))
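# Shape sketch (assuming utils.BatchFlattener collapses leading batch dims):
# inputs shaped [B, N, ...] are flattened to [B * N, ...] before the encoder
# runs, and its outputs are unflattened back to [B, N, output_dim].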
def EncoderConfig() -> hyperparams.Params:
"""Returns Params for configuring one `DualEncoder` modality."""
p = hyperparams.Params()
p.Define(
'input_features', '',
'Feature(s) from the input batch to feed to the encoder. The structure '
'of this field determines the number and structure of the encoder '
'arguments. Examples: If set to "feature_name", the encoder is called '
'with a single argument `input_batch["feature_name"]`; if set to an '
'N-element tuple, it is called with N arguments. See `Selector` class '
'for more details.')
p.Define('id_feature', '', 'Name of id feature to use for loss masking.')
p.Define(
'encoder', None,
'Params of a layer that encodes input_features. The layer should '
'accept the output of Selector(input_features) as arguments.')
p.Define('output_dim', None,
'Dimension of the embeddings produced by `encoder`.')
p.Define('encoder_scope', '',
'Optional variable scope name to create the encoder in.')
p.Define('projection_scope', '',
'Optional variable scope in which to create the projection layer.')
return p
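# Configuration sketch (names are illustrative, not a tested setup): one
# EncoderConfig per modality, keyed by modality name in
# DualEncoder.Params().encoder_configs.
#   image_config = EncoderConfig()
#   image_config.input_features = 'image/encoded'
#   image_config.id_feature = 'image/id'
#   image_config.encoder = SomeImageEncoder.Params()  # hypothetical layer
#   image_config.output_dim = 2048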
class DualEncoder(base_layer.BaseLayer):
"""Implements a dual encoder trained with in-batch softmax loss."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('encoder_configs', {}, 'Modality name => EncoderConfig params.')
p.Define('score_function', score_functions.DotProductScoreFunction.Params(),
'Layer that computes similarity score.')
    p.Define(
        'joint_embedding_dim', 0,
        'Dimension to project the encoders\' outputs to. Defaults to the '
        'max of the configured encoders\' output_dim values.')
p.Define('regularization_loss_weight', 1.0,
'Weight of regularization loss.')
p.Define(
'loss_weights', {},
'Weights of retrieval losses. Keys should be modality pair tuples and '
'values should be floats >= 0.')
p.Define(
'label_fn', None, 'Label function to call to label batches during '
'training. Has signature ExamplePairs -> tf.Tensor.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
assert p.name
if not p.label_fn:
raise ValueError('Required param label_fn not set.')
if not p.loss_weights:
raise ValueError('Required param loss_weights not set.')
for modality_pair in p.loss_weights:
if (not isinstance(modality_pair, tuple) or len(modality_pair) != 2 or
not isinstance(modality_pair[0], str) or
not isinstance(modality_pair[1], str)):
raise ValueError(
'loss_weights: Keys should be tuples of modality names; got '
f'{modality_pair}')
for modality in modality_pair:
if modality not in p.encoder_configs:
raise ValueError(f'loss_weights: Unknown modality "{modality}"')
for modality_name, config in p.encoder_configs.items():
for param_name in ('input_features', 'id_feature'):
if not getattr(config, param_name):
raise ValueError(f'Required param {param_name} not set')
# Set default scope names if not configured.
if not config.encoder_scope:
config.encoder_scope = f'{modality_name}_encoder'
if not config.projection_scope:
config.projection_scope = f'{modality_name}_projection'
if not config.encoder.name:
config.encoder.name = config.encoder_scope
# Infer joint_embedding_dim if not provided.
if not p.joint_embedding_dim:
p.joint_embedding_dim = max(
config.output_dim for config in p.encoder_configs.values())
tf.logging.info('Defaulting DualEncoder joint_embedding_dim to %d',
p.joint_embedding_dim)
for name, config in p.encoder_configs.items():
self.CreateChild(f'encoder_{name}', config.encoder)
# Where necessary, create layers to project the encoders' output to the
# joint space.
base_projection_params = lingvo_layers.ProjectionLayer.Params().Set(
output_dim=p.joint_embedding_dim,
activation='NONE',
has_bias=True,
batch_norm=False)
projections = {}
for modality, config in p.encoder_configs.items():
if config.output_dim != p.joint_embedding_dim:
projections[modality] = (
base_projection_params.Copy().Set(
name=config.projection_scope, input_dim=config.output_dim))
self.CreateChildren('projections', projections)
self.CreateChild('score_function', p.score_function)
def _child_variable_scope_override(self):
p = self.params
res = super()._child_variable_scope_override()
for modality, config in p.encoder_configs.items():
res[f'encoder_{modality}'] = [p.name, config.encoder_scope]
return res
def EncodeModality(self, modality: str, inputs, batch_shape=None):
"""Runs `inputs` through `modality`'s encoder and optional projection.
Args:
modality: Name of the modality to encode.
inputs: Tensor(s) of input to the encoder, e.g. a batch of decoded images.
batch_shape: TensorShape describing the batch structure of the inputs.
Defaults to `[None]`, which means `inputs` have a single batch
dimension. Set to (e.g.) `[None, 5]` if each example in the batch
contains 5 encodable items.
Returns:
A float32 Tensor of the encoded items, shape
`batch_shape + [joint_embedding_dim]`
"""
if batch_shape is None:
batch_shape = tf.TensorShape([None])
if not isinstance(inputs, tuple):
inputs = (inputs,)
encodings = _EncodeBatch(
self.children[f'encoder_{modality}'], inputs, batch_shape=batch_shape)
# If necessary, project outputs to joint_embedding_dim.
if modality in self.projections:
return self.projections[modality](encodings)
else:
return encodings
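  # Usage sketch (illustrative): encoding 5 candidate images per example,
  # where `images` is a hypothetical [batch, 5, ...] tensor.
  #   embeddings = dual_encoder.EncodeModality(
  #       'image', images, batch_shape=tf.TensorShape([None, 5]))
  #   # -> shape [batch, 5, joint_embedding_dim]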
# NB: ComputePredictions and ComputeLoss methods below mimic the interface of
# a BaseTask.
def ComputePredictions(self, theta, input_batch):
"""Encodes the examples in `input_batch` with respect to each modality.
Args:
theta: `NestedMap` containing variable values of this task.
input_batch: `NestedMap` of input tensors.
Returns:
A `NestedMap` of encodings in all configured modalities. Maps modality
name to a `NestedMap` with
- 'ids': int32 ids Tensor, shape `[batch_size, ...]`
- 'encodings': float32 encodings Tensor, shape
`ids_shape + [joint_embedding_dim]`
"""
del theta # Unused
p = self.params
input_batch = py_utils.NestedMap(input_batch)
outputs = py_utils.NestedMap()
for modality, config in p.encoder_configs.items():
inputs = utils.Selector(config.input_features)(input_batch)
if not isinstance(inputs, tuple):
inputs = (inputs,)
ids = utils.GetFromNestedMapOrDie(input_batch, config.id_feature)
outputs[modality] = py_utils.NestedMap(
encodings=self.EncodeModality(
modality, inputs, batch_shape=ids.shape),
ids=ids)
return outputs
def ComputeLoss(self, theta, predictions, input_batch):
"""Computes loss and other metrics for the given predictions.
Args:
theta: A `.NestedMap` object containing variable values of this task.
predictions: The output of `ComputePredictions`.
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
A tuple (metrics, per_example_tensors), where
- `metrics` is a dict of str keys to (metric, weight) values
- `per_example_tensors` is a dict of str keys to tensors describing each
training example, where the first dimension of each tensor is the
batch index.
"""
p = self.params
# During TPU training, collect the encodings and ids from all TPUs so the
# loss can be computed over all query-result pairs in the global batch.
# To avoid duplicating work, each TPU operates on a non-overlapping
# slice of these pairs. Specifically, each TPU uses queries drawn from its
# local batch and results from the global batch.
# Encodings of the local and global examples, keyed by modality.
local_flat_encodings = py_utils.NestedMap({
modality: tf.reshape(predictions[modality].encodings,
[-1, p.joint_embedding_dim])
for modality in predictions
})
global_flat_encodings = tpu_utils.ConcatenateAcrossReplicas(
local_flat_encodings)
def _ComputePerQueryLoss(query_modality, result_modality):
labeler_inputs = label_lib.ExamplePairs.BetweenLocalAndGlobalBatches(
input_batch,
query_modality=query_modality,
result_modality=result_modality)
labels = p.label_fn(labeler_inputs)
# [num_queries, num_results]
flat_similarities = self.score_function(
local_flat_encodings[query_modality],
global_flat_encodings[result_modality])
flat_labels = tf.reshape(labels, flat_similarities.shape)
# [num_queries]
return label_lib.MultiLabelContrastiveLoss(
labels=flat_labels, logits=flat_similarities)
loss_terms = []
metrics = {}
for direction, loss_weight in p.loss_weights.items():
query_modality, result_modality = direction
if not loss_weight:
logging.info('Skipping %s retrieval', direction)
continue
per_query_losses = _ComputePerQueryLoss(query_modality, result_modality)
mean_per_query_loss = tf.reduce_mean(per_query_losses)
loss_terms.append(loss_weight * mean_per_query_loss)
      metrics[f'loss_{query_modality}_to_{result_modality}'] = (
          mean_per_query_loss, 1)
regularization_losses = utils.CollectRegularizationLosses(self)
if p.regularization_loss_weight and regularization_losses:
tf.logging.info('Adding TF1 regularization loss: %s',
regularization_losses)
total_reg_loss = tf.reduce_sum(regularization_losses)
loss_terms.append(p.regularization_loss_weight * total_reg_loss)
metrics['loss_regularization'] = (total_reg_loss, 1)
loss = tf.add_n(loss_terms)
metrics['loss'] = (loss, 1)
return metrics, {}
class MilanTask(base_model.BaseTask):
"""Task that runs a `DualEncoder`."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('dual_encoder', DualEncoder.Params(),
'Configuration of the DualEncoder to train.')
p.name = 'milan'
return p
def __init__(self, params):
if not params.name:
raise ValueError('params.name not set.')
super().__init__(params)
p = self.params
# Construct the model.
self.CreateChild('dual_encoder', p.dual_encoder)
def ComputePredictions(self, theta, input_batch):
return self.dual_encoder.ComputePredictions(theta.dual_encoder, input_batch)
def ComputeLoss(self, theta, predictions, input_batch):
return self.dual_encoder.ComputeLoss(theta.dual_encoder, predictions,
input_batch)
# Methods below implement parts of `BaseTask` that get called by lingvo
# 'decoder' jobs. The current minimal implementation just causes summaries to
# be generated.
def Decode(self, input_batch):
preds = self.ComputePredictions(self.theta, input_batch)
# Add summary ops to the graph.
_ = self.ComputeLoss(self.theta, preds, input_batch)
return preds
def CreateDecoderMetrics(self):
return {
'num_samples_in_batch': metrics_lib.AverageMetric(),
}
|
|
# -*- coding: utf-8 -*-
"""
State tracking functionality for django models
"""
import inspect
import sys
from functools import wraps
import django
from django.db import models
from django.db.models.query_utils import DeferredAttribute
from django.db.models.signals import class_prepared
from django_fsm.signals import pre_transition, post_transition
try:
from functools import partialmethod
except ImportError:
# python 2.7, so we are on django<=1.11
from django.utils.functional import curry as partialmethod
try:
from django.apps import apps as django_apps
def get_model(app_label, model_name):
app = django_apps.get_app_config(app_label)
return app.get_model(model_name)
except ImportError:
from django.db.models.loading import get_model
__all__ = ['TransitionNotAllowed', 'ConcurrentTransition',
'FSMFieldMixin', 'FSMField', 'FSMIntegerField',
'FSMKeyField', 'ConcurrentTransitionMixin',
'transition', 'can_proceed', 'has_transition_perm',
'GET_STATE', 'RETURN_VALUE']
if sys.version_info[:2] == (2, 6):
# Backport of Python 2.7 inspect.getmembers,
    # since Python 2.6 ships a buggy implementation
def __getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
results = []
for key in dir(object):
try:
value = getattr(object, key)
except AttributeError:
continue
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
inspect.getmembers = __getmembers
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], [r"^django_fsm\.FSMField"])
add_introspection_rules([], [r"^django_fsm\.FSMIntegerField"])
add_introspection_rules([], [r"^django_fsm\.FSMKeyField"])
class TransitionNotAllowed(Exception):
"""Raised when a transition is not allowed"""
def __init__(self, *args, **kwargs):
self.object = kwargs.pop('object', None)
self.method = kwargs.pop('method', None)
super(TransitionNotAllowed, self).__init__(*args, **kwargs)
class InvalidResultState(Exception):
"""Raised when we got invalid result state"""
class ConcurrentTransition(Exception):
"""
Raised when the transition cannot be executed because the
object has become stale (state has been changed since it
was fetched from the database).
"""
class Transition(object):
def __init__(self, method, source, target, on_error, conditions, permission, custom):
self.method = method
self.source = source
self.target = target
self.on_error = on_error
self.conditions = conditions
self.permission = permission
self.custom = custom
@property
def name(self):
return self.method.__name__
def has_perm(self, instance, user):
if not self.permission:
return True
elif callable(self.permission):
return bool(self.permission(instance, user))
elif user.has_perm(self.permission, instance):
return True
elif user.has_perm(self.permission):
return True
else:
return False
def get_available_FIELD_transitions(instance, field):
"""
    Yield the transitions available from the instance's current state
    whose conditions are all met
"""
curr_state = field.get_state(instance)
transitions = field.transitions[instance.__class__]
for name, transition in transitions.items():
meta = transition._django_fsm
if meta.has_transition(curr_state) and meta.conditions_met(instance, curr_state):
yield meta.get_transition(curr_state)
def get_all_FIELD_transitions(instance, field):
"""
    List all transitions declared for this field, regardless of the instance's current state
"""
return field.get_all_transitions(instance.__class__)
def get_available_user_FIELD_transitions(instance, user, field):
"""
    Yield the transitions available from the instance's current state
    whose conditions are met and for which the user has permission
"""
for transition in get_available_FIELD_transitions(instance, field):
if transition.has_perm(instance, user):
yield transition
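# Note: contribute_to_class() below attaches these helpers to the model class,
# so a model with an FSMField named "state" gains get_all_state_transitions(),
# get_available_state_transitions() and get_available_user_state_transitions(user)
# as instance methods.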
class FSMMeta(object):
"""
    Meta information about the transitions declared on a model method
"""
def __init__(self, field, method):
self.field = field
self.transitions = {} # source -> Transition
def get_transition(self, source):
transition = self.transitions.get(source, None)
if transition is None:
transition = self.transitions.get('*', None)
if transition is None:
transition = self.transitions.get('+', None)
return transition
def add_transition(self, method, source, target, on_error=None, conditions=[], permission=None, custom={}):
if source in self.transitions:
raise AssertionError('Duplicate transition for {0} state'.format(source))
self.transitions[source] = Transition(
method=method,
source=source,
target=target,
on_error=on_error,
conditions=conditions,
permission=permission,
custom=custom)
def has_transition(self, state):
"""
        Look up whether any transition exists from the given state via this method
"""
if state in self.transitions:
return True
if '*' in self.transitions:
return True
if '+' in self.transitions and self.transitions['+'].target != state:
return True
return False
def conditions_met(self, instance, state):
"""
Check if all conditions have been met
"""
transition = self.get_transition(state)
if transition is None:
return False
elif transition.conditions is None:
return True
else:
return all(map(lambda condition: condition(instance), transition.conditions))
def has_transition_perm(self, instance, state, user):
transition = self.get_transition(state)
if not transition:
return False
else:
return transition.has_perm(instance, user)
def next_state(self, current_state):
transition = self.get_transition(current_state)
if transition is None:
raise TransitionNotAllowed('No transition from {0}'.format(current_state))
return transition.target
def exception_state(self, current_state):
transition = self.get_transition(current_state)
if transition is None:
raise TransitionNotAllowed('No transition from {0}'.format(current_state))
return transition.on_error
class FSMFieldDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, instance, type=None):
if instance is None:
return self
return self.field.get_state(instance)
def __set__(self, instance, value):
if self.field.protected and self.field.name in instance.__dict__:
raise AttributeError('Direct {0} modification is not allowed'.format(self.field.name))
# Update state
self.field.set_proxy(instance, value)
self.field.set_state(instance, value)
class FSMFieldMixin(object):
descriptor_class = FSMFieldDescriptor
def __init__(self, *args, **kwargs):
self.protected = kwargs.pop('protected', False)
self.transitions = {} # cls -> (transitions name -> method)
self.state_proxy = {} # state -> ProxyClsRef
state_choices = kwargs.pop('state_choices', None)
choices = kwargs.get('choices', None)
if state_choices is not None and choices is not None:
            raise ValueError("Use either 'choices' or 'state_choices', not both")
if state_choices is not None:
choices = []
for state, title, proxy_cls_ref in state_choices:
choices.append((state, title))
self.state_proxy[state] = proxy_cls_ref
kwargs['choices'] = choices
super(FSMFieldMixin, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(FSMFieldMixin, self).deconstruct()
if self.protected:
kwargs['protected'] = self.protected
return name, path, args, kwargs
def get_state(self, instance):
# The state field may be deferred. We delegate the logic of figuring
# this out and loading the deferred field on-demand to Django's
# built-in DeferredAttribute class. DeferredAttribute's instantiation
# signature changed over time, so we need to check Django version
# before proceeding to call DeferredAttribute. An alternative to this
# would be copying the latest implementation of DeferredAttribute to
# django_fsm, but this comes with the added responsibility of keeping
# the copied code up to date.
if django.VERSION[:3] >= (3, 0, 0):
return DeferredAttribute(self).__get__(instance)
elif django.VERSION[:3] >= (2, 1, 0):
return DeferredAttribute(self.name).__get__(instance)
elif django.VERSION[:3] >= (1, 10, 0):
return DeferredAttribute(self.name, model=None).__get__(instance)
else:
            # The field was either not deferred (in which case we can return it
            # right away) or it was, but we are running on an unknown version
# of Django and we do not know the appropriate DeferredAttribute
# interface, and accessing the field will raise KeyError.
return instance.__dict__[self.name]
def set_state(self, instance, state):
instance.__dict__[self.name] = state
def set_proxy(self, instance, state):
"""
        Swap the instance's class to the proxy model registered for this state, if any
"""
if state in self.state_proxy:
state_proxy = self.state_proxy[state]
try:
app_label, model_name = state_proxy.split(".")
except ValueError:
                # If we can't split, assume a model in the current app
app_label = instance._meta.app_label
model_name = state_proxy
model = get_model(app_label, model_name)
if model is None:
raise ValueError('No model found {0}'.format(state_proxy))
instance.__class__ = model
def change_state(self, instance, method, *args, **kwargs):
meta = method._django_fsm
method_name = method.__name__
current_state = self.get_state(instance)
if not meta.has_transition(current_state):
raise TransitionNotAllowed(
"Can't switch from state '{0}' using method '{1}'".format(current_state, method_name),
object=instance, method=method)
if not meta.conditions_met(instance, current_state):
raise TransitionNotAllowed(
"Transition conditions have not been met for method '{0}'".format(method_name),
object=instance, method=method)
next_state = meta.next_state(current_state)
signal_kwargs = {
'sender': instance.__class__,
'instance': instance,
'name': method_name,
'field': meta.field,
'source': current_state,
'target': next_state,
'method_args': args,
'method_kwargs': kwargs
}
pre_transition.send(**signal_kwargs)
try:
result = method(instance, *args, **kwargs)
if next_state is not None:
if hasattr(next_state, 'get_state'):
next_state = next_state.get_state(
instance, transition, result,
args=args, kwargs=kwargs)
signal_kwargs['target'] = next_state
self.set_proxy(instance, next_state)
self.set_state(instance, next_state)
except Exception as exc:
exception_state = meta.exception_state(current_state)
if exception_state:
self.set_proxy(instance, exception_state)
self.set_state(instance, exception_state)
signal_kwargs['target'] = exception_state
signal_kwargs['exception'] = exc
post_transition.send(**signal_kwargs)
raise
else:
post_transition.send(**signal_kwargs)
return result
def get_all_transitions(self, instance_cls):
"""
Returns [(source, target, name, method)] for all field transitions
"""
transitions = self.transitions[instance_cls]
for name, transition in transitions.items():
meta = transition._django_fsm
for transition in meta.transitions.values():
yield transition
def contribute_to_class(self, cls, name, **kwargs):
self.base_cls = cls
super(FSMFieldMixin, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
setattr(cls, 'get_all_{0}_transitions'.format(self.name),
partialmethod(get_all_FIELD_transitions, field=self))
setattr(cls, 'get_available_{0}_transitions'.format(self.name),
partialmethod(get_available_FIELD_transitions, field=self))
setattr(cls, 'get_available_user_{0}_transitions'.format(self.name),
partialmethod(get_available_user_FIELD_transitions, field=self))
class_prepared.connect(self._collect_transitions)
def _collect_transitions(self, *args, **kwargs):
sender = kwargs['sender']
if not issubclass(sender, self.base_cls):
return
def is_field_transition_method(attr):
return (inspect.ismethod(attr) or inspect.isfunction(attr)) \
and hasattr(attr, '_django_fsm') \
and attr._django_fsm.field in [self, self.name]
sender_transitions = {}
transitions = inspect.getmembers(sender, predicate=is_field_transition_method)
for method_name, method in transitions:
method._django_fsm.field = self
sender_transitions[method_name] = method
self.transitions[sender] = sender_transitions
class FSMField(FSMFieldMixin, models.CharField):
"""
State Machine support for Django model as CharField
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 50)
super(FSMField, self).__init__(*args, **kwargs)
class FSMIntegerField(FSMFieldMixin, models.IntegerField):
"""
Same as FSMField, but stores the state value in an IntegerField.
"""
pass
class FSMKeyField(FSMFieldMixin, models.ForeignKey):
"""
State Machine support for Django model
"""
def get_state(self, instance):
return instance.__dict__[self.attname]
def set_state(self, instance, state):
instance.__dict__[self.attname] = self.to_python(state)
class ConcurrentTransitionMixin(object):
"""
Protects a Model from undesirable effects caused by concurrently executed transitions,
e.g. running the same transition multiple times at the same time, or running different
transitions with the same SOURCE state at the same time.
This behavior is achieved using an idea based on optimistic locking. No additional
version field is required though; only the state field(s) is/are used for the tracking.
    This scheme is not as strict as a true *optimistic locking* mechanism; it is,
    however, more lightweight, leveraging the specifics of FSM models.
    An instance of a model based on this mixin will be prevented from being saved to
    the DB if any of its state fields (instances of FSMFieldMixin) have been changed
    since the object was fetched from the database. A *ConcurrentTransition* exception
    is raised in such cases.
For guaranteed protection against such race conditions, make sure:
* Your transitions do not have any side effects except for changes in the database,
* You always run the save() method on the object within django.db.transaction.atomic()
block.
Following these recommendations, you can rely on ConcurrentTransitionMixin to cause
a rollback of all the changes that have been executed in an inconsistent (out of sync)
state, thus practically negating their effect.
"""
def __init__(self, *args, **kwargs):
super(ConcurrentTransitionMixin, self).__init__(*args, **kwargs)
self._update_initial_state()
@property
def state_fields(self):
return filter(
lambda field: isinstance(field, FSMFieldMixin),
self._meta.fields
)
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
# _do_update is called once for each model class in the inheritance hierarchy.
# We can only filter the base_qs on state fields (can be more than one!) present in this particular model.
# Select state fields to filter on
filter_on = filter(lambda field: field.model == base_qs.model, self.state_fields)
# state filter will be used to narrow down the standard filter checking only PK
state_filter = dict((field.attname, self.__initial_states[field.attname]) for field in filter_on)
updated = super(ConcurrentTransitionMixin, self)._do_update(
base_qs=base_qs.filter(**state_filter),
using=using,
pk_val=pk_val,
values=values,
update_fields=update_fields,
forced_update=forced_update
)
        # It may happen that nothing was updated in the original _do_update method, not because of a mismatched state,
# but because of missing PK. This codepath is possible when saving a new model instance with *preset PK*.
# In this case Django does not know it has to do INSERT operation, so it tries UPDATE first and falls back to
# INSERT if UPDATE fails.
# Thus, we need to make sure we only catch the case when the object *is* in the DB, but with changed state; and
# mimic standard _do_update behavior otherwise. Django will pick it up and execute _do_insert.
if not updated and base_qs.filter(pk=pk_val).exists():
raise ConcurrentTransition("Cannot save object! The state has been changed since fetched from the database!")
return updated
def _update_initial_state(self):
self.__initial_states = dict(
(field.attname, field.value_from_object(self)) for field in self.state_fields
)
def save(self, *args, **kwargs):
super(ConcurrentTransitionMixin, self).save(*args, **kwargs)
self._update_initial_state()
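# --- Hedged usage sketch (names are hypothetical) ----------------------------
# Saving inside transaction.atomic() lets the ConcurrentTransition raised by
# _do_update() roll back every change made under the stale state:
#
#   from django.db import transaction
#
#   with transaction.atomic():
#       invoice = Invoice.objects.get(pk=pk)  # Invoice mixes in ConcurrentTransitionMixin
#       invoice.pay()                         # an FSM transition method
#       invoice.save()                        # raises ConcurrentTransition if the row's
#                                             # state changed since the fetch above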
def transition(field, source='*', target=None, on_error=None, conditions=[], permission=None, custom={}):
"""
Method decorator to mark allowed transitions.
Set target to None if current state needs to be validated and
has not changed after the function call.
"""
def inner_transition(func):
wrapper_installed, fsm_meta = True, getattr(func, '_django_fsm', None)
if not fsm_meta:
wrapper_installed = False
fsm_meta = FSMMeta(field=field, method=func)
setattr(func, '_django_fsm', fsm_meta)
if isinstance(source, (list, tuple, set)):
for state in source:
func._django_fsm.add_transition(func, state, target, on_error, conditions, permission, custom)
else:
func._django_fsm.add_transition(func, source, target, on_error, conditions, permission, custom)
@wraps(func)
def _change_state(instance, *args, **kwargs):
return fsm_meta.field.change_state(instance, func, *args, **kwargs)
if not wrapper_installed:
return _change_state
return func
return inner_transition
def can_proceed(bound_method, check_conditions=True):
"""
    Returns True if the model's current state allows calling bound_method.
Set ``check_conditions`` argument to ``False`` to skip checking
conditions.
"""
if not hasattr(bound_method, '_django_fsm'):
im_func = getattr(bound_method, 'im_func', getattr(bound_method, '__func__'))
raise TypeError('%s method is not transition' % im_func.__name__)
meta = bound_method._django_fsm
im_self = getattr(bound_method, 'im_self', getattr(bound_method, '__self__'))
current_state = meta.field.get_state(im_self)
return meta.has_transition(current_state) and (
not check_conditions or meta.conditions_met(im_self, current_state))
def has_transition_perm(bound_method, user):
"""
    Returns True if the model's current state allows calling bound_method and the user has permission to do so
"""
if not hasattr(bound_method, '_django_fsm'):
im_func = getattr(bound_method, 'im_func', getattr(bound_method, '__func__'))
raise TypeError('%s method is not transition' % im_func.__name__)
meta = bound_method._django_fsm
im_self = getattr(bound_method, 'im_self', getattr(bound_method, '__self__'))
current_state = meta.field.get_state(im_self)
return (meta.has_transition(current_state) and
meta.conditions_met(im_self, current_state) and
meta.has_transition_perm(im_self, current_state, user))
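# --- Hedged usage sketch (not part of django_fsm; "BlogPost" and "user" are
# hypothetical names in a configured Django project) ---------------------------
#
#   from django.db import models
#   from django_fsm import FSMField, transition, can_proceed, has_transition_perm
#
#   class BlogPost(models.Model):
#       state = FSMField(default='new')
#
#       @transition(field=state, source='new', target='published',
#                   on_error='failed', permission='myapp.can_publish')
#       def publish(self):
#           # Side effects go here. The field moves to 'published' only if
#           # this method returns without raising; on an exception it moves
#           # to 'failed' because on_error is set.
#           pass
#
#   post = BlogPost()
#   assert can_proceed(post.publish)                # state + conditions allow it
#   assert has_transition_perm(post.publish, user)  # ...and `user` may run it
#   post.publish()
#   assert post.state == 'published'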
class State(object):
def get_state(self, model, transition, result, args=[], kwargs={}):
raise NotImplementedError
class RETURN_VALUE(State):
def __init__(self, *allowed_states):
self.allowed_states = allowed_states if allowed_states else None
def get_state(self, model, transition, result, args=[], kwargs={}):
if self.allowed_states is not None:
if result not in self.allowed_states:
raise InvalidResultState(
'{} is not in list of allowed states\n{}'.format(
result, self.allowed_states))
return result
class GET_STATE(State):
def __init__(self, func, states=None):
self.func = func
self.allowed_states = states
def get_state(self, model, transition, result, args=[], kwargs={}):
result_state = self.func(model, *args, **kwargs)
if self.allowed_states is not None:
if result_state not in self.allowed_states:
raise InvalidResultState(
'{} is not in list of allowed states\n{}'.format(
                        result_state, self.allowed_states))
return result_state
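# --- Hedged usage sketch for dynamic targets (hypothetical names) -------------
#
#   @transition(field=state, source='review',
#               target=RETURN_VALUE('published', 'rejected'))
#   def moderate(self, approved):
#       # The method's return value becomes the new state; a value outside the
#       # allowed list raises InvalidResultState.
#       return 'published' if approved else 'rejected'
#
#   @transition(field=state, source='new',
#               target=GET_STATE(lambda instance, urgent=False:
#                                'hot' if urgent else 'queued',
#                                states=['hot', 'queued']))
#   def enqueue(self, urgent=False):
#       pass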
|
|
"""Xbox Media Source Implementation."""
from dataclasses import dataclass
from typing import Dict, List, Tuple
# pylint: disable=no-name-in-module
from pydantic.error_wrappers import ValidationError
from xbox.webapi.api.client import XboxLiveClient
from xbox.webapi.api.provider.catalog.models import FieldsTemplate, Image
from xbox.webapi.api.provider.gameclips.models import GameclipsResponse
from xbox.webapi.api.provider.screenshots.models import ScreenshotResponse
from xbox.webapi.api.provider.smartglass.models import InstalledPackage
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_GAME,
MEDIA_CLASS_IMAGE,
MEDIA_CLASS_VIDEO,
)
from homeassistant.components.media_source.const import MEDIA_MIME_TYPES
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from .browse_media import _find_media_image
from .const import DOMAIN
MIME_TYPE_MAP = {
"gameclips": "video/mp4",
"screenshots": "image/png",
}
MEDIA_CLASS_MAP = {
"gameclips": MEDIA_CLASS_VIDEO,
"screenshots": MEDIA_CLASS_IMAGE,
}
async def async_get_media_source(hass: HomeAssistantType):
"""Set up Xbox media source."""
entry = hass.config_entries.async_entries(DOMAIN)[0]
client = hass.data[DOMAIN][entry.entry_id]["client"]
return XboxSource(hass, client)
@callback
def async_parse_identifier(
item: MediaSourceItem,
) -> Tuple[str, str, str]:
"""Parse identifier."""
identifier = item.identifier or ""
start = ["", "", ""]
items = identifier.lstrip("/").split("~~", 2)
return tuple(items + start[len(items) :])
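# Hedged illustration of the identifier layout parsed above: up to three
# segments (title, category, url) joined by "~~", padded with empty strings.
#
#   >>> items = "12345#Halo#https://img~~my#gameclips".lstrip("/").split("~~", 2)
#   >>> tuple(items + ["", "", ""][len(items):])
#   ('12345#Halo#https://img', 'my#gameclips', '')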
@dataclass
class XboxMediaItem:
"""Represents gameclip/screenshot media."""
caption: str
thumbnail: str
uri: str
media_class: str
class XboxSource(MediaSource):
"""Provide Xbox screenshots and gameclips as media sources."""
name: str = "Xbox Game Media"
def __init__(self, hass: HomeAssistantType, client: XboxLiveClient):
"""Initialize Xbox source."""
super().__init__(DOMAIN)
self.hass: HomeAssistantType = hass
self.client: XboxLiveClient = client
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Resolve media to a url."""
_, category, url = async_parse_identifier(item)
_, kind = category.split("#", 1)
return PlayMedia(url, MIME_TYPE_MAP[kind])
async def async_browse_media(
        self, item: MediaSourceItem, media_types: Tuple[str, ...] = MEDIA_MIME_TYPES
) -> BrowseMediaSource:
"""Return media."""
title, category, _ = async_parse_identifier(item)
if not title:
return await self._build_game_library()
if not category:
return _build_categories(title)
return await self._build_media_items(title, category)
async def _build_game_library(self):
"""Display installed games across all consoles."""
apps = await self.client.smartglass.get_installed_apps()
games = {
game.one_store_product_id: game
for game in apps.result
if game.is_game and game.title_id
}
app_details = await self.client.catalog.get_products(
games.keys(),
FieldsTemplate.BROWSE,
)
images = {
prod.product_id: prod.localized_properties[0].images
for prod in app_details.products
}
return BrowseMediaSource(
domain=DOMAIN,
identifier="",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="",
title="Xbox Game Media",
can_play=False,
can_expand=True,
children=[_build_game_item(game, images) for game in games.values()],
children_media_class=MEDIA_CLASS_GAME,
)
async def _build_media_items(self, title, category):
"""Fetch requested gameclip/screenshot media."""
title_id, _, thumbnail = title.split("#", 2)
owner, kind = category.split("#", 1)
items: List[XboxMediaItem] = []
try:
if kind == "gameclips":
if owner == "my":
response: GameclipsResponse = (
await self.client.gameclips.get_recent_clips_by_xuid(
self.client.xuid, title_id
)
)
elif owner == "community":
response: GameclipsResponse = await self.client.gameclips.get_recent_community_clips_by_title_id(
title_id
)
else:
return None
items = [
XboxMediaItem(
item.user_caption
or dt_util.as_local(
dt_util.parse_datetime(item.date_recorded)
).strftime("%b. %d, %Y %I:%M %p"),
item.thumbnails[0].uri,
item.game_clip_uris[0].uri,
MEDIA_CLASS_VIDEO,
)
for item in response.game_clips
]
elif kind == "screenshots":
if owner == "my":
response: ScreenshotResponse = (
await self.client.screenshots.get_recent_screenshots_by_xuid(
self.client.xuid, title_id
)
)
elif owner == "community":
response: ScreenshotResponse = await self.client.screenshots.get_recent_community_screenshots_by_title_id(
title_id
)
else:
return None
items = [
XboxMediaItem(
item.user_caption
or dt_util.as_local(item.date_taken).strftime(
"%b. %d, %Y %I:%M%p"
),
item.thumbnails[0].uri,
item.screenshot_uris[0].uri,
MEDIA_CLASS_IMAGE,
)
for item in response.screenshots
]
except ValidationError:
# Unexpected API response
pass
return BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}~~{category}",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="",
title=f"{owner.title()} {kind.title()}",
can_play=False,
can_expand=True,
children=[_build_media_item(title, category, item) for item in items],
children_media_class=MEDIA_CLASS_MAP[kind],
thumbnail=thumbnail,
)
def _build_game_item(item: InstalledPackage, images: Dict[str, List[Image]]):
"""Build individual game."""
thumbnail = ""
image = _find_media_image(images.get(item.one_store_product_id, []))
if image is not None:
thumbnail = image.uri
if thumbnail[0] == "/":
thumbnail = f"https:{thumbnail}"
return BrowseMediaSource(
domain=DOMAIN,
identifier=f"{item.title_id}#{item.name}#{thumbnail}",
media_class=MEDIA_CLASS_GAME,
media_content_type="",
title=item.name,
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_DIRECTORY,
thumbnail=thumbnail,
)
def _build_categories(title):
"""Build base categories for Xbox media."""
_, name, thumbnail = title.split("#", 2)
base = BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}",
media_class=MEDIA_CLASS_GAME,
media_content_type="",
title=name,
can_play=False,
can_expand=True,
children=[],
children_media_class=MEDIA_CLASS_DIRECTORY,
thumbnail=thumbnail,
)
owners = ["my", "community"]
kinds = ["gameclips", "screenshots"]
for owner in owners:
for kind in kinds:
base.children.append(
BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}~~{owner}#{kind}",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="",
title=f"{owner.title()} {kind.title()}",
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_MAP[kind],
)
)
return base
def _build_media_item(title: str, category: str, item: XboxMediaItem):
"""Build individual media item."""
_, kind = category.split("#", 1)
return BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}~~{category}~~{item.uri}",
media_class=item.media_class,
media_content_type=MIME_TYPE_MAP[kind],
title=item.caption,
can_play=True,
can_expand=False,
thumbnail=item.thumbnail,
)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils import filters as utils_filters
SERVICE_ENABLED = "enabled"
SERVICE_DISABLED = "disabled"
SERVICE_STATUS_DISPLAY_CHOICES = (
(SERVICE_ENABLED, _("Enabled")),
(SERVICE_DISABLED, _("Disabled")),
)
SERVICE_STATE_DISPLAY_CHOICES = (
('up', _("Up")),
('down', _("Down")),
)
class ServiceFilterAction(tables.FilterAction):
filter_field = 'type'
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
attr = getattr(service, self.filter_field, '')
if attr is not None and q in attr.lower():
return True
return False
return filter(comp, services)
class SubServiceFilterAction(ServiceFilterAction):
filter_field = 'binary'
def get_status(service):
# if not configured in this region, neither option makes sense
if service.host:
return SERVICE_ENABLED if not service.disabled else SERVICE_DISABLED
return None
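# Illustration of the mapping above: a service with a host and disabled=False
# is displayed as "Enabled", disabled=True as "Disabled"; a service without a
# host yields None, leaving the status cell blank.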
class ServicesTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column("name", verbose_name=_('Name'))
service_type = tables.Column('__unicode__', verbose_name=_('Service'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_status,
verbose_name=_('Status'),
status=True,
display_choices=SERVICE_STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "services"
verbose_name = _("Services")
table_actions = (ServiceFilterAction,)
multi_select = False
status_columns = ["status"]
def get_available(zone):
return zone.zoneState['available']
def get_agent_status(agent):
template_name = 'admin/info/_cell_status.html'
context = {
'status': agent.status,
'disabled_reason': agent.disabled_reason
}
return template.loader.render_to_string(template_name, context)
class NovaServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column(get_agent_status, verbose_name=_('Status'))
state = tables.Column('state', verbose_name=_('State'),
display_choices=SERVICE_STATE_DISPLAY_CHOICES)
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta(object):
name = "nova_services"
verbose_name = _("Compute Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class CinderServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column(get_agent_status, verbose_name=_('Status'))
state = tables.Column('state', verbose_name=_('State'),
display_choices=SERVICE_STATE_DISPLAY_CHOICES)
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta(object):
name = "cinder_services"
verbose_name = _("Block Storage Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class NetworkAgentsFilterAction(tables.FilterAction):
def filter(self, table, agents, filter_string):
q = filter_string.lower()
def comp(agent):
if q in agent.agent_type.lower():
return True
return False
return filter(comp, agents)
def get_network_agent_status(agent):
if agent.admin_state_up:
return _('Enabled')
return _('Disabled')
def get_network_agent_state(agent):
if agent.alive:
return _('Up')
return _('Down')
class NetworkAgentsTable(tables.DataTable):
agent_type = tables.Column('agent_type', verbose_name=_('Type'))
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_network_agent_status, verbose_name=_('Status'))
state = tables.Column(get_network_agent_state, verbose_name=_('State'))
heartbeat_timestamp = tables.Column('heartbeat_timestamp',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s" % (obj.binary, obj.host)
class Meta(object):
name = "network_agents"
verbose_name = _("Network Agents")
table_actions = (NetworkAgentsFilterAction,)
multi_select = False
class HeatServiceFilterAction(tables.FilterAction):
filter_field = 'type'
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
attr = getattr(service, self.filter_field, '')
if attr is not None and q in attr.lower():
return True
return False
return filter(comp, services)
class HeatServiceTable(tables.DataTable):
hostname = tables.Column('hostname', verbose_name=_('Hostname'))
binary = tables.Column("binary", verbose_name=_('Name'))
engine_id = tables.Column('engine_id', verbose_name=_('Engine Id'))
host = tables.Column('host', verbose_name=_('Host'))
topic = tables.Column('topic', verbose_name=_('Topic'))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
status = tables.Column('status', verbose_name=_('Status'),
display_choices=SERVICE_STATE_DISPLAY_CHOICES)
def get_object_id(self, obj):
return "%s" % obj.engine_id
class Meta(object):
name = "heat_services"
verbose_name = _("Orchestration Services")
table_actions = (HeatServiceFilterAction,)
multi_select = False
|
|
"""
This might look like an SSH protocol implementation. IT IS NOT. It's not even
remotely secure. Nothing is verified, nothing is random. DO NOT USE.
"""
import binascii
import struct
from sshtransport import BinaryPacket
from sshtype import *
SSH_MSG_KEXINIT = 20
SSH_MSG_KEX_DH_GEX_REQUEST_OLD = 30
SSH_MSG_KEX_DH_GEX_GROUP = 31
SSH_MSG_KEX_DH_GEX_INIT = 32
SSH_MSG_KEX_DH_GEX_REPLY = 33
SSH_MSG_KEX_DH_GEX_REQUEST = 34
def message_from_packet(binpkt):
message_types = {
SSH_MSG_KEXINIT: KexInit,
SSH_MSG_KEX_DH_GEX_REQUEST: DHGEXRequest,
SSH_MSG_KEX_DH_GEX_GROUP: DHGEXGroup,
}
return message_types[binpkt.payload[0]](packet=binpkt)
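# For example, a BinaryPacket whose payload starts with byte 20
# (SSH_MSG_KEXINIT) is dispatched to KexInit(packet=binpkt); a payload with an
# unregistered type byte raises KeyError.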
class SSHStruct(object):
def __init__(self, parse_attr, *args, **kwargs):
object.__setattr__(self, "_SSHStruct__structure", args)
object.__setattr__(self, "_SSHStruct__values", {})
for arg in args:
self.__values[arg.name] = arg.default
if parse_attr in kwargs:
self.parse(kwargs[parse_attr])
else:
kwargs_set = set(kwargs)
valid_set = set([ s.name for s in self.__structure ])
if not kwargs_set.issubset(valid_set):
raise TypeError(
"unexpected arguments: " + ", ".join(kwargs_set.difference(valid_set))
)
self.__values.update(kwargs)
def __getattr__(self, name):
if name in self.__values:
return self.__values[name]
else:
raise AttributeError("'{0}' object has no attribute '{1}'".format(
type(self).__name__,
name
))
def __setattr__(self, name, value):
if name in self.__values:
self.__values[name] = value
else:
raise AttributeError("'{0}' object has no attribute '{1}'".format(
type(self).__name__,
name
))
def __dir__(self):
return object.__dir__(self) + list(self.__values.keys())
def __eq__(self, value):
return type(self) == type(value) and self.__values == value.__values
def __hash__(self):
return hash(( type(self), frozenset(self.__values.items()) ))
def __str__(self):
return "{0}({1})".format(
type(self).__name__,
", ".join([ s.name + "=" + s.to_str(self.__values[s.name]) for s in self.__structure ])
)
def parse(self, x):
self.from_bytes(x)
def from_bytes(self, data):
for s in self.__structure:
( data, value ) = s.from_bytes(data)
self.__values[s.name] = value
def to_bytes(self):
data = b""
for s in self.__structure:
data += s.to_bytes(self.__values[s.name])
return data
def to_dict(self):
result = {}
for key, value in self.__values.items():
result[key] = binascii.hexlify(value).decode() if type(value) == bytes else value
return result
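# --- Hedged illustration (assumes UInt32 from sshtype behaves as its name
# suggests) --------------------------------------------------------------------
#
#   class Example(SSHStruct):
#       def __init__(self, **kwargs):
#           # "data" is the kwarg that triggers parse() instead of field kwargs
#           super(Example, self).__init__("data", UInt32("a", 0), UInt32("b", 0),
#                                         **kwargs)
#
#   e = Example(a=1)                    # field kwargs populate the values dict
#   e.b = 2                             # __setattr__ only accepts declared fields
#   assert Example(data=e.to_bytes()) == e   # serialize/parse round trip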
class SSHMessage(SSHStruct):
def __init__(self, message_type, *args, **kwargs):
object.__setattr__(self, "_SSHMessage__message_type", message_type)
super(SSHMessage, self).__init__("packet", *args, **kwargs)
def parse(self, x):
self.from_packet(x)
def from_packet(self, packet):
data = packet.payload
if data[0] != self.__message_type:
raise RuntimeError("invalid type {0}, expected {1}".format(
data[0],
self.__message_type
))
self.from_bytes(data[1:])
def to_packet(self):
data = bytes([ self.__message_type ])
data += self.to_bytes()
return BinaryPacket(payload=data)
class KexInit(SSHMessage):
def __init__(self, **kwargs):
        super(KexInit, self).__init__(
SSH_MSG_KEXINIT,
Bytes(16, "cookie", b"\x00" * 16),
NameList("kex_algorithms", []),
NameList("server_host_key_algorithms", []),
NameList("encryption_algorithms_c2s", []),
NameList("encryption_algorithms_s2c", []),
NameList("mac_algorithms_c2s", []),
NameList("mac_algorithms_s2c", []),
NameList("compression_algorithms_c2s", []),
NameList("compression_algorithms_s2c", []),
NameList("languages_c2s", []),
NameList("languages_s2c", []),
Boolean("first_kex_packet_follows", False),
UInt32("reserved", 0),
**kwargs
)
def optimal_response(self):
return KexInit(
kex_algorithms=self.kex_algorithms,
server_host_key_algorithms=self.server_host_key_algorithms,
encryption_algorithms_c2s=self.encryption_algorithms_c2s,
encryption_algorithms_s2c=self.encryption_algorithms_s2c,
mac_algorithms_c2s=self.mac_algorithms_c2s,
mac_algorithms_s2c=self.mac_algorithms_s2c,
compression_algorithms_c2s=self.compression_algorithms_c2s,
compression_algorithms_s2c=self.compression_algorithms_s2c,
languages_c2s=self.languages_c2s,
languages_s2c=self.languages_s2c
)
def __str__(self):
def strlist(name, value):
return name + ": " + ", ".join(value)
return "\n".join([
strlist("Key exchange algorithms", self.kex_algorithms),
strlist("Host key algorithms", self.server_host_key_algorithms),
strlist("Encryption algorithms (client to server)", self.encryption_algorithms_c2s),
strlist("Encryption algorithms (server to client)", self.encryption_algorithms_s2c),
strlist("MAC algorithms (client to server)", self.mac_algorithms_c2s),
strlist("MAC algorithms (server to client)", self.mac_algorithms_s2c),
])
class DHGEXRequest(SSHMessage):
def __init__(self, **kwargs):
        super(DHGEXRequest, self).__init__(
SSH_MSG_KEX_DH_GEX_REQUEST,
UInt32("min", 1024),
UInt32("n"),
UInt32("max", 8192),
**kwargs
)
class DHGEXGroup(SSHMessage):
def __init__(self, **kwargs):
        super(DHGEXGroup, self).__init__(
SSH_MSG_KEX_DH_GEX_GROUP,
MPInt("prime"),
MPInt("generator"),
**kwargs
)
class DHGEXInit(SSHMessage):
def __init__(self, **kwargs):
        super(DHGEXInit, self).__init__(SSH_MSG_KEX_DH_GEX_INIT, MPInt("e"), **kwargs)
class DHGEXReply(SSHMessage):
def __init__(self, **kwargs):
super(DHGEXReply, self).__init__(
SSH_MSG_KEX_DH_GEX_REPLY,
String("server_public_key"),
MPInt("f"),
String("signature"),
**kwargs
)
class RSAPublicKey(SSHStruct):
def __init__(self, **kwargs):
super(RSAPublicKey, self).__init__(
"data",
String("key_type"),
MPInt("public_exponent"),
MPInt("modulus"),
**kwargs
)
if self.key_type != b"ssh-rsa":
raise RuntimeError("invalid key type {0}, expected 'ssh-rsa'".format(
self.key_type.decode("ASCII"),
))
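# --- Hedged usage sketch ------------------------------------------------------
#
#   req = DHGEXRequest(n=2048)   # min/max keep their defaults (1024/8192)
#   pkt = req.to_packet()        # BinaryPacket whose payload[0] == 34
#   assert message_from_packet(pkt) == req   # parse round trip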
|
|
from itertools import product
import math
import mpmath
from sympy.utilities.pytest import XFAIL, raises
from sympy import (
symbols, lambdify, sqrt, sin, cos, tan, pi, acos, acosh, Rational,
Float, Matrix, Lambda, Piecewise, exp, Integral, oo, I, Abs, Function,
true, false, And, Or, Not, ITE, Min, Max, floor, diff, IndexedBase, Sum,
DotProduct, Eq)
from sympy.printing.lambdarepr import LambdaPrinter
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
import sympy
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
numexpr = import_module('numexpr')
tensorflow = import_module('tensorflow')
w, x, y, z = symbols('w,x,y,z')
#================== Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_own_namespace():
myfunc = lambda x: 1
f = lambdify(x, sin(x), {"sin": myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
raises(TypeError, lambda: lambdify(1))
# same with vector exprs
raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules =========================
# high-precision output of sin(0.2) is used to detect unwanted loss of precision
@conserve_mpmath_dps
def test_sympy_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
# arctan is in numpy module and should not be available
raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
@conserve_mpmath_dps
def test_mpmath_precision():
mpmath.mp.dps = 100
assert str(lambdify((), pi.evalf(100), 'mpmath')()) == str(pi.evalf(100))
#================== Test Translations ==============================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
def test_numpy_transl():
if not numpy:
skip("numpy not installed.")
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert nump in numpy.__dict__
def test_tensorflow_transl():
if not tensorflow:
skip("tensorflow not installed")
from sympy.utilities.lambdify import TENSORFLOW_TRANSLATIONS
for sym, tens in TENSORFLOW_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert tens in tensorflow.__dict__
def test_numpy_translation_abs():
if not numpy:
skip("numpy not installed.")
f = lambdify(x, Abs(x), "numpy")
assert f(-1) == 1
assert f(1) == 1
def test_numexpr_printer():
if not numexpr:
skip("numexpr not installed.")
# if translation/printing is done incorrectly then evaluating
# a lambdified numexpr expression will throw an exception
from sympy.printing.lambdarepr import NumExprPrinter
from sympy import S
blacklist = ('where', 'complex', 'contains')
arg_tuple = (x, y, z) # some functions take more than one argument
for sym in NumExprPrinter._numexpr_functions.keys():
if sym in blacklist:
continue
ssym = S(sym)
if hasattr(ssym, '_nargs'):
nargs = ssym._nargs[0]
else:
nargs = 1
args = arg_tuple[:nargs]
f = lambdify(args, ssym(*args), modules='numexpr')
assert f(*(1, )*nargs) is not None
def test_issue_9334():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
expr = sympy.S('b*a - sqrt(a**2)')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = numpy.random.random((2, 4))
func_numexpr(foo, bar)
#================== Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
#================== Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules="sympy")
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules="sympy")
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='sympy')(1, 2) == sol
assert lambdify(v.T, J, modules='sympy')(1, 2) == sol
def test_numpy_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
    # Lambdify the matrix first, to ensure the default return type is an array
f = lambdify((x, y, z), A, ['numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
    # Check that the result type is a numpy array
assert isinstance(f(1, 2, 3), numpy.ndarray)
def test_numpy_transpose():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A.T, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]]))
def test_numpy_dotproduct():
if not numpy:
skip("numpy not installed")
A = Matrix([x, y, z])
f1 = lambdify([x, y, z], DotProduct(A, A), modules='numpy')
f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy')
f3 = lambdify([x, y, z], DotProduct(A.T, A), modules='numpy')
    f4 = lambdify([x, y, z], DotProduct(A.T, A.T), modules='numpy')
assert f1(1, 2, 3) == \
f2(1, 2, 3) == \
f3(1, 2, 3) == \
f4(1, 2, 3) == \
numpy.array([14])
def test_numpy_inverse():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A**-1, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]]))
def test_numpy_old_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
f = lambdify((x, y, z), A, [{'ImmutableMatrix': numpy.matrix}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
assert isinstance(f(1, 2, 3), numpy.matrix)
def test_python_div_zero_issue_11306():
if not numpy:
skip("numpy not installed.")
p = Piecewise((1 / x, y < -1), (x, y <= 1), (1 / x, True))
lambdify([x, y], p, modules='numpy')(0, 1)
def test_issue9474():
mods = [None, 'math']
if numpy:
mods.append('numpy')
if mpmath:
mods.append('mpmath')
for mod in mods:
f = lambdify(x, sympy.S(1)/x, modules=mod)
assert f(2) == 0.5
f = lambdify(x, floor(sympy.S(1)/x), modules=mod)
assert f(2) == 0
if mpmath:
f = lambdify(x, sympy.S(1)/sympy.Abs(x), modules=['mpmath'])
assert isinstance(f(2), mpmath.mpf)
for absfunc, modules in product([Abs, abs], mods):
f = lambdify(x, absfunc(x), modules=modules)
assert f(-1) == 1
assert f(1) == 1
assert f(3+4j) == 5
def test_issue_9871():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
r = sqrt(x**2 + y**2)
expr = diff(1/r, x)
xn = yn = numpy.linspace(1, 10, 16)
# expr(xn, xn) = -xn/(sqrt(2)*xn)^3
fv_exact = -numpy.sqrt(2.)**-3 * xn**-2
fv_numpy = lambdify((x, y), expr, modules='numpy')(xn, yn)
fv_numexpr = lambdify((x, y), expr, modules='numexpr')(xn, yn)
numpy.testing.assert_allclose(fv_numpy, fv_exact, rtol=1e-10)
numpy.testing.assert_allclose(fv_numexpr, fv_exact, rtol=1e-10)
def test_numpy_piecewise():
if not numpy:
skip("numpy not installed.")
pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True))
f = lambdify(x, pieces, modules="numpy")
numpy.testing.assert_array_equal(f(numpy.arange(10)),
numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81]))
    # If we evaluate where all conditions are False, we should get back NaN
nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0)))
numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])),
numpy.array([1, numpy.nan, 1]))
def test_numpy_logical_ops():
if not numpy:
skip("numpy not installed.")
and_func = lambdify((x, y), And(x, y), modules="numpy")
or_func = lambdify((x, y), Or(x, y), modules="numpy")
not_func = lambdify((x), Not(x), modules="numpy")
arr1 = numpy.array([True, True])
arr2 = numpy.array([False, True])
numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True]))
numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True]))
numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False]))
def test_numpy_matmul():
if not numpy:
skip("numpy not installed.")
xmat = Matrix([[x, y], [z, 1+z]])
ymat = Matrix([[x**2], [Abs(x)]])
mat_func = lambdify((x, y, z), xmat*ymat, modules="numpy")
numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]]))
numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]]))
# Multiple matrices chained together in multiplication
f = lambdify((x, y, z), xmat*xmat*xmat, modules="numpy")
numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25],
[159, 251]]))
def test_numpy_numexpr():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b, c = numpy.random.randn(3, 128, 128)
# ensure that numpy and numexpr return same value for complicated expression
expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \
Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2)
npfunc = lambdify((x, y, z), expr, modules='numpy')
nefunc = lambdify((x, y, z), expr, modules='numexpr')
assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c))
def test_numexpr_userfunctions():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b = numpy.random.randn(2, 10)
uf = type('uf', (Function, ),
{'eval' : classmethod(lambda x, y : y**2+1)})
func = lambdify(x, 1-uf(x), modules='numexpr')
assert numpy.allclose(func(a), -(a**2))
uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1)
func = lambdify((x, y), uf(x, y), modules='numexpr')
assert numpy.allclose(func(a, b), 2*a*b+1)
def test_tensorflow_basic_math():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.constant(0, dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s) == 0.5
def test_tensorflow_placeholders():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: 0}) == 0.5
def test_tensorflow_variables():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.Variable(0, dtype=tensorflow.float32)
s = tensorflow.Session()
s.run(tensorflow.initialize_all_variables())
assert func(a).eval(session=s) == 0.5
def test_tensorflow_logical_operations():
if not tensorflow:
skip("tensorflow not installed.")
expr = Not(And(Or(x, y), y))
func = lambdify([x, y], expr, modules="tensorflow")
a = tensorflow.constant(False)
b = tensorflow.constant(True)
s = tensorflow.Session()
assert func(a, b).eval(session=s) == 0
def test_tensorflow_piecewise():
if not tensorflow:
skip("tensorflow not installed.")
expr = Piecewise((0, Eq(x,0)), (-1, x < 0), (1, x > 0))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: -1}) == -1
assert func(a).eval(session=s, feed_dict={a: 0}) == 0
assert func(a).eval(session=s, feed_dict={a: 1}) == 1
def test_tensorflow_multi_max():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(x, -x, x**2)
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: -2}) == 4
def test_tensorflow_multi_min():
if not tensorflow:
skip("tensorflow not installed.")
expr = Min(x, -x, x**2)
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: -2}) == -2
def test_tensorflow_relational():
if not tensorflow:
skip("tensorflow not installed.")
expr = x >= 0
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: 1})
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
#================== Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(func, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x: x + 101)
raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_errors():
# Test errors that implemented functions can return, and still be able to
# form expressions.
# See: https://github.com/sympy/sympy/issues/10810
for val, error_class in product((0, 0., 2, 2.0),
(AttributeError, TypeError, ValueError)):
def myfunc(a):
if a == 0:
raise error_class
return 1
f = implemented_function('f', myfunc)
expr = f(val)
assert expr == f(val)
def test_imps_wrong_args():
raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
t = symbols('t')
F = Function('F')
G = Function('G')
#"\alpha" is not a valid python variable name
#lambdify should sub in a dummy for it, and return
#without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
#Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_python_keywords():
# Test for issue 7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(w, x, y, z)\n\n"
"Expression:\n\n"
"w + x + y + z")
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n"
" a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n"
"Expression:\n\n"
"a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +...")
#================== Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
def test_true_false():
# We want an exact 'is' comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_issue_2790():
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
def test_ITE():
assert lambdify((x, y, z), ITE(x, y, z))(True, 5, 3) == 5
assert lambdify((x, y, z), ITE(x, y, z))(False, 5, 3) == 3
def test_Min_Max():
# see gh-10375
assert lambdify((x, y, z), Min(x, y, z))(1, 2, 3) == 1
assert lambdify((x, y, z), Max(x, y, z))(1, 2, 3) == 3
def test_Indexed():
# Issue #10934
if not numpy:
skip("numpy not installed")
a = IndexedBase('a')
i, j = symbols('i j')
b = numpy.array([[1, 2], [3, 4]])
assert lambdify(a, Sum(a[x, y], (x, 0, 1), (y, 0, 1)))(b) == 10
|
|
from TopologyWeigher import utils as topoutils
__author__ = 'ash'
import socket
from struct import *
import pcapy
from time import sleep
from time import time
import shlex
import re
import commands
from subprocess import Popen, PIPE, STDOUT
from threading import Timer
from threading import Thread
from nova.conductor import api as conductor_api
from nova import context
from nova.openstack.common import log as logging
from oslo.config import cfg
traffic_opts = [
cfg.StrOpt('traffic_sniffing_interface',
default=None,
help='The interface to listen on for the traffic'),
cfg.BoolOpt('traffic_enable_topology_statistics',
default=False,
help='Collect the traffic and ping statistics for scheduling'),
cfg.IntOpt('refresh_traf_info',
default=10,
help='In seconds, how often to send traffic statistics to db'),
cfg.IntOpt('refresh_ping_info',
default=10,
help='In seconds, how often to send latency statistics to db'),
cfg.IntOpt('refresh_ping_make',
default=5,
help='In seconds, how often to launch ping for known hosts'),
cfg.IntOpt('ping_count',
default=2,
help='Number of ICMP echo requests to send per ping invocation')
]
CONF = cfg.CONF
CONF.register_opts(traffic_opts)
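# A hypothetical /etc/nova/nova.conf fragment wiring up the options above
# (section name and values are illustrative only, not from this codebase):
#
#   [DEFAULT]
#   traffic_sniffing_interface = eth0
#   traffic_enable_topology_statistics = True
#   refresh_traf_info = 10
#   refresh_ping_info = 10
#   refresh_ping_make = 5
#   ping_count = 2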
traffic_stat = dict()
LOG = logging.getLogger("nova-compute")
# Global param - used in ping class
refresh_ping_make = int(CONF.refresh_ping_make)
ping_count = int(CONF.ping_count)
class ClientTraffic(Thread):
def __init__(self,sniff_int,log):
Thread.__init__(self)
# The thread should work as a daemon
self.daemon = True
self.error = False
#self.topology_desc_path = topology_desc_path
# Get the context for conductor calls
self.context = context.get_admin_context()
self.conductor = conductor_api.API()
# Get the information about the topology from the db
topoinfo = topoutils.get_nodes(self.context, self.conductor)
# It might not be in the db yet - the scheduler may not have written it,
# or may not have started at all
if not topoinfo:
LOG.error("Couldn't get the info about topology")
self.error = True
return
else:
self.node_list = topoinfo
# Transforming the node list to dict <ip>:<id>
self.node_dict = topoutils.get_node_dict(self.node_list)
# Getting information about the router node id and IP
(self.router_id,self.router_ip) = topoutils.get_router_id_ip(self.node_list)
# The interface to listen for the traffic
self.interface = sniff_int
# This node's ip_addr
self.ip_addr = str(topoutils.get_ip_address(str(sniff_int)))
# Initial value of bw_id
# TODO: currently unused
self.bw_id = 0
# Refresh time of sending the traffic info
self.refresh_time = int(CONF.refresh_traf_info)
# Refresh time of sending the ping info
self.refresh_ping_time = int(CONF.refresh_ping_info)
# The current node's id
self.my_id = topoutils.get_my_id(self.node_dict, self.ip_addr)
self.time_to_send = False
self.ping_info = dict()
self.log = log
def run(self):
if not self.error:
self.launch()
@staticmethod
def eth_addr(a):
b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))
return b
def parse_packet(self,packet):
"""
Parse a sniffed packet to obtain its src/dst IPs and its size.
"""
#parse ethernet header
eth_length = 14
eth_header = packet[:eth_length]
eth = unpack('!6s6sH' , eth_header)
eth_protocol = socket.ntohs(eth[2])
#print 'Destination MAC : ' + eth_addr(packet[0:6]) + ' Source MAC : ' + eth_addr(packet[6:12]) + ' Protocol : ' + str(eth_protocol)
#Parse IP packets, IP Protocol number = 8
res = False
if eth_protocol == 8 :
#Parse IP header
#take first 20 characters for the ip header
ip_header = packet[eth_length:20+eth_length]
#now unpack them :)
iph = unpack('!BBHHHBBH4s4s' , ip_header)
version_ihl = iph[0]
#version = version_ihl >> 4
ihl = version_ihl & 0xF
iph_length = ihl * 4
s_addr = socket.inet_ntoa(iph[8])
d_addr = socket.inet_ntoa(iph[9])
protocol = iph[6]
res = True
if protocol == 6:
t = iph_length + eth_length
tcp_header = packet[t:t+20]
#now unpack them :)
tcph = unpack('!HHLLBBHHH' , tcp_header)
doff_reserved = tcph[4]
tcph_length = doff_reserved >> 4
h_size = eth_length + iph_length + tcph_length * 4
data = packet[h_size:]
#print 'Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(ttl) + ' Protocol : ' + str(protocol) + ' Source Address : ' + str(s_addr) + ' Destination Address : ' + str(d_addr)
#print ' Source Address : ' + str(s_addr) + ' Destination Address : ' + str(d_addr) + ' Length : ' + str(len(packet))
return (s_addr,d_addr,len(packet),res)
return (0,0,0,False)
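# parse_packet in brief (descriptive note): '!6s6sH' splits the 14-byte
# Ethernet header into dst MAC, src MAC and EtherType (the ntohs(...) == 8
# check matches IPv4 frames); '!BBHHHBBH4s4s' then unpacks the fixed
# 20-byte IPv4 header, from which the src/dst addresses and the full
# frame length are handed back to the sniffing loop.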
def process_ping(self):
self.send_ping()
def send_ping(self):
"""
Function sends ping to db via conductor
"""
for key in self.ping_info:
ping = self.ping_info[key]
# Transforming src/dst IPs to nodes' ids
(src_id,dst_id) = topoutils.get_hosts_id(ping.src, ping.dst, self.node_dict, self.router_id)
ping_value = ping.result
# constructing values for inserting into db
resources = {}
resources['src'] = self.my_id
resources['dst'] = dst_id
resources['latency'] = ping_value
self.conductor.ping_add(self.context,resources)
def handle_new_ips(self,packet):
"""
Handle new IPs seen in a packet's src/dst fields
and create a ping task for each new peer.
"""
# Pick the peer address - the side that is not this node's ip_addr
if packet.src == self.ip_addr:
dst = packet.dst
else:
dst = packet.src
# If there is no node with that IP,
# we treat it as external traffic
if dst not in self.node_dict:
dst = self.router_ip
if (self.ip_addr, dst) not in self.ping_info:
# Creating the task and starting it
self.ping_info[self.ip_addr,dst] = ip_ping(self.ip_addr,dst)
self.ping_info[self.ip_addr,dst].start()
def handle_packet(self,packet):
"""
Handles new obtained packet
"""
self.handle_new_ips(packet)
# Transforming src/dst IPs to nodes' ids
(src_id,dst_id) = topoutils.get_hosts_id(packet.src,packet.dst,self.node_dict,self.router_id)
#print "Packet: Src: " + str(packet.src) + " Dst: " + str(packet.dst)
# If there is no info about this link yet - create it
if (src_id, dst_id) not in traffic_stat:
#self.process_ping(packet)
nl = NetworkLoad()
nl.inc(packet.length)
traffic_stat[(src_id,dst_id)] = nl
else:
traffic_stat[(src_id,dst_id)].inc(packet.length)
def process_bandwidth(self):
"""
Function calculates the traffic before sending to db
"""
for k in traffic_stat.keys():
# (number of bytes in packets sniffed for refresh_time) / refresh_time = bytes/s
bandwidth = traffic_stat[k].count / self.refresh_time
(src,dst) = k
traffic_stat[k].bandwidth = bandwidth
def send_traffic(self):
"""
Function sends traffic to db via nova-conductor
"""
for link in traffic_stat.keys():
(src_id, dst_id) = link
bandwidth = traffic_stat[link].bandwidth
# constructing values for inserting into db
resources = {}
resources['src'] = src_id
resources['dst'] = dst_id
resources['bytes'] = bandwidth
resources['m_id'] = self.bw_id
self.conductor.traffic_add(self.context, resources)
# the variable that keeps the id number of the measurement
self.bw_id += 1
# clearing the history for new portion of data
traffic_stat.clear()
def launch(self):
"""
Main cycle of sniffing the traffic
"""
# open pcap on the configured interface
cap = pcapy.open_live(self.interface,65536,1,0)
# fix time when we start
self.start_time = time()
# Timer that sends ping to db every refresh_ping_time seconds
rt_ping = RepeatedTimer(self.refresh_ping_time,self.process_ping)
LOG.debug("Starting traffic collector agent")
while True:
try:
(header, packet) = cap.next()
except: # timeout
continue
(src,dst,leng,res) = self.parse_packet(packet)
if not res:
#print "Not res"
continue
pk = Packet(src,dst,leng)
self.handle_packet(pk)
# if the elapsed time is more than refresh_time
# Send traffic information to db
if time() - self.start_time > self.refresh_time:
# reset timer
self.start_time = time()
self.process_bandwidth()
self.send_traffic()
class Packet:
"""
Class describes the packet info
"""
def __init__(self, src, dst, length):
self.src = src
self.dst = dst
self.length = length
# Class describing the traffic load
# between src and dst.
# Its instances aggregate the traffic on the route
class NetworkLoad:
def __init__(self):
self.count = 0
self.error = 0
self.metric_ind = 0
self.error_ind = 0
self.metrics = ['B', 'KB', 'MB', 'GB', 'TB']
self.bandwidth = 0
self.ping = -1
def inc(self,leng):
self.count += leng
def sum_up(self):
while self.count >= 1024:
# TODO: integer division truncates here; the rounding error is not significant
self.count /= 1024
self.metric_ind += 1
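# Worked example for sum_up() (illustrative, count assumed to be bytes):
# with count == 2621440 the loop divides twice, leaving count == 2 and
# metric_ind == 2, i.e. "2" of metrics[2] ("MB"); the fractional 0.5 MB
# is lost to the integer division flagged in the TODO above.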
class RepeatedTimer(object):
"""
Helper class for launching a timer in a separate thread
"""
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
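# Illustrative sketch only: RepeatedTimer fires its callback on a fixed
# cadence; note it starts on construction, and the next Timer is armed
# before the callback runs, so a slow callback does not delay later ticks.
# 'heartbeat' is a hypothetical callback for this example.
def _example_repeated_timer():
    def heartbeat():
        LOG.debug("still alive")
    rt = RepeatedTimer(30, heartbeat)  # invokes heartbeat() roughly every 30s
    sleep(95)                          # ... let it tick a few times ...
    rt.stop()                          # cancel the pending Timer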
class ip_ping(Thread):
"""
Class that launches ping.
For each new topology node IP, an instance of this class is started
that pings the node every refresh_ping_make seconds.
"""
def __init__ (self,src,dst):
Thread.__init__(self)
self.src = src
self.dst = dst
self.result = -1
self.repeat = refresh_ping_make
self.count = ping_count
def run(self):
p = re.compile(r'.*time\=([0-9\.]*)\ .*')
while True:
src = self.src
host = self.dst
host = host.split(':')[0]
# with -c > 1 there is more output to parse
# fping would be nicer, but it is not installed by default on most systems
status, output = commands.getstatusoutput("ping -c {count} {host}".format(count=self.count, host=host))
lines = output.split('\n')
avg = 0
for i in range(1,self.count+1):
s = p.search(lines[i])
if s is None:
avg = 999.0 * self.count  # lost/garbled reply: pin the average to a 999 ms sentinel
break
avg += float(s.group(1))
avg /= self.count
self.result = avg
sleep(self.repeat)
def get_simple_cmd_output(self,cmd, stderr=STDOUT):
"""
Execute a simple external command and get its output.
"""
args = shlex.split(cmd)
return Popen(args, stdout=PIPE, stderr=stderr).communicate()[0]
def ready(self):
return self.result != -1
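# The regex in run() expects per-reply lines in the stock iputils ping
# format, e.g. (illustrative):
#   64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=0.345 ms
# lines[1] through lines[count] are the reply lines; a line that does not
# match is treated as a lost reply and pins the average to the 999 ms
# sentinel above.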
# Checking if the traffic statistics sniffer is enabled
enable_stat = CONF.traffic_enable_topology_statistics
if enable_stat:
# Getting the interface to listen on
if CONF.traffic_sniffing_interface is None:
LOG.error("No traffic_sniffing_interface specified in /etc/nova/nova.conf")
else:
interface = str(CONF.traffic_sniffing_interface)
client = ClientTraffic(interface, LOG)
client.start()
|
|
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Session and API call management for VMware ESX/VC server.
Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls.
"""
from eventlet import event
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
LOG = logging.getLogger(__name__)
class Retry(object):
"""Decorator for retrying a function upon suggested exceptions.
The method retries for a given number of times, and the sleep
time increments until the max sleep time is reached.
If max retries is set to -1, the decorated function is
invoked indefinitely until no exception is thrown, or until
the caught exception is not in the list of suggested exceptions.
"""
def __init__(self, max_retry_count=-1, inc_sleep_time=10,
max_sleep_time=60, exceptions=()):
"""Initialize retry object based on input params.
:param max_retry_count: Max number of times a function must be
retried when one of the input 'exceptions'
is caught. The default -1 retries the
function until it succeeds or until an
exception outside 'exceptions' arises.
:param inc_sleep_time: Incremental time in seconds for sleep time
between retries
:param max_sleep_time: Max sleep time beyond which the sleep time will
not be incremented using param inc_sleep_time
and max_sleep_time will be used as sleep time
:param exceptions: Suggested exceptions for which the function must be
retried
"""
self._max_retry_count = max_retry_count
self._inc_sleep_time = inc_sleep_time
self._max_sleep_time = max_sleep_time
self._exceptions = exceptions
self._retry_count = 0
self._sleep_time = 0
def __call__(self, f):
def _func(done, *args, **kwargs):
try:
result = f(*args, **kwargs)
done.send(result)
except self._exceptions as excep:
LOG.exception(_("Failure while invoking function: "
"%(func)s. Error: %(excep)s.") %
{'func': f.__name__, 'excep': excep})
if (self._max_retry_count != -1 and
self._retry_count >= self._max_retry_count):
done.send_exception(excep)
else:
self._retry_count += 1
self._sleep_time += self._inc_sleep_time
return self._sleep_time
except Exception as excep:
done.send_exception(excep)
return 0
def func(*args, **kwargs):
done = event.Event()
loop = loopingcall.DynamicLoopingCall(_func, done, *args, **kwargs)
loop.start(periodic_interval_max=self._max_sleep_time)
result = done.wait()
loop.stop()
return result
return func
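# Illustrative sketch only: a hypothetical use of Retry on a flaky call.
# _TransientError and _fetch_status are placeholders for this example,
# not part of the driver.
class _TransientError(Exception):
    pass

@Retry(max_retry_count=3, inc_sleep_time=5, max_sleep_time=30,
       exceptions=(_TransientError,))
def _fetch_status(client):
    # Retried (with increasing sleeps) while _TransientError keeps firing;
    # any other exception propagates immediately via done.send_exception().
    return client.get_status()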
class VMwareAPISession(object):
"""Sets up a session with the server and handles all calls made to it."""
@Retry(exceptions=(Exception,))
def __init__(self, server_ip, server_username, server_password,
api_retry_count, task_poll_interval, scheme='https',
create_session=True, wsdl_loc=None):
"""Constructs session object.
:param server_ip: IP address of ESX/VC server
:param server_username: Username of ESX/VC server admin user
:param server_password: Password for param server_username
:param api_retry_count: Number of times an API must be retried upon
session/connection related errors
:param task_poll_interval: Sleep time in seconds for polling an
on-going async task as part of the API call
:param scheme: http or https protocol
:param create_session: Boolean whether to set up connection at the
time of instance creation
:param wsdl_loc: WSDL file location for invoking SOAP calls on server
using suds
"""
self._server_ip = server_ip
self._server_username = server_username
self._server_password = server_password
self._wsdl_loc = wsdl_loc
self._api_retry_count = api_retry_count
self._task_poll_interval = task_poll_interval
self._scheme = scheme
self._session_id = None
self._vim = None
if create_session:
self.create_session()
@property
def vim(self):
if not self._vim:
self._vim = vim.Vim(protocol=self._scheme, host=self._server_ip,
wsdl_loc=self._wsdl_loc)
return self._vim
def create_session(self):
"""Establish session with the server."""
# Login and setup the session with the server for making
# API calls
session_manager = self.vim.service_content.sessionManager
session = self.vim.Login(session_manager,
userName=self._server_username,
password=self._server_password)
# Terminate the earlier session, if possible (For the sake of
# preserving sessions as there is a limit to the number of
# sessions we can have)
if self._session_id:
try:
self.vim.TerminateSession(session_manager,
sessionId=[self._session_id])
except Exception as excep:
# This exception is something we can live with. It is
# just an extra caution on our side. The session may
# have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession.
LOG.exception(_("Error while terminating session: %s.") %
excep)
self._session_id = session.key
LOG.info(_("Successfully established connection to the server."))
def __del__(self):
"""Logs-out the session."""
try:
self.vim.Logout(self.vim.service_content.sessionManager)
except Exception as excep:
LOG.exception(_("Error while logging out the user: %s.") %
excep)
def invoke_api(self, module, method, *args, **kwargs):
"""Wrapper method for invoking APIs.
Here we retry the API calls for exceptions which may come because
of session overload.
Make sure that if a Vim instance is being passed here, this session's
Vim (self.vim) instance is used, as we re-establish the session
in case it has timed out.
:param module: Module invoking the VI SDK calls
:param method: Method in the module that invokes the VI SDK call
:param args: Arguments to the method
:param kwargs: Keyword arguments to the method
:return: Response of the API call
"""
@Retry(max_retry_count=self._api_retry_count,
exceptions=(error_util.VimException))
def _invoke_api(module, method, *args, **kwargs):
last_fault_list = []
while True:
try:
api_method = getattr(module, method)
return api_method(*args, **kwargs)
except error_util.VimFaultException as excep:
if error_util.NOT_AUTHENTICATED not in excep.fault_list:
raise excep
# If it is a not-authenticated fault, we re-authenticate
# the user and retry the API invocation.
# An idle session yields an empty RetrieveProperties
# response, but so does a query with a genuinely empty
# answer (e.g. no VMs on the host), so the two cases
# cannot be told apart directly.
# Therefore, if the previous attempt already hit this
# fault and, after creating a fresh session, we hit it
# again, we conclude the response is genuinely empty
# and return [].
if error_util.NOT_AUTHENTICATED in last_fault_list:
return []
last_fault_list = excep.fault_list
LOG.exception(_("Not authenticated error occurred. "
"Will create session and try "
"API call again: %s.") % excep)
self.create_session()
return _invoke_api(module, method, *args, **kwargs)
def wait_for_task(self, task):
"""Return a deferred that will give the result of the given task.
The task is polled until it completes. The method returns the task
information upon successful completion.
:param task: Managed object reference of the task
:return: Task info upon successful completion of the task
"""
done = event.Event()
loop = loopingcall.FixedIntervalLoopingCall(self._poll_task,
task, done)
loop.start(self._task_poll_interval)
task_info = done.wait()
loop.stop()
return task_info
def _poll_task(self, task, done):
"""Poll the given task.
If the task completes successfully then returns task info.
In case of error sends back appropriate error.
:param task: Managed object reference of the task
:param done: Event that captures task status
"""
try:
task_info = self.invoke_api(vim_util, 'get_object_property',
self.vim, task, 'info')
if task_info.state in ['queued', 'running']:
# If task already completed on server, it will not return
# the progress.
if hasattr(task_info, 'progress'):
LOG.debug(_("Task: %(task)s progress: %(prog)s.") %
{'task': task, 'prog': task_info.progress})
return
elif task_info.state == 'success':
LOG.debug(_("Task %s status: success.") % task)
done.send(task_info)
else:
error_msg = str(task_info.error.localizedMessage)
LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
{'task': task, 'err': error_msg})
done.send_exception(error_util.VimFaultException([],
error_msg))
except Exception as excep:
LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
{'task': task, 'err': excep})
done.send_exception(excep)
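# Illustrative sketch only (not part of the driver): how a caller might use
# VMwareAPISession. The host, credentials and the 'PowerOnVM_Task' method
# name are placeholders for this example, not an endorsement of specific
# API calls.
def _example_session_usage(vm_ref):
    session = VMwareAPISession('10.0.0.1', 'root', 'secret',
                               api_retry_count=10, task_poll_interval=5)
    # invoke_api resolves the method on the module/Vim object and retries
    # on VimException; wait_for_task polls the returned task to completion.
    task = session.invoke_api(session.vim, 'PowerOnVM_Task', vm_ref)
    return session.wait_for_task(task)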
|
|
from keras.layers.core import Reshape, Lambda
from keras.layers import TimeDistributed
from keras.models import Sequential,load_model,Model
from keras.layers import Dense, MaxPooling2D, MaxPooling3D, Input, merge, UpSampling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping,ModelCheckpoint,TensorBoard
from keras.layers.convolutional import Convolution2D, Convolution3D
from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D
from keras.optimizers import SGD , Adam, RMSprop
from keras.callbacks import Callback
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.layers.noise import GaussianNoise
def create_model(name):
if name=="model3" or name == "model3_mean" or name == "model3_nomean2":
return create_model_3()
if name=="model6":
return create_model_6()
if name=="model7":
return create_model_7()
if name=="model8":
return create_model_8()
if name=="model3_noise" or name == "model3_noise_comb" or name == "model3_noise_comb2" or name == "model3_noise_comb_dsb" or name == "model3_noise_comb3":
return create_model_3_noise()
if name == "model3_noise2":
return create_model_3_noise2()
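# Note (descriptive only): several names map to one architecture, e.g.
# "model3_noise" and its "_comb" variants all build create_model_3_noise();
# the label distinguishes training runs, not the network itself.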
def create_model_3_noise2():
inputs = Input((32, 32, 32, 1))
noise = GaussianNoise(sigma=0.02)(inputs)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(noise)
conv1 = SpatialDropout3D(0.4)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.4)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(128, init='normal')(x)
x = Dropout(0.5)(x)
x = Dense(64, init='normal')(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_3_noise():
inputs = Input((32, 32, 32, 1))
noise = GaussianNoise(sigma=0.05)(inputs)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(noise)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.000001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_8():
inputs = Input((32, 32, 32, 1))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = SpatialDropout3D(0.2)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
conv1 = SpatialDropout3D(0.2)(conv1)
conv1 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.2)(conv2)
conv2 = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_7():
inputs = Input((32, 32, 32, 1))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 5, 5, 5, activation='relu', border_mode='same')(inputs)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 5, 5, 5, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 5, 5, 5, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 5, 5, 5, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_6():
inputs = Input((32, 32, 32, 1))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_5():
inputs = Input((32, 32, 32, 1))
noise = GaussianNoise(sigma=0.05)(inputs)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(noise)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_4():
inputs1 = Input((32, 32, 32, 1))
inputs2 = Input((6,))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs1)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = merge([x, inputs2], mode='concat')
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=[inputs1,inputs2], output=predictions)
model.summary()
optimizer = Adam()
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_3():
inputs = Input((32, 32, 32, 1))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = SpatialDropout3D(0.1)(conv2)
conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)
x = Flatten()(pool2)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_2():
inputs = Input((32, 32, 32, 1))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
x = Flatten()(pool1)
x = Dense(64, init='normal')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=1e-5)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
def create_model_1():
inputs = Input((32, 32, 32, 1))
#noise = GaussianNoise(sigma=0.1)(x)
conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = SpatialDropout3D(0.1)(conv1)
conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)
x = Flatten()(pool1)
x = Dense(64, init='normal')(x)
predictions = Dense(1, init='normal', activation='sigmoid')(x)
model = Model(input=inputs, output=predictions)
model.summary()
optimizer = Adam(lr=1e-5)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])
return model
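# Illustrative sketch only: the EarlyStopping/ModelCheckpoint imports above
# suggest a training loop along these lines (Keras 1.x API, matching the
# imports; X_train/y_train and the checkpoint path are placeholders, not
# part of the original code).
def example_training_run(X_train, y_train):
    callbacks = [EarlyStopping(monitor='val_loss', patience=5),
                 ModelCheckpoint('model3.{epoch:02d}.hdf5',
                                 monitor='val_loss', save_best_only=True)]
    model = create_model("model3")
    model.fit(X_train, y_train, batch_size=32, nb_epoch=50,
              validation_split=0.2, callbacks=callbacks)
    return model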
|
|
#!/usr/bin/python3
# ftpClientApp.py
"""
Project : Network Programming Assignment Question
Filename : ftpClientApp.py
Author : Jeffrey Nursalim
Student No: TP031319
Module Code & Title: CE00731-M Network Systems and Technologies
Due Date : 09 December 2013
Tested with ftplib changeset 38db4d0726bd found at
http://hg.python.org/cpython/file/38db4d0726bd/Lib/ftplib.py
and pyftpdlib ver 1.3.0 released on 2013-11-07 found at
http://pyftpdlib.googlecode.com/files/pyftpdlib-1.3.0.tar.gz
The MIT License (MIT)
Copyright (c) 2014 Jeffrey Nursalim
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
VERSION = "1.0 RELEASE"
BLOCK_SIZE = 1024
LOWEST_PORT_NO = 1025
HIGHEST_PORT_NO = 65533
import os, tkinter, threading, sys, time, re, socket, ipaddress
from functools import partial
from tkinter import ttk, constants, filedialog
import tkinter.messagebox as mbox
import ftplib
from ftpGUI import RootTree, StdoutRedirector
class FTPClientApp(tkinter.Frame):
root_dir = dict()
root_dir_tree = dict()
dir_tree_frame = dict()
ftp_conn = ftplib.FTP()
def __init__(self, master=None):
tkinter.Frame.__init__(self, master)
self.grid(row=0, column=0)
# Main Frame
master.minsize(960,640)
self.local_ip_addr = socket.gethostbyname(socket.getfqdn())
master.title("FTP Client by TP031319 at %s" % self.local_ip_addr)
self.initialise()
self.create_control_frame(rw=0, cl=0)
self.create_state_frame(rw=1, cl=0)
self.create_input_frame(rw=0, cl=3)
self.create_remote_dir_button(rw=1, cl=3)
self.create_dir_frame(rw=3, cl=0)
self.create_dir_tree_frame(rw=7, cl=0, tit="Local")
self.create_push_file_button(rw=7, cl=1)
self.create_dir_tree_frame(rw=7, cl=3, tit="Remote")
self.create_pull_file_button(rw=7, cl=2)
self.create_connect_button(rw=0, cl=1)
self.create_disconnect_button(rw=0, cl=2)
self.create_browse_button(rw=4, cl=3)
self.create_share_button(rw=4, cl=4)
self.create_stdout_frame(rw=8, cl=0)
def initialise(self):
# Initial values
self.username = tkinter.StringVar()
self.username.set("user")
self.password = tkinter.StringVar()
self.password.set("passwd")
self.root_dir['Local'] = tkinter.StringVar()
self.root_dir['Local'].set(os.getcwd() + os.sep)
self.current_state = tkinter.StringVar()
self.current_state.set("DISCONNECTED")
self.listen_ip = tkinter.StringVar()
self.listen_ip.set("127.0.0.1")
self.listen_port = tkinter.StringVar()
self.listen_port.set(str(LOWEST_PORT_NO))
self.root_dir['Remote'] = tkinter.StringVar()
self.root_dir['Remote'].set(os.sep)
self.transferred_up_to_now = 0
self.filesize_in_transfer = 0
def create_control_frame(self, rw, cl):
# Control Frame
self.control_frame = ttk.Frame(self, relief=constants.SOLID, borderwidth=1)
self.control_frame.grid(row=rw, column=cl, columnspan=4, sticky=constants.W, pady=4, padx=5)
ttk.Label(self.control_frame, text="Client Control").grid(row=rw, column=cl)
def create_connect_button(self, rw, cl):
self.connect_button = ttk.Button(self.control_frame, text="Connect", command=self.connect)
self.connect_button.grid(row=rw, column=cl+1)
def create_disconnect_button(self, rw, cl):
self.disconnect_button = ttk.Button(self.control_frame, text="Disconnect",
state=['disabled'], command=partial(self.disconnect, self.root_dir_tree['Remote']))
self.disconnect_button.grid(row=rw, column=cl+2)
def create_input_frame(self, rw, cl):
self.input_frame = ttk.Frame(self, relief=constants.SOLID, borderwidth=1)
self.input_frame.grid(row=rw, column=cl, columnspan=3, sticky=constants.W, pady=4, padx=5)
ttk.Label(self.input_frame, text="Server Address").grid(row=rw, column=cl,
sticky=constants.W)
self.listen_ip_input = ttk.Entry(self.input_frame, width=40, textvariable=self.listen_ip)
self.listen_ip_input.grid(row=rw+1, column=cl)
ttk.Label(self.input_frame, text="Server Port").grid(row=rw, column=cl+1,
sticky=constants.W)
self.listen_port_input = ttk.Entry(self.input_frame, width=8, textvariable=self.listen_port)
self.listen_port_input.grid(row=rw+1, column=cl+1)
def create_state_frame(self, rw, cl):
# State Frame
state_frame = ttk.Frame(self, relief=constants.SOLID, borderwidth=1)
state_frame.grid(row=rw, column=cl, columnspan=3, sticky=constants.W, pady=4, padx=5)
ttk.Label(state_frame, text="FTP Connection State").grid(row=rw, column=cl)
state_value = ttk.Label(state_frame, textvariable=self.current_state, foreground='blue')
state_value.grid(row=rw, column=cl+1)
def create_browse_button(self, rw, cl):
self.browse_button = ttk.Button(self.dir_frame, text="Browse",
command=partial(self.select_dir, self.root_dir_tree['Local']))
self.browse_button.grid(row=rw, column=cl)
def create_share_button(self, rw, cl):
self.share_button = ttk.Button(self.dir_frame, text="Refresh Local",
command=partial(self.share_dir, self.root_dir_tree['Local']))
self.share_button.grid(row=rw, column=cl)
def create_dir_frame(self, rw, cl):
self.dir_frame = ttk.Frame(self, relief=constants.SOLID, borderwidth=1)
self.dir_frame.grid(row=rw, column=cl, columnspan=2, sticky=constants.W, pady=4, padx=5)
ttk.Label(self.dir_frame, text="Local Directory").grid(row=rw, column=cl,
sticky=constants.W)
self.root_dir_input = ttk.Entry(self.dir_frame, width=64,
textvariable=self.root_dir['Local'])
self.root_dir_input.grid(row=rw+1, column=cl)
def create_push_file_button(self, rw, cl):
self.push_file_button = ttk.Button(self, text="Upload >>", state=['disabled'],
command=self.push_file)
self.push_file_button.grid(row=rw, column=cl)
def create_pull_file_button(self, rw, cl):
self.pull_file_button = ttk.Button(self, text="Download <<", state=['disabled'],
command=self.pull_file)
self.pull_file_button.grid(row=rw, column=cl)
def list_remote_dir(self):
assert isinstance(self.root_dir_tree['Remote'], RootTree)
self.reconnect()
self.root_dir['Remote'].set(self.ftp_conn.pwd())
self.root_dir_tree['Remote'].root_directory = self.root_dir['Remote']
self.root_dir_tree['Remote'].populate_parent()
def connect(self):
user = self.username.get()
pswd = self.password.get()
port_no = 0
msg = "Please type a valid IP and a port number between 1025 and 65533 inclusive."
try:
host = self.listen_ip.get()
port_no = int(self.listen_port.get())
if port_no < LOWEST_PORT_NO or port_no > HIGHEST_PORT_NO:
msg += " Port {0} is not valid.".format(port_no)
raise Exception(msg)
msg += " IPv4 and IPv6 are accepted, but {0} is not valid.".format(host)
ipaddress.ip_address(host) # throw Exception for invalid IP
except:
mbox.showinfo(message=msg)
return
try:
self.ftp_conn = ftplib.FTP(user=user, passwd=pswd)
print("FTP instance created")
self.ftp_conn.connect(host=host, port=port_no)
print((host, port_no))
self.ftp_conn.login(user, pswd)
print("Logged in")
except:
mbox.showinfo(message="Connecting failed, please check IP and port.")
return
self.share_dir(self.root_dir_tree['Local'])
self.list_remote_dir()
self.connect_button.state(['disabled'])
self.disconnect_button.state(['!disabled'])
self.remote_dir_button.state(['!disabled'])
self.push_file_button.state(['!disabled'])
self.pull_file_button.state(['!disabled'])
self.current_state.set("CONNECTED!")
def disconnect(self, dir_tree_view):
try:
self.ftp_conn.quit()
except:
print("Already disconnected")
finally:
self.connect_button.state(['!disabled'])
self.disconnect_button.state(['disabled'])
self.remote_dir_button.state(['disabled'])
self.push_file_button.state(['disabled'])
self.pull_file_button.state(['disabled'])
self.current_state.set("DISCONNECTED")
if isinstance(dir_tree_view, RootTree):
children = dir_tree_view.get_children('')
if children:
dir_tree_view.delete(children)
def reconnect(self):
user = self.username.get()
pswd = self.password.get()
host = self.listen_ip.get()
port = int(self.listen_port.get())
# print(self.root_dir_tree['Remote'].ftp_conn.sock) # TO ANSWER: why does the sock go missing?!
if not self.root_dir_tree['Remote'].ftp_conn.sock:
self.root_dir_tree['Remote'].ftp_conn.connect(host=host, port=port)
# print("Reconnected!")
self.root_dir_tree['Remote'].ftp_conn.login(user=user, passwd=pswd)
# print("Reloggedin!")
def create_remote_dir_button(self, rw, cl):
self.remote_dir_button = ttk.Button(self, text="Refresh Remote", state=['disabled'],
command=self.list_remote_dir)
self.remote_dir_button.grid(row=rw, column=cl, sticky=constants.W, pady=4, padx=5)
def share_dir(self, dir_tree_view):
if isinstance(dir_tree_view, RootTree):
try:
os.chdir(self.root_dir['Local'].get())
dir_tree_view.root_directory = self.root_dir['Local']
if dir_tree_view.ftp_conn:
dir_tree_view.ftp_conn.reconnect()
dir_tree_view.ftp_conn.cwd(self.root_dir['Local'].get())
dir_tree_view.populate_parent()
except FileNotFoundError:
mbox.showinfo(message="Invalid Directory!")
def select_dir(self, dir_tree_view):
if isinstance(dir_tree_view, RootTree):
children = dir_tree_view.get_children('')
if children:
dir_tree_view.delete(children)
old_dir_tree_view_root_dir = dir_tree_view.root_directory.get()
dir_tree_view.root_directory.set(filedialog.askdirectory().replace("/" , str(os.sep)))
if not dir_tree_view.root_directory.get():
dir_tree_view.root_directory.set(old_dir_tree_view_root_dir)
def progress_counter(self, buf):
# Do nothing to the buf being transferred
self.transferred_up_to_now += self.block_size
print("{0:.2f}% completed".format(
min(self.transferred_up_to_now/self.filesize_in_transfer*100.00, 100.00)))
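# Worked example (illustrative): a 10240-byte upload with block_size 1024
# prints "10.00% completed" ... "100.00% completed"; the min() cap absorbs
# the overshoot of the final, possibly partial, block.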
def upload_file(self, filename, outfile=None):
self.block_size = BLOCK_SIZE
start_time = 0
if not outfile:
outfile = sys.stdout
self.reconnect()
print("Uploading {0}".format(filename))
filename_without_path = os.path.split(filename)[1]
if re.search(r'\.txt$', filename):
self.ftp_conn.storlines("STOR " + filename_without_path, open(filename),
self.progress_counter)
# default mode for open() is "rt" so not mentioned
else:
start_time = time.time()
self.ftp_conn.storbinary("STOR " + filename_without_path, open(filename, "rb"),
BLOCK_SIZE, self.progress_counter)
print("{0} bytes uploaded in {1:.2f} seconds".format(self.filesize_in_transfer,
(time.time() - start_time)))
self.transferred_up_to_now = 0
def download_file(self, filename, outfile=None):
self.block_size = 8*BLOCK_SIZE # download block size is 8x the upload one, since downloads can run faster
start_time = 0
if not outfile:
outfile = sys.stdout
else:
# TODO: Check if file already exists and append number?/alert user?
pass
self.reconnect()
print("Downloading {0}".format(filename))
if re.search(r'\.txt$', filename):
outfile = open(filename, 'w')
self.ftp_conn.retrlines("RETR " + filename,
lambda s, w=outfile.write: w(s+"\r\n"))
else:
outfile = open(filename, 'wb')
def write_and_count(buf):
outfile.write(buf)
self.progress_counter(buf)
start_time = time.time()
self.ftp_conn.retrbinary("RETR " + filename, write_and_count, self.block_size)
outfile.close()
print("{0} bytes downloaded in {1:.2f} seconds".format(self.filesize_in_transfer,
(time.time() - start_time)))
self.transferred_up_to_now = 0
def push_file(self):
files = self.root_dir_tree['Local'].selection()
for fileinfo in files:
file_details = self.root_dir_tree['Local'].item(fileinfo, 'values')
self.filesize_in_transfer = int(file_details[2])
self.upload_file(file_details[0])
self.filesize_in_transfer = 0
def pull_file(self):
files = self.root_dir_tree['Remote'].selection()
for fileinfo in files:
file_details = self.root_dir_tree['Remote'].item(fileinfo, 'values')
self.filesize_in_transfer = int(file_details[2])
self.download_file(file_details[0])
self.filesize_in_transfer = 0
def create_dir_tree_frame(self, rw, cl, tit):
self.dir_tree_frame[tit] = ttk.Frame(self, relief=constants.SOLID, borderwidth=1)
self.root_dir_tree[tit] = RootTree(self, columns=('fullpath','type','size'),
displaycolumns='size', root_dir=self.root_dir[tit],
conn=self.ftp_conn if tit=='Remote' else None)
self.root_dir_tree[tit].heading('#0', text='Directory', anchor=constants.W)
self.root_dir_tree[tit].heading('size', text='Size', anchor=constants.W)
self.root_dir_tree[tit].column('#0', stretch=0, minwidth=120, width=280)
self.root_dir_tree[tit].column('size', stretch=1, minwidth=40, width=80)
self.dir_tree_frame[tit].grid(row=rw, column=cl, sticky=constants.W, pady=4, padx=5)
ttk.Label(self.dir_tree_frame[tit], text=tit).grid(row=rw, column=cl, sticky=constants.W)
self.root_dir_tree[tit].grid(in_=self.dir_tree_frame[tit], row=rw+1, column=cl,
sticky=constants.NSEW)
yScrollBar = ttk.Scrollbar(self.dir_tree_frame[tit], orient=constants.VERTICAL,
command=self.root_dir_tree[tit].yview)
xScrollBar = ttk.Scrollbar(self.dir_tree_frame[tit], orient=constants.HORIZONTAL,
command=self.root_dir_tree[tit].xview)
self.root_dir_tree[tit]['yscroll'] = yScrollBar.set
self.root_dir_tree[tit]['xscroll'] = xScrollBar.set
yScrollBar.grid(row=rw, column=cl+2, rowspan=3, sticky=constants.NS)
xScrollBar.grid(row=rw+3, column=cl, rowspan=1, sticky=constants.EW)
# set frame resizing priorities
self.dir_tree_frame[tit].rowconfigure(0, weight=1)
self.dir_tree_frame[tit].columnconfigure(0, weight=1)
# Enable this frame later
def create_stdout_frame(self, rw, cl):
self.stdout_frame = ttk.Frame(self, relief=constants.SOLID, borderwidth=1)
self.stdout_frame.grid(row=rw, column=cl)
self.old_stdout = sys.stdout
self.text = tkinter.Text(self, width=64, height=12, wrap='none')
self.text.grid(row=rw+1, column=cl, pady=4, padx=5)
sys.stdout = StdoutRedirector(self.text)
if __name__ == '__main__':
root = tkinter.Tk()
app = FTPClientApp(master=root)
app.mainloop()
try:
app.disconnect(app.root_dir_tree['Remote'])
except:
pass
sys.stdout = app.old_stdout
|
|
#Copyright ReportLab Europe Ltd. 2000-2008
#see license.txt for license details
__version__=''' $Id$ '''
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
def Paragraph(s,style):
from rlextra.radxml.para import Paragraph as PPPP
return PPPP(s,style)
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myTitlePage(canvas, doc):
canvas.saveState()
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def go():
def myCanvasMaker(fn,**kw):
from reportlab.pdfgen.canvas import Canvas
canv = Canvas(fn,**kw)
# attach our callback to the canvas
canv.myOnDrawCB = myOnDrawCB
return canv
doc = BaseDocTemplate('dodyssey.pdf',showBoundary=0)
#normal frame as for SimpleFlowDocument
frameT = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')
#Two Columns
frame1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')
frame2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
doc.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=myTitlePage),
PageTemplate(id='OneCol',frames=frameT, onPage=myLaterPages),
PageTemplate(id='TwoCol',frames=[frame1,frame2], onPage=myLaterPages),
])
doc.build(Elements,canvasmaker=myCanvasMaker)
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontSize = 14
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontSize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
Elements.append(PageBreak())
chNum = 0
def myOnDrawCB(canv,kind,label):
print 'myOnDrawCB(%s)'%kind, 'Page number=', canv.getPageNumber(), 'label value=', label
def chapter(txt, style=ChapterStyle):
global chNum
Elements.append(NextPageTemplate('OneCol'))
newPage()
chNum += 1
if _NEW_PARA or not _CALLBACK:
Elements.append(Paragraph(txt, style))
else:
Elements.append(Paragraph(('foo<onDraw name="myOnDrawCB" label="chap %d"/> '%chNum)+txt, style))
Elements.append(Spacer(0.2*inch, 0.3*inch))
if useTwoCol:
Elements.append(NextPageTemplate('TwoCol'))
def fTitle(txt,style=InitialStyle):
Elements.append(Paragraph(txt, style))
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
def spacer(inches):
Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
if _REDCAP:
fs, fe = '<font color="red" size="+2">', '</font>'
n = len(txt)
for i in xrange(n):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = (txt[:i]+(fs+txt[i]+fe))+txt[i+1:]
break
if _REDCAP>=2 and n>20:
j = i+len(fs)+len(fe)+1+int((n-1)/2)
while not ('a'<=txt[j]<='z' or 'A'<=txt[j]<='Z'): j += 1
txt = (txt[:j]+('<b><i><font size="+2" color="blue">'+txt[j]+'</font></i></b>'))+txt[j+1:]
if _REDCAP==3 and n>20:
n = len(txt)
fs = '<font color="green" size="+1">'
for i in xrange(n-1,-1,-1):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = txt[:i]+((fs+txt[i]+fe)+txt[i+1:])
break
Elements.append(Paragraph(txt, style))
firstPre = 1
def pre(txt, style=PreStyle):
global firstPre
if firstPre:
Elements.append(NextPageTemplate('OneCol'))
newPage()
firstPre = 0
spacer(0.1)
p = Preformatted(txt, style)
Elements.append(p)
def parseOdyssey(fn):
from time import time
E = []
t0=time()
text = open(fn,'r').read()
i0 = text.index('Book I')
endMarker = 'covenant of peace between the two contending parties.'
i1 = text.index(endMarker)+len(endMarker)
PREAMBLE=map(str.strip,text[0:i0].split('\n'))
L=map(str.strip,text[i0:i1].split('\n'))
POSTAMBLE=map(str.strip,text[i1:].split('\n'))
def ambleText(L):
while L and not L[0]: L.pop(0)
while L:
T=[]
while L and L[0]:
T.append(L.pop(0))
yield T
while L and not L[0]: L.pop(0)
def mainText(L):
while L:
B = L.pop(0)
while not L[0]: L.pop(0)
T=[]
while L and L[0]:
T.append(L.pop(0))
while not L[0]: L.pop(0)
P = []
while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
E=[]
while L and L[0]:
E.append(L.pop(0))
P.append(E)
if L:
while not L[0]: L.pop(0)
yield B,T,P
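# Structure note (inferred from the parsing above): mainText yields one
# (B, T, P) triple per book -- B is the "Book N" heading line, T the list
# of title/summary lines, and P a list of paragraphs, each itself a list
# of raw text lines.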
t1 = time()
print "open(%s,'r').read() took %.4f seconds" %(fn,t1-t0)
E.append([spacer,2])
E.append([fTitle,'<font color="red">%s</font>' % Title, InitialStyle])
E.append([fTitle,'<font size="-4">by</font> <font color="green">%s</font>' % Author, InitialStyle])
for T in ambleText(PREAMBLE):
E.append([p,'\n'.join(T)])
for (B,T,P) in mainText(L):
E.append([chapter,B])
E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
for x in P:
E.append([p,' '.join(x)])
global firstPre  # reset the module-level flag used by pre(), not a local shadow
firstPre = 1
for T in ambleText(POSTAMBLE):
E.append([p,'\n'.join(T)])
t3 = time()
print "Parsing into memory took %.4f seconds" %(t3-t1)
del L
t4 = time()
print "Deleting list of lines took %.4f seconds" %(t4-t3)
for i in xrange(len(E)):
E[i][0](*E[i][1:])
t5 = time()
print "Moving into platypus took %.4f seconds" %(t5-t4)
del E
t6 = time()
print "Deleting list of actions took %.4f seconds" %(t6-t5)
go()
t7 = time()
print "saving to PDF took %.4f seconds" %(t7-t6)
print "Total run took %.4f seconds"%(t7-t0)
import hashlib
print 'file digest: %s' % hashlib.md5(open('dodyssey.pdf','rb').read()).hexdigest()
def run():
for fn in ('odyssey.full.txt','odyssey.txt'):
if os.path.isfile(fn):
parseOdyssey(fn)
break
def doProf(profname,func,*args,**kwd):
import hotshot, hotshot.stats
prof = hotshot.Profile(profname)
prof.runcall(func)
prof.close()
stats = hotshot.stats.load(profname)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
|
|
from __future__ import division
from sympy import (Symbol, Wild, sin, cos, exp, sqrt, pi, Function, Derivative,
abc, Integer, Eq, symbols, Add, I, Float, log, Rational, Lambda, atan2,
cse, cot, tan, S, Tuple, Basic, Dict, Piecewise, oo, Mul,
factor, nsimplify, zoo)
from sympy.core.basic import _aresame
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y
def test_subs():
n3 = Rational(3)
e = x
e = e.subs(x, n3)
assert e == Rational(3)
e = 2*x
assert e == 2*x
e = e.subs(x, n3)
assert e == Rational(6)
def test_trigonometric():
n3 = Rational(3)
e = (sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e = e.subs(x, n3)
assert e == 2*cos(n3)*sin(n3)
e = (sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e = e.subs(sin(x), cos(x))
assert e == 2*cos(x)**2
assert exp(pi).subs(exp, sin) == 0
assert cos(exp(pi)).subs(exp, sin) == 1
i = Symbol('i', integer=True)
zoo = S.ComplexInfinity
assert tan(x).subs(x, pi/2) is zoo
assert cot(x).subs(x, pi) is zoo
assert cot(i*x).subs(x, pi) is zoo
assert tan(i*x).subs(x, pi/2) == tan(i*pi/2)
assert tan(i*x).subs(x, pi/2).subs(i, 1) is zoo
o = Symbol('o', odd=True)
assert tan(o*x).subs(x, pi/2) == tan(o*pi/2)
def test_powers():
assert sqrt(1 - sqrt(x)).subs(x, 4) == I
assert (sqrt(1 - x**2)**3).subs(x, 2) == - 3*I*sqrt(3)
assert (x**Rational(1, 3)).subs(x, 27) == 3
assert (x**Rational(1, 3)).subs(x, -27) == 3*(-1)**Rational(1, 3)
assert ((-x)**Rational(1, 3)).subs(x, 27) == 3*(-1)**Rational(1, 3)
n = Symbol('n', negative=True)
assert (x**n).subs(x, 0) is S.ComplexInfinity
assert exp(-1).subs(S.Exp1, 0) is S.ComplexInfinity
assert (x**(4.0*y)).subs(x**(2.0*y), n) == n**2.0
def test_logexppow(): # no eval()
x = Symbol('x', real=True)
w = Symbol('w')
e = (3**(1 + x) + 2**(1 + x))/(3**x + 2**x)
assert e.subs(2**x, w) != e
assert e.subs(exp(x*log(Rational(2))), w) != e
def test_bug():
x1 = Symbol('x1')
x2 = Symbol('x2')
y = x1*x2
assert y.subs(x1, Float(3.0)) == Float(3.0)*x2
def test_subbug1():
# see that they don't fail
(x**x).subs(x, 1)
(x**x).subs(x, 1.0)
def test_subbug2():
# Ensure this does not cause infinite recursion
assert Float(7.7).epsilon_eq(abs(x).subs(x, -7.7))
def test_dict_set():
a, b, c = map(Wild, 'abc')
f = 3*cos(4*x)
r = f.match(a*cos(b*x))
assert r == {a: 3, b: 4}
e = a/b*sin(b*x)
assert e.subs(r) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(r) == 3*sin(4*x) / 4
s = set(r.items())
assert e.subs(s) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(s) == 3*sin(4*x) / 4
assert e.subs(r) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(r) == 3*sin(4*x) / 4
assert x.subs(Dict((x, 1))) == 1
def test_dict_ambigous(): # see issue 3566
y = Symbol('y')
z = Symbol('z')
f = x*exp(x)
g = z*exp(z)
df = {x: y, exp(x): y}
dg = {z: y, exp(z): y}
assert f.subs(df) == y**2
assert g.subs(dg) == y**2
# and this is how order can affect the result
assert f.subs(x, y).subs(exp(x), y) == y*exp(y)
assert f.subs(exp(x), y).subs(x, y) == y**2
    # length of args and count_ops are the same, so default_sort_key
    # resolves the ordering; if this result is not wanted, an unordered
    # sequence should not be used.
e = 1 + x*y
assert e.subs({x: y, y: 2}) == 5
# here, there are no obviously clashing keys or values
# but the results depend on the order
assert exp(x/2 + y).subs(dict([(exp(y + 1), 2), (x, 2)])) == exp(y + 1)
def test_deriv_sub_bug3():
y = Symbol('y')
f = Function('f')
pat = Derivative(f(x), x, x)
assert pat.subs(y, y**2) == Derivative(f(x), x, x)
assert pat.subs(y, y**2) != Derivative(f(x), x)
def test_equality_subs1():
f = Function('f')
x = abc.x
eq = Eq(f(x)**2, x)
res = Eq(Integer(16), x)
assert eq.subs(f(x), 4) == res
def test_equality_subs2():
f = Function('f')
x = abc.x
eq = Eq(f(x)**2, 16)
assert bool(eq.subs(f(x), 3)) is False
assert bool(eq.subs(f(x), 4)) is True
def test_issue_3742():
y = Symbol('y')
e = sqrt(x)*exp(y)
assert e.subs(sqrt(x), 1) == exp(y)
def test_subs_dict1():
x, y = symbols('x y')
assert (1 + x*y).subs(x, pi) == 1 + pi*y
assert (1 + x*y).subs({x: pi, y: 2}) == 1 + 2*pi
c2, c3, q1p, q2p, c1, s1, s2, s3 = symbols('c2 c3 q1p q2p c1 s1 s2 s3')
test = (c2**2*q2p*c3 + c1**2*s2**2*q2p*c3 + s1**2*s2**2*q2p*c3
- c1**2*q1p*c2*s3 - s1**2*q1p*c2*s3)
assert (test.subs({c1**2: 1 - s1**2, c2**2: 1 - s2**2, c3**3: 1 - s3**2})
== c3*q2p*(1 - s2**2) + c3*q2p*s2**2*(1 - s1**2)
- c2*q1p*s3*(1 - s1**2) + c3*q2p*s1**2*s2**2 - c2*q1p*s3*s1**2)
def test_mul():
x, y, z, a, b, c = symbols('x y z a b c')
A, B, C = symbols('A B C', commutative=0)
assert (x*y*z).subs(z*x, y) == y**2
assert (z*x).subs(1/x, z) == z*x
assert (x*y/z).subs(1/z, a) == a*x*y
assert (x*y/z).subs(x/z, a) == a*y
assert (x*y/z).subs(y/z, a) == a*x
assert (x*y/z).subs(x/z, 1/a) == y/a
assert (x*y/z).subs(x, 1/a) == y/(z*a)
assert (2*x*y).subs(5*x*y, z) != 2*z/5
assert (x*y*A).subs(x*y, a) == a*A
assert (x**2*y**(3*x/2)).subs(x*y**(x/2), 2) == 4*y**(x/2)
assert (x*exp(x*2)).subs(x*exp(x), 2) == 2*exp(x)
assert ((x**(2*y))**3).subs(x**y, 2) == 64
assert (x*A*B).subs(x*A, y) == y*B
assert (x*y*(1 + x)*(1 + x*y)).subs(x*y, 2) == 6*(1 + x)
assert ((1 + A*B)*A*B).subs(A*B, x*A*B)
assert (x*a/z).subs(x/z, A) == a*A
assert (x**3*A).subs(x**2*A, a) == a*x
assert (x**2*A*B).subs(x**2*B, a) == a*A
assert (x**2*A*B).subs(x**2*A, a) == a*B
assert (b*A**3/(a**3*c**3)).subs(a**4*c**3*A**3/b**4, z) == \
b*A**3/(a**3*c**3)
assert (6*x).subs(2*x, y) == 3*y
assert (y*exp(3*x/2)).subs(y*exp(x), 2) == 2*exp(x/2)
assert (y*exp(3*x/2)).subs(y*exp(x), 2) == 2*exp(x/2)
assert (A**2*B*A**2*B*A**2).subs(A*B*A, C) == A*C**2*A
assert (x*A**3).subs(x*A, y) == y*A**2
assert (x**2*A**3).subs(x*A, y) == y**2*A
assert (x*A**3).subs(x*A, B) == B*A**2
assert (x*A*B*A*exp(x*A*B)).subs(x*A, B) == B**2*A*exp(B*B)
assert (x**2*A*B*A*exp(x*A*B)).subs(x*A, B) == B**3*exp(B**2)
assert (x**3*A*exp(x*A*B)*A*exp(x*A*B)).subs(x*A, B) == \
x*B*exp(B**2)*B*exp(B**2)
assert (x*A*B*C*A*B).subs(x*A*B, C) == C**2*A*B
assert (-I*a*b).subs(a*b, 2) == -2*I
# issue 6361
assert (-8*I*a).subs(-2*a, 1) == 4*I
assert (-I*a).subs(-a, 1) == I
# issue 6441
assert (4*x**2).subs(2*x, y) == y**2
assert (2*4*x**2).subs(2*x, y) == 2*y**2
assert (-x**3/9).subs(-x/3, z) == -z**2*x
assert (-x**3/9).subs(x/3, z) == -z**2*x
assert (-2*x**3/9).subs(x/3, z) == -2*x*z**2
assert (-2*x**3/9).subs(-x/3, z) == -2*x*z**2
assert (-2*x**3/9).subs(-2*x, z) == z*x**2/9
assert (-2*x**3/9).subs(2*x, z) == -z*x**2/9
assert (2*(3*x/5/7)**2).subs(3*x/5, z) == 2*(S(1)/7)**2*z**2
assert (4*x).subs(-2*x, z) == 4*x # try keep subs literal
def test_subs_simple():
a = symbols('a', commutative=True)
x = symbols('x', commutative=False)
assert (2*a).subs(1, 3) == 2*a
assert (2*a).subs(2, 3) == 3*a
assert (2*a).subs(a, 3) == 6
assert sin(2).subs(1, 3) == sin(2)
assert sin(2).subs(2, 3) == sin(3)
assert sin(a).subs(a, 3) == sin(3)
assert (2*x).subs(1, 3) == 2*x
assert (2*x).subs(2, 3) == 3*x
assert (2*x).subs(x, 3) == 6
assert sin(x).subs(x, 3) == sin(3)
def test_subs_constants():
a, b = symbols('a b', commutative=True)
x, y = symbols('x y', commutative=False)
assert (a*b).subs(2*a, 1) == a*b
assert (1.5*a*b).subs(a, 1) == 1.5*b
assert (2*a*b).subs(2*a, 1) == b
assert (2*a*b).subs(4*a, 1) == 2*a*b
assert (x*y).subs(2*x, 1) == x*y
assert (1.5*x*y).subs(x, 1) == 1.5*y
assert (2*x*y).subs(2*x, 1) == y
assert (2*x*y).subs(4*x, 1) == 2*x*y
def test_subs_commutative():
a, b, c, d, K = symbols('a b c d K', commutative=True)
assert (a*b).subs(a*b, K) == K
assert (a*b*a*b).subs(a*b, K) == K**2
assert (a*a*b*b).subs(a*b, K) == K**2
assert (a*b*c*d).subs(a*b*c, K) == d*K
assert (a*b**c).subs(a, K) == K*b**c
assert (a*b**c).subs(b, K) == a*K**c
assert (a*b**c).subs(c, K) == a*b**K
assert (a*b*c*b*a).subs(a*b, K) == c*K**2
assert (a**3*b**2*a).subs(a*b, K) == a**2*K**2
def test_subs_noncommutative():
w, x, y, z, L = symbols('w x y z L', commutative=False)
assert (x*y).subs(x*y, L) == L
assert (w*y*x).subs(x*y, L) == w*y*x
assert (w*x*y*z).subs(x*y, L) == w*L*z
assert (x*y*x*y).subs(x*y, L) == L**2
assert (x*x*y).subs(x*y, L) == x*L
assert (x*x*y*y).subs(x*y, L) == x*L*y
assert (w*x*y).subs(x*y*z, L) == w*x*y
assert (x*y**z).subs(x, L) == L*y**z
assert (x*y**z).subs(y, L) == x*L**z
assert (x*y**z).subs(z, L) == x*y**L
assert (w*x*y*z*x*y).subs(x*y*z, L) == w*L*x*y
assert (w*x*y*y*w*x*x*y*x*y*y*x*y).subs(x*y, L) == w*L*y*w*x*L**2*y*L
def test_subs_basic_funcs():
a, b, c, d, K = symbols('a b c d K', commutative=True)
w, x, y, z, L = symbols('w x y z L', commutative=False)
assert (x + y).subs(x + y, L) == L
assert (x - y).subs(x - y, L) == L
assert (x/y).subs(x, L) == L/y
assert (x**y).subs(x, L) == L**y
assert (x**y).subs(y, L) == x**L
assert ((a - c)/b).subs(b, K) == (a - c)/K
assert (exp(x*y - z)).subs(x*y, L) == exp(L - z)
assert (a*exp(x*y - w*z) + b*exp(x*y + w*z)).subs(z, 0) == \
a*exp(x*y) + b*exp(x*y)
assert ((a - b)/(c*d - a*b)).subs(c*d - a*b, K) == (a - b)/K
assert (w*exp(a*b - c)*x*y/4).subs(x*y, L) == w*exp(a*b - c)*L/4
def test_subs_wild():
R, S, T, U = symbols('R S T U', cls=Wild)
assert (R*S).subs(R*S, T) == T
assert (S*R).subs(R*S, T) == T
assert (R + S).subs(R + S, T) == T
assert (R**S).subs(R, T) == T**S
assert (R**S).subs(S, T) == R**T
assert (R*S**T).subs(R, U) == U*S**T
assert (R*S**T).subs(S, U) == R*U**T
assert (R*S**T).subs(T, U) == R*S**U
def test_subs_mixed():
a, b, c, d, K = symbols('a b c d K', commutative=True)
w, x, y, z, L = symbols('w x y z L', commutative=False)
R, S, T, U = symbols('R S T U', cls=Wild)
assert (a*x*y).subs(x*y, L) == a*L
assert (a*b*x*y*x).subs(x*y, L) == a*b*L*x
assert (R*x*y*exp(x*y)).subs(x*y, L) == R*L*exp(L)
assert (a*x*y*y*x - x*y*z*exp(a*b)).subs(x*y, L) == a*L*y*x - L*z*exp(a*b)
e = c*y*x*y*x**(R*S - a*b) - T*(a*R*b*S)
assert e.subs(x*y, L).subs(a*b, K).subs(R*S, U) == \
c*y*L*x**(U - K) - T*(U*K)
def test_division():
a, b, c = symbols('a b c', commutative=True)
x, y, z = symbols('x y z', commutative=True)
assert (1/a).subs(a, c) == 1/c
assert (1/a**2).subs(a, c) == 1/c**2
assert (1/a**2).subs(a, -2) == Rational(1, 4)
assert (-(1/a**2)).subs(a, -2) == -Rational(1, 4)
assert (1/x).subs(x, z) == 1/z
assert (1/x**2).subs(x, z) == 1/z**2
assert (1/x**2).subs(x, -2) == Rational(1, 4)
assert (-(1/x**2)).subs(x, -2) == -Rational(1, 4)
#issue 5360
assert (1/x).subs(x, 0) == 1/S(0)
def test_add():
a, b, c, d, x, y, t = symbols('a b c d x y t')
assert (a**2 - b - c).subs(a**2 - b, d) in [d - c, a**2 - b - c]
assert (a**2 - c).subs(a**2 - c, d) == d
assert (a**2 - b - c).subs(a**2 - c, d) in [d - b, a**2 - b - c]
assert (a**2 - x - c).subs(a**2 - c, d) in [d - x, a**2 - x - c]
assert (a**2 - b - sqrt(a)).subs(a**2 - sqrt(a), c) == c - b
assert (a + b + exp(a + b)).subs(a + b, c) == c + exp(c)
assert (c + b + exp(c + b)).subs(c + b, a) == a + exp(a)
assert (a + b + c + d).subs(b + c, x) == a + d + x
assert (a + b + c + d).subs(-b - c, x) == a + d - x
assert ((x + 1)*y).subs(x + 1, t) == t*y
assert ((-x - 1)*y).subs(x + 1, t) == -t*y
assert ((x - 1)*y).subs(x + 1, t) == y*(t - 2)
assert ((-x + 1)*y).subs(x + 1, t) == y*(-t + 2)
    # this should work every time:
e = a**2 - b - c
assert e.subs(Add(*e.args[:2]), d) == d + e.args[2]
assert e.subs(a**2 - c, d) == d - b
    # the fallback should recognize when a change has been made; although
    # .1 == Rational(1, 10), they are not the same, so the change should
    # be made
assert (0.1 + a).subs(0.1, Rational(1, 10)) == Rational(1, 10) + a
e = (-x*(-y + 1) - y*(y - 1))
ans = (-x*(x) - y*(-x)).expand()
assert e.subs(-y + 1, x) == ans
def test_subs_issue910():
assert (I*Symbol('a')).subs(1, 2) == I*Symbol('a')
def test_functions_subs():
x, y = symbols('x y')
f, g = symbols('f g', cls=Function)
l = Lambda((x, y), sin(x) + y)
assert (g(y, x) + cos(x)).subs(g, l) == sin(y) + x + cos(x)
assert (f(x)**2).subs(f, sin) == sin(x)**2
assert (f(x, y)).subs(f, log) == log(x, y)
assert (f(x, y)).subs(f, sin) == f(x, y)
assert (sin(x) + atan2(x, y)).subs([[atan2, f], [sin, g]]) == \
f(x, y) + g(x)
assert (g(f(x + y, x))).subs([[f, l], [g, exp]]) == exp(x + sin(x + y))
def test_derivative_subs():
y = Symbol('y')
f = Function('f')
assert Derivative(f(x), x).subs(f(x), y) != 0
assert Derivative(f(x), x).subs(f(x), y).subs(y, f(x)) == \
Derivative(f(x), x)
# issues 1986, 1938
assert cse(Derivative(f(x), x) + f(x))[1][0].has(Derivative)
assert cse(Derivative(f(x, y), x) +
Derivative(f(x, y), y))[1][0].has(Derivative)
def test_derivative_subs2():
x, y, z = symbols('x y z')
f, g = symbols('f g', cls=Function)
assert Derivative(f, x, y).subs(Derivative(f, x, y), g) == g
assert Derivative(f, y, x).subs(Derivative(f, x, y), g) == g
assert Derivative(f, x, y).subs(Derivative(f, x), g) == Derivative(g, y)
assert Derivative(f, x, y).subs(Derivative(f, y), g) == Derivative(g, x)
assert (Derivative(f(x, y, z), x, y, z).subs(
Derivative(f(x, y, z), x, z), g) == Derivative(g, y))
assert (Derivative(f(x, y, z), x, y, z).subs(
Derivative(f(x, y, z), z, y), g) == Derivative(g, x))
assert (Derivative(f(x, y, z), x, y, z).subs(
Derivative(f(x, y, z), z, y, x), g) == g)
def test_derivative_subs3():
x = Symbol('x')
dex = Derivative(exp(x), x)
assert Derivative(dex, x).subs(dex, exp(x)) == dex
assert dex.subs(exp(x), dex) == Derivative(exp(x), x, x)
def test_issue_5284():
A, B = symbols('A B', commutative=False)
assert (x*A).subs(x**2*A, B) == x*A
assert (A**2).subs(A**3, B) == A**2
assert (A**6).subs(A**3, B) == B**2
def test_subs_iter():
assert x.subs(reversed([[x, y]])) == y
it = iter([[x, y]])
assert x.subs(it) == y
assert x.subs(Tuple((x, y))) == y
def test_subs_dict():
a, b, c, d, e = symbols('a b c d e')
z = symbols('z')
assert (2*x + y + z).subs(dict(x=1, y=2)) == 4 + z
l = [(sin(x), 2), (x, 1)]
assert (sin(x)).subs(l) == \
(sin(x)).subs(dict(l)) == 2
assert sin(x).subs(reversed(l)) == sin(1)
expr = sin(2*x) + sqrt(sin(2*x))*cos(2*x)*sin(exp(x)*x)
reps = dict([
(sin(2*x), c),
(sqrt(sin(2*x)), a),
(cos(2*x), b),
(exp(x), e),
(x, d),
])
assert expr.subs(reps) == c + a*b*sin(d*e)
l = [(x, 3), (y, x**2)]
assert (x + y).subs(l) == 3 + x**2
assert (x + y).subs(reversed(l)) == 12
# If changes are made to convert lists into dictionaries and do
# a dictionary-lookup replacement, these tests will help to catch
# some logical errors that might occur
l = [(y, z + 2), (1 + z, 5), (z, 2)]
assert (y - 1 + 3*x).subs(l) == 5 + 3*x
l = [(y, z + 2), (z, 3)]
assert (y - 2).subs(l) == 3
def test_no_arith_subs_on_floats():
a, x, y = symbols('a x y')
assert (x + 3).subs(x + 3, a) == a
assert (x + 3).subs(x + 2, a) == a + 1
assert (x + y + 3).subs(x + 3, a) == a + y
assert (x + y + 3).subs(x + 2, a) == a + y + 1
assert (x + 3.0).subs(x + 3.0, a) == a
assert (x + 3.0).subs(x + 2.0, a) == x + 3.0
assert (x + y + 3.0).subs(x + 3.0, a) == a + y
assert (x + y + 3.0).subs(x + 2.0, a) == x + y + 3.0
def test_issue_5651():
a, b, c, K = symbols('a b c K', commutative=True)
x, y, z = symbols('x y z')
assert (a/(b*c)).subs(b*c, K) == a/K
assert (a/(b**2*c**3)).subs(b*c, K) == a/(c*K**2)
assert (1/(x*y)).subs(x*y, 2) == S.Half
assert ((1 + x*y)/(x*y)).subs(x*y, 1) == 2
assert (x*y*z).subs(x*y, 2) == 2*z
assert ((1 + x*y)/(x*y)/z).subs(x*y, 1) == 2/z
def test_issue_6075():
assert Tuple(1, True).subs(1, 2) == Tuple(2, True)
def test_issue_6079():
# since x + 2.0 == x + 2 we can't do a simple equality test
x = symbols('x')
assert _aresame((x + 2.0).subs(2, 3), x + 2.0)
assert _aresame((x + 2.0).subs(2.0, 3), x + 3)
assert not _aresame(x + 2, x + 2.0)
assert not _aresame(Basic(cos, 1), Basic(cos, 1.))
assert _aresame(cos, cos)
assert not _aresame(1, S(1))
assert not _aresame(x, symbols('x', positive=True))
def test_issue_4680():
N = Symbol('N')
assert N.subs(dict(N=3)) == 3
def test_issue_6158():
assert (x - 1).subs(1, y) == x - y
assert (x - 1).subs(-1, y) == x + y
assert (x - oo).subs(oo, y) == x - y
assert (x - oo).subs(-oo, y) == x + y
def test_Function_subs():
from sympy.abc import x, y
f, g, h, i = symbols('f g h i', cls=Function)
p = Piecewise((g(f(x, y)), x < -1), (g(x), x <= 1))
assert p.subs(g, h) == Piecewise((h(f(x, y)), x < -1), (h(x), x <= 1))
assert (f(y) + g(x)).subs({f: h, g: i}) == i(x) + h(y)
def test_simultaneous_subs():
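    # With simultaneous=True all replacements are applied at once, so the
    # result cannot depend on the order in which {x: 0, y: 0} is applied;
    # sequential substitution gives different answers for x/y and y/x.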
reps = {x: 0, y: 0}
assert (x/y).subs(reps) != (y/x).subs(reps)
assert (x/y).subs(reps, simultaneous=True) == \
(y/x).subs(reps, simultaneous=True)
reps = reps.items()
assert (x/y).subs(reps) != (y/x).subs(reps)
assert (x/y).subs(reps, simultaneous=True) == \
(y/x).subs(reps, simultaneous=True)
def test_issue_6419_6421():
assert (1/(1 + x/y)).subs(x/y, x) == 1/(1 + x)
assert (-2*I).subs(2*I, x) == -x
assert (-I*x).subs(I*x, x) == -x
assert (-3*I*y**4).subs(3*I*y**2, x) == -x*y**2
def test_issue_6559():
assert (-12*x + y).subs(-x, 1) == 12 + y
# though this involves cse it generated a failure in Mul._eval_subs
x0, x1 = symbols('x0 x1')
e = -log(-12*sqrt(2) + 17)/24 - log(-2*sqrt(2) + 3)/12 + sqrt(2)/3
# XXX modify cse so x1 is eliminated and x0 = -sqrt(2)?
assert cse(e) == (
[(x0, sqrt(2))], [x0/3 - log(-12*x0 + 17)/24 - log(-2*x0 + 3)/12])
def test_issue_5261():
e = I*x
assert exp(e).subs(exp(x), y) == y**I
assert (2**e).subs(2**x, y) == y**I
eq = (-2)**e
assert eq.subs((-2)**x, y) == eq
def test_issue_6923():
assert (-2*x*sqrt(2)).subs(2*x, y) == -sqrt(2)*y
def test_2arg_hack():
N = Symbol('N', commutative=False)
ans = Mul(2, y + 1, evaluate=False)
assert (2*x*(y + 1)).subs(x, 1, hack2=True) == ans
assert (2*(y + 1 + N)).subs(N, 0, hack2=True) == ans
@XFAIL
def test_mul2():
"""When this fails, remove things labelled "2-arg hack"
1) remove special handling in the fallback of subs that
was added in the same commit as this test
2) remove the special handling in Mul.flatten
"""
assert (2*(x + 1)).is_Mul
def test_noncommutative_subs():
    x, y = symbols('x,y', commutative=False)
    assert (x*y*x).subs([(x, x*y), (y, x)], simultaneous=True) == x*y*x**2*y
def test_issue_2877():
f = Float(2.0)
assert (x + f).subs({f: 2}) == x + 2
def r(a,b,c):
return factor(a*x**2 + b*x + c)
e = r(5/6, 10, 5)
assert nsimplify(e) == 5*x**2/6 + 10*x + 5
def test_issue_5910():
t = Symbol('t')
assert (1/(1 - t)).subs(t, 1) == zoo
n = t
d = t - 1
assert (n/d).subs(t, 1) == zoo
assert (-n/-d).subs(t, 1) == zoo
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from xmlrpc.client import ServerProxy
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QPushButton
from electrum import bitcoin, util
from electrum import transaction
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
from electrum.wallet import Multisig_Wallet
from electrum.util import bh2u, bfh
from electrum_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
PORT = 12344
HOST = 'cosigner.electrum.org'
server = ServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True)
class Listener(util.DaemonThread):
def __init__(self, parent):
util.DaemonThread.__init__(self)
self.daemon = True
self.parent = parent
self.received = set()
self.keyhashes = []
def set_keyhashes(self, keyhashes):
self.keyhashes = keyhashes
def clear(self, keyhash):
server.delete(keyhash)
self.received.remove(keyhash)
def run(self):
while self.running:
if not self.keyhashes:
time.sleep(2)
continue
for keyhash in self.keyhashes:
if keyhash in self.received:
continue
try:
message = server.get(keyhash)
except Exception as e:
self.print_error("cannot contact cosigner pool")
time.sleep(30)
continue
if message:
self.received.add(keyhash)
self.print_error("received message for", keyhash)
self.parent.obj.cosigner_receive_signal.emit(
keyhash, message)
# poll every 30 seconds
time.sleep(30)
class QReceiveSignalObject(QObject):
cosigner_receive_signal = pyqtSignal(object, object)
class Plugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.listener = None
self.obj = QReceiveSignalObject()
self.obj.cosigner_receive_signal.connect(self.on_receive)
self.keys = []
self.cosigner_list = []
@hook
def init_qt(self, gui):
for window in gui.windows:
self.on_new_window(window)
@hook
def on_new_window(self, window):
self.update(window)
@hook
def on_close_window(self, window):
self.update(window)
def is_available(self):
return True
def update(self, window):
wallet = window.wallet
if type(wallet) != Multisig_Wallet:
return
if self.listener is None:
self.print_error("starting listener")
self.listener = Listener(self)
self.listener.start()
elif self.listener:
self.print_error("shutting down listener")
self.listener.stop()
self.listener = None
self.keys = []
self.cosigner_list = []
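        # Partition the wallet's keystores: hashes of our own signing keys
        # are what the listener polls for; watching-only keystores identify
        # the remote cosigners we can send to.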
for key, keystore in wallet.keystores.items():
xpub = keystore.get_master_public_key()
K = bitcoin.deserialize_xpub(xpub)[-1]
_hash = bh2u(bitcoin.Hash(K))
if not keystore.is_watching_only():
self.keys.append((key, _hash, window))
else:
self.cosigner_list.append((window, xpub, K, _hash))
if self.listener:
self.listener.set_keyhashes([t[1] for t in self.keys])
@hook
def transaction_dialog(self, d):
d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
b.clicked.connect(lambda: self.do_send(d.tx))
d.buttons.insert(0, b)
self.transaction_dialog_update(d)
@hook
def transaction_dialog_update(self, d):
if d.tx.is_complete() or d.wallet.can_sign(d.tx):
d.cosigner_send_button.hide()
return
for window, xpub, K, _hash in self.cosigner_list:
if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
d.cosigner_send_button.show()
break
else:
d.cosigner_send_button.hide()
def cosigner_can_sign(self, tx, cosigner_xpub):
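        # The cosigner can sign iff one of the transaction inputs references
        # the cosigner's master public key (xpub).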
from electrum.keystore import is_xpubkey, parse_xpubkey
xpub_set = set([])
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
xpub_set.add(xpub)
return cosigner_xpub in xpub_set
def do_send(self, tx):
for window, xpub, K, _hash in self.cosigner_list:
if not self.cosigner_can_sign(tx, xpub):
continue
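            # Encrypt the raw transaction to the cosigner's public key K and
            # post it to the pool server under the cosigner's keyhash.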
message = bitcoin.encrypt_message(bfh(tx.raw), bh2u(K)).decode('ascii')
try:
server.put(_hash, message)
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_message("Failed to send transaction to cosigning pool.")
return
window.show_message("Your transaction was sent to the cosigning pool.\nOpen your cosigner wallet to retrieve it.")
def on_receive(self, keyhash, message):
self.print_error("signal arrived for", keyhash)
for key, _hash, window in self.keys:
if _hash == keyhash:
break
else:
self.print_error("keyhash not found")
return
wallet = window.wallet
if wallet.has_password():
password = window.password_dialog('An encrypted transaction was retrieved from cosigning pool.\nPlease enter your password to decrypt it.')
if not password:
return
else:
password = None
if not window.question(_("An encrypted transaction was retrieved from cosigning pool.\nDo you want to open it now?")):
return
xprv = wallet.keystore.get_master_private_key(password)
if not xprv:
return
try:
k = bh2u(bitcoin.deserialize_xprv(xprv)[-1])
EC = bitcoin.EC_KEY(bfh(k))
message = bh2u(EC.decrypt_message(message))
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_message(str(e))
return
self.listener.clear(keyhash)
tx = transaction.Transaction(message)
show_transaction(tx, window, prompt_if_unsaved=True)
|
|
# Game Architecture Network Layer (ganet)
#
# Copyright (c) 2014 Roman Divotkey, Univ. of Applied Sciences Upper Austria.
# All rights reserved.
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE', which is part of this source code package.
#
# THIS CODE IS PROVIDED AS EDUCATIONAL MATERIAL AND NOT INTENDED TO ADDRESS
# ALL REAL WORLD PROBLEMS AND ISSUES IN DETAIL.
"""Provides a basic implementation of the protocol used by the Game Terminal.
This is the server side implementation of the protocol. This module uses
the network module for communication.
"""
import network, message
_num_clients = None
_session_listener = []
_session_started = False
_decoder = message.MessageDecoder()
_peers = {}
_connection_listeners = set()
_axis_listeners = set()
_button_listeners = set()
class Peer(object):
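    """Represents a single connected Game Terminal client.

    Wraps the client's address together with a message encoder; each method
    encodes one protocol command and hands it to the network module.
    """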
def __init__(self, addr, encoder):
self.addr = addr
self.encoder = encoder
def accept(self):
self.encoder.reset()
self.encoder.write_string('welcome')
network.send_message((self.encoder.get_message(), self.addr))
def refuse(self):
self.encoder.reset()
self.encoder.write_string('goaway')
network.send_message((self.encoder.get_message(), self.addr))
def disconnect(self, instant = False):
self.encoder.reset()
self.encoder.write_string('disconnect')
if instant:
network.fire_message((self.encoder.get_message(), self.addr))
else:
network.send_message((self.encoder.get_message(), self.addr))
def destroy_entity(self, entity_id):
self.encoder.reset()
self.encoder.write_string('destroy')
self.encoder.write_string(entity_id)
network.send_message((self.encoder.get_message(), self.addr))
def set_world_width(self, width):
self.encoder.reset()
self.encoder.write_string('worldwidth')
self.encoder.write_float(width)
network.send_message((self.encoder.get_message(), self.addr))
def spawn_entity(self, entity_id, x, y, angle, vertices):
self.encoder.reset()
self.encoder.write_string('spawn')
self.encoder.write_string(entity_id)
self.encoder.write_float(x)
self.encoder.write_float(y)
self.encoder.write_float(angle)
self.encoder.write_int(len(vertices))
for vertex in vertices:
self.encoder.write_float(vertex[0])
self.encoder.write_float(vertex[1])
network.send_message((self.encoder.get_message(), self.addr))
def move_entity(self, entity_id, x, y, angle):
self.encoder.reset()
self.encoder.write_string('move')
self.encoder.write_string(entity_id)
self.encoder.write_float(x)
self.encoder.write_float(y)
self.encoder.write_float(angle)
network.send_message((self.encoder.get_message(), self.addr))
def add_connection_listener(listener):
"""Adds the given connection listener.
    Connection listener must have the following signature:
listener(peer, connected)
- peer: the client that has connected or disconnected.
- connected: True if the client has connected, False
if the client has disconnected.
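    Example (illustrative sketch; handler name is arbitrary):
    --------
    def my_connection_listener(peer, connected):
        if connected:
            print 'client connected:', peer
        else:
            print 'client disconnected:', peer
    xprotocol.add_connection_listener(my_connection_listener)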
"""
_connection_listeners.add(listener)
def remove_connection_listener(listener):
"""Removes the specified connection listener."""
_connection_listeners.remove(listener)
def add_axis_listener(listener):
"""Adds the given axis listener.
Axis listener must have the following signature:
listener(peer, axis_number, axis_value)
- peer: the client that is the origin of the message
- axis_number: integer value >0 that identifies the axis.
    - axis_value: float value between -1.0 and 1.0. Specifies
      the new position of the axis.
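    Example (illustrative sketch; handler name is arbitrary):
    --------
    def my_axis_listener(peer, axis_number, axis_value):
        print 'axis %d moved to %.2f' % (axis_number, axis_value)
    xprotocol.add_axis_listener(my_axis_listener)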
"""
_axis_listeners.add(listener)
def remove_axis_listener(listener):
"""Removes the specified axis listener."""
_axis_listeners.discard(listener)
def add_button_listener(listener):
"""Adds the given button listener.
    Button listener must have the following signature:
listener(peer, button_number, pressed)
- peer: the client that is the origin of the message
- button_number: integer value >0 that identifies the button.
- pressed: True if the button has been pressed, False if
the button has been released.
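    Example (illustrative sketch; handler name is arbitrary):
    --------
    def my_button_listener(peer, button_number, pressed):
        if pressed:
            print 'button %d pressed' % button_number
        else:
            print 'button %d released' % button_number
    xprotocol.add_button_listener(my_button_listener)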
"""
_button_listeners.add(listener)
def remove_button_listener(listener):
"""Removes the specified button listener."""
_button_listeners.discard(listener)
def add_session_listener(listener):
"""Adds the given session listener.
    Session listener must accept a boolean argument indicating if the
session has been started or stopped.
Example:
--------
def my_session_listener(started):
if started:
print 'session has been started'
else:
print 'session has been stopped'
xprotocol.add_session_listener(my_session_listener)
"""
_session_listener.append(listener)
def remove_session_listener(listener):
"""Removes the given session listener."""
_session_listener.remove(listener)
def set_world_width(width):
"""Sets the with of the game world to be displayed."""
for peer in _peers.itervalues():
peer.set_world_width(width)
def destroy_entity(entity_id):
"""Destroys the entity representation with the specified id."""
for peer in _peers.itervalues():
peer.destroy_entity(entity_id)
def spawn_entity(entity_id, x, y, angle, vertices):
"""Spans a new entity representation.
Required arguments are:
- id of the entity (typically a string),
- the position within the game world (x, y),
- the orientation (angle)
- a list of vertices (to be rendered as polygon).
"""
for peer in _peers.itervalues():
peer.spawn_entity(entity_id, x, y, angle, vertices)
def move_entity(entity_id, x, y, angle):
"""Moves the entity representation to the specified position."""
for peer in _peers.itervalues():
peer.move_entity(entity_id, x, y, angle)
def _start_session():
global _session_started
assert(not _session_started)
_session_started = True
for listener in _session_listener:
listener(True)
def _stop_session():
global _session_started
assert(_session_started)
    _session_started = False
for listener in _session_listener:
listener(False)
def disconnect():
"""Disconnects all client and terminates active session."""
for peer in _peers.itervalues():
peer.disconnect(True)
if _session_started:
_stop_session()
_peers.clear()
def startup(num_clients = 1, port = 12345):
"""Initializes the network and protocol."""
global _num_clients
assert not _num_clients, 'network already started'
network.startup(port = port)
_num_clients = num_clients
def shutdown():
"""Shuts the nerwork down and disconnects from clients."""
global _num_clients
assert _num_clients, 'network not started'
disconnect()
    _num_clients = None
network.shutdown()
def _process(msg):
_decoder.reset(msg[0])
addr = msg[1]
# get peer and decode message id (mid)
peer = _peers.get(addr, None)
mid = _decoder.read_string()
if not peer:
if mid == 'connect':
peer = Peer(addr, _decoder.create_encoder())
if not _session_started:
_peers[addr] = peer
peer.accept()
else:
peer.refuse()
for listener in _connection_listeners:
listener(peer.addr, True)
else:
# ignore messages if peer is not connected
return
if mid == 'disconnect':
_peers.pop(addr)
for listener in _connection_listeners:
listener(peer.addr, False)
elif mid == 'axis':
axis = _decoder.read_int()
value = _decoder.read_float()
for listener in _axis_listeners:
listener(peer.addr, axis, value)
elif mid == 'button':
button = _decoder.read_int()
pressed = _decoder.read_bool()
for listener in _button_listeners:
listener(peer.addr, button, pressed)
# maintain session state
if _session_started and len(_peers) < _num_clients:
_stop_session()
elif not _session_started and len(_peers) >= _num_clients:
_start_session()
def update():
"""Call this method within your game loop."""
network.update()
messages = network.get_messages()
for msg in messages:
_process(msg)
# example usage
if __name__ == '__main__':
import time
num_players = 1
pos_x = 5
def run_idle():
pass
def run_demo():
global pos_x
move_entity('foo', pos_x, 0, 0)
pos_x *= -1
def on_session(started):
global worker
if started:
print 'session has started'
set_world_width(50)
vertices = []
vertices.append((-1, -1))
vertices.append((1, -1))
vertices.append((0, 1))
spawn_entity("foo", 0, 0, 0, vertices)
worker = run_demo
else:
print 'session has stopped'
print "wating for %d players to connect" % num_players
worker = run_idle
add_session_listener(on_session)
worker = run_idle
print "starting test game server"
print "wating for %d player(s) to connect" % num_players
startup(num_players)
try:
while True:
update()
worker()
time.sleep(0.5)
except KeyboardInterrupt:
print "shuting down test game server"
shutdown()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
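        Example (a minimal sketch; assumes an authenticated client whose
        ``route_tables`` attribute is this operation group)::

            poller = await client.route_tables.begin_delete(
                "my-resource-group", "my-route-table")
            await poller.result()  # wait for the delete LRO to finish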
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_05_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
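        Example (a minimal sketch; assumes an authenticated client and that
        ``RouteTable`` is imported from the models package)::

            poller = await client.route_tables.begin_create_or_update(
                "my-resource-group", "my-route-table",
                RouteTable(location="westus"))
            route_table = await poller.result()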
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.RouteTable":
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_05_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
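        Example (a minimal sketch; assumes an authenticated client)::

            async for route_table in client.route_tables.list("my-resource-group"):
                print(route_table.name)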
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_05_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
|
|
import atexit
import glob
import logging
import os
import random
import re
import signal
import subprocess
import sys
import time
import irc.client
class Client(irc.client.SimpleIRCClient):
def __init__(self):
irc.client.SimpleIRCClient.__init__(self)
self.flags = {
'queued': False,
'finished': False,
'ident': None,
}
def on_nicknameinuse(self, connection, event):
connection.nick('{}{}'.format(connection.get_nickname(),
random.randint(0, 99))
)
def on_welcome(self, connection, event):
connection.join('#atbot-test')
def on_join(self, connection, event):
channel = event.target
nickname = event.source.nick
if nickname == 'atbot':
connection.privmsg(
channel,
'{}?{}'.format('!ao http://localhost:8866',
random.randint(0, 1000))
)
def on_part(self, connection, event):
channel = event.target
nickname = event.source.nick
def on_quit(self, connection, event):
nickname = event.source.nick
def on_kick(self, connection, event):
channel = event.target
nickname = self.get_nick_if_possible(event.source)
kicked_nickname = event.arguments[0]
def on_mode(self, connection, event):
channel = event.target
modes_str = ' '.join(event.arguments)
nickname = self.get_nick_if_possible(event.source)
def on_pubmsg(self, connection, event):
channel = event.target
if not irc.client.is_channel(channel):
return
text = event.arguments[0]
nickname = self.get_nick_if_possible(event.source)
if 'Queued' in text:
self.flags['queued'] = True
elif 'finished' in text:
self.flags['finished'] = True
elif '!status' in text:
match = re.search(r'!status ([a-z0-9]+)', text)
self.flags['ident'] = match.group(1)
def on_pubnotice(self, connection, event):
channel = event.target
if not irc.client.is_channel(channel):
return
text = event.arguments[0]
nickname = self.get_nick_if_possible(event.source)
def on_topic(self, connection, event):
channel = event.target
nickname = self.get_nick_if_possible(event.source)
text = event.arguments[0]
def on_nick(self, connection, event):
nickname = event.source.nick
text = event.arguments[0]
@classmethod
def get_nick_if_possible(cls, source):
try:
return source.nick
except AttributeError:
return source
def main():
logging.basicConfig(level=logging.DEBUG)
script_dir = os.path.dirname(__file__)
bot_script = os.path.join(script_dir, 'run_bot.sh')
firehose_script = os.path.join(script_dir, 'run_firehose.sh')
dashboard_script = os.path.join(script_dir, 'run_dashboard.sh')
pipeline_script = os.path.join(script_dir, 'run_pipeline.sh')
cogs_script = os.path.join(script_dir, 'run_cogs.sh')
irc_client = Client()
irc_client.connect('127.0.0.1', 6667, 'obsessive')
print('Wait to avoid reconnect flooding')
for dummy in range(100):
irc_client.reactor.process_once(timeout=0.1)
time.sleep(0.1)
print('.', end='')
sys.stdout.flush()
print()
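    # Each helper runs in its own process group (preexec_fn=os.setpgrp) so
    # that cleanup() can terminate or kill the whole tree with os.killpg.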
bot_proc = subprocess.Popen([bot_script], preexec_fn=os.setpgrp)
firehose_proc = subprocess.Popen([firehose_script], preexec_fn=os.setpgrp)
dashboard_proc = subprocess.Popen([dashboard_script], preexec_fn=os.setpgrp)
pipeline_proc = subprocess.Popen([pipeline_script], preexec_fn=os.setpgrp)
cogs_proc = subprocess.Popen([cogs_script], preexec_fn=os.setpgrp)
web_proc = subprocess.Popen(
['python3.4', '-m', 'huhhttp', '--port', '8866'],
preexec_fn=os.setpgrp
)
all_procs = [bot_proc, firehose_proc, dashboard_proc, pipeline_proc, cogs_proc, web_proc]
@atexit.register
def cleanup():
for proc in all_procs:
print('Terminate', proc)
try:
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
except OSError as error:
print(error)
time.sleep(1)
for proc in all_procs:
print('Kill', proc)
try:
os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
except OSError as error:
print(error)
    def check_alive():
        bot_proc.poll()
        firehose_proc.poll()
        dashboard_proc.poll()
        pipeline_proc.poll()
        web_proc.poll()
        cogs_proc.poll()
assert bot_proc.returncode is None, bot_proc.returncode
assert firehose_proc.returncode is None, firehose_proc.returncode
assert dashboard_proc.returncode is None, dashboard_proc.returncode
assert pipeline_proc.returncode is None, pipeline_proc.returncode
assert web_proc.returncode is None, web_proc.returncode
assert cogs_proc.returncode is None, cogs_proc.returncode
time.sleep(2)
check_alive()
start_time = time.time()
while True:
irc_client.reactor.process_once(timeout=0.2)
time_now = time.time()
if time_now - start_time > 5 * 60:
break
if all(irc_client.flags.values()):
break
flags = irc_client.flags
short_ident = flags['ident'][:5]
flags['warc_dir'] = tuple(
glob.glob('/tmp/warc/*{}*.gz'.format(short_ident))
)
flags['rsync_dir'] = tuple(
glob.glob('/tmp/rsync/*{}*.json'.format(short_ident))
)
print('---FIN---')
print(flags)
if not all(flags.values()):
print('FAIL!')
sys.exit(42)
check_alive()
if __name__ == '__main__':
main()
|
|
"""Contains the core functionality that manages merging of assets.
"""
from __future__ import with_statement
import contextlib
try:
from urllib.request import Request as URLRequest, urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import Request as URLRequest, urlopen
from urllib2 import HTTPError
import logging
import os
from io import open
from webassets.six.moves import filter
from .utils import cmp_debug_levels, StringIO
__all__ = ('FileHunk', 'MemoryHunk', 'merge', 'FilterTool',
'MoreThanOneFilterError', 'NoFilters')
# Log which is used to output low-level information about what the build does.
# This is set up such that it does not output just because the root level
# "webassets" logger is set to level DEBUG (for example via the commandline
# --verbose option). Instead, the messages are only shown when an environment
# variable is set.
# However, we might want to change this in the future. The CLI --verbose option
# could instead just set the level to NOTICE, for example.
log = logging.getLogger('webassets.debug')
log.addHandler(logging.StreamHandler())
if os.environ.get('WEBASSETS_DEBUG'):
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.ERROR)
class BaseHunk(object):
"""Abstract base class.
"""
def mtime(self):
raise NotImplementedError()
def __hash__(self):
return hash(self.data())
def __eq__(self, other):
if isinstance(other, BaseHunk):
# Allow class to be used as a unique dict key.
return hash(self) == hash(other)
return False
def data(self):
raise NotImplementedError()
def save(self, filename):
with open(filename, 'w') as f:
f.write(self.data())
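# Illustrative sketch (not part of the library): because hunks hash on their
# content, two hunks with identical data collapse to a single dict key.
#
#   seen = {}
#   for hunk in (MemoryHunk(u'body {}'), MemoryHunk(u'body {}')):
#       seen[hunk] = True
#   assert len(seen) == 1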
class FileHunk(BaseHunk):
"""Exposes a single file through as a hunk.
"""
def __init__(self, filename):
self.filename = filename
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.filename)
def mtime(self):
pass
def data(self):
f = open(self.filename, 'r', encoding='utf-8')
try:
return f.read()
finally:
f.close()
class UrlHunk(BaseHunk):
"""Represents a file that is referenced by an Url.
If an environment is given, it's cache will be used to cache the url
contents, and to access it, as allowed by the etag/last modified headers.
"""
def __init__(self, url, env=None):
self.url = url
self.env = env
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.url)
def data(self):
if not hasattr(self, '_data'):
request = URLRequest(self.url)
# Look in the cache for etag / last modified headers to use
# TODO: "expires" header could be supported
if self.env and self.env.cache:
headers = self.env.cache.get(
('url', 'headers', self.url))
if headers:
etag, lmod = headers
if etag: request.add_header('If-None-Match', etag)
if lmod: request.add_header('If-Modified-Since', lmod)
# Make a request
try:
response = urlopen(request)
except HTTPError as e:
if e.code != 304:
raise
# Use the cached version of the url
self._data = self.env.cache.get(('url', 'contents', self.url))
else:
with contextlib.closing(response):
self._data = response.read()
# Cache the info from this request
if self.env and self.env.cache:
self.env.cache.set(
('url', 'headers', self.url),
(response.headers.getheader("ETag"),
response.headers.getheader("Last-Modified")))
self.env.cache.set(('url', 'contents', self.url), self._data)
return self._data
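# Illustrative sketch (not part of the library): fetching a remote asset.
# Repeated data() calls reuse the memoized self._data; with an environment
# that has a cache, a 304 response falls back to the cached contents.
#
#   hunk = UrlHunk('http://example.com/static/base.css')
#   css = hunk.data()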
class MemoryHunk(BaseHunk):
"""Content that is no longer a direct representation of a source file. It
might have filters applied, and is probably the result of merging multiple
individual source files together.
"""
def __init__(self, data, files=None):
self._data = data
self.files = files or []
def __repr__(self):
        # Include a hash of the data. We want this during logging, so we
# can see which hunks contain identical content. Because this is
# a question of performance, make sure to log in such a way that
# when logging is disabled, this won't be called, i.e.: don't
# %s-format yourself, let logging do it as needed.
# TODO: Add a test to ensure this isn't called.
        return '<%s %s>' % (self.__class__.__name__, hash(self.data()))
def mtime(self):
pass
def data(self):
if hasattr(self._data, 'read'):
return self._data.read()
return self._data
def save(self, filename):
f = open(filename, 'w', encoding='utf-8')
try:
f.write(self.data())
finally:
f.close()
def merge(hunks, separator=None):
"""Merge the given list of hunks, returning a new ``MemoryHunk`` object.
"""
# TODO: combine the list of source files, we'd like to collect them
# The linebreak is important in certain cases for Javascript
# files, like when a last line is a //-comment.
if not separator:
separator = '\n'
return MemoryHunk(separator.join([h.data() for h in hunks]))
class MoreThanOneFilterError(Exception):
def __init__(self, message, filters):
Exception.__init__(self, message)
self.filters = filters
class NoFilters(Exception):
pass
class FilterTool(object):
"""Can apply filters to hunk objects, while using the cache.
If ``no_cache_read`` is given, then the cache will not be considered for
this operation (though the result will still be written to the cache).
``kwargs`` are options that should be passed along to the filters.
"""
VALID_TRANSFORMS = ('input', 'output',)
VALID_FUNCS = ('open', 'concat',)
def __init__(self, cache=None, no_cache_read=False, kwargs=None):
self.cache = cache
self.no_cache_read = no_cache_read
self.kwargs = kwargs or {}
def _wrap_cache(self, key, func):
"""Return cache value ``key``, or run ``func``.
"""
if self.cache:
if not self.no_cache_read:
log.debug('Checking cache for key %s', key)
content = self.cache.get(key)
                content = self.cache.get(key)
                if content not in (False, None):
log.debug('Using cached result for %s', key)
return MemoryHunk(content)
content = func().getvalue()
if self.cache:
log.debug('Storing result in cache with key %s', key,)
self.cache.set(key, content)
return MemoryHunk(content)
def apply(self, hunk, filters, type, kwargs=None):
"""Apply the given list of filters to the hunk, returning a new
``MemoryHunk`` object.
``kwargs`` are options that should be passed along to the filters.
If ``hunk`` is a file hunk, a ``source_path`` key will automatically
be added to ``kwargs``.
"""
assert type in self.VALID_TRANSFORMS
log.debug('Need to run method "%s" of filters (%s) on hunk %s with '
'kwargs=%s', type, filters, hunk, kwargs)
filters = [f for f in filters if getattr(f, type, None)]
if not filters: # Short-circuit
log.debug('No filters have "%s" methods, returning hunk '
'unchanged' % (type,))
return hunk
def func():
kwargs_final = self.kwargs.copy()
kwargs_final.update(kwargs or {})
data = StringIO(hunk.data())
for filter in filters:
log.debug('Running method "%s" of %s with kwargs=%s',
type, filter, kwargs_final)
out = StringIO(u'') # For 2.x, StringIO().getvalue() returns str
getattr(filter, type)(data, out, **kwargs_final)
data = out
data.seek(0)
return data
# Note that the key used to cache this hunk is different from the key
# the hunk will expose to subsequent merges, i.e. hunk.key() is always
# based on the actual content, and does not match the cache key. The
# latter also includes information about for example the filters used.
#
# It wouldn't have to be this way. Hunk could subsequently expose their
# cache key through hunk.key(). This would work as well, but would be
# an inferior solution: Imagine a source file which receives
# non-substantial changes, in the sense that they do not affect the
# filter output, for example whitespace. If a hunk's key is the cache
# key, such a change would invalidate the caches for all subsequent
# operations on this hunk as well, even though it didn't actually
# change after all.
key = ("hunk", hunk, tuple(filters), type)
return self._wrap_cache(key, func)
def apply_func(self, filters, type, args, kwargs=None, cache_key=None):
"""Apply a filter that is not a "stream in, stream out" transform (i.e.
like the input() and output() filter methods). Instead, the filter
method is given the arguments in ``args`` and should then produce an
output stream. This is used, e.g., for the concat() and open() filter
methods.
Only one such filter can run per operation.
``cache_key`` may be a list of additional values to use as the cache
key, in addition to the default key (the filter and arguments).
"""
assert type in self.VALID_FUNCS
log.debug('Need to run method "%s" of one of the filters (%s) '
'with args=%s, kwargs=%s', type, filters, args, kwargs)
filters = [f for f in filters if getattr(f, type, None)]
if not filters: # Short-circuit
log.debug('No filters have a "%s" method' % type)
raise NoFilters()
if len(filters) > 1:
raise MoreThanOneFilterError(
'These filters cannot be combined: %s' % (
', '.join([f.name for f in filters])), filters)
def func():
filter = filters[0]
out = StringIO(u'') # For 2.x, StringIO().getvalue() returns str
kwargs_final = self.kwargs.copy()
kwargs_final.update(kwargs or {})
log.debug('Running method "%s" of %s with args=%s, kwargs=%s',
type, filter, args, kwargs)
getattr(filter, type)(out, *args, **kwargs_final)
return out
key = ("hunk", args, tuple(filters), type, cache_key or [])
return self._wrap_cache(key, func)
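# Illustrative sketch (not part of the library): applying the "output"
# method of a hypothetical filter instance ``my_filter`` to an in-memory
# hunk. kwargs passed to apply() are merged over those given to the
# constructor.
#
#   tool = FilterTool(cache=None, kwargs={'output_path': '/tmp/out.css'})
#   result = tool.apply(MemoryHunk(u'a {color: red}'), [my_filter], 'output')
#   print(result.data())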
def merge_filters(filters1, filters2):
"""Merge two filter lists into one.
Duplicate filters are removed. Since filter order is important, the order
of the arguments to this function also matter. Duplicates are always
removed from the second filter set if they exist in the first.
The result will always be ``filters1``, with additional unique filters
from ``filters2`` appended. Within the context of a hierarchy, you want
``filters2`` to be the parent.
This function presumes that all the given filters inherit from ``Filter``,
which properly implements operators to determine duplicate filters.
"""
result = list(filters1[:])
if filters2:
for f in filters2:
            if f not in result:
result.append(f)
return result
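# Illustrative sketch (not part of the library): merge_filters() preserves
# order and drops duplicates from the second list. Plain strings stand in
# for Filter instances here, since only equality comparison is required:
#
#   assert merge_filters(['cssmin'], ['cssrewrite', 'cssmin']) == \
#       ['cssmin', 'cssrewrite']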
def select_filters(filters, level):
"""Return from the list in ``filters`` those filters which indicate that
they should run for the given debug level.
"""
return [f for f in filters
if f.max_debug_level is None or
cmp_debug_levels(level, f.max_debug_level) <= 0]
|
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import pytest
import numpy as np
from itertools import product
try:
import cPickle as pickle
except ImportError:
import pickle
import celerite
from celerite import GP, terms
from celerite.solver import get_kernel_value, CARMASolver
__all__ = ["test_carma", "test_log_determinant", "test_solve", "test_dot",
"test_pickle", "test_build_gp", "test_log_likelihood",
"test_predict", "test_nyquist_singularity"]
def test_carma(seed=42):
solver = celerite.CholeskySolver()
np.random.seed(seed)
t = np.sort(np.random.uniform(0, 5, 100))
yerr = 0.1 + np.zeros_like(t)
y = np.sin(t) + yerr * np.random.randn(len(t))
carma_solver = CARMASolver(-0.5, np.array([0.1, 0.05, 0.01]),
np.array([0.2, 0.1]))
carma_ll = carma_solver.log_likelihood(t, y, yerr)
params = carma_solver.get_celerite_coeffs()
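    # compute() takes the jitter (0.0 here), the six celerite coefficient
    # arrays returned by get_celerite_coeffs(), empty arrays that disable the
    # general low-rank (A, U, V) term, the sorted times, and the variances.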
solver.compute(
0.0, params[0], params[1], params[2], params[3], params[4], params[5],
np.empty(0), np.empty((0, 0)), np.empty((0, 0)),
t, yerr**2
)
celerite_ll = -0.5*(
solver.dot_solve(y) + solver.log_determinant() + len(t)*np.log(2*np.pi)
)
assert np.allclose(carma_ll, celerite_ll)
def _test_log_determinant(alpha_real, beta_real, alpha_complex_real,
alpha_complex_imag, beta_complex_real,
beta_complex_imag, seed=42):
solver = celerite.CholeskySolver()
np.random.seed(seed)
t = np.sort(np.random.rand(5))
diag = np.random.uniform(0.1, 0.5, len(t))
solver.compute(
0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
np.empty(0), np.empty((0, 0)), np.empty((0, 0)),
t, diag
)
K = get_kernel_value(
alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, t[:, None] - t[None, :]
)
K[np.diag_indices_from(K)] += diag
assert np.allclose(solver.log_determinant(), np.linalg.slogdet(K)[1])
def test_log_determinant(seed=42):
alpha_real = np.array([1.5, 0.1])
beta_real = np.array([1.0, 0.3])
alpha_complex_real = np.array([1.0])
alpha_complex_imag = np.array([0.1])
beta_complex_real = np.array([1.0])
beta_complex_imag = np.array([1.0])
_test_log_determinant(alpha_real, beta_real, alpha_complex_real,
alpha_complex_imag, beta_complex_real,
beta_complex_imag, seed=seed)
alpha_real = np.array([1.5, 0.1, 0.6, 0.3, 0.8, 0.7])
beta_real = np.array([1.0, 0.3, 0.05, 0.01, 0.1, 0.2])
alpha_complex_real = np.array([1.0, 2.0])
alpha_complex_imag = np.array([0.1, 0.5])
beta_complex_real = np.array([1.0, 1.0])
beta_complex_imag = np.array([1.0, 1.0])
_test_log_determinant(alpha_real, beta_real, alpha_complex_real,
alpha_complex_imag, beta_complex_real,
beta_complex_imag, seed=seed)
def _test_solve(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, seed=42,
with_general=False):
solver = celerite.CholeskySolver()
np.random.seed(seed)
t = np.sort(np.random.rand(500))
diag = np.random.uniform(0.1, 0.5, len(t))
b = np.random.randn(len(t))
with pytest.raises(RuntimeError):
solver.log_determinant()
with pytest.raises(RuntimeError):
solver.dot_solve(b)
if with_general:
U = np.vander(t - np.mean(t), 4).T
V = U * np.random.rand(4)[:, None]
A = np.sum(U * V, axis=0) + 1e-8
else:
A = np.empty(0)
U = np.empty((0, 0))
V = np.empty((0, 0))
solver.compute(
0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
A, U, V, t, diag
)
K = get_kernel_value(
alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, t[:, None] - t[None, :]
)
K[np.diag_indices_from(K)] += diag
if len(A):
K[np.diag_indices_from(K)] += A
K += np.tril(np.dot(U.T, V), -1) + np.triu(np.dot(V.T, U), 1)
assert np.allclose(solver.solve(b).T, np.linalg.solve(K, b))
b = np.random.randn(len(t), 5)
assert np.allclose(solver.solve(b), np.linalg.solve(K, b))
@pytest.mark.parametrize("with_general", [True, False])
def test_solve(with_general, seed=42):
alpha_real = np.array([1.5, 0.1])
beta_real = np.array([1.0, 0.3])
alpha_complex_real = np.array([1.0])
alpha_complex_imag = np.array([0.1])
beta_complex_real = np.array([1.0])
beta_complex_imag = np.array([1.0])
_test_solve(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, seed=seed,
with_general=with_general)
alpha_real = np.array([1.5, 0.1, 0.6, 0.3, 0.8, 0.7])
beta_real = np.array([1.0, 0.3, 0.05, 0.01, 0.1, 0.2])
alpha_complex_real = np.array([1.0, 2.0])
alpha_complex_imag = np.array([0.1, 0.5])
beta_complex_real = np.array([1.0, 1.0])
beta_complex_imag = np.array([1.0, 1.0])
_test_solve(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, seed=seed,
with_general=with_general)
@pytest.mark.parametrize("with_general", [True, False])
def test_dot(with_general, seed=42):
solver = celerite.CholeskySolver()
np.random.seed(seed)
t = np.sort(np.random.rand(500))
b = np.random.randn(len(t), 5)
alpha_real = np.array([1.3, 0.2])
beta_real = np.array([0.5, 0.8])
alpha_complex_real = np.array([0.1])
alpha_complex_imag = np.array([0.0])
beta_complex_real = np.array([1.5])
beta_complex_imag = np.array([0.1])
K = get_kernel_value(
alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, t[:, None] - t[None, :]
)
if with_general:
U = np.vander(t - np.mean(t), 4).T
V = U * np.random.rand(4)[:, None]
A = np.sum(U * V, axis=0) + 1e-8
K[np.diag_indices_from(K)] += A
K += np.tril(np.dot(U.T, V), -1) + np.triu(np.dot(V.T, U), 1)
else:
A = np.empty(0)
U = np.empty((0, 0))
V = np.empty((0, 0))
x0 = np.dot(K, b)
x = solver.dot(
0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
A, U, V, t, b
)
assert np.allclose(x0, x)
@pytest.mark.parametrize("with_general", [True, False])
def test_dot_L(with_general, seed=42):
solver = celerite.CholeskySolver()
np.random.seed(seed)
t = np.sort(np.random.rand(5))
b = np.random.randn(len(t), 5)
yerr = np.random.uniform(0.1, 0.5, len(t))
alpha_real = np.array([1.3, 0.2])
beta_real = np.array([0.5, 0.8])
alpha_complex_real = np.array([0.1])
alpha_complex_imag = np.array([0.0])
beta_complex_real = np.array([1.5])
beta_complex_imag = np.array([0.1])
K = get_kernel_value(
alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag, t[:, None] - t[None, :]
)
K[np.diag_indices_from(K)] += yerr**2
if with_general:
U = np.vander(t - np.mean(t), 4).T
V = U * np.random.rand(4)[:, None]
A = np.sum(U * V, axis=0) + 1e-8
K[np.diag_indices_from(K)] += A
K += np.tril(np.dot(U.T, V), -1) + np.triu(np.dot(V.T, U), 1)
else:
A = np.empty(0)
U = np.empty((0, 0))
V = np.empty((0, 0))
L = np.linalg.cholesky(K)
x0 = np.dot(L, b)
solver.compute(
0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
A, U, V, t, yerr**2)
x = solver.dot_L(b)
assert np.allclose(x0, x)
@pytest.mark.parametrize("with_general", [True, False])
def test_pickle(with_general, seed=42):
solver = celerite.CholeskySolver()
np.random.seed(seed)
t = np.sort(np.random.rand(500))
diag = np.random.uniform(0.1, 0.5, len(t))
y = np.sin(t)
if with_general:
U = np.vander(t - np.mean(t), 4).T
V = U * np.random.rand(4)[:, None]
A = np.sum(U * V, axis=0) + 1e-8
else:
A = np.empty(0)
U = np.empty((0, 0))
V = np.empty((0, 0))
alpha_real = np.array([1.3, 1.5])
beta_real = np.array([0.5, 0.2])
alpha_complex_real = np.array([1.0])
alpha_complex_imag = np.array([0.1])
beta_complex_real = np.array([1.0])
beta_complex_imag = np.array([1.0])
def compare(solver1, solver2):
assert solver1.computed() == solver2.computed()
if not solver1.computed():
return
assert np.allclose(solver1.log_determinant(),
solver2.log_determinant())
assert np.allclose(solver1.dot_solve(y),
solver2.dot_solve(y))
s = pickle.dumps(solver, -1)
solver2 = pickle.loads(s)
compare(solver, solver2)
solver.compute(
0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
A, U, V, t, diag
)
solver2 = pickle.loads(pickle.dumps(solver, -1))
compare(solver, solver2)
# Test that models can be pickled too.
kernel = terms.RealTerm(0.5, 0.1)
kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
gp1 = GP(kernel)
gp1.compute(t, diag)
s = pickle.dumps(gp1, -1)
gp2 = pickle.loads(s)
assert np.allclose(gp1.log_likelihood(y), gp2.log_likelihood(y))
def test_build_gp(seed=42):
kernel = terms.RealTerm(0.5, 0.1)
kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
gp = GP(kernel)
assert gp.vector_size == 5
p = gp.get_parameter_vector()
assert np.allclose(p, [0.5, 0.1, 0.6, 0.7, 1.0])
gp.set_parameter_vector([0.5, 0.8, 0.6, 0.7, 2.0])
p = gp.get_parameter_vector()
assert np.allclose(p, [0.5, 0.8, 0.6, 0.7, 2.0])
with pytest.raises(ValueError):
gp.set_parameter_vector([0.5, 0.8, -0.6])
with pytest.raises(ValueError):
gp.set_parameter_vector("face1")
@pytest.mark.parametrize("with_general", [True, False])
def test_log_likelihood(with_general, seed=42):
np.random.seed(seed)
x = np.sort(np.random.rand(10))
yerr = np.random.uniform(0.1, 0.5, len(x))
y = np.sin(x)
if with_general:
U = np.vander(x - np.mean(x), 4).T
V = U * np.random.rand(4)[:, None]
A = np.sum(U * V, axis=0) + 1e-8
else:
A = np.empty(0)
U = np.empty((0, 0))
V = np.empty((0, 0))
# Check quiet argument with a non-positive definite kernel.
class NPDTerm(terms.Term):
parameter_names = ("par1", )
def get_real_coefficients(self, params): # NOQA
return [params[0]], [0.1]
gp = GP(NPDTerm(-1.0))
with pytest.raises(celerite.solver.LinAlgError):
gp.compute(x, 0.0)
with pytest.raises(celerite.solver.LinAlgError):
gp.log_likelihood(y)
assert np.isinf(gp.log_likelihood(y, quiet=True))
if terms.HAS_AUTOGRAD:
assert np.isinf(gp.grad_log_likelihood(y, quiet=True)[0])
kernel = terms.RealTerm(0.1, 0.5)
gp = GP(kernel)
with pytest.raises(RuntimeError):
gp.log_likelihood(y)
termlist = [(0.1 + 10./j, 0.5 + 10./j) for j in range(1, 4)]
termlist += [(1.0 + 10./j, 0.01 + 10./j, 0.5, 0.01) for j in range(1, 10)]
termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
for term in termlist:
if len(term) > 2:
kernel += terms.ComplexTerm(*term)
else:
kernel += terms.RealTerm(*term)
gp = GP(kernel)
assert gp.computed is False
with pytest.raises(ValueError):
gp.compute(np.random.rand(len(x)), yerr)
gp.compute(x, yerr, A=A, U=U, V=V)
assert gp.computed is True
assert gp.dirty is False
ll = gp.log_likelihood(y)
K = gp.get_matrix(include_diagonal=True)
ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
ll0 -= 0.5 * np.linalg.slogdet(K)[1]
ll0 -= 0.5 * len(x) * np.log(2*np.pi)
assert np.allclose(ll, ll0)
# Check that changing the parameters "un-computes" the likelihood.
gp.set_parameter_vector(gp.get_parameter_vector())
assert gp.dirty is True
assert gp.computed is False
# Check that changing the parameters changes the likelihood.
gp.compute(x, yerr, A=A, U=U, V=V)
ll1 = gp.log_likelihood(y)
params = gp.get_parameter_vector()
params[0] += 10.0
gp.set_parameter_vector(params)
gp.compute(x, yerr, A=A, U=U, V=V)
ll2 = gp.log_likelihood(y)
assert not np.allclose(ll1, ll2)
gp[1] += 10.0
assert gp.dirty is True
gp.compute(x, yerr, A=A, U=U, V=V)
ll3 = gp.log_likelihood(y)
assert not np.allclose(ll2, ll3)
# Test zero delta t
ind = len(x) // 2
x = np.concatenate((x[:ind], [x[ind]], x[ind:]))
y = np.concatenate((y[:ind], [y[ind]], y[ind:]))
yerr = np.concatenate((yerr[:ind], [yerr[ind]], yerr[ind:]))
gp.compute(x, yerr)
ll = gp.log_likelihood(y)
K = gp.get_matrix(include_diagonal=True)
ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
ll0 -= 0.5 * np.linalg.slogdet(K)[1]
ll0 -= 0.5 * len(x) * np.log(2*np.pi)
assert np.allclose(ll, ll0)
@pytest.mark.parametrize(
"kernel,with_general",
product([
terms.RealTerm(log_a=0.1, log_c=0.5),
terms.RealTerm(log_a=0.1, log_c=0.5) +
terms.RealTerm(log_a=-0.1, log_c=0.7),
terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
terms.JitterTerm(log_sigma=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5) +
terms.JitterTerm(log_sigma=0.1),
terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
terms.RealTerm(log_a=0.1, log_c=0.4),
terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
terms.RealTerm(log_a=0.1, log_c=0.4),
], [False, True])
)
def test_grad_log_likelihood(kernel, with_general, seed=42, eps=1.34e-7):
np.random.seed(seed)
x = np.sort(np.random.rand(100))
yerr = np.random.uniform(0.1, 0.5, len(x))
y = np.sin(x)
if with_general:
U = np.vander(x - np.mean(x), 4).T
V = U * np.random.rand(4)[:, None]
A = np.sum(U * V, axis=0) + 1e-8
else:
A = np.empty(0)
U = np.empty((0, 0))
V = np.empty((0, 0))
if not terms.HAS_AUTOGRAD:
gp = GP(kernel)
gp.compute(x, yerr, A=A, U=U, V=V)
with pytest.raises(ImportError):
_, grad = gp.grad_log_likelihood(y)
return
for fit_mean in [True, False]:
gp = GP(kernel, fit_mean=fit_mean)
gp.compute(x, yerr, A=A, U=U, V=V)
_, grad = gp.grad_log_likelihood(y)
grad0 = np.empty_like(grad)
v = gp.get_parameter_vector()
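        # Compare against central finite differences:
        # (ll(p + eps) - ll(p - eps)) / (2 * eps); hence the 0.5 factor.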
for i, pval in enumerate(v):
v[i] = pval + eps
gp.set_parameter_vector(v)
ll = gp.log_likelihood(y)
v[i] = pval - eps
gp.set_parameter_vector(v)
ll -= gp.log_likelihood(y)
grad0[i] = 0.5 * ll / eps
v[i] = pval
assert np.allclose(grad, grad0)
def test_predict(seed=42):
np.random.seed(seed)
x = np.linspace(1, 59, 300)
t = np.sort(np.random.uniform(10, 50, 100))
yerr = np.random.uniform(0.1, 0.5, len(t))
y = np.sin(t)
kernel = terms.RealTerm(0.1, 0.5)
for term in [(0.6, 0.7, 1.0), (0.1, 0.05, 0.5, -0.1)]:
kernel += terms.ComplexTerm(*term)
gp = GP(kernel)
gp.compute(t, yerr)
K = gp.get_matrix(include_diagonal=True)
Ks = gp.get_matrix(x, t)
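    # Dense reference computation of the GP predictive distribution:
    # mu = K_* K^{-1} y and cov = K_** - K_* K^{-1} K_*^T.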
true_mu = np.dot(Ks, np.linalg.solve(K, y))
true_cov = gp.get_matrix(x, x) - np.dot(Ks, np.linalg.solve(K, Ks.T))
mu, cov = gp.predict(y, x)
_, var = gp.predict(y, x, return_var=True)
assert np.allclose(mu, true_mu)
assert np.allclose(cov, true_cov)
assert np.allclose(var, np.diag(true_cov))
mu0, cov0 = gp.predict(y, t)
mu, cov = gp.predict(y)
assert np.allclose(mu0, mu)
assert np.allclose(cov0, cov)
# Test whether the GP can properly handle the case where the Lorentzian has a
# very large quality factor and the time samples are almost exactly at Nyquist
# sampling. This can frustrate Green's-function-based CARMA solvers.
def test_nyquist_singularity(seed=4220):
np.random.seed(seed)
kernel = terms.ComplexTerm(1.0, np.log(1e-6), np.log(1.0))
gp = GP(kernel)
# Samples are very close to Nyquist with f = 1.0
ts = np.array([0.0, 0.5, 1.0, 1.5])
ts[1] = ts[1]+1e-9*np.random.randn()
ts[2] = ts[2]+1e-8*np.random.randn()
ts[3] = ts[3]+1e-7*np.random.randn()
yerr = np.random.uniform(low=0.1, high=0.2, size=len(ts))
y = np.random.randn(len(ts))
gp.compute(ts, yerr)
llgp = gp.log_likelihood(y)
K = gp.get_matrix(ts)
K[np.diag_indices_from(K)] += yerr**2.0
ll = (-0.5*np.dot(y, np.linalg.solve(K, y)) - 0.5*np.linalg.slogdet(K)[1] -
0.5*len(y)*np.log(2.0*np.pi))
assert np.allclose(ll, llgp)
|
|
#
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Remote API connection map.
"""
# pylint: disable=C0103
# C0103: Invalid name, since the R_* names are not conforming
import re
import urlparse
from ganeti import constants
from ganeti import http
from ganeti import utils
from ganeti.rapi import rlib2
_NAME_PATTERN = r"[\w\._-]+"
_DISK_PATTERN = r"\d+"
# the connection map is created at the end of this file
CONNECTOR = {}
class Mapper(object):
"""Map resource to method.
"""
def __init__(self, connector=None):
"""Resource mapper constructor.
    @param connector: a dictionary mapping URL path regexps to handler classes
"""
if connector is None:
connector = CONNECTOR
self._connector = connector
def getController(self, uri):
"""Find method for a given URI.
@param uri: string with URI
    @return: a tuple containing the following fields:
      - method: name of method mapped to URI
      - items: a list of variable items in the path
      - args: a dictionary with additional parameters from URL
    @raise http.HttpNotFound: if no method is found for the given URI
"""
if "?" in uri:
(path, query) = uri.split("?", 1)
args = urlparse.parse_qs(query)
else:
path = uri
query = None
args = {}
# Try to find handler for request path
result = utils.FindMatch(self._connector, path)
if result is None:
raise http.HttpNotFound()
(handler, groups) = result
return (handler, groups, args)
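# Illustrative sketch (not part of this module's API surface): resolving a
# handler once CONNECTOR has been populated (see the bottom of this file).
# Roughly:
#
#   mapper = Mapper()
#   (handler, groups, args) = mapper.getController("/2/nodes/node1?bulk=1")
#   # handler is rlib2.R_2_nodes_name, groups holds the captured path item
#   # ("node1") and args == {"bulk": ["1"]}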
def _ConvertPattern(value):
"""Converts URI pattern into a regular expression group.
Used by L{_CompileHandlerPath}.
"""
if isinstance(value, UriPattern):
return "(%s)" % value.content
else:
return value
def _CompileHandlerPath(*args):
"""Compiles path for RAPI resource into regular expression.
@return: Compiled regular expression object
"""
return re.compile("^%s$" % "".join(map(_ConvertPattern, args)))
class UriPattern(object):
__slots__ = [
"content",
]
def __init__(self, content):
self.content = content
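# Illustrative example: with the default translate function, a resource path
# compiles to an anchored regular expression in which each UriPattern
# becomes a capturing group:
#
#   _CompileHandlerPath("/2/nodes/", UriPattern(_NAME_PATTERN))
#   # equivalent to re.compile(r"^/2/nodes/([\w\._-]+)$")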
def GetHandlers(node_name_pattern, instance_name_pattern,
group_name_pattern, network_name_pattern,
job_id_pattern, disk_pattern,
query_res_pattern,
translate=None):
"""Returns all supported resources and their handlers.
C{node_name_pattern} and the other C{*_pattern} parameters are wrapped in
L{UriPattern} and, if used in a URI, passed to the function specified using
C{translate}. C{translate} receives 1..N parameters which are either plain
strings or instances of L{UriPattern} and returns a dictionary key suitable
for the caller of C{GetHandlers}. The default implementation in
L{_CompileHandlerPath} returns a compiled regular expression in which each
pattern is a group.
@rtype: dict
"""
if translate is None:
translate_fn = _CompileHandlerPath
else:
translate_fn = translate
node_name = UriPattern(node_name_pattern)
instance_name = UriPattern(instance_name_pattern)
group_name = UriPattern(group_name_pattern)
network_name = UriPattern(network_name_pattern)
job_id = UriPattern(job_id_pattern)
disk = UriPattern(disk_pattern)
query_res = UriPattern(query_res_pattern)
# Important note: New resources should always be added under /2. During a
# discussion in July 2010 it was decided that having per-resource versions
# is more flexible and future-compatible than versioning the whole remote
# API.
# TODO: Consider a different data structure where all keys are of the same
# type. Strings are faster to look up in a dictionary than iterating and
# matching regular expressions, therefore maybe two separate dictionaries
# should be used.
return {
"/": rlib2.R_root,
"/2": rlib2.R_2,
"/version": rlib2.R_version,
"/2/nodes": rlib2.R_2_nodes,
translate_fn("/2/nodes/", node_name):
rlib2.R_2_nodes_name,
translate_fn("/2/nodes/", node_name, "/powercycle"):
rlib2.R_2_nodes_name_powercycle,
translate_fn("/2/nodes/", node_name, "/tags"):
rlib2.R_2_nodes_name_tags,
translate_fn("/2/nodes/", node_name, "/role"):
rlib2.R_2_nodes_name_role,
translate_fn("/2/nodes/", node_name, "/evacuate"):
rlib2.R_2_nodes_name_evacuate,
translate_fn("/2/nodes/", node_name, "/migrate"):
rlib2.R_2_nodes_name_migrate,
translate_fn("/2/nodes/", node_name, "/modify"):
rlib2.R_2_nodes_name_modify,
translate_fn("/2/nodes/", node_name, "/storage"):
rlib2.R_2_nodes_name_storage,
translate_fn("/2/nodes/", node_name, "/storage/modify"):
rlib2.R_2_nodes_name_storage_modify,
translate_fn("/2/nodes/", node_name, "/storage/repair"):
rlib2.R_2_nodes_name_storage_repair,
"/2/instances": rlib2.R_2_instances,
translate_fn("/2/instances/", instance_name):
rlib2.R_2_instances_name,
translate_fn("/2/instances/", instance_name, "/info"):
rlib2.R_2_instances_name_info,
translate_fn("/2/instances/", instance_name, "/tags"):
rlib2.R_2_instances_name_tags,
translate_fn("/2/instances/", instance_name, "/reboot"):
rlib2.R_2_instances_name_reboot,
translate_fn("/2/instances/", instance_name, "/reinstall"):
rlib2.R_2_instances_name_reinstall,
translate_fn("/2/instances/", instance_name, "/snapshot"):
rlib2.R_2_instances_name_snapshot,
translate_fn("/2/instances/", instance_name, "/replace-disks"):
rlib2.R_2_instances_name_replace_disks,
translate_fn("/2/instances/", instance_name, "/shutdown"):
rlib2.R_2_instances_name_shutdown,
translate_fn("/2/instances/", instance_name, "/startup"):
rlib2.R_2_instances_name_startup,
translate_fn("/2/instances/", instance_name, "/activate-disks"):
rlib2.R_2_instances_name_activate_disks,
translate_fn("/2/instances/", instance_name, "/deactivate-disks"):
rlib2.R_2_instances_name_deactivate_disks,
translate_fn("/2/instances/", instance_name, "/recreate-disks"):
rlib2.R_2_instances_name_recreate_disks,
translate_fn("/2/instances/", instance_name, "/prepare-export"):
rlib2.R_2_instances_name_prepare_export,
translate_fn("/2/instances/", instance_name, "/export"):
rlib2.R_2_instances_name_export,
translate_fn("/2/instances/", instance_name, "/migrate"):
rlib2.R_2_instances_name_migrate,
translate_fn("/2/instances/", instance_name, "/failover"):
rlib2.R_2_instances_name_failover,
translate_fn("/2/instances/", instance_name, "/rename"):
rlib2.R_2_instances_name_rename,
translate_fn("/2/instances/", instance_name, "/modify"):
rlib2.R_2_instances_name_modify,
translate_fn("/2/instances/", instance_name, "/disk/", disk, "/grow"):
rlib2.R_2_instances_name_disk_grow,
translate_fn("/2/instances/", instance_name, "/console"):
rlib2.R_2_instances_name_console,
"/2/networks": rlib2.R_2_networks,
translate_fn("/2/networks/", network_name):
rlib2.R_2_networks_name,
translate_fn("/2/networks/", network_name, "/connect"):
rlib2.R_2_networks_name_connect,
translate_fn("/2/networks/", network_name, "/disconnect"):
rlib2.R_2_networks_name_disconnect,
translate_fn("/2/networks/", network_name, "/modify"):
rlib2.R_2_networks_name_modify,
translate_fn("/2/networks/", network_name, "/tags"):
rlib2.R_2_networks_name_tags,
"/2/groups": rlib2.R_2_groups,
translate_fn("/2/groups/", group_name):
rlib2.R_2_groups_name,
translate_fn("/2/groups/", group_name, "/modify"):
rlib2.R_2_groups_name_modify,
translate_fn("/2/groups/", group_name, "/rename"):
rlib2.R_2_groups_name_rename,
translate_fn("/2/groups/", group_name, "/assign-nodes"):
rlib2.R_2_groups_name_assign_nodes,
translate_fn("/2/groups/", group_name, "/tags"):
rlib2.R_2_groups_name_tags,
"/2/jobs": rlib2.R_2_jobs,
translate_fn("/2/jobs/", job_id):
rlib2.R_2_jobs_id,
translate_fn("/2/jobs/", job_id, "/wait"):
rlib2.R_2_jobs_id_wait,
"/2/instances-multi-alloc": rlib2.R_2_instances_multi_alloc,
"/2/tags": rlib2.R_2_tags,
"/2/info": rlib2.R_2_info,
"/2/os": rlib2.R_2_os,
"/2/redistribute-config": rlib2.R_2_redist_config,
"/2/features": rlib2.R_2_features,
"/2/modify": rlib2.R_2_cluster_modify,
translate_fn("/2/query/", query_res):
rlib2.R_2_query,
translate_fn("/2/query/", query_res, "/fields"):
rlib2.R_2_query_fields,
}
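# Illustrative sketch: GetHandlers() with a hypothetical custom translate
# function that renders human-readable paths instead of compiled regexps
# (e.g. for documentation purposes):
#
#   def _Describe(*args):
#     return "".join(a.content if isinstance(a, UriPattern) else a
#                    for a in args)
#
#   paths = GetHandlers("[node]", "[instance]", "[group]", "[network]",
#                       "[job]", "[disk]", "[resource]",
#                       translate=_Describe)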
CONNECTOR.update(GetHandlers(_NAME_PATTERN, _NAME_PATTERN,
_NAME_PATTERN, _NAME_PATTERN,
constants.JOB_ID_TEMPLATE, _DISK_PATTERN,
_NAME_PATTERN))
|
|
"""Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import os
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_COMPONENT_LOADED)
import homeassistant.config as config_util
from homeassistant import setup
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers import discovery
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, \
assert_setup_component, get_test_config_dir, mock_integration, \
mock_entity_platform
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({
'comp_conf': {
'hello': str
}
}, required=True)
mock_integration(
self.hass,
MockModule('comp_conf', config_schema=config_schema))
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': None
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {}
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
}
})
def test_validate_platform_config(self, caplog):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({
})
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema_base=platform_schema_base),
)
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform(platform_schema=platform_schema))
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'not_existing',
'hello': 'world',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': [{
'platform': 'whatever',
'hello': 'world',
}]
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': None
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {}
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
def test_validate_platform_config_2(self, caplog):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({
'hello': 'world',
})
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema=platform_schema,
platform_schema_base=platform_schema_base))
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# pass
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
# fail: key hello violates component platform_schema_base
'platform_conf 2': {
'platform': 'whatever',
'hello': 'there'
}
})
def test_validate_platform_config_3(self, caplog):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({
'hello': str,
})
platform_schema = PLATFORM_SCHEMA.extend({
'cheers': str,
'hello': 'world',
})
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema=component_schema))
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# pass
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
# fail: key hello violates component platform_schema
'platform_conf 2': {
'platform': 'whatever',
'hello': 'there'
}
})
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema_base=component_schema))
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform(platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
'platform': 'whatever',
'entity_namespace': 'yummy',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert setup.setup_component(self.hass, 'non_existing', {}) is False
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = mock.MagicMock(return_value=True)
mock_integration(
self.hass,
MockModule('comp', setup=mock_setup))
assert setup.setup_component(self.hass, 'comp', {})
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, 'comp', {})
assert not mock_setup.called
@mock.patch('homeassistant.util.package.install_package',
return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
mock_integration(
self.hass,
MockModule('comp', requirements=['package==0.0.1']))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
@asyncio.coroutine
def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
mock_integration(
self.hass,
MockModule('comp', async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, 'comp', {})
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, 'comp', {})
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ['maybe_existing']
mock_integration(self.hass, MockModule('comp', dependencies=deps))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(self.hass, MockModule('comp2', dependencies=deps))
mock_integration(self.hass, MockModule('maybe_existing'))
assert setup.setup_component(self.hass, 'comp2', {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
mock_integration(
self.hass,
MockModule('comp', setup=lambda hass, config: False))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception('fail!')
mock_integration(self.hass,
MockModule('comp', setup=exception_setup))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get('comp_a', {}).get('valid', False):
return True
raise Exception('Config not passed in: {}'.format(config))
platform = MockPlatform()
mock_integration(self.hass,
MockModule('comp_a', setup=config_check_setup))
mock_integration(
self.hass,
MockModule('platform_a',
setup=config_check_setup,
dependencies=['comp_a']),
)
mock_entity_platform(self.hass, 'switch.platform_a', platform)
setup.setup_component(self.hass, 'switch', {
'comp_a': {
'valid': True
},
'switch': {
'platform': 'platform_a',
}
})
assert 'comp_a' in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend({
'valid': True,
}, extra=vol.PREVENT_EXTRA)
mock_setup = mock.MagicMock(spec_set=True)
mock_entity_platform(
self.hass,
'switch.platform_a',
MockPlatform(platform_schema=platform_schema,
setup_platform=mock_setup))
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'invalid': True
}
})
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('switch')
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True,
'invalid_extra': True,
}
})
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('switch')
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True
}
})
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
mock_integration(
self.hass,
MockModule('disabled_component', setup=lambda hass, config: None))
assert not setup.setup_component(self.hass, 'disabled_component', {})
assert 'disabled_component' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule('disabled_component', setup=lambda hass, config: False))
assert not setup.setup_component(self.hass, 'disabled_component', {})
assert 'disabled_component' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule('disabled_component', setup=lambda hass, config: True))
assert setup.setup_component(self.hass, 'disabled_component', {})
assert 'disabled_component' in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
def component1_setup(hass, config):
"""Set up mock component."""
discovery.discover(
hass, 'test_component2', {}, 'test_component2', {})
discovery.discover(
hass, 'test_component3', {}, 'test_component3', {})
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
mock_integration(
self.hass,
MockModule('test_component1', setup=component1_setup))
mock_integration(
self.hass,
MockModule('test_component2', setup=component_track_setup))
mock_integration(
self.hass,
MockModule('test_component3', setup=component_track_setup))
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(
self.hass, 'test_component1', {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
@asyncio.coroutine
def test_component_cannot_depend_config(hass):
"""Test config is not allowed to be a dependency."""
result = yield from setup._async_process_dependencies(
hass, None, 'test', ['config'])
assert not result
@asyncio.coroutine
def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
mock_integration(hass, MockModule('test_component1'))
with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
as mock_call:
result = yield from setup.async_setup_component(
hass, 'test_component1', {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
@asyncio.coroutine
def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
mock_integration(
hass,
MockModule('test_component1', platform_schema=PLATFORM_SCHEMA))
with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
as mock_call:
result = yield from setup.async_setup_component(
hass, 'test_component1', {})
assert result
assert not mock_call.called
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, 'test', mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add('test')
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {
'component': 'test'
})
await hass.async_block_till_done()
assert calls == ['test']
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {
'component': 'test'
})
await hass.async_block_till_done()
assert calls == ['test']
# Should be called right away
setup.async_when_setup(hass, 'test', mock_callback)
await hass.async_block_till_done()
assert calls == ['test', 'test']
|
|
"""
Support for Google travel time sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.google_travel_time/
"""
from datetime import datetime
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
CONF_API_KEY, CONF_NAME, EVENT_HOMEASSISTANT_START, ATTR_LATITUDE,
ATTR_LONGITUDE)
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.location as location
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['googlemaps==2.4.6']
_LOGGER = logging.getLogger(__name__)
CONF_DESTINATION = 'destination'
CONF_MODE = 'mode'
CONF_OPTIONS = 'options'
CONF_ORIGIN = 'origin'
CONF_TRAVEL_MODE = 'travel_mode'
DEFAULT_NAME = 'Google Travel Time'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
ALL_LANGUAGES = ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es',
'eu', 'fa', 'fi', 'fr', 'gl', 'gu', 'hi', 'hr', 'hu', 'id',
'it', 'iw', 'ja', 'kn', 'ko', 'lt', 'lv', 'ml', 'mr', 'nl',
'no', 'pl', 'pt', 'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl',
'sr', 'sv', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'vi',
'zh-CN', 'zh-TW']
AVOID = ['tolls', 'highways', 'ferries', 'indoor']
TRANSIT_PREFS = ['less_walking', 'fewer_transfers']
TRANSPORT_TYPE = ['bus', 'subway', 'train', 'tram', 'rail']
TRAVEL_MODE = ['driving', 'walking', 'bicycling', 'transit']
TRAVEL_MODEL = ['best_guess', 'pessimistic', 'optimistic']
UNITS = ['metric', 'imperial']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: 'driving'}): vol.All(
dict, vol.Schema({
vol.Optional(CONF_MODE, default='driving'): vol.In(TRAVEL_MODE),
vol.Optional('language'): vol.In(ALL_LANGUAGES),
vol.Optional('avoid'): vol.In(AVOID),
vol.Optional('units'): vol.In(UNITS),
vol.Exclusive('arrival_time', 'time'): cv.string,
vol.Exclusive('departure_time', 'time'): cv.string,
vol.Optional('traffic_model'): vol.In(TRAVEL_MODEL),
vol.Optional('transit_mode'): vol.In(TRANSPORT_TYPE),
vol.Optional('transit_routing_preference'): vol.In(TRANSIT_PREFS)
}))
})
TRACKABLE_DOMAINS = ['device_tracker', 'sensor', 'zone']
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr))
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
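# Illustrative example: convert_time_to_utc('08:00:00') yields the timestamp
# of the next occurrence of 08:00 local time (today if still ahead of the
# current moment, otherwise tomorrow).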
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Set up the Google travel time platform."""
def run_setup(event):
"""Delay the setup until Home Assistant is fully initialized.
        This allows any entities this sensor references to exist already.
"""
options = config.get(CONF_OPTIONS)
if options.get('units') is None:
options['units'] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = ("Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!")
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = "{} - {}".format(DEFAULT_NAME, titled_mode)
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(hass, name, api_key, origin,
destination, options)
if sensor.valid_api_connection:
add_devices_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(Entity):
"""Representation of a Google travel time sensor."""
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = 'min'
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
import googlemaps
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
            _LOGGER.error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix['rows'][0]['elements'][0]
if 'duration_in_traffic' in _data:
return round(_data['duration_in_traffic']['value']/60)
if 'duration' in _data:
return round(_data['duration']['value']/60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res['rows']
_data = self._matrix['rows'][0]['elements'][0]
if 'duration_in_traffic' in _data:
res['duration_in_traffic'] = _data['duration_in_traffic']['text']
if 'duration' in _data:
res['duration'] = _data['duration']['text']
if 'distance' in _data:
res['distance'] = _data['distance']['text']
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get('departure_time')
atime = options_copy.get('arrival_time')
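        # Times given as 'HH:MM:SS' strings are converted to timestamps;
        # any other non-empty value (e.g. 'now') is passed through untouched.
        # With neither time set, departure_time defaults to 'now'.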
if dtime is not None and ':' in dtime:
options_copy['departure_time'] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy['departure_time'] = dtime
elif atime is None:
options_copy['departure_time'] = 'now'
if atime is not None and ':' in atime:
options_copy['arrival_time'] = convert_time_to_utc(atime)
elif atime is not None:
options_copy['arrival_time'] = atime
# Convert device_trackers to google friendly location
if hasattr(self, '_origin_entity_id'):
self._origin = self._get_location_from_entity(
self._origin_entity_id
)
if hasattr(self, '_destination_entity_id'):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(
self._origin, self._destination, **options_copy)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location",
entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
def _get_location_from_attributes(entity):
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return "%s,%s" % (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == 'zone' and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
|
|
from __future__ import unicode_literals
import warnings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models import signals
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from picklefield.fields import PickledObjectField
from polymodels.models import BasePolymorphicModel
from polymodels.utils import copy_fields
from ...db.fields import (
FieldDefinitionTypeField, LazilyTranslatedField, PythonIdentifierField,
)
from ...utils import lazy_string_format, popattr
from ..model import ModelDefinitionAttribute
from ..ordered import OrderedModel
from .managers import FieldDefinitionChoiceManager, FieldDefinitionManager
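# Callable default for FieldDefinition.default below: returning the sentinel
# from a function keeps PickledObjectField from pickling NOT_PROVIDED itself
# (an assumption about why the indirection exists).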
def NOT_PROVIDED():
return models.NOT_PROVIDED
class FieldDefinitionBase(models.base.ModelBase):
FIELD_CLASS_ATTR = 'defined_field_class'
FIELD_OPTIONS_ATTR = 'defined_field_options'
FIELD_DESCRIPTION_ATTR = 'defined_field_description'
FIELD_CATEGORY_ATTR = 'defined_field_category'
DEFAULT_VERBOSE_NAME = _("%s field definition")
DEFAULT_VERBOSE_NAME_PLURAL = _("%s field definitions")
_base_definition = None
_field_definitions = {}
_proxies = {}
_lookups = {}
def __new__(cls, name, parents, attrs):
super_new = super(FieldDefinitionBase, cls).__new__
if 'Meta' in attrs:
Meta = attrs['Meta']
field_description = popattr(Meta, cls.FIELD_DESCRIPTION_ATTR, None)
field_class = popattr(Meta, cls.FIELD_CLASS_ATTR, None)
if field_class:
if not issubclass(field_class, models.Field):
msg = ("Meta's defined_field_class must be a subclass of "
"django.db.models.fields.Field")
raise ImproperlyConfigured(msg)
elif field_description is None:
field_description = getattr(field_class, 'description', None)
field_options = popattr(Meta, cls.FIELD_OPTIONS_ATTR, ())
if field_options:
if not isinstance(field_options, tuple):
msg = "Meta's defined_field_options must be a tuple"
raise ImproperlyConfigured(msg)
field_category = popattr(Meta, cls.FIELD_CATEGORY_ATTR, None)
has_verbose_name = hasattr(Meta, 'verbose_name')
has_verbose_name_plural = hasattr(Meta, 'verbose_name_plural')
else:
field_class = None
field_options = ()
field_description = None
field_category = None
has_verbose_name = False
has_verbose_name_plural = False
definition = super_new(cls, name, parents, attrs)
# Store the FieldDefinition cls
if cls._base_definition is None:
cls._base_definition = definition
else:
base_definition = cls._base_definition
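            # Walk the new definition's bases breadth-first (mimicking the
            # MRO) so field class, description, options and category are
            # inherited from parent field definitions when not set here.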
parents = [definition]
while parents:
parent = parents.pop(0)
if isinstance(parent, cls):
parent_opts = parent._meta
if field_description is None:
field_description = getattr(parent_opts, cls.FIELD_DESCRIPTION_ATTR, None)
if field_class is None:
field_class = getattr(parent_opts, cls.FIELD_CLASS_ATTR, None)
if field_class and field_description is None:
field_description = field_class.description
field_options += getattr(parent_opts, cls.FIELD_OPTIONS_ATTR, ())
if field_category is None:
field_category = getattr(parent_opts, cls.FIELD_CATEGORY_ATTR, None)
if parent is not base_definition:
parents = list(parent.__bases__) + parents # mimic mro
from ...management import (
field_definition_post_save, FIELD_DEFINITION_POST_SAVE_UID
)
post_save_dispatch_uid = FIELD_DEFINITION_POST_SAVE_UID % definition._meta.model_name
            signals.post_save.connect(field_definition_post_save,
                                      sender=definition,
                                      dispatch_uid=post_save_dispatch_uid)
# Warn the user that they should rely on signals instead of
# overriding the delete methods since it might not be called
# when deleting the associated model definition.
if definition.delete != base_definition.delete:
def_name = definition.__name__
warnings.warn("Avoid overriding the `delete` method on "
"`FieldDefinition` subclass `%s` since it won't "
"be called when the associated `ModelDefinition` "
"is deleted. If you want to perform actions on "
"deletion, add hooks to the `pre_delete` and "
"`post_delete` signals." % def_name, UserWarning)
setattr(definition._meta, cls.FIELD_CLASS_ATTR, field_class)
setattr(definition._meta, cls.FIELD_OPTIONS_ATTR, tuple(set(field_options)))
setattr(definition._meta, cls.FIELD_DESCRIPTION_ATTR, field_description)
setattr(definition._meta, cls.FIELD_CATEGORY_ATTR, field_category)
if field_description is not None:
if not has_verbose_name:
verbose_name = lazy_string_format(cls.DEFAULT_VERBOSE_NAME, field_description)
definition._meta.verbose_name = verbose_name
if not has_verbose_name_plural:
verbose_name_plural = lazy_string_format(cls.DEFAULT_VERBOSE_NAME_PLURAL, field_description)
definition._meta.verbose_name_plural = verbose_name_plural
if field_class is not None:
cls._field_definitions[field_class] = definition
return definition
class FieldDefinition(six.with_metaclass(FieldDefinitionBase, BasePolymorphicModel,
ModelDefinitionAttribute)):
CONTENT_TYPE_FIELD = 'content_type'
content_type = FieldDefinitionTypeField()
name = PythonIdentifierField(_('name'))
verbose_name = LazilyTranslatedField(_('verbose name'), blank=True, null=True)
help_text = LazilyTranslatedField(_('help text'), blank=True, null=True)
null = models.BooleanField(_('null'), default=False)
blank = models.BooleanField(_('blank'), default=False)
db_column = models.SlugField(_('db column'), max_length=30, blank=True, null=True)
db_index = models.BooleanField(_('db index'), default=False)
editable = models.BooleanField(_('editable'), default=True)
default = PickledObjectField(_('default'), null=True, default=NOT_PROVIDED)
primary_key = models.BooleanField(_('primary key'), default=False)
unique = models.BooleanField(_('unique'), default=False)
unique_for_date = PythonIdentifierField(_('unique for date'), blank=True, null=True)
unique_for_month = PythonIdentifierField(_('unique for month'), blank=True, null=True)
unique_for_year = PythonIdentifierField(_('unique for year'), blank=True, null=True)
objects = FieldDefinitionManager()
class Meta:
app_label = 'mutant'
verbose_name = _('field')
verbose_name_plural = _('fields')
unique_together = (('model_def', 'name'),)
defined_field_options = (
'name', 'verbose_name', 'help_text',
'null', 'blank', 'db_column', 'db_index',
'editable', 'default', 'primary_key', 'unique',
'unique_for_date', 'unique_for_month', 'unique_for_year'
)
def __init__(self, *args, **kwargs):
super(FieldDefinition, self).__init__(*args, **kwargs)
if self.pk:
self._saved_name = self.name
def natural_key(self):
return self.model_def.natural_key() + (self.name,)
natural_key.dependencies = ('mutant.modeldefinition',)
def save(self, *args, **kwargs):
if self._state.adding:
self.content_type = self.get_content_type()
if self.pk:
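            # Keep a reference to the field as currently bound so the
            # post-save handler can diff the old definition against the new
            # one (assumed use; see field_definition_post_save).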
self._state._pre_save_field = self.get_bound_field()
saved = super(FieldDefinition, self).save(*args, **kwargs)
self._saved_name = self.name
return saved
def delete(self, *args, **kwargs):
opts = self._meta
if opts.proxy:
# TODO: #18083
            # Proxy model deletion is currently broken: deleting an inherited
            # model proxy only deletes the proxied model, and deletion signals
            # are not sent for the proxied model and its subclasses. Here we
            # attempt to fix this by getting the concrete model instance of
            # the proxy and deleting it while sending proxy model signals.
concrete_model = opts.concrete_model
concrete_model_instance = copy_fields(self, concrete_model)
# Send proxy pre_delete
signals.pre_delete.send(self.__class__, instance=self)
# Delete the concrete model
delete = concrete_model_instance.delete(*args, **kwargs)
            # This should be sent before the subclasses' post_delete, but we
            # cannot venture too far into deletion.Collector. Better to wait
            # until #18083 is fixed.
signals.post_delete.send(self.__class__, instance=self)
return delete
return super(FieldDefinition, self).delete(*args, **kwargs)
def clone(self):
options = dict(
(name, getattr(self, name))
for name in self.get_field_option_names()
)
return self.__class__(**options)
@classmethod
def get_field_class(cls):
field_class = getattr(cls._meta, FieldDefinitionBase.FIELD_CLASS_ATTR)
if not field_class:
raise NotImplementedError(
"%s didn't define any `field_class`." % cls.__name__
)
return field_class
@classmethod
def get_field_option_names(cls):
return getattr(cls._meta, FieldDefinitionBase.FIELD_OPTIONS_ATTR)
@classmethod
def get_field_description(cls):
return getattr(cls._meta, FieldDefinitionBase.FIELD_DESCRIPTION_ATTR)
@classmethod
def get_field_category(cls):
return getattr(cls._meta, FieldDefinitionBase.FIELD_CATEGORY_ATTR)
@classmethod
def get_content_type(cls):
return ContentType.objects.get_for_model(cls, for_concrete_model=False)
def get_field_options(self, **overrides):
model_opts = self._meta
options = {}
for name in self.get_field_option_names():
if name in overrides: # Avoid fetching if it's overridden
continue
value = getattr(self, name)
field = model_opts.get_field(name)
default = field.to_python(field.get_default())
if value != default:
options[name] = value
if 'choices' not in overrides: # Avoid fetching if it's overridden
choices = self.choices.construct()
if choices:
options['choices'] = choices
return options
def construct(self, **overrides):
cls = self.get_field_class()
options = self.get_field_options(**overrides)
options.update(overrides)
instance = cls(**options)
instance.set_attributes_from_name(self.name)
return instance
def get_bound_field(self):
opts = self.model_def.model_class()._meta
for field in opts.fields:
if field.name == self._saved_name:
return field
def construct_for_migrate(self):
"""
Provide a suitable field to be used in migrations.
"""
return self.construct()
def clean(self):
# Make sure we can build the field
try:
field = self.construct()
except NotImplementedError:
pass # `get_field_class` is not implemented
except Exception as e:
raise ValidationError(e)
else:
# Test the specified default value
if field.has_default():
default = field.get_default()
try:
field.clean(default, None)
except Exception:
msg = _("%r is not a valid default value") % default
raise ValidationError({'default': [msg]})
class FieldDefinitionChoice(OrderedModel):
"""
A Model to allow specifying choices for a field definition instance
"""
field_def = models.ForeignKey(FieldDefinition, on_delete=models.CASCADE, related_name='choices')
group = LazilyTranslatedField(_('group'), blank=True, null=True)
value = PickledObjectField(_('value'), editable=True)
label = LazilyTranslatedField(_('label'))
objects = FieldDefinitionChoiceManager()
class Meta:
app_label = 'mutant'
verbose_name = _('field definition choice')
verbose_name_plural = _('field definition choices')
ordering = ['order']
unique_together = (
('field_def', 'order'),
('field_def', 'group', 'value')
)
def clean(self):
try:
# Make sure to create a field instance with no choices to avoid
# validating against existing ones.
field = self.field_def.type_cast().construct(choices=None)
field.clean(self.value, None)
except ValidationError as e:
raise ValidationError({'value': e.messages})
def save(self, *args, **kwargs):
save = super(FieldDefinitionChoice, self).save(*args, **kwargs)
self.field_def.model_def.model_class(force_create=True)
return save
def get_ordering_queryset(self):
qs = super(FieldDefinitionChoice, self).get_ordering_queryset()
return qs.filter(field_def_id=self.field_def_id)
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from fabric.api import local
from lib import base
from lib.gobgp import *
from lib.quagga import *
import sys
import os
import time
import nose
from noseplugin import OptionParser, parser_option
class GoBGPTestBase(unittest.TestCase):
wait_per_retry = 5
retry_limit = 15
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
rs_clients = [QuaggaBGPContainer(name='q{0}'.format(i+1), asn=65001+i,
router_id='192.168.0.{0}'.format(i+2))
for i in range(3)]
ctns = [g1] + rs_clients
q1 = rs_clients[0]
q2 = rs_clients[1]
q3 = rs_clients[2]
# advertise a route from route-server-clients
routes = []
for idx, rs_client in enumerate(rs_clients):
route = '10.0.{0}.0/24'.format(idx+1)
rs_client.add_route(route)
routes.append(route)
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
for rs_client in rs_clients:
g1.add_peer(rs_client, is_rs_client=True, passwd='passwd', passive=True, prefix_limit=10)
rs_client.add_peer(g1, passwd='passwd')
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}
def check_gobgp_local_rib(self):
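        # Each RS client's local RIB on g1 should hold the routes of every
        # *other* client (its own route is not reflected back), hence the
        # expected size of len(self.quaggas) - 1.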
for rs_client in self.quaggas.itervalues():
done = False
for _ in range(self.retry_limit):
if done:
break
local_rib = self.gobgp.get_local_rib(rs_client)
local_rib = [p['prefix'] for p in local_rib]
state = self.gobgp.get_neighbor_state(rs_client)
self.assertEqual(state, BGP_FSM_ESTABLISHED)
if len(local_rib) < len(self.quaggas)-1:
time.sleep(self.wait_per_retry)
continue
                self.assertEqual(len(local_rib), len(self.quaggas) - 1)
for c in self.quaggas.itervalues():
if rs_client != c:
for r in c.routes:
self.assertTrue(r in local_rib)
done = True
if done:
continue
            self.fail('should not reach here')
def check_rs_client_rib(self):
for rs_client in self.quaggas.itervalues():
done = False
for _ in range(self.retry_limit):
if done:
break
global_rib = rs_client.get_global_rib()
global_rib = [p['prefix'] for p in global_rib]
if len(global_rib) < len(self.quaggas):
time.sleep(self.wait_per_retry)
continue
                self.assertEqual(len(global_rib), len(self.quaggas))
for c in self.quaggas.itervalues():
for r in c.routes:
self.assertTrue(r in global_rib)
done = True
if done:
continue
            self.fail('should not reach here')
# test each neighbor state is turned establish
def test_01_neighbor_established(self):
for q in self.quaggas.itervalues():
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
# check advertised routes are stored in route-server's local-rib
def test_02_check_gobgp_local_rib(self):
self.check_gobgp_local_rib()
# check gobgp's global rib. when configured as route-server, global rib
# must be empty
def test_03_check_gobgp_global_rib(self):
self.assertTrue(len(self.gobgp.get_global_rib()) == 0)
# check routes are properly advertised to route-server-client
def test_04_check_rs_clients_rib(self):
self.check_rs_client_rib()
# check if quagga that is appended can establish connection with gobgp
def test_05_add_rs_client(self):
q4 = QuaggaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
self.quaggas['q4'] = q4
route = '10.0.4.0/24'
q4.add_route(route)
initial_wait_time = q4.run()
time.sleep(initial_wait_time)
self.gobgp.add_peer(q4, is_rs_client=True)
q4.add_peer(self.gobgp)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q4)
# check advertised routes are stored in gobgp's local-rib
    def test_06_check_gobgp_local_rib(self):
self.check_gobgp_local_rib()
# check routes are properly advertised to quagga
    def test_07_check_rs_clients_rib(self):
self.check_rs_client_rib()
    def test_08_stop_one_rs_client(self):
q4 = self.quaggas['q4']
q4.stop()
self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q4)
del self.quaggas['q4']
# check a route advertised from q4 is deleted from gobgp's local-rib
    def test_09_check_gobgp_local_rib(self):
self.check_gobgp_local_rib()
# check whether gobgp properly sent withdrawal message with q4's route
    def test_10_check_rs_clients_rib(self):
self.check_rs_client_rib()
@unittest.skip("med shouldn't work with different AS peers by default")
    def test_11_add_distant_relative(self):
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q5 = QuaggaBGPContainer(name='q5', asn=65005, router_id='192.168.0.6')
initial_wait_time = q5.run()
time.sleep(initial_wait_time)
for q in [q2, q3]:
q5.add_peer(q)
q.add_peer(q5)
med200 = {'name': 'med200',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 200,
'priority': 10}
q2.add_policy(med200, self.gobgp, 'out')
med100 = {'name': 'med100',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 100,
'priority': 10}
q3.add_policy(med100, self.gobgp, 'out')
q5.add_route('10.0.6.0/24')
q2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
q3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q2)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
def check_nexthop(target_prefix, expected_nexthop):
done = False
for _ in range(self.retry_limit):
if done:
break
time.sleep(self.wait_per_retry)
for path in q1.get_global_rib():
if path['prefix'] == target_prefix:
print "{0}'s nexthop is {1}".format(path['prefix'],
path['nexthop'])
n_addrs = [i[1].split('/')[0] for i in
expected_nexthop.ip_addrs]
if path['nexthop'] in n_addrs:
done = True
break
return done
done = check_nexthop('10.0.6.0/24', q3)
self.assertTrue(done)
med300 = {'name': 'med300',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 300,
'priority': 5}
q3.add_policy(med300, self.gobgp, 'out')
time.sleep(self.wait_per_retry)
done = check_nexthop('10.0.6.0/24', q2)
self.assertTrue(done)
if __name__ == '__main__':
    if os.geteuid() != 0:
print "you are not root."
sys.exit(1)
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    if int(output) != 0:
print "docker not found"
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2017-2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import binascii
import ipaddress
import ipv6
import network_data
import network_layer
import common
import config
import mesh_cop
import mle
from enum import IntEnum
class CheckType(IntEnum):
CONTAIN = 0
NOT_CONTAIN = 1
OPTIONAL = 2
class NetworkDataCheckType:
PREFIX_CNT = 1
PREFIX_CONTENT = 2
def check_address_query(command_msg, source_node, destination_address):
"""Verify source_node sent a properly formatted Address Query Request message to the destination_address.
"""
command_msg.assertCoapMessageContainsTlv(network_layer.TargetEid)
source_rloc = source_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
assert (ipv6.ip_address(source_rloc) == command_msg.ipv6_packet.ipv6_header.source_address), (
"Error: The IPv6 source address is not the RLOC of the originator. The source node's rloc is: " +
str(ipv6.ip_address(source_rloc)) + ", but the source_address in command msg is: " +
str(command_msg.ipv6_packet.ipv6_header.source_address))
if isinstance(destination_address, bytearray):
destination_address = bytes(destination_address)
assert (ipv6.ip_address(destination_address) == command_msg.ipv6_packet.ipv6_header.destination_address
), "Error: The IPv6 destination address is not expected."
def check_address_notification(command_msg, source_node, destination_node):
"""Verify source_node sent a properly formatted Address Notification command message to destination_node.
"""
command_msg.assertCoapMessageRequestUriPath('/a/an')
command_msg.assertCoapMessageContainsTlv(network_layer.TargetEid)
command_msg.assertCoapMessageContainsTlv(network_layer.Rloc16)
command_msg.assertCoapMessageContainsTlv(network_layer.MlEid)
source_rloc = source_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
assert (ipv6.ip_address(source_rloc) == command_msg.ipv6_packet.ipv6_header.source_address
), "Error: The IPv6 source address is not the RLOC of the originator."
destination_rloc = destination_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
assert (ipv6.ip_address(destination_rloc) == command_msg.ipv6_packet.ipv6_header.destination_address
), "Error: The IPv6 destination address is not the RLOC of the destination."
def check_address_error_notification(command_msg, source_node, destination_address):
"""Verify source_node sent a properly formatted Address Error Notification command message to destination_address.
"""
command_msg.assertCoapMessageRequestUriPath('/a/ae')
command_msg.assertCoapMessageContainsTlv(network_layer.TargetEid)
command_msg.assertCoapMessageContainsTlv(network_layer.MlEid)
source_rloc = source_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
assert (ipv6.ip_address(source_rloc) == command_msg.ipv6_packet.ipv6_header.source_address), (
"Error: The IPv6 source address is not the RLOC of the originator. The source node's rloc is: " +
str(ipv6.ip_address(source_rloc)) + ", but the source_address in command msg is: " +
str(command_msg.ipv6_packet.ipv6_header.source_address))
if isinstance(destination_address, bytearray):
destination_address = bytes(destination_address)
assert (ipv6.ip_address(destination_address) == command_msg.ipv6_packet.ipv6_header.destination_address), (
"Error: The IPv6 destination address is not expected. The destination node's rloc is: " +
str(ipv6.ip_address(destination_address)) + ", but the destination_address in command msg is: " +
str(command_msg.ipv6_packet.ipv6_header.destination_address))
def check_address_solicit(command_msg, was_router):
command_msg.assertCoapMessageRequestUriPath('/a/as')
command_msg.assertCoapMessageContainsTlv(network_layer.MacExtendedAddress)
command_msg.assertCoapMessageContainsTlv(network_layer.Status)
if was_router:
command_msg.assertCoapMessageContainsTlv(network_layer.Rloc16)
else:
        command_msg.assertCoapMessageDoesNotContainTlv(network_layer.Rloc16)
def check_address_release(command_msg, destination_node):
"""Verify the message is a properly formatted address release destined to the given node.
"""
command_msg.assertCoapMessageRequestUriPath('/a/ar')
command_msg.assertCoapMessageContainsTlv(network_layer.Rloc16)
command_msg.assertCoapMessageContainsTlv(network_layer.MacExtendedAddress)
destination_rloc = destination_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
assert (ipv6.ip_address(destination_rloc) == command_msg.ipv6_packet.ipv6_header.destination_address
), "Error: The destination is not RLOC address"
def check_tlv_request_tlv(command_msg, check_type, tlv_id):
"""Verify if TLV Request TLV contains specified TLV ID
"""
tlv_request_tlv = command_msg.get_mle_message_tlv(mle.TlvRequest)
if check_type == CheckType.CONTAIN:
assert (tlv_request_tlv is not None), "Error: The msg doesn't contain TLV Request TLV"
assert any(
tlv_id == tlv
for tlv in tlv_request_tlv.tlvs), "Error: The msg doesn't contain TLV Request TLV ID: {}".format(tlv_id)
elif check_type == CheckType.NOT_CONTAIN:
if tlv_request_tlv is not None:
assert (any(tlv_id == tlv for tlv in tlv_request_tlv.tlvs) is
False), "Error: The msg contains TLV Request TLV ID: {}".format(tlv_id)
elif check_type == CheckType.OPTIONAL:
if tlv_request_tlv is not None:
if any(tlv_id == tlv for tlv in tlv_request_tlv.tlvs):
print("TLV Request TLV contains TLV ID: {}".format(tlv_id))
else:
print("TLV Request TLV doesn't contain TLV ID: {}".format(tlv_id))
else:
print("The msg doesn't contain TLV Request TLV")
else:
raise ValueError("Invalid check type")
def check_link_request(
command_msg,
source_address=CheckType.OPTIONAL,
leader_data=CheckType.OPTIONAL,
tlv_request_address16=CheckType.OPTIONAL,
tlv_request_route64=CheckType.OPTIONAL,
tlv_request_link_margin=CheckType.OPTIONAL,
):
"""Verify a properly formatted Link Request command message.
"""
command_msg.assertMleMessageContainsTlv(mle.Challenge)
command_msg.assertMleMessageContainsTlv(mle.Version)
check_mle_optional_tlv(command_msg, source_address, mle.SourceAddress)
check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)
check_tlv_request_tlv(command_msg, tlv_request_address16, mle.TlvType.ADDRESS16)
check_tlv_request_tlv(command_msg, tlv_request_route64, mle.TlvType.ROUTE64)
check_tlv_request_tlv(command_msg, tlv_request_link_margin, mle.TlvType.LINK_MARGIN)
def check_link_accept(
command_msg,
destination_node,
leader_data=CheckType.OPTIONAL,
link_margin=CheckType.OPTIONAL,
mle_frame_counter=CheckType.OPTIONAL,
challenge=CheckType.OPTIONAL,
address16=CheckType.OPTIONAL,
route64=CheckType.OPTIONAL,
tlv_request_link_margin=CheckType.OPTIONAL,
):
"""verify a properly formatted link accept command message.
"""
command_msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
command_msg.assertMleMessageContainsTlv(mle.Response)
command_msg.assertMleMessageContainsTlv(mle.Version)
check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)
check_mle_optional_tlv(command_msg, link_margin, mle.LinkMargin)
check_mle_optional_tlv(command_msg, mle_frame_counter, mle.MleFrameCounter)
check_mle_optional_tlv(command_msg, challenge, mle.Challenge)
check_mle_optional_tlv(command_msg, address16, mle.Address16)
check_mle_optional_tlv(command_msg, route64, mle.Route64)
check_tlv_request_tlv(command_msg, tlv_request_link_margin, mle.TlvType.LINK_MARGIN)
destination_link_local = destination_node.get_ip6_address(config.ADDRESS_TYPE.LINK_LOCAL)
assert (ipv6.ip_address(destination_link_local) == command_msg.ipv6_packet.ipv6_header.destination_address
), "Error: The destination is unexpected"
def check_icmp_path(sniffer, path, nodes, icmp_type=ipv6.ICMP_ECHO_REQUEST):
"""Verify icmp message is forwarded along the path.
"""
len_path = len(path)
# Verify icmp message is forwarded to the next node of the path.
for i in range(0, len_path):
node_msg = sniffer.get_messages_sent_by(path[i])
node_icmp_msg = node_msg.get_icmp_message(icmp_type)
if i < len_path - 1:
next_node = nodes[path[i + 1]]
next_node_rloc16 = next_node.get_addr16()
assert (next_node_rloc16 == node_icmp_msg.mac_header.dest_address.rloc), "Error: The path is unexpected."
else:
return True
return False
def check_id_set(command_msg, router_id):
"""Check the command_msg's Route64 tlv to verify router_id is an active router.
"""
tlv = command_msg.assertMleMessageContainsTlv(mle.Route64)
return (tlv.router_id_mask >> (63 - router_id)) & 1
def get_routing_cost(command_msg, router_id):
"""Check the command_msg's Route64 tlv to get the routing cost to router.
"""
tlv = command_msg.assertMleMessageContainsTlv(mle.Route64)
# Get router's mask pos
# Turn the number into binary string. Need to consider the preceding 0
# omitted during conversion.
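    # Worked example (illustrative): a router_id_mask of 0b101 means routers
    # 61 and 63 are active. bin() yields '101', so prefix_len = 61; for
    # router_id 63 the mask index is 63 - 61 = 2, and one '1' precedes it,
    # so its cost lives at routing_entry_pos 1.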
router_id_mask_str = bin(tlv.router_id_mask).replace('0b', '')
prefix_len = 64 - len(router_id_mask_str)
routing_entry_pos = 0
for i in range(0, router_id - prefix_len):
if router_id_mask_str[i] == '1':
routing_entry_pos += 1
    assert router_id_mask_str[router_id - prefix_len] == '1', \
        ("Error: The router isn't in the topology.\n"
         "route64 tlv is: %s.\nrouter_id is: %s.\nrouting_entry_pos is: %s.\nrouter_id_mask_str is: %s." %
         (tlv, router_id, routing_entry_pos, router_id_mask_str))
return tlv.link_quality_and_route_data[routing_entry_pos].route
def check_mle_optional_tlv(command_msg, check_type, tlv):
    if check_type == CheckType.CONTAIN:
        command_msg.assertMleMessageContainsTlv(tlv)
    elif check_type == CheckType.NOT_CONTAIN:
        command_msg.assertMleMessageDoesNotContainTlv(tlv)
    elif check_type == CheckType.OPTIONAL:
        command_msg.assertMleMessageContainsOptionalTlv(tlv)
    else:
        raise ValueError("Invalid check type")
def check_mle_advertisement(command_msg):
command_msg.assertSentWithHopLimit(255)
command_msg.assertSentToDestinationAddress(config.LINK_LOCAL_ALL_NODES_ADDRESS)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
command_msg.assertMleMessageContainsTlv(mle.LeaderData)
command_msg.assertMleMessageContainsTlv(mle.Route64)
def check_parent_request(command_msg, is_first_request):
"""Verify a properly formatted Parent Request command message.
"""
if command_msg.mle.aux_sec_hdr.key_id_mode != 0x2:
raise ValueError("The Key Identifier Mode of the Security Control Field SHALL be set to 0x02")
command_msg.assertSentWithHopLimit(255)
command_msg.assertSentToDestinationAddress(config.LINK_LOCAL_ALL_ROUTERS_ADDRESS)
command_msg.assertMleMessageContainsTlv(mle.Mode)
command_msg.assertMleMessageContainsTlv(mle.Challenge)
command_msg.assertMleMessageContainsTlv(mle.Version)
scan_mask = command_msg.assertMleMessageContainsTlv(mle.ScanMask)
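    # Scan Mask semantics: the first parent request solicits active routers
    # only (R bit set, E bit clear); a retry widens the scan to REEDs as
    # well (E bit also set).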
if not scan_mask.router:
raise ValueError("Parent request without R bit set")
if is_first_request:
if scan_mask.end_device:
raise ValueError("First parent request with E bit set")
elif not scan_mask.end_device:
raise ValueError("Second parent request without E bit set")
def check_parent_response(command_msg, mle_frame_counter=CheckType.OPTIONAL):
"""Verify a properly formatted Parent Response command message.
"""
command_msg.assertMleMessageContainsTlv(mle.Challenge)
command_msg.assertMleMessageContainsTlv(mle.Connectivity)
command_msg.assertMleMessageContainsTlv(mle.LeaderData)
command_msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
command_msg.assertMleMessageContainsTlv(mle.LinkMargin)
command_msg.assertMleMessageContainsTlv(mle.Response)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
command_msg.assertMleMessageContainsTlv(mle.Version)
check_mle_optional_tlv(command_msg, mle_frame_counter, mle.MleFrameCounter)
def check_child_id_request(
command_msg,
tlv_request=CheckType.OPTIONAL,
mle_frame_counter=CheckType.OPTIONAL,
address_registration=CheckType.OPTIONAL,
active_timestamp=CheckType.OPTIONAL,
pending_timestamp=CheckType.OPTIONAL,
route64=CheckType.OPTIONAL,
):
"""Verify a properly formatted Child Id Request command message.
"""
if command_msg.mle.aux_sec_hdr.key_id_mode != 0x2:
raise ValueError("The Key Identifier Mode of the Security Control Field SHALL be set to 0x02")
command_msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
command_msg.assertMleMessageContainsTlv(mle.Mode)
command_msg.assertMleMessageContainsTlv(mle.Response)
command_msg.assertMleMessageContainsTlv(mle.Timeout)
command_msg.assertMleMessageContainsTlv(mle.Version)
check_mle_optional_tlv(command_msg, tlv_request, mle.TlvRequest)
check_mle_optional_tlv(command_msg, mle_frame_counter, mle.MleFrameCounter)
check_mle_optional_tlv(command_msg, address_registration, mle.AddressRegistration)
check_mle_optional_tlv(command_msg, active_timestamp, mle.ActiveTimestamp)
check_mle_optional_tlv(command_msg, pending_timestamp, mle.PendingTimestamp)
check_mle_optional_tlv(command_msg, route64, mle.Route64)
check_tlv_request_tlv(command_msg, CheckType.CONTAIN, mle.TlvType.ADDRESS16)
check_tlv_request_tlv(command_msg, CheckType.CONTAIN, mle.TlvType.NETWORK_DATA)
def check_child_id_response(
command_msg,
route64=CheckType.OPTIONAL,
network_data=CheckType.OPTIONAL,
address_registration=CheckType.OPTIONAL,
active_timestamp=CheckType.OPTIONAL,
pending_timestamp=CheckType.OPTIONAL,
active_operational_dataset=CheckType.OPTIONAL,
pending_operational_dataset=CheckType.OPTIONAL,
network_data_check=None,
):
"""Verify a properly formatted Child Id Response command message.
"""
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
command_msg.assertMleMessageContainsTlv(mle.LeaderData)
command_msg.assertMleMessageContainsTlv(mle.Address16)
check_mle_optional_tlv(command_msg, route64, mle.Route64)
check_mle_optional_tlv(command_msg, network_data, mle.NetworkData)
check_mle_optional_tlv(command_msg, address_registration, mle.AddressRegistration)
check_mle_optional_tlv(command_msg, active_timestamp, mle.ActiveTimestamp)
check_mle_optional_tlv(command_msg, pending_timestamp, mle.PendingTimestamp)
check_mle_optional_tlv(command_msg, active_operational_dataset, mle.ActiveOperationalDataset)
check_mle_optional_tlv(command_msg, pending_operational_dataset, mle.PendingOperationalDataset)
if network_data_check is not None:
network_data_tlv = command_msg.assertMleMessageContainsTlv(mle.NetworkData)
network_data_check.check(network_data_tlv)
def check_prefix(prefix):
"""Verify if a prefix contains 6loWPAN sub-TLV and border router sub-TLV
"""
assert contains_tlv(prefix.sub_tlvs, network_data.BorderRouter), 'Prefix doesn\'t contain a border router sub-TLV!'
assert contains_tlv(prefix.sub_tlvs, network_data.LowpanId), 'Prefix doesn\'t contain a LowpanId sub-TLV!'
def check_child_update_request_from_child(
command_msg,
source_address=CheckType.OPTIONAL,
leader_data=CheckType.OPTIONAL,
challenge=CheckType.OPTIONAL,
time_out=CheckType.OPTIONAL,
address_registration=CheckType.OPTIONAL,
tlv_request_tlv=CheckType.OPTIONAL,
active_timestamp=CheckType.OPTIONAL,
CIDs=(),
):
command_msg.assertMleMessageContainsTlv(mle.Mode)
check_mle_optional_tlv(command_msg, source_address, mle.SourceAddress)
check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)
check_mle_optional_tlv(command_msg, challenge, mle.Challenge)
check_mle_optional_tlv(command_msg, time_out, mle.Timeout)
check_mle_optional_tlv(command_msg, address_registration, mle.AddressRegistration)
check_mle_optional_tlv(command_msg, tlv_request_tlv, mle.TlvRequest)
check_mle_optional_tlv(command_msg, active_timestamp, mle.ActiveTimestamp)
if (address_registration == CheckType.CONTAIN) and len(CIDs) > 0:
_check_address_registration(command_msg, CIDs)
def check_coap_optional_tlv(coap_msg, check_type, tlv):
    if check_type == CheckType.CONTAIN:
        coap_msg.assertCoapMessageContainsTlv(tlv)
    elif check_type == CheckType.NOT_CONTAIN:
        coap_msg.assertCoapMessageDoesNotContainTlv(tlv)
    elif check_type == CheckType.OPTIONAL:
        coap_msg.assertCoapMessageContainsOptionalTlv(tlv)
    else:
        raise ValueError("Invalid check type")
def check_router_id_cached(node, router_id, cached=True):
"""Verify if the node has cached any entries based on the router ID
"""
eidcaches = node.get_eidcaches()
if cached:
assert any(router_id == (int(rloc, 16) >> 10) for (_, rloc) in eidcaches)
else:
assert (any(router_id == (int(rloc, 16) >> 10) for (_, rloc) in eidcaches) is False)
def contains_tlv(sub_tlvs, tlv_type):
"""Verify if a specific type of tlv is included in a sub-tlv list.
"""
return any(isinstance(sub_tlv, tlv_type) for sub_tlv in sub_tlvs)
def contains_tlvs(sub_tlvs, tlv_types):
"""Verify if all types of tlv in a list are included in a sub-tlv list.
"""
return all((any(isinstance(sub_tlv, tlv_type) for sub_tlv in sub_tlvs)) for tlv_type in tlv_types)
def check_secure_mle_key_id_mode(command_msg, key_id_mode):
"""Verify if the mle command message sets the right key id mode.
"""
assert isinstance(command_msg.mle, mle.MleMessageSecured)
assert command_msg.mle.aux_sec_hdr.key_id_mode == key_id_mode
def check_data_response(command_msg, network_data_check=None, active_timestamp=CheckType.OPTIONAL):
"""Verify a properly formatted Data Response command message.
"""
check_secure_mle_key_id_mode(command_msg, 0x02)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
command_msg.assertMleMessageContainsTlv(mle.LeaderData)
check_mle_optional_tlv(command_msg, active_timestamp, mle.ActiveTimestamp)
if network_data_check is not None:
network_data_tlv = command_msg.assertMleMessageContainsTlv(mle.NetworkData)
network_data_check.check(network_data_tlv)
def check_child_update_request_from_parent(
command_msg,
leader_data=CheckType.OPTIONAL,
network_data=CheckType.OPTIONAL,
challenge=CheckType.OPTIONAL,
tlv_request=CheckType.OPTIONAL,
active_timestamp=CheckType.OPTIONAL,
):
"""Verify a properly formatted Child Update Request(from parent) command message.
"""
check_secure_mle_key_id_mode(command_msg, 0x02)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)
check_mle_optional_tlv(command_msg, network_data, mle.NetworkData)
check_mle_optional_tlv(command_msg, challenge, mle.Challenge)
check_mle_optional_tlv(command_msg, tlv_request, mle.TlvRequest)
check_mle_optional_tlv(command_msg, active_timestamp, mle.ActiveTimestamp)
def check_child_update_response(
command_msg,
timeout=CheckType.OPTIONAL,
address_registration=CheckType.OPTIONAL,
address16=CheckType.OPTIONAL,
leader_data=CheckType.OPTIONAL,
network_data=CheckType.OPTIONAL,
response=CheckType.OPTIONAL,
link_layer_frame_counter=CheckType.OPTIONAL,
mle_frame_counter=CheckType.OPTIONAL,
CIDs=(),
):
"""Verify a properly formatted Child Update Response from parent
"""
check_secure_mle_key_id_mode(command_msg, 0x02)
command_msg.assertMleMessageContainsTlv(mle.SourceAddress)
command_msg.assertMleMessageContainsTlv(mle.Mode)
check_mle_optional_tlv(command_msg, timeout, mle.Timeout)
check_mle_optional_tlv(command_msg, address_registration, mle.AddressRegistration)
check_mle_optional_tlv(command_msg, address16, mle.Address16)
check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)
check_mle_optional_tlv(command_msg, network_data, mle.NetworkData)
check_mle_optional_tlv(command_msg, response, mle.Response)
check_mle_optional_tlv(command_msg, link_layer_frame_counter, mle.LinkLayerFrameCounter)
check_mle_optional_tlv(command_msg, mle_frame_counter, mle.MleFrameCounter)
if (address_registration == CheckType.CONTAIN) and len(CIDs) > 0:
_check_address_registration(command_msg, CIDs)
def _check_address_registration(command_msg, CIDs=()):
addresses = command_msg.assertMleMessageContainsTlv(mle.AddressRegistration).addresses
for cid in CIDs:
found = False
for address in addresses:
if isinstance(address, mle.AddressCompressed):
if cid == address.cid:
found = True
break
assert found, "AddressRegistration TLV doesn't have CID {} ".format(cid)
def get_sub_tlv(tlvs, tlv_type):
for sub_tlv in tlvs:
if isinstance(sub_tlv, tlv_type):
return sub_tlv
def check_address_registration_tlv(
command_msg,
full_address,
):
"""Check whether or not a full IPv6 address in AddressRegistrationTlv.
"""
found = False
addr = ipaddress.ip_address(full_address)
addresses = command_msg.assertMleMessageContainsTlv(mle.AddressRegistration).addresses
for item in addresses:
if isinstance(item, mle.AddressFull) and ipaddress.ip_address(item.ipv6_address) == addr:
found = True
break
return found
def check_compressed_address_registration_tlv(command_msg, cid, iid, cid_present_once=False):
    '''Check whether a compressed IPv6 address is present in the AddressRegistration TLV.
    Note: only the iid part of the address is compared.
    Args:
        command_msg (MleMessage): The MLE message to check.
        cid (int): The context id of the domain prefix.
        iid (string): The Interface Identifier.
        cid_present_once (boolean): True if the cid entry should appear only
            once in the AddressRegistration TLV, False otherwise.
'''
found = False
cid_cnt = 0
addresses = command_msg.assertMleMessageContainsTlv(mle.AddressRegistration).addresses
for item in addresses:
if isinstance(item, mle.AddressCompressed):
if cid == item.cid:
cid_cnt = cid_cnt + 1
if iid == item.iid.hex():
found = True
break
assert found, 'Error: Expected (cid, iid):({},{}) Not Found'.format(cid, iid)
    assert cid_present_once == (cid_cnt == 1), \
        'Error: Expected cid to appear {} but it appears {} time(s)'.format(
            'exactly once' if cid_present_once else 'more or less than once', cid_cnt)
def assert_contains_tlv(tlvs, check_type, tlv_type):
"""Assert a tlv list contains specific tlv and return the first qualified.
"""
tlvs = [tlv for tlv in tlvs if isinstance(tlv, tlv_type)]
if check_type is CheckType.CONTAIN:
assert tlvs
return tlvs[0]
elif check_type is CheckType.NOT_CONTAIN:
assert not tlvs
return None
elif check_type is CheckType.OPTIONAL:
return None
else:
raise ValueError("Invalid check type: {}".format(check_type))
def check_discovery_request(command_msg, thread_version: str = None):
"""Verify a properly formatted Thread Discovery Request command message.
"""
assert not isinstance(command_msg.mle, mle.MleMessageSecured)
tlvs = command_msg.assertMleMessageContainsTlv(mle.ThreadDiscovery).tlvs
request = assert_contains_tlv(tlvs, CheckType.CONTAIN, mesh_cop.DiscoveryRequest)
assert not thread_version or thread_version in ['1.1', '1.2']
if thread_version == '1.1':
assert request.version == config.THREAD_VERSION_1_1
elif thread_version == '1.2':
assert request.version == config.THREAD_VERSION_1_2
def check_discovery_response(command_msg,
request_src_addr,
steering_data=CheckType.OPTIONAL,
thread_version: str = None):
"""Verify a properly formatted Thread Discovery Response command message.
"""
assert not isinstance(command_msg.mle, mle.MleMessageSecured)
assert (command_msg.mac_header.src_address.type == common.MacAddressType.LONG)
assert command_msg.mac_header.dest_address == request_src_addr
tlvs = command_msg.assertMleMessageContainsTlv(mle.ThreadDiscovery).tlvs
response = assert_contains_tlv(tlvs, CheckType.CONTAIN, mesh_cop.DiscoveryResponse)
assert not thread_version or thread_version in ['1.1', '1.2']
if thread_version == '1.1':
assert response.version == config.THREAD_VERSION_1_1
elif thread_version == '1.2':
assert response.version == config.THREAD_VERSION_1_2
assert_contains_tlv(tlvs, CheckType.CONTAIN, mesh_cop.ExtendedPanid)
assert_contains_tlv(tlvs, CheckType.CONTAIN, mesh_cop.NetworkName)
assert_contains_tlv(tlvs, steering_data, mesh_cop.SteeringData)
assert_contains_tlv(tlvs, steering_data, mesh_cop.JoinerUdpPort)
check_type = (CheckType.CONTAIN if response.native_flag else CheckType.OPTIONAL)
assert_contains_tlv(tlvs, check_type, mesh_cop.CommissionerUdpPort)
def get_joiner_udp_port_in_discovery_response(command_msg):
"""Get the udp port specified in a DISCOVERY RESPONSE message
"""
tlvs = command_msg.assertMleMessageContainsTlv(mle.ThreadDiscovery).tlvs
udp_port_tlv = assert_contains_tlv(tlvs, CheckType.CONTAIN, mesh_cop.JoinerUdpPort)
return udp_port_tlv.udp_port
def check_joiner_commissioning_messages(commissioning_messages, url=''):
"""Verify COAP messages sent by joiner while commissioning process.
"""
print(commissioning_messages)
assert len(commissioning_messages) >= 4
join_fin_req = commissioning_messages[0]
assert join_fin_req.type == mesh_cop.MeshCopMessageType.JOIN_FIN_REQ
if url:
provisioning_url = assert_contains_tlv(join_fin_req.tlvs, CheckType.CONTAIN, mesh_cop.ProvisioningUrl)
assert url == provisioning_url.url
else:
assert_contains_tlv(join_fin_req.tlvs, CheckType.NOT_CONTAIN, mesh_cop.ProvisioningUrl)
join_ent_rsp = commissioning_messages[3]
assert join_ent_rsp.type == mesh_cop.MeshCopMessageType.JOIN_ENT_RSP
def check_commissioner_commissioning_messages(commissioning_messages, state=mesh_cop.MeshCopState.ACCEPT):
"""Verify COAP messages sent by commissioner while commissioning process.
"""
assert len(commissioning_messages) >= 2
join_fin_rsq = commissioning_messages[1]
assert join_fin_rsq.type == mesh_cop.MeshCopMessageType.JOIN_FIN_RSP
rsq_state = assert_contains_tlv(join_fin_rsq.tlvs, CheckType.CONTAIN, mesh_cop.State)
assert rsq_state.state == state
def check_joiner_router_commissioning_messages(commissioning_messages):
"""Verify COAP messages sent by joiner router while commissioning process.
"""
if len(commissioning_messages) >= 4:
join_ent_ntf = commissioning_messages[2]
else:
join_ent_ntf = commissioning_messages[0]
assert join_ent_ntf.type == mesh_cop.MeshCopMessageType.JOIN_ENT_NTF
return None
def check_payload_same(tp1, tp2):
"""Verfiy two payloads are totally the same.
A payload is a tuple of tlvs.
"""
assert len(tp1) == len(tp2)
for tlv in tp2:
peer_tlv = get_sub_tlv(tp1, type(tlv))
assert (peer_tlv is not None and
peer_tlv == tlv), 'peer_tlv:{}, tlv:{} type:{}'.format(peer_tlv, tlv, type(tlv))
def check_coap_message(msg, payloads, dest_addrs=None):
if dest_addrs is not None:
found = False
for dest in dest_addrs:
if msg.ipv6_packet.ipv6_header.destination_address == dest:
found = True
break
assert found, 'Destination address incorrect'
check_payload_same(msg.coap.payload, payloads)
class SinglePrefixCheck:
def __init__(self, prefix=None, border_router_16=None):
self._prefix = prefix
self._border_router_16 = border_router_16
def check(self, prefix_tlv):
border_router_tlv = assert_contains_tlv(prefix_tlv.sub_tlvs, CheckType.CONTAIN, network_data.BorderRouter)
assert_contains_tlv(prefix_tlv.sub_tlvs, CheckType.CONTAIN, network_data.LowpanId)
result = True
if self._prefix is not None:
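            # Note: self._prefix is compared in hexlified form, so callers
            # are expected to pass the prefix as hex bytes.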
result &= self._prefix == binascii.hexlify(prefix_tlv.prefix)
if self._border_router_16 is not None:
result &= (self._border_router_16 == border_router_tlv.border_router_16)
return result
class PrefixesCheck:
def __init__(self, prefix_cnt=0, prefix_check_list=()):
self._prefix_cnt = prefix_cnt
self._prefix_check_list = prefix_check_list
def check(self, prefix_tlvs):
# if prefix_cnt is given, then check count only
if self._prefix_cnt > 0:
assert (len(prefix_tlvs) >= self._prefix_cnt), 'prefix count is less than expected'
else:
for prefix_check in self._prefix_check_list:
found = False
for prefix_tlv in prefix_tlvs:
if prefix_check.check(prefix_tlv):
found = True
break
assert found, 'Some prefix is absent: {}'.format(prefix_check)
class CommissioningDataCheck:
def __init__(self, stable=None, sub_tlv_type_list=()):
self._stable = stable
self._sub_tlv_type_list = sub_tlv_type_list
def check(self, commissioning_data_tlv):
if self._stable is not None:
assert (self._stable == commissioning_data_tlv.stable), 'Commissioning Data stable flag is not correct'
assert contains_tlvs(commissioning_data_tlv.sub_tlvs,
self._sub_tlv_type_list), 'Some sub tlvs are missing in Commissioning Data'
class NetworkDataCheck:
def __init__(self, prefixes_check=None, commissioning_data_check=None):
self._prefixes_check = prefixes_check
self._commissioning_data_check = commissioning_data_check
def check(self, network_data_tlv):
if self._prefixes_check is not None:
prefix_tlvs = [tlv for tlv in network_data_tlv.tlvs if isinstance(tlv, network_data.Prefix)]
self._prefixes_check.check(prefix_tlvs)
if self._commissioning_data_check is not None:
commissioning_data_tlv = assert_contains_tlv(
network_data_tlv.tlvs,
CheckType.CONTAIN,
network_data.CommissioningData,
)
self._commissioning_data_check.check(commissioning_data_tlv)
|
|
# """EasyEngine site controller."""
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.variables import EEVariables
from ee.core.domainvalidate import ValidateDomain
from ee.core.fileutils import EEFileUtils
from ee.cli.plugins.site_functions import *
from ee.core.services import EEService
from ee.cli.plugins.sitedb import *
from ee.core.git import EEGit
from subprocess import Popen
from ee.core.nginxhashbucket import hashbucket
from ee.core.logging import Log
from ee.core.shellexec import EEShellExec, CommandExecutionError
import sys
import os
import glob
import subprocess
def ee_site_hook(app):
# do something with the ``app`` object here.
from ee.core.database import init_db
import ee.cli.plugins.models
init_db(app)
class EESiteController(CementBaseController):
class Meta:
label = 'site'
stacked_on = 'base'
stacked_type = 'nested'
description = ('Performs website specific operations')
arguments = [
(['site_name'],
dict(help='Website name', nargs='?')),
]
usage = "ee site (command) <site_name> [options]"
@expose(hide=True)
def default(self):
self.app.args.print_help()
@expose(help="Enable site example.com")
def enable(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'could not input site name')
self.app.pargs.site_name = self.app.pargs.site_name.strip()
# validate domain name
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
# check if site exists
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(ee_domain)):
Log.info(self, "Enable domain {0:10} \t".format(ee_domain), end='')
EEFileUtils.create_symlink(self,
['/etc/nginx/sites-available/{0}'
.format(ee_domain),
'/etc/nginx/sites-enabled/{0}'
.format(ee_domain)])
EEGit.add(self, ["/etc/nginx"],
msg="Enabled {0} "
.format(ee_domain))
updateSiteInfo(self, ee_domain, enabled=True)
Log.info(self, "[" + Log.ENDC + "OK" + Log.OKBLUE + "]")
if not EEService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
else:
Log.error(self, "nginx configuration file does not exist"
.format(ee_domain))
@expose(help="Disable site example.com")
def disable(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'could not input site name')
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
# check if site exists
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(ee_domain)):
Log.info(self, "Disable domain {0:10} \t"
.format(ee_domain), end='')
if not os.path.isfile('/etc/nginx/sites-enabled/{0}'
.format(ee_domain)):
Log.debug(self, "Site {0} already disabled".format(ee_domain))
Log.info(self, "[" + Log.FAIL + "Failed" + Log.OKBLUE+"]")
else:
EEFileUtils.remove_symlink(self,
'/etc/nginx/sites-enabled/{0}'
.format(ee_domain))
EEGit.add(self, ["/etc/nginx"],
msg="Disabled {0} "
.format(ee_domain))
updateSiteInfo(self, ee_domain, enabled=False)
Log.info(self, "[" + Log.ENDC + "OK" + Log.OKBLUE + "]")
if not EEService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
else:
Log.error(self, "nginx configuration file does not exist"
.format(ee_domain))
@expose(help="Get example.com information")
def info(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'could not input site name')
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
ee_db_name = ''
ee_db_user = ''
ee_db_pass = ''
hhvm = ''
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(ee_domain)):
siteinfo = getSiteInfo(self, ee_domain)
sitetype = siteinfo.site_type
cachetype = siteinfo.cache_type
ee_site_webroot = siteinfo.site_path
access_log = (ee_site_webroot + '/logs/access.log')
error_log = (ee_site_webroot + '/logs/error.log')
ee_db_name = siteinfo.db_name
ee_db_user = siteinfo.db_user
ee_db_pass = siteinfo.db_password
ee_db_host = siteinfo.db_host
if sitetype != "html":
hhvm = ("enabled" if siteinfo.is_hhvm else "disabled")
if sitetype == "proxy":
access_log = "/var/log/nginx/{0}.access.log".format(ee_domain)
error_log = "/var/log/nginx/{0}.error.log".format(ee_domain)
ee_site_webroot = ''
pagespeed = ("enabled" if siteinfo.is_pagespeed else "disabled")
data = dict(domain=ee_domain, webroot=ee_site_webroot,
accesslog=access_log, errorlog=error_log,
dbname=ee_db_name, dbuser=ee_db_user,
dbpass=ee_db_pass, hhvm=hhvm, pagespeed=pagespeed,
type=sitetype + " " + cachetype + " ({0})"
.format("enabled" if siteinfo.is_enabled else
"disabled"))
self.app.render((data), 'siteinfo.mustache')
else:
Log.error(self, "nginx configuration file does not exist"
.format(ee_domain))
@expose(help="Monitor example.com logs")
def log(self):
        self.app.pargs.site_name = self.app.pargs.site_name.strip()
        (ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
        if not check_domain_exists(self, ee_domain):
            Log.error(self, "site {0} does not exist".format(ee_domain))
        # Look up the webroot only after confirming the site exists.
        ee_site_webroot = getSiteInfo(self, ee_domain).site_path
        logfiles = glob.glob(ee_site_webroot + '/logs/*.log')
        if logfiles:
            logwatch(self, logfiles)
@expose(help="Display Nginx configuration of example.com")
def show(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'could not input site name')
# TODO Write code for ee site edit command here
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(ee_domain)):
Log.info(self, "Display NGINX configuration for {0}"
.format(ee_domain))
            with open('/etc/nginx/sites-available/{0}'.format(ee_domain),
                      encoding='utf-8', mode='r') as f:
                text = f.read()
            Log.info(self, Log.ENDC + text)
else:
Log.error(self, "nginx configuration file does not exists"
.format(ee_domain))
@expose(help="Change directory to site webroot")
def cd(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'Unable to read input, please try again')
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
ee_site_webroot = getSiteInfo(self, ee_domain).site_path
EEFileUtils.chdir(self, ee_site_webroot)
try:
subprocess.call(['bash'])
except OSError as e:
Log.debug(self, "{0}{1}".format(e.errno, e.strerror))
Log.error(self, "unable to change directory")
class EESiteEditController(CementBaseController):
class Meta:
label = 'edit'
stacked_on = 'site'
stacked_type = 'nested'
description = ('Edit Nginx configuration of site')
arguments = [
(['site_name'],
dict(help='domain name for the site',
nargs='?')),
(['--pagespeed'],
dict(help="edit pagespeed configuration for site",
action='store_true')),
]
@expose(hide=True)
def default(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'Unable to read input, Please try again')
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
ee_site_webroot = EEVariables.ee_webroot + ee_domain
if not self.app.pargs.pagespeed:
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(ee_domain)):
try:
EEShellExec.invoke_editor(self, '/etc/nginx/sites-availa'
'ble/{0}'.format(ee_domain))
except CommandExecutionError as e:
Log.error(self, "Failed invoke editor")
if (EEGit.checkfilestatus(self, "/etc/nginx",
'/etc/nginx/sites-available/{0}'.format(ee_domain))):
EEGit.add(self, ["/etc/nginx"], msg="Edit website: {0}"
.format(ee_domain))
# Reload NGINX
if not EEService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
else:
Log.error(self, "nginx configuration file does not exists"
.format(ee_domain))
elif self.app.pargs.pagespeed:
if os.path.isfile('{0}/conf/nginx/pagespeed.conf'
.format(ee_site_webroot)):
try:
EEShellExec.invoke_editor(self, '{0}/conf/nginx/'
'pagespeed.conf'
.format(ee_site_webroot))
except CommandExecutionError as e:
Log.error(self, "Failed invoke editor")
if (EEGit.checkfilestatus(self, "{0}/conf/nginx"
.format(ee_site_webroot),
'{0}/conf/nginx/pagespeed.conf'.format(ee_site_webroot))):
EEGit.add(self, ["{0}/conf/nginx".format(ee_site_webroot)],
msg="Edit Pagespped config of site: {0}"
.format(ee_domain))
# Reload NGINX
if not EEService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
else:
Log.error(self, "PageSpeed configuration file for {0} does not exist"
.format(ee_domain))
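# Example (hypothetical domain): `ee site edit example.com` opens the
# vhost file in the configured editor, commits the change to the
# /etc/nginx git repository if the file changed, and reloads nginx;
# with --pagespeed it edits {webroot}/conf/nginx/pagespeed.conf instead.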
class EESiteCreateController(CementBaseController):
class Meta:
label = 'create'
stacked_on = 'site'
stacked_type = 'nested'
description = ('this command sets up configuration and installs '
'required files as per the options provided')
arguments = [
(['site_name'],
dict(help='domain name for the site to be created.',
nargs='?')),
(['--html'],
dict(help="create html site", action='store_true')),
(['--php'],
dict(help="create php site", action='store_true')),
(['--mysql'],
dict(help="create mysql site", action='store_true')),
(['--wp'],
dict(help="create wordpress single site",
action='store_true')),
(['--wpsubdir'],
dict(help="create wordpress multisite with subdirectory setup",
action='store_true')),
(['--wpsubdomain'],
dict(help="create wordpress multisite with subdomain setup",
action='store_true')),
(['--w3tc'],
dict(help="create wordpress single/multi site with w3tc cache",
action='store_true')),
(['--wpfc'],
dict(help="create wordpress single/multi site with wpfc cache",
action='store_true')),
(['--wpsc'],
dict(help="create wordpress single/multi site with wpsc cache",
action='store_true')),
(['--hhvm'],
dict(help="create HHVM site", action='store_true')),
(['--pagespeed'],
dict(help="create pagespeed site", action='store_true')),
(['--user'],
dict(help="provide user for wordpress site")),
(['--email'],
dict(help="provide email address for wordpress site")),
(['--pass'],
dict(help="provide password for wordpress user",
dest='wppass')),
(['--proxy'],
dict(help="create proxy for site", nargs='+')),
(['--experimental'],
dict(help="Enable experimental packages without prompt",
action='store_true')),
]
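# Illustrative invocations (example.com is a placeholder; flags map
# to the arguments declared above):
#   ee site create example.com --html
#   ee site create example.com --wp --wpfc
#   ee site create example.com --proxy 127.0.0.1:8080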
@expose(hide=True)
def default(self):
# self.app.render((data), 'default.mustache')
# Check domain name validation
data = dict()
host, port = None, None
try:
stype, cache = detSitePar(vars(self.app.pargs))
except RuntimeError as e:
Log.debug(self, str(e))
Log.error(self, "Please provide valid options to create the site")
if stype is None and self.app.pargs.proxy:
stype, cache = 'proxy', ''
proxyinfo = self.app.pargs.proxy[0].strip()
if not proxyinfo:
Log.error(self, "Please provide proxy server host information")
proxyinfo = proxyinfo.split(':')
host = proxyinfo[0].strip()
port = '80' if len(proxyinfo) < 2 else proxyinfo[1].strip()
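# Illustration (hypothetical values): `--proxy 127.0.0.1:8080` yields
# host='127.0.0.1' and port='8080'; a bare `--proxy 127.0.0.1` falls
# back to port '80'.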
elif stype is None and not self.app.pargs.proxy:
stype, cache = 'html', 'basic'
elif stype and self.app.pargs.proxy:
Log.error(self, "proxy should not be used with other site types")
if (self.app.pargs.proxy and (self.app.pargs.pagespeed
or self.app.pargs.hhvm)):
Log.error(self, "Proxy site cannot run with PageSpeed or HHVM")
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
# preprocessing before finalize site name
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.debug(self, str(e))
Log.error(self, "Unable to read site name input, please try again!")
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
if not ee_domain.strip():
Log.error(self, "Invalid domain name, "
"please provide a valid domain name")
ee_site_webroot = EEVariables.ee_webroot + ee_domain
if check_domain_exists(self, ee_domain):
Log.error(self, "site {0} already exists".format(ee_domain))
elif os.path.isfile('/etc/nginx/sites-available/{0}'
.format(ee_domain)):
Log.error(self, "Nginx configuration /etc/nginx/sites-available/"
"{0} already exists".format(ee_domain))
if stype == 'proxy':
data['site_name'] = ee_domain
data['www_domain'] = ee_www_domain
data['proxy'] = True
data['host'] = host
data['port'] = port
ee_site_webroot = ""
if stype in ['html', 'php']:
data = dict(site_name=ee_domain, www_domain=ee_www_domain,
static=True, basic=False, wp=False, w3tc=False,
wpfc=False, wpsc=False, multisite=False,
wpsubdir=False, webroot=ee_site_webroot)
if stype == 'php':
data['static'] = False
data['basic'] = True
elif stype in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
data = dict(site_name=ee_domain, www_domain=ee_www_domain,
static=False, basic=True, wp=False, w3tc=False,
wpfc=False, wpsc=False, multisite=False,
wpsubdir=False, webroot=ee_site_webroot,
ee_db_name='', ee_db_user='', ee_db_pass='',
ee_db_host='')
if stype in ['wp', 'wpsubdir', 'wpsubdomain']:
data['wp'] = True
data['basic'] = False
data[cache] = True
data['wp-user'] = self.app.pargs.user
data['wp-email'] = self.app.pargs.email
data['wp-pass'] = self.app.pargs.wppass
if stype in ['wpsubdir', 'wpsubdomain']:
data['multisite'] = True
if stype == 'wpsubdir':
data['wpsubdir'] = True
if stype == "html" and self.app.pargs.hhvm:
Log.error(self, "Cannot create HTML site with HHVM")
if data and self.app.pargs.hhvm:
if (not self.app.pargs.experimental):
Log.info(self, "HHVM is an experimental feature and it may not "
"work with all plugins of your site.\nYou can "
"disable it by passing --hhvm=off later.\nDo you wish"
" to enable HHVM now for {0}?".format(ee_domain))
# Check prompt
check_prompt = input("Type \"y\" to continue [n]:")
if check_prompt != "Y" and check_prompt != "y":
Log.info(self, "Not using HHVM for site.")
data['hhvm'] = False
hhvm = 0
self.app.pargs.hhvm = False
else:
data['hhvm'] = True
hhvm = 1
else:
data['hhvm'] = True
hhvm = 1
elif data:
data['hhvm'] = False
hhvm = 0
if data and self.app.pargs.pagespeed:
if (not self.app.pargs.experimental):
Log.info(self, "PageSpeed is an experimental feature and it may not "
"work with all CSS/JS/Cache of your site.\nYou can "
"disable it by passing --pagespeed=off later.\nDo you wish"
" to enable PageSpeed now for {0}?".format(ee_domain))
# Check prompt
check_prompt = input("Type \"y\" to continue [n]:")
if check_prompt != "Y" and check_prompt != "y":
Log.info(self, "Not using PageSpeed for site.")
data['pagespeed'] = False
pagespeed = 0
self.app.pargs.pagespeed = False
else:
data['pagespeed'] = True
pagespeed = 1
else:
data['pagespeed'] = True
pagespeed = 1
elif data:
data['pagespeed'] = False
pagespeed = 0
# Check whether required packages are installed
ee_auth = site_package_check(self, stype)
try:
pre_run_checks(self)
except SiteError as e:
Log.debug(self, str(e))
Log.error(self, "NGINX configuration check failed.")
try:
try:
# setup NGINX configuration, and webroot
setupdomain(self, data)
# Fix Nginx Hashbucket size error
hashbucket(self)
except SiteError as e:
# call cleanup actions on failure
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain,
webroot=data['webroot'])
Log.debug(self, str(e))
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
if 'proxy' in data.keys() and data['proxy']:
addNewSite(self, ee_domain, stype, cache, ee_site_webroot)
# Service Nginx Reload
if not EEService.reload_service(self, 'nginx'):
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain)
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
if ee_auth and len(ee_auth):
for msg in ee_auth:
Log.info(self, Log.ENDC + msg, log=False)
Log.info(self, "Successfully created site"
" http://{0}".format(ee_domain))
return
# Update pagespeed config
if self.app.pargs.pagespeed:
operateOnPagespeed(self, data)
addNewSite(self, ee_domain, stype, cache, ee_site_webroot,
hhvm=hhvm, pagespeed=pagespeed)
# Setup database for MySQL site
if 'ee_db_name' in data.keys() and not data['wp']:
try:
data = setupdatabase(self, data)
# Add database information for site into database
updateSiteInfo(self, ee_domain, db_name=data['ee_db_name'],
db_user=data['ee_db_user'],
db_password=data['ee_db_pass'],
db_host=data['ee_db_host'])
except SiteError as e:
# call cleanup actions on failure
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain,
webroot=data['webroot'],
dbname=data['ee_db_name'],
dbuser=data['ee_db_user'],
dbhost=data['ee_db_host'])
deleteSiteInfo(self, ee_domain)
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
try:
eedbconfig = open("{0}/ee-config.php"
.format(ee_site_webroot),
encoding='utf-8', mode='w')
eedbconfig.write("<?php \ndefine('DB_NAME', '{0}');"
"\ndefine('DB_USER', '{1}'); "
"\ndefine('DB_PASSWORD', '{2}');"
"\ndefine('DB_HOST', '{3}');\n?>"
.format(data['ee_db_name'],
data['ee_db_user'],
data['ee_db_pass'],
data['ee_db_host']))
eedbconfig.close()
stype = 'mysql'
except IOError as e:
Log.debug(self, str(e))
Log.debug(self, "Error occurred while generating "
"ee-config.php")
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain,
webroot=data['webroot'],
dbname=data['ee_db_name'],
dbuser=data['ee_db_user'],
dbhost=data['ee_db_host'])
deleteSiteInfo(self, ee_domain)
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
# Setup WordPress if this is a WordPress site
if data['wp']:
try:
ee_wp_creds = setupwordpress(self, data)
# Add database information for site into database
updateSiteInfo(self, ee_domain, db_name=data['ee_db_name'],
db_user=data['ee_db_user'],
db_password=data['ee_db_pass'],
db_host=data['ee_db_host'])
except SiteError as e:
# call cleanup actions on failure
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain,
webroot=data['webroot'],
dbname=data['ee_db_name'],
dbuser=data['ee_db_user'],
dbhost=data['ee_db_host'])
deleteSiteInfo(self, ee_domain)
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
# Reload nginx; call cleanup if the reload fails
if not EEService.reload_service(self, 'nginx'):
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain,
webroot=data['webroot'])
if 'ee_db_name' in data.keys():
doCleanupAction(self, domain=ee_domain,
dbname=data['ee_db_name'],
dbuser=data['ee_db_user'],
dbhost=data['ee_db_host'])
deleteSiteInfo(self, ee_domain)
Log.info(self, Log.FAIL + "service nginx reload failed."
" check issues with `nginx -t` command.")
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
EEGit.add(self, ["/etc/nginx"],
msg="{0} created with {1} {2}"
.format(ee_www_domain, stype, cache))
# Setup Permissions for webroot
try:
setwebrootpermissions(self, data['webroot'])
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Oops! Something went wrong!")
Log.info(self, Log.FAIL + "Calling cleanup actions ...")
doCleanupAction(self, domain=ee_domain,
webroot=data['webroot'])
if 'ee_db_name' in data.keys():
Log.debug(self, "Inside db cleanup")
doCleanupAction(self, domain=ee_domain,
dbname=data['ee_db_name'],
dbuser=data['ee_db_user'],
dbhost=data['ee_db_host'])
deleteSiteInfo(self, ee_domain)
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
if ee_auth and len(ee_auth):
for msg in ee_auth:
Log.info(self, Log.ENDC + msg, log=False)
if data['wp']:
Log.info(self, Log.ENDC + "WordPress admin user :"
" {0}".format(ee_wp_creds['wp_user']), log=False)
Log.info(self, Log.ENDC + "WordPress admin user password : {0}"
.format(ee_wp_creds['wp_pass']), log=False)
display_cache_settings(self, data)
Log.info(self, "Successfully created site"
" http://{0}".format(ee_domain))
except SiteError as e:
Log.error(self, "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
class EESiteUpdateController(CementBaseController):
class Meta:
label = 'update'
stacked_on = 'site'
stacked_type = 'nested'
description = ('This command updates website configuration '
'as per the options provided')
arguments = [
(['site_name'],
dict(help='domain name for the site to be updated',
nargs='?')),
(['--password'],
dict(help="update to password for wordpress site user",
action='store_true')),
(['--html'],
dict(help="update to html site", action='store_true')),
(['--php'],
dict(help="update to php site", action='store_true')),
(['--mysql'],
dict(help="update to mysql site", action='store_true')),
(['--wp'],
dict(help="update to wordpress single site",
action='store_true')),
(['--wpsubdir'],
dict(help="update to wpsubdir site", action='store_true')),
(['--wpsubdomain'],
dict(help="update to wpsubdomain site", action='store_true')),
(['--w3tc'],
dict(help="update to w3tc cache", action='store_true')),
(['--wpfc'],
dict(help="update to wpfc cache", action='store_true')),
(['--wpsc'],
dict(help="update to wpsc cache", action='store_true')),
(['--hhvm'],
dict(help='Use HHVM for site',
action='store',
choices=('on', 'off'), const='on', nargs='?')),
(['--pagespeed'],
dict(help='Use PageSpeed for site',
action='store',
choices=('on', 'off'), const='on', nargs='?')),
(['--proxy'],
dict(help="update to proxy site", nargs='+')),
(['--experimental'],
dict(help="Enable experimental packages without prompt",
action='store_true')),
(['--all'],
dict(help="update all sites", action='store_true')),
]
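# Illustrative invocations (example.com is a placeholder):
#   ee site update example.com --wp --w3tc      # switch type/cache
#   ee site update example.com --hhvm=off       # toggle HHVM only
#   ee site update --all --pagespeed=on         # walk every site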
@expose(help="Update site type or cache")
def default(self):
pargs = self.app.pargs
if pargs.all:
if pargs.site_name:
Log.error(self, "`--all` option cannot be used when a site name"
" is provided")
if pargs.html:
Log.error(self, "No site can be updated to html")
if not (pargs.php or
pargs.mysql or pargs.wp or pargs.wpsubdir or
pargs.wpsubdomain or pargs.w3tc or pargs.wpfc or
pargs.wpsc or pargs.hhvm or pargs.pagespeed):
Log.error(self, "Please provide options to update sites.")
if pargs.all:
sites = getAllsites(self)
if sites:
for site in sites:
pargs.site_name = site.sitename
Log.info(self, Log.ENDC + Log.BOLD + "Updating site {0},"
" please wait..."
.format(pargs.site_name))
self.doupdatesite(pargs)
print("\n")
else:
self.doupdatesite(pargs)
def doupdatesite(self, pargs):
hhvm = None
pagespeed = None
data = dict()
try:
stype, cache = detSitePar(vars(pargs))
except RuntimeError as e:
Log.debug(self, str(e))
Log.error(self, "Please provide a valid combination of options"
" for site update")
if stype is None and pargs.proxy:
stype, cache = 'proxy', ''
proxyinfo = pargs.proxy[0].strip()
if not proxyinfo:
Log.error(self, "Please provide proxy server host information")
proxyinfo = proxyinfo.split(':')
host = proxyinfo[0].strip()
port = '80' if len(proxyinfo) < 2 else proxyinfo[1].strip()
elif stype is None and not pargs.proxy:
stype, cache = 'html', 'basic'
elif stype and pargs.proxy:
Log.error(self, "--proxy cannot be used with other site types")
if (pargs.proxy and (pargs.pagespeed or pargs.hhvm)):
Log.error(self, "Proxy site cannot run with PageSpeed or HHVM")
if not pargs.site_name:
try:
while not pargs.site_name:
pargs.site_name = (input('Enter site name : ').strip())
except IOError as e:
Log.error(self, 'Unable to input site name, please try again!')
pargs.site_name = pargs.site_name.strip()
(ee_domain,
ee_www_domain, ) = ValidateDomain(pargs.site_name)
ee_site_webroot = EEVariables.ee_webroot + ee_domain
check_site = getSiteInfo(self, ee_domain)
if check_site is None:
Log.error(self, "Site {0} does not exist.".format(ee_domain))
else:
oldsitetype = check_site.site_type
oldcachetype = check_site.cache_type
old_hhvm = check_site.is_hhvm
old_pagespeed = check_site.is_pagespeed
if (pargs.password and not (pargs.html or
pargs.php or pargs.mysql or pargs.wp or
pargs.w3tc or pargs.wpfc or pargs.wpsc
or pargs.wpsubdir or pargs.wpsubdomain)):
try:
updatewpuserpassword(self, ee_domain, ee_site_webroot)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, "Password Unchanged.")
return 0
if (stype == "proxy" and stype == oldsitetype and
(pargs.hhvm or pargs.pagespeed)):
Log.info(self, Log.FAIL +
"Cannot update proxy site to HHVM or PageSpeed")
return 1
if stype == "html" and stype == oldsitetype and pargs.hhvm:
Log.info(self, Log.FAIL + "Cannot update HTML site to HHVM")
return 1
if ((stype == 'php' and oldsitetype not in ['html', 'proxy']) or
(stype == 'mysql' and oldsitetype not in ['html', 'php',
'proxy']) or
(stype == 'wp' and oldsitetype not in ['html', 'php', 'mysql',
'proxy', 'wp']) or
(stype == 'wpsubdir' and oldsitetype in ['wpsubdomain']) or
(stype == 'wpsubdomain' and oldsitetype in ['wpsubdir']) or
(stype == oldsitetype and cache == oldcachetype) and
not pargs.pagespeed):
Log.info(self, Log.FAIL + "Cannot update {0} {1} to {2} {3}".
format(oldsitetype, oldcachetype, stype, cache))
return 1
if stype == 'proxy':
data['site_name'] = ee_domain
data['www_domain'] = ee_www_domain
data['proxy'] = True
data['host'] = host
data['port'] = port
pagespeed = False
hhvm = False
data['webroot'] = ee_site_webroot
data['currsitetype'] = oldsitetype
data['currcachetype'] = oldcachetype
if stype == 'php':
data = dict(site_name=ee_domain, www_domain=ee_www_domain,
static=False, basic=True, wp=False, w3tc=False,
wpfc=False, wpsc=False, multisite=False,
wpsubdir=False, webroot=ee_site_webroot,
currsitetype=oldsitetype, currcachetype=oldcachetype)
elif stype in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
data = dict(site_name=ee_domain, www_domain=ee_www_domain,
static=False, basic=True, wp=False, w3tc=False,
wpfc=False, wpsc=False, multisite=False,
wpsubdir=False, webroot=ee_site_webroot,
ee_db_name='', ee_db_user='', ee_db_pass='',
ee_db_host='',
currsitetype=oldsitetype, currcachetype=oldcachetype)
if stype in ['wp', 'wpsubdir', 'wpsubdomain']:
data['wp'] = True
data['basic'] = False
data[cache] = True
if stype in ['wpsubdir', 'wpsubdomain']:
data['multisite'] = True
if stype == 'wpsubdir':
data['wpsubdir'] = True
if pargs.pagespeed or pargs.hhvm:
if not data:
data = dict(site_name=ee_domain, www_domain=ee_www_domain,
currsitetype=oldsitetype,
currcachetype=oldcachetype,
webroot=ee_site_webroot)
stype = oldsitetype
cache = oldcachetype
if oldsitetype == 'html' or oldsitetype == 'proxy':
data['static'] = True
data['wp'] = False
data['multisite'] = False
data['wpsubdir'] = False
elif oldsitetype == 'php' or oldsitetype == 'mysql':
data['static'] = False
data['wp'] = False
data['multisite'] = False
data['wpsubdir'] = False
elif oldsitetype == 'wp':
data['static'] = False
data['wp'] = True
data['multisite'] = False
data['wpsubdir'] = False
elif oldsitetype == 'wpsubdir':
data['static'] = False
data['wp'] = True
data['multisite'] = True
data['wpsubdir'] = True
elif oldsitetype == 'wpsubdomain':
data['static'] = False
data['wp'] = True
data['multisite'] = True
data['wpsubdir'] = False
if oldcachetype == 'basic':
data['basic'] = True
data['w3tc'] = False
data['wpfc'] = False
data['wpsc'] = False
elif oldcachetype == 'w3tc':
data['basic'] = False
data['w3tc'] = True
data['wpfc'] = False
data['wpsc'] = False
elif oldcachetype == 'wpfc':
data['basic'] = False
data['w3tc'] = False
data['wpfc'] = True
data['wpsc'] = False
elif oldcachetype == 'wpsc':
data['basic'] = False
data['w3tc'] = False
data['wpfc'] = False
data['wpsc'] = True
if pargs.hhvm != 'off':
data['hhvm'] = True
hhvm = True
elif pargs.hhvm == 'off':
data['hhvm'] = False
hhvm = False
if pargs.pagespeed != 'off':
data['pagespeed'] = True
pagespeed = True
elif pargs.pagespeed == 'off':
data['pagespeed'] = False
pagespeed = False
if pargs.pagespeed:
if pagespeed is old_pagespeed:
if pagespeed is False:
Log.info(self, "PageSpeed is already disabled for the given "
"site")
elif pagespeed is True:
Log.info(self, "PageSpeed is already enabled for the given "
"site")
pargs.pagespeed = False
if pargs.hhvm:
if hhvm is old_hhvm:
if hhvm is False:
Log.info(self, "HHVM is already disabled for the given "
"site")
elif hhvm is True:
Log.info(self, "HHVM is already enabled for the given "
"site")
pargs.hhvm = False
if data and (not pargs.hhvm):
if old_hhvm is True:
data['hhvm'] = True
hhvm = True
else:
data['hhvm'] = False
hhvm = False
if data and (not pargs.pagespeed):
if old_pagespeed is True:
data['pagespeed'] = True
pagespeed = True
else:
data['pagespeed'] = False
pagespeed = False
if pargs.pagespeed == "on" or pargs.hhvm == "on":
if pargs.hhvm == "on":
if (not pargs.experimental):
Log.info(self, "HHVM is an experimental feature and it may not"
" work with all plugins of your site.\nYou can "
"disable it by passing --hhvm=off later.\nDo you wish"
" to enable HHVM now for {0}?".format(ee_domain))
# Check prompt
check_prompt = input("Type \"y\" to continue [n]:")
if check_prompt != "Y" and check_prompt != "y":
Log.info(self, "Not using HHVM for site")
data['hhvm'] = False
hhvm = False
else:
data['hhvm'] = True
hhvm = True
else:
data['hhvm'] = True
hhvm = True
if pargs.pagespeed == "on":
if (not pargs.experimental):
Log.info(self, "PageSpeed is an experimental feature and it may not"
" work with all CSS/JS/Cache of your site.\nYou can "
"disable it by passing --pagespeed=off later.\nDo you wish"
" to enable PageSpeed now for {0}?".format(ee_domain))
# Check prompt
check_prompt = input("Type \"y\" to continue [n]:")
if check_prompt != "Y" and check_prompt != "y":
Log.info(self, "Not using PageSpeed for the given site")
data['pagespeed'] = False
pagespeed = False
else:
data['pagespeed'] = True
pagespeed = True
else:
data['pagespeed'] = True
pagespeed = True
if ((hhvm is old_hhvm) and (pagespeed is old_pagespeed) and
(stype == oldsitetype and cache == oldcachetype)):
return 1
if not data:
Log.error(self, "Cannot update {0}: invalid options"
.format(ee_domain))
ee_auth = site_package_check(self, stype)
data['ee_db_name'] = check_site.db_name
data['ee_db_user'] = check_site.db_user
data['ee_db_pass'] = check_site.db_password
data['ee_db_host'] = check_site.db_host
try:
pre_run_checks(self)
except SiteError as e:
Log.debug(self, str(e))
Log.error(self, "NGINX configuration check failed.")
try:
sitebackup(self, data)
except Exception as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
# setup NGINX configuration, and webroot
try:
setupdomain(self, data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
if 'proxy' in data.keys() and data['proxy']:
updateSiteInfo(self, ee_domain, stype=stype, cache=cache,
hhvm=hhvm, pagespeed=pagespeed)
Log.info(self, "Successfully updated site"
" http://{0}".format(ee_domain))
return 0
# Update pagespeed config
if pargs.pagespeed:
operateOnPagespeed(self, data)
if stype == oldsitetype and cache == oldcachetype:
# Service Nginx Reload
if not EEService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
updateSiteInfo(self, ee_domain, stype=stype, cache=cache,
hhvm=hhvm, pagespeed=pagespeed)
Log.info(self, "Successfully updated site"
" http://{0}".format(ee_domain))
return 0
if data['ee_db_name'] and not data['wp']:
try:
data = setupdatabase(self, data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
try:
eedbconfig = open("{0}/ee-config.php".format(ee_site_webroot),
encoding='utf-8', mode='w')
eedbconfig.write("<?php \ndefine('DB_NAME', '{0}');"
"\ndefine('DB_USER', '{1}'); "
"\ndefine('DB_PASSWORD', '{2}');"
"\ndefine('DB_HOST', '{3}');\n?>"
.format(data['ee_db_name'],
data['ee_db_user'],
data['ee_db_pass'],
data['ee_db_host']))
eedbconfig.close()
except IOError as e:
Log.debug(self, str(e))
Log.debug(self, "creating ee-config.php failed.")
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
# Setup WordPress if old sites are html/php/mysql sites
if data['wp'] and oldsitetype in ['html', 'proxy', 'php', 'mysql']:
try:
ee_wp_creds = setupwordpress(self, data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
# Uninstall unnecessary plugins
if oldsitetype in ['wp', 'wpsubdir', 'wpsubdomain']:
# Setup WordPress Network if update option is multisite
# and oldsite is WordPress single site
if data['multisite'] and oldsitetype == 'wp':
try:
setupwordpressnetwork(self, data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
if (oldcachetype in ['w3tc', 'wpfc'] and
not (data['w3tc'] or data['wpfc'])):
try:
uninstallwp_plugin(self, 'w3-total-cache', data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
if oldcachetype == 'wpsc' and not data['wpsc']:
try:
uninstallwp_plugin(self, 'wp-super-cache', data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
if (oldcachetype not in ['w3tc', 'wpfc']) and (data['w3tc']
or data['wpfc']):
try:
installwp_plugin(self, 'w3-total-cache', data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
if oldcachetype != 'wpsc' and data['wpsc']:
try:
installwp_plugin(self, 'wp-super-cache', data)
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
# Service Nginx Reload
if not EEService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
EEGit.add(self, ["/etc/nginx"],
msg="{0} updated with {1} {2}"
.format(ee_www_domain, stype, cache))
# Setup Permissions for webroot
try:
setwebrootpermissions(self, data['webroot'])
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL + "Update site failed. "
"Check logs for the reason: "
"`tail /var/log/ee/ee.log` and try again!")
return 1
if ee_auth and len(ee_auth):
for msg in ee_auth:
Log.info(self, Log.ENDC + msg)
display_cache_settings(self, data)
if data['wp'] and oldsitetype in ['html', 'php', 'mysql']:
Log.info(self, "\n\n" + Log.ENDC + "WordPress admin user :"
" {0}".format(ee_wp_creds['wp_user']))
Log.info(self, Log.ENDC + "WordPress admin password : {0}"
.format(ee_wp_creds['wp_pass']) + "\n\n")
if oldsitetype in ['html', 'php'] and stype != 'php':
updateSiteInfo(self, ee_domain, stype=stype, cache=cache,
db_name=data['ee_db_name'],
db_user=data['ee_db_user'],
db_password=data['ee_db_pass'],
db_host=data['ee_db_host'], hhvm=hhvm,
pagespeed=pagespeed)
else:
updateSiteInfo(self, ee_domain, stype=stype, cache=cache,
hhvm=hhvm, pagespeed=pagespeed)
Log.info(self, "Successfully updated site"
" http://{0}".format(ee_domain))
return 0
class EESiteDeleteController(CementBaseController):
class Meta:
label = 'delete'
stacked_on = 'site'
stacked_type = 'nested'
description = 'delete an existing website'
arguments = [
(['site_name'],
dict(help='domain name to be deleted', nargs='?')),
(['--no-prompt'],
dict(help="do not ask for confirmation before deleting",
action='store_true')),
(['--all'],
dict(help="delete all", action='store_true')),
(['--db'],
dict(help="delete db only", action='store_true')),
(['--files'],
dict(help="delete webroot only", action='store_true')),
]
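# Illustrative invocations (example.com is a placeholder):
#   ee site delete example.com                  # remove db + files, with prompts
#   ee site delete example.com --db             # database only
#   ee site delete example.com --files --no-prompt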
@expose(help="Delete website configuration and files")
@expose(hide=True)
def default(self):
if not self.app.pargs.site_name:
try:
while not self.app.pargs.site_name:
self.app.pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.error(self, 'Unable to read input, please try again')
self.app.pargs.site_name = self.app.pargs.site_name.strip()
(ee_domain, ee_www_domain) = ValidateDomain(self.app.pargs.site_name)
ee_db_name = ''
ee_prompt = ''
ee_nginx_prompt = ''
mark_db_deleted = False
mark_webroot_deleted = False
if not check_domain_exists(self, ee_domain):
Log.error(self, "site {0} does not exist".format(ee_domain))
if ((not self.app.pargs.db) and (not self.app.pargs.files) and
(not self.app.pargs.all)):
self.app.pargs.all = True
# Gather information from ee-db for ee_domain
check_site = getSiteInfo(self, ee_domain)
ee_site_type = check_site.site_type
ee_site_webroot = check_site.site_path
if ee_site_webroot == 'deleted':
mark_webroot_deleted = True
if ee_site_type in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
ee_db_name = check_site.db_name
ee_db_user = check_site.db_user
ee_db_host = check_site.db_host
if ee_db_name == 'deleted':
mark_db_deleted = True
if self.app.pargs.all:
self.app.pargs.db = True
self.app.pargs.files = True
else:
if self.app.pargs.all:
mark_db_deleted = True
self.app.pargs.files = True
# Delete website database
if self.app.pargs.db:
if ee_db_name != 'deleted' and ee_db_name != '':
if not self.app.pargs.no_prompt:
ee_db_prompt = input('Are you sure you want to delete the'
' database [y/N]: ')
else:
ee_db_prompt = 'Y'
if ee_db_prompt == 'Y' or ee_db_prompt == 'y':
Log.info(self, "Deleting Database, {0}, user {1}"
.format(ee_db_name, ee_db_user))
deleteDB(self, ee_db_name, ee_db_user, ee_db_host)
updateSiteInfo(self, ee_domain,
db_name='deleted',
db_user='deleted',
db_password='deleted')
mark_db_deleted = True
Log.info(self, "Deleted Database successfully.")
else:
mark_db_deleted = True
Log.info(self, "This site does not seem to have a database.")
# Delete webroot
if self.app.pargs.files:
if ee_site_webroot != 'deleted':
if not self.app.pargs.no_prompt:
ee_web_prompt = input('Are you sure you want to delete the '
'webroot [y/N]: ')
else:
ee_web_prompt = 'Y'
if ee_web_prompt == 'Y' or ee_web_prompt == 'y':
Log.info(self, "Deleting Webroot, {0}"
.format(ee_site_webroot))
deleteWebRoot(self, ee_site_webroot)
updateSiteInfo(self, ee_domain, webroot='deleted')
mark_webroot_deleted = True
Log.info(self, "Deleted webroot successfully")
else:
mark_webroot_deleted = True
Log.info(self, "Webroot seems to be already deleted")
if (mark_webroot_deleted and mark_db_deleted):
# TODO Delete nginx conf
removeNginxConf(self, ee_domain)
deleteSiteInfo(self, ee_domain)
Log.info(self, "Deleted site {0}".format(ee_domain))
class EESiteListController(CementBaseController):
class Meta:
label = 'list'
stacked_on = 'site'
stacked_type = 'nested'
description = 'List websites'
arguments = [
(['--enabled'],
dict(help='List enabled websites', action='store_true')),
(['--disabled'],
dict(help="List disabled websites", action='store_true')),
]
@expose(help="Lists websites")
def default(self):
sites = getAllsites(self)
if not sites:
return
if self.app.pargs.enabled:
for site in sites:
if site.is_enabled:
Log.info(self, "{0}".format(site.sitename))
elif self.app.pargs.disabled:
for site in sites:
if not site.is_enabled:
Log.info(self, "{0}".format(site.sitename))
else:
for site in sites:
Log.info(self, "{0}".format(site.sitename))
def load(app):
# register the plugin class.. this only happens if the plugin is enabled
handler.register(EESiteController)
handler.register(EESiteCreateController)
handler.register(EESiteUpdateController)
handler.register(EESiteDeleteController)
handler.register(EESiteListController)
handler.register(EESiteEditController)
# register a hook (function) to run after arguments are parsed.
hook.register('post_argument_parsing', ee_site_hook)
|
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Post-processing utilities for question answering.
"""
import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing function for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if len(predictions) != 2:
raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).")
all_start_logits, all_end_logits = predictions
if len(predictions[0]) != len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` greatest start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or len(offset_mapping[start_index]) < 2
or offset_mapping[end_index] is None
or len(offset_mapping[end_index]) < 2
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case where we do not have a single non-null prediction, we create a fake prediction
# to avoid failure.
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
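# e.g. raw scores [1000., 999.] would overflow np.exp directly; shifting
# by the max gives np.exp([0., -1.]) and identical normalized probabilities.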
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
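# Worked example (illustrative numbers): with null_score = 2.0 and a best
# span whose start_logit + end_logit = 1.5, score_diff = 0.5; with the
# default threshold of 0.0 the null answer wins, while a threshold above
# 0.5 would flip the choice to the span.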
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
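# A minimal usage sketch (variable names are hypothetical; in the
# transformers question-answering examples this function is driven by the
# main training script):
#
#   start_logits, end_logits = raw_predictions  # two np.ndarrays from the model
#   answers = postprocess_qa_predictions(
#       examples=eval_examples,    # untokenized dataset with "id" and "context"
#       features=eval_features,    # tokenized features with "offset_mapping"
#       predictions=(start_logits, end_logits),
#       n_best_size=20,
#       max_answer_length=30,
#   )
#   # answers is an OrderedDict mapping example id -> best answer string.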
def postprocess_qa_predictions_with_beam_search(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
start_n_top: int = 5,
end_n_top: int = 5,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are
substrings of the original contexts. This is the postprocessing function for models that return start and end logits,
indices, as well as cls token predictions.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]`):
The predictions of the model: five arrays containing the start top log probabilities, the start top indices,
the end top log probabilities, the end top indices and the cls logits respectively. The first dimension of
each array must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
start_n_top (:obj:`int`, `optional`, defaults to 5):
The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
end_n_top (:obj:`int`, `optional`, defaults to 5):
The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if len(predictions) != 5:
raise ValueError("`predictions` should be a tuple with five elements.")
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
if len(predictions[0]) != len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_log_prob = start_top_log_probs[feature_index]
start_indexes = start_top_index[feature_index]
end_log_prob = end_top_log_probs[feature_index]
end_indexes = end_top_index[feature_index]
feature_null_score = cls_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction
if min_null_score is None or feature_null_score < min_null_score:
min_null_score = feature_null_score
# Go through all possibilities for the `start_n_top`/`end_n_top` greatest start and end logits.
for i in range(start_n_top):
for j in range(end_n_top):
start_index = int(start_indexes[i])
j_index = i * end_n_top + j
end_index = int(end_indexes[j_index])
# Don't consider out-of-scope answers (last part of the test should be unnecessary because of the
# p_mask but let's not take any risk)
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length negative or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_log_prob[i] + end_log_prob[j_index],
"start_log_prob": start_log_prob[i],
"end_log_prob": end_log_prob[j_index],
}
)
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case where we do not have a single non-null prediction, we create a fake prediction
# to avoid failure.
if len(predictions) == 0:
predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction and set the probability for the null answer.
all_predictions[example["id"]] = predictions[0]["text"]
if version_2_with_negative:
scores_diff_json[example["id"]] = float(min_null_score)
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json
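# A minimal usage sketch for the beam-search variant (hypothetical variable
# names; meant for XLNet-style models that emit top-k start/end candidates
# plus a cls logit):
#
#   predictions = (start_top_log_probs, start_top_index,
#                  end_top_log_probs, end_top_index, cls_logits)
#   answers, null_odds = postprocess_qa_predictions_with_beam_search(
#       examples=eval_examples,
#       features=eval_features,
#       predictions=predictions,
#       version_2_with_negative=True,
#       start_n_top=5,
#       end_n_top=5,
#   )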
|
|
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from minerstat.utils import Config
from minerstat.remote import MinerStatRemoteProtocol
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessExitedAlready
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.python.failure import Failure
from twisted.internet import defer, task
import subprocess
import os
from typing import Union, Iterable, Optional # noqa
from twisted.logger import Logger
import asyncio
from twisted.plugin import getPlugins
from minerstat.miners.base import IMiner, MinerUtils
from minerstat.miners.claymore import AlgoClaymoreMiner
from minerstat.miners.claymore import DualClaymoreMiner
class MinerProcessProtocol(ProcessProtocol):
log = Logger()
def __init__(self):
self.on_ended = defer.Deferred()
self.on_started = defer.Deferred()
def connectionMade(self):
self.log.debug("Miner started.")
self.on_started.callback(None)
def outReceived(self, data):
print(data)
def errReceived(self, data):
print(data)
def processExited(self, status: Failure):
self.log.debug(
"miner process has exited with status: {status}", status=status)
self.on_ended.callback(status.value)
def processEnded(self, status: Failure):
self.log.debug(
"miner process has ended with status: {status}", status=status)
self.on_ended.callback(status.value)
def stop_it(self) -> defer.Deferred:
self.log.debug("Stopping the miner.")
try:
self.transport.signalProcess("KILL")
return self.on_ended
except ProcessExitedAlready:
self.log.info("Miner process is already gone.")
return defer.succeed(None)
class Rig:
log = Logger()
def __init__(
self,
config: Config,
remote: MinerStatRemoteProtocol,
reactor=reactor
) -> None:
self.config = config
self.remote = remote
self.reactor = reactor
self._looper = LoopingCall(
lambda: defer.ensureDeferred(self.mainloop()))
self._coin_lock = asyncio.Lock()
self._current_coin = None # type: Optional[IMiner]
self._last_dr = 'null'
self._last_bq = 'null'
def reboot(self) -> None:
"""
Reboot the service.
NOTE: this depends on the user having passwordless sudo access.
"""
command = "/usr/bin/sudo /sbin/shutdown -r now"
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print(output)
async def start(self) -> None:
self.header()
await self.load_configured_miner(
self.config.client, self.config.db,
None, None)
self.log.info("About to announce.")
await self.remote.announce(self._current_coin)
await self.start_miner()
self._looper.start(20).addErrback(self.log.error)
async def stop(self) -> None:
self._looper.stop()
await self.stop_miner()
async def load_configured_miner(
self, client: str, db: str,
bq: Optional[str], dr: Optional[str]) -> IMiner:
miner_coins = getPlugins(IMiner) # type: Iterable[IMiner]
if not (bq and dr):
bq, dr = await self.remote.algoinfo()
for coin in miner_coins:
if coin.name == client:
if coin.name == "algo":
if dr != "null" and not isinstance(
coin, DualClaymoreMiner):
continue
elif coin.db != db:
continue
with (await self._coin_lock):
self.log.debug(
"Setting current coin to {0}"
.format(coin.name))
self._current_coin = coin
await self.remote.dlconf(coin)
return coin
else:
raise RuntimeError("No miner configured in global config.")
async def mainloop(self) -> None:
self.log.debug("About to get miner data.")
data = await self.collect_miner_data()
self.log.debug("About to send logs to server.")
await self.send_logs_to_server(data)
self.log.debug("About to check algorithms.")
if isinstance(self._current_coin, AlgoClaymoreMiner):
await self.check_algorithms()
self.log.debug("About to check remote commands.")
await self.check_remote_commands()
async def check_algorithms(self) -> None:
"""call to self.remote.check_algo"""
bqt, bq, dr = await self.remote.algo_check()
self.log.info("check algorithms {} {} {}".format(bqt, bq, dr))
if not isinstance(self._current_coin, DualClaymoreMiner):
if self._last_bq != bq:
self._last_bq = bq
algo = AlgoClaymoreMiner()
await self.load_configured_miner(
algo.client, algo.db,
bq=bq[0], dr=dr[0])
if dr != self._last_dr:
self._last_dr = dr
if dr[1] == "null":
self.log.info("disable dual mining")
coin = AlgoClaymoreMiner()
else:
self.log.info("enable dual mining")
coin = DualClaymoreMiner()
await self.remote.dlconf(coin)
await self.setup_miner(coin)
async def setup_miner(self, coin: IMiner) -> None:
with (await self._coin_lock):
await self.stop_miner()
self._current_coin = coin
await self.start_miner()
async def check_remote_commands(self) -> None:
"""call to self.dispatch_remote"""
command = await self.remote.fetch_remote_command(self._current_coin)
if command and command.coin and \
(command.coin.name != self._current_coin.name):
await self.setup_miner(command.coin)
async def collect_miner_data(self) -> str:
"""hit the subprocess protocol to get buffers"""
return await self._current_coin.fetch_logs()
async def send_logs_to_server(self, data: str) -> None:
"""use self.remote.send_logs"""
await self.remote.send_log(data)
def header(self):
self.log.info('----------------------- minerstat.com --------------------------') # noqa
self.log.info('------------------------ Linux Alpha ------------------------') # noqa
async def start_miner(self) -> None:
if self._current_coin is None:
self.log.warning("Can't start a miner that doesn't exist.")
return
self._process_protocol = MinerProcessProtocol()
util = MinerUtils(self._current_coin, self.config)
path = util.miner_path()
self.reactor.spawnProcess(
self._process_protocol,
os.path.join(path, "start.bash"),
args=[self.config.client],
env=os.environ,
path=path)
self._process_protocol.on_ended.addCallbacks(
callback=self.miner_ended,
errback=self.miner_ended)
await self._process_protocol.on_started
async def stop_miner(self) -> None:
if self._process_protocol.connected:
await self._process_protocol.stop_it()
@defer.inlineCallbacks
def miner_ended(
self, status: Union[ProcessDone, ProcessTerminated]):
self.log.info("Got status from ending miner. {status}", status=status)
yield task.deferLater(
self.reactor, 1,
lambda: defer.ensureDeferred(self.start_miner()))
self.log.info("Started miner: {miner}", miner=self.config.client)
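# A rough wiring sketch (construction of Config and the remote protocol is
# hypothetical; the real service assembles these in its entry point):
#
#   config = Config(...)  # parsed minerstat configuration
#   remote = MinerStatRemoteProtocol(config)
#   rig = Rig(config, remote)
#   reactor.callWhenRunning(
#       lambda: defer.ensureDeferred(rig.start()))
#   reactor.run()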
|
|
#!/usr/bin/python
# Author: Chris Zacharias (chris@imgix.com)
# Copyright (c) 2012, Zebrafish Labs Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import httplib2
import urllib
import re
import json
from datetime import datetime
FASTLY_SCHEME = "https"
FASTLY_HOST = "api.fastly.com"
FASTLY_SESSION_REGEX = re.compile(r"(fastly\.session=[^;]+);")
class FastlyRoles(object):
USER = "user"
BILLING = "billing"
ENGINEER = "engineer"
SUPERUSER = "superuser"
class FastlyCacheSettingsAction(object):
CACHE = "cache"
PASS = "pass"
RESTART = "restart"
class FastlyConditionType(object):
    RESPONSE = "response"
    CACHE = "cache"
    REQUEST = "request"
    FETCH = "fetch"
class FastlyHeaderAction(object):
    SET = "set"
    APPEND = "append"
    DELETE = "delete"
    REGEX = "regex"
    REGEX_ALL = "regex_repeat"
class FastlyHeaderType(object):
    RESPONSE = "response"
    FETCH = "fetch"
    CACHE = "cache"
    REQUEST = "request"
class FastlyRequestSettingAction(object):
    LOOKUP = "lookup"
    PASS = "pass"
class FastlyForwardedForAction(object):
    CLEAR = "clear"
    LEAVE = "leave"
    APPEND = "append"
    APPEND_ALL = "append_all"
    OVERWRITE = "overwrite"
class FastlyStatsType(object):
    ALL = "all"
    DAILY = "daily"
    HOURLY = "hourly"
    MINUTELY = "minutely"
class FastlyDirectorType(object):
    RANDOM = 1
    ROUNDROBIN = 2
    HASH = 3
    CLIENT = 4
class FastlyConnection(object):
def __init__(self, api_key):
self._session = None
self._api_key = api_key
self._fully_authed = False
@property
def fully_authed(self):
return self._fully_authed
def login(self, user, password):
body = self._formdata({
"user": user,
"password": password,
}, ["user", "password"])
content = self._fetch("/login", method="POST", body=body)
self._fully_authed = True
return FastlySession(self, content)
def list_backends(self, service_id, version_number):
"""List all backends for a particular service and version."""
content = self._fetch("/service/%s/version/%d/backend" % (service_id, version_number))
return map(lambda x: FastlyBackend(self, x), content)
def create_backend(self,
service_id,
version_number,
name,
address,
use_ssl=False,
port=80,
connect_timeout=1000,
first_byte_timeout=15000,
between_bytes_timeout=10000,
error_threshold=0,
max_conn=20,
weight=100,
auto_loadbalance=False,
shield=None,
request_condition=None,
healthcheck=None,
comment=None):
"""Create a backend for a particular service and version."""
body = self._formdata({
"name": name,
"address": address,
"use_ssl": use_ssl,
"port": port,
"connect_timeout": connect_timeout,
"first_byte_timeout": first_byte_timeout,
"between_bytes_timeout": between_bytes_timeout,
"error_threshold": error_threshold,
"max_conn": max_conn,
"weight": weight,
"auto_loadbalance": auto_loadbalance,
"shield": shield,
"request_condition": request_condition,
"healthcheck": healthcheck,
"comment": comment,
}, FastlyBackend.FIELDS)
content = self._fetch("/service/%s/version/%d/backend" % (service_id, version_number), method="POST", body=body)
return FastlyBackend(self, content)
def get_backend(self, service_id, version_number, name):
"""Get the backend for a particular service and version."""
content = self._fetch("/service/%s/version/%d/backend/%s" % (service_id, version_number, name))
return FastlyBackend(self, content)
def update_backend(self, service_id, version_number, name_key, **kwargs):
"""Update the backend for a particular service and version."""
body = self._formdata(kwargs, FastlyBackend.FIELDS)
content = self._fetch("/service/%s/version/%d/backend/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyBackend(self, content)
def delete_backend(self, service_id, version_number, name):
"""Delete the backend for a particular service and version."""
content = self._fetch("/service/%s/version/%d/backend/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def check_backends(self, service_id, version_number):
"""Performs a health check against each backend in version. If the backend has a specific type of healthcheck, that one is performed, otherwise a HEAD request to / is performed. The first item is the details on the Backend itself. The second item is details of the specific HTTP request performed as a health check. The third item is the response details."""
content = self._fetch("/service/%s/version/%d/backend/check_all" % (service_id, version_number))
# TODO: Use a strong-typed class for output?
return content
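    # Example backend round-trip (illustrative; service_id and version refer
    # to an existing, unlocked service version):
    #
    #   backend = conn.create_backend(service_id, version, "origin0",
    #                                 "origin.example.com", use_ssl=True, port=443)
    #   conn.update_backend(service_id, version, "origin0", weight=50)
    #   conn.delete_backend(service_id, version, "origin0")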
def list_cache_settings(self, service_id, version_number):
"""Get a list of all cache settings for a particular service and version."""
content = self._fetch("/service/%s/version/%d/cache_settings" % (service_id, version_number))
return map(lambda x: FastlyCacheSettings(self, x), content)
def create_cache_settings(self,
service_id,
version_number,
name,
action,
ttl=None,
stale_ttl=None,
cache_condition=None):
"""Create a new cache settings object."""
body = self._formdata({
"name": name,
"action": action,
"ttl": ttl,
"stale_ttl": stale_ttl,
"cache_condition": cache_condition,
}, FastlyCacheSettings.FIELDS)
content = self._fetch("/service/%s/version/%d/cache_settings" % (service_id, version_number), method="POST", body=body)
return FastlyCacheSettings(self, content)
def get_cache_settings(self, service_id, version_number, name):
"""Get a specific cache settings object."""
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name))
return FastlyCacheSettings(self, content)
def update_cache_settings(self, service_id, version_number, name_key, **kwargs):
"""Update a specific cache settings object."""
body = self._formdata(kwargs, FastlyCacheSettings.FIELDS)
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyCacheSettings(self, content)
def delete_cache_settings(self, service_id, version_number, name):
"""Delete a specific cache settings object."""
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def list_conditions(self, service_id, version_number):
"""Gets all conditions for a particular service and version."""
content = self._fetch("/service/%s/version/%d/condition" % (service_id, version_number))
return map(lambda x: FastlyCondition(self, x), content)
def create_condition(self,
service_id,
version_number,
name,
_type,
statement,
priority="10",
comment=None):
"""Creates a new condition."""
body = self._formdata({
"name": name,
"type": _type,
"statement": statement,
"priority": priority,
"comment": comment,
}, FastlyCondition.FIELDS)
content = self._fetch("/service/%s/version/%d/condition" % (service_id, version_number), method="POST", body=body)
return FastlyCondition(self, content)
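    # Example (illustrative): gate an object on POST requests only:
    #
    #   conn.create_condition(service_id, version, "is-post",
    #                         FastlyConditionType.REQUEST,
    #                         'req.request == "POST"')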
def get_condition(self, service_id, version_number, name):
"""Gets a specified condition."""
content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, name))
return FastlyCondition(self, content)
def update_condition(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified condition."""
body = self._formdata(kwargs, FastlyCondition.FIELDS)
content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyCondition(self, content)
def delete_condition(self, service_id, version_number, name):
"""Deletes the specified condition."""
content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def content_edge_check(self, url):
"""Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server."""
prefixes = ["http://", "https://"]
for prefix in prefixes:
if url.startswith(prefix):
url = url[len(prefix):]
break
content = self._fetch("/content/edge_check/%s" % url)
return content
def get_current_customer(self):
"""Get the logged in customer."""
content = self._fetch("/current_customer")
return FastlyCustomer(self, content)
def get_customer(self, customer_id):
"""Get a specific customer."""
content = self._fetch("/customer/%s" % customer_id)
return FastlyCustomer(self, content)
def get_customer_details(self, customer_id):
"""Get a specific customer, owner, and billing contact."""
content = self._fetch("/customer/details/%s" % customer_id)
return content
def list_customer_users(self, customer_id):
"""List all users from a specified customer id."""
content = self._fetch("/customer/users/%s" % customer_id)
return map(lambda x: FastlyUser(self, x), content)
def update_customer(self, customer_id, **kwargs):
"""Update a customer."""
body = self._formdata(kwargs, FastlyCustomer.FIELDS)
content = self._fetch("/customer/%s" % customer_id, method="PUT", body=body)
return FastlyCustomer(self, content)
def delete_customer(self, customer_id):
"""Delete a customer."""
content = self._fetch("/customer/%s" % customer_id, method="DELETE")
return self._status(content)
def list_directors(self, service_id, version_number):
"""List the directors for a particular service and version."""
content = self._fetch("/service/%s/version/%d/director" % (service_id, version_number))
return map(lambda x: FastlyDirector(self, x), content)
def create_director(self, service_id, version_number,
name,
quorum=75,
_type=FastlyDirectorType.RANDOM,
retries=5,
shield=None):
"""Create a director for a particular service and version."""
body = self._formdata({
"name": name,
"quorum": quorum,
"type": _type,
"retries": retries,
"shield": shield,
}, FastlyDirector.FIELDS)
content = self._fetch("/service/%s/version/%d/director" % (service_id, version_number), method="POST", body=body)
return FastlyDirector(self, content)
def get_director(self, service_id, version_number, name):
"""Get the director for a particular service and version."""
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, name))
return FastlyDirector(self, content)
def update_director(self, service_id, version_number, name_key, **kwargs):
"""Update the director for a particular service and version."""
body = self._formdata(kwargs, FastlyDirector.FIELDS)
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyDirector(self, content)
def delete_director(self, service_id, version_number, name):
"""Delete the director for a particular service and version."""
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def get_director_backend(self, service_id, version_number, director_name, backend_name):
"""Returns the relationship between a Backend and a Director. If the Backend has been associated with the Director, it returns a simple record indicating this. Otherwise, returns a 404."""
content = self._fetch("/service/%s/version/%d/director/%s/backend/%s" % (service_id, version_number, director_name, backend_name), method="GET")
return FastlyDirectorBackend(self, content)
def create_director_backend(self, service_id, version_number, director_name, backend_name):
"""Establishes a relationship between a Backend and a Director. The Backend is then considered a member of the Director and can be used to balance traffic onto."""
content = self._fetch("/service/%s/version/%d/director/%s/backend/%s" % (service_id, version_number, director_name, backend_name), method="POST")
return FastlyDirectorBackend(self, content)
def delete_director_backend(self, service_id, version_number, director_name, backend_name):
"""Deletes the relationship between a Backend and a Director. The Backend is no longer considered a member of the Director and thus will not have traffic balanced onto it from this Director."""
content = self._fetch("/service/%s/version/%d/director/%s/backend/%s" % (service_id, version_number, director_name, backend_name), method="DELETE")
return self._status(content)
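    # Example (illustrative): balance two backends behind a random director:
    #
    #   conn.create_director(service_id, version, "balancer",
    #                        _type=FastlyDirectorType.RANDOM)
    #   conn.create_director_backend(service_id, version, "balancer", "origin0")
    #   conn.create_director_backend(service_id, version, "balancer", "origin1")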
def list_domains(self, service_id, version_number):
"""List the domains for a particular service and version."""
content = self._fetch("/service/%s/version/%d/domain" % (service_id, version_number))
return map(lambda x: FastlyDomain(self, x), content)
def create_domain(self,
service_id,
version_number,
name,
comment=None):
"""Create a domain for a particular service and version."""
body = self._formdata({
"name": name,
"comment": comment,
}, FastlyDomain.FIELDS)
content = self._fetch("/service/%s/version/%d/domain" % (service_id, version_number), method="POST", body=body)
return FastlyDomain(self, content)
def get_domain(self, service_id, version_number, name):
"""Get the domain for a particular service and version."""
content = self._fetch("/service/%s/version/%d/domain/%s" % (service_id, version_number, name))
return FastlyDomain(self, content)
def update_domain(self, service_id, version_number, name_key, **kwargs):
"""Update the domain for a particular service and version."""
body = self._formdata(kwargs, FastlyDomain.FIELDS)
content = self._fetch("/service/%s/version/%d/domain/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyDomain(self, content)
def delete_domain(self, service_id, version_number, name):
"""Delete the domain for a particular service and version."""
content = self._fetch("/service/%s/version/%d/domain/%s" % (service_id, version_number, name), method="DELETE")
        return self._status(content)
def check_domain(self, service_id, version_number, name):
"""Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly."""
content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, name))
return FastlyDomainCheck(self, content)
def check_domains(self, service_id, version_number):
"""Checks the status of all domain DNS records for a Service Version. Returns an array items in the same format as the single domain /check."""
content = self._fetch("/service/%s/version/%d/domain/check_all" % (service_id, version_number))
return map(lambda x: FastlyDomainCheck(self, x), content)
def get_event_log(self, object_id):
"""Get the specified event log."""
content = self._fetch("/event_log/%s" % object_id, method="GET")
return FastlyEventLog(self, content)
def list_headers(self, service_id, version_number):
"""Retrieves all Header objects for a particular Version of a Service."""
content = self._fetch("/service/%s/version/%d/header" % (service_id, version_number))
return map(lambda x: FastlyHeader(self, x), content)
    def create_header(self,
                      service_id,
                      version_number,
                      name,
                      destination,
                      source,
                      _type=FastlyHeaderType.RESPONSE,
                      action=FastlyHeaderAction.SET,
                      regex=None,
                      substitution=None,
                      ignore_if_set=None,
                      priority=10,
                      response_condition=None,
                      cache_condition=None,
                      request_condition=None):
        """Creates a new Header object."""
        body = self._formdata({
            "name": name,
            "dst": destination,
            "src": source,
            "type": _type,
            "action": action,
            "regex": regex,
            "substitution": substitution,
            "ignore_if_set": ignore_if_set,
            "priority": priority,
            "response_condition": response_condition,
            "request_condition": request_condition,
            "cache_condition": cache_condition,
        }, FastlyHeader.FIELDS)
content = self._fetch("/service/%s/version/%d/header" % (service_id, version_number), method="POST", body=body)
return FastlyHeader(self, content)
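    # Example (illustrative): set an HSTS header on every response; the source
    # is a VCL expression, hence the quoted string literal:
    #
    #   conn.create_header(service_id, version, "hsts",
    #                      "http.Strict-Transport-Security",
    #                      '"max-age=31536000"',
    #                      _type=FastlyHeaderType.RESPONSE,
    #                      action=FastlyHeaderAction.SET)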
def get_header(self, service_id, version_number, name):
"""Retrieves a Header object by name."""
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, name))
return FastlyHeader(self, content)
def update_header(self, service_id, version_number, name_key, **kwargs):
"""Modifies an existing Header object by name."""
body = self._formdata(kwargs, FastlyHeader.FIELDS)
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyHeader(self, content)
def delete_header(self, service_id, version_number, name):
"""Deletes a Header object by name."""
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def list_healthchecks(self, service_id, version_number):
"""List all of the healthchecks for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number))
return map(lambda x: FastlyHealthCheck(self, x), content)
def create_healthcheck(self,
service_id,
version_number,
name,
host,
method="HEAD",
path="/",
http_version="1.1",
timeout=1000,
check_interval=5000,
expected_response=200,
window=5,
threshold=3,
initial=1):
"""Create a healthcheck for a particular service and version."""
body = self._formdata({
"name": name,
"method": method,
"host": host,
"path": path,
"http_version": http_version,
"timeout": timeout,
"check_interval": check_interval,
"expected_response": expected_response,
"window": window,
"threshold": threshold,
"initial": initial,
}, FastlyHealthCheck.FIELDS)
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number), method="POST", body=body)
return FastlyHealthCheck(self, content)
def get_healthcheck(self, service_id, version_number, name):
"""Get the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, name))
return FastlyHealthCheck(self, content)
def update_healthcheck(self, service_id, version_number, name_key, **kwargs):
"""Update the healthcheck for a particular service and version."""
body = self._formdata(kwargs, FastlyHealthCheck.FIELDS)
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyHealthCheck(self, content)
def delete_healthcheck(self, service_id, version_number, name):
"""Delete the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def purge_url(self, host, path):
"""Purge an individual URL."""
content = self._fetch(path, method="PURGE", headers={ "Host": host })
return FastlyPurge(self, content)
def check_purge_status(self, purge_id):
"""Get the status and times of a recently completed purge."""
content = self._fetch("/purge?id=%s" % purge_id)
return map(lambda x: FastlyPurgeStatus(self, x), content)
def list_request_settings(self, service_id, version_number):
"""Returns a list of all Request Settings objects for the given service and version."""
content = self._fetch("/service/%s/version/%d/request_settings" % (service_id, version_number))
return map(lambda x: FastlyRequestSetting(self, x), content)
def create_request_setting(self,
service_id,
version_number,
name,
default_host=None,
force_miss=None,
force_ssl=None,
action=None,
bypass_busy_wait=None,
max_stale_age=None,
hash_keys=None,
xff=None,
timer_support=None,
geo_headers=None,
request_condition=None):
"""Creates a new Request Settings object."""
body = self._formdata({
"name": name,
"default_host": default_host,
"force_miss": force_miss,
"force_ssl": force_ssl,
"action": action,
"bypass_busy_wait": bypass_busy_wait,
"max_stale_age": max_stale_age,
"hash_keys": hash_keys,
"xff": xff,
"timer_support": timer_support,
"geo_headers": geo_headers,
"request_condition": request_condition,
}, FastlyRequestSetting.FIELDS)
content = self._fetch("/service/%s/version/%d/request_settings" % (service_id, version_number), method="POST", body=body)
return FastlyRequestSetting(self, content)
def get_request_setting(self, service_id, version_number, name):
"""Gets the specified Request Settings object."""
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, name))
return FastlyRequestSetting(self, content)
def update_request_setting(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified Request Settings object."""
        body = self._formdata(kwargs, FastlyRequestSetting.FIELDS)
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyRequestSetting(self, content)
def delete_request_setting(self, service_id, version_number, name):
"""Removes the specfied Request Settings object."""
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def list_response_objects(self, service_id, version_number):
"""Returns all Response Objects for the specified service and version."""
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number))
return map(lambda x: FastlyResponseObject(self, x), content)
def create_response_object(self, service_id, version_number, name, status="200", response="OK", content="", request_condition=None, cache_condition=None):
"""Creates a new Response Object."""
body = self._formdata({
"name": name,
"status": status,
"response": response,
"content": content,
"request_condition": request_condition,
"cache_condition": cache_condition,
}, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number), method="POST", body=body)
return FastlyResponseObject(self, content)
def get_response_object(self, service_id, version_number, name):
"""Gets the specified Response Object."""
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, name))
return FastlyResponseObject(self, content)
def update_response_object(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified Response Object."""
body = self._formdata(kwargs, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyResponseObject(self, content)
def delete_response_object(self, service_id, version_number, name):
"""Deletes the specified Response Object."""
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def create_service(self, customer_id, name, publish_key=None, comment=None):
"""Create a service."""
body = self._formdata({
"customer_id": customer_id,
"name": name,
"publish_key": publish_key,
"comment": comment,
}, FastlyService.FIELDS)
content = self._fetch("/service", method="POST", body=body)
return FastlyService(self, content)
def list_services(self):
"""List Services."""
content = self._fetch("/service")
return map(lambda x: FastlyService(self, x), content)
def get_service(self, service_id):
"""Get a specific service by id."""
content = self._fetch("/service/%s" % service_id)
return FastlyService(self, content)
def get_service_details(self, service_id):
"""List detailed information on a specified service."""
content = self._fetch("/service/%s/details" % service_id)
return FastlyService(self, content)
def get_service_by_name(self, service_name):
"""Get a specific service by name."""
content = self._fetch("/service/search?name=%s" % service_name)
return FastlyService(self, content)
def update_service(self, service_id, **kwargs):
"""Update a service."""
body = self._formdata(kwargs, FastlyService.FIELDS)
content = self._fetch("/service/%s" % service_id, method="PUT", body=body)
return FastlyService(self, content)
def delete_service(self, service_id):
"""Delete a service."""
content = self._fetch("/service/%s" % service_id, method="DELETE")
return self._status(content)
def list_domains_by_service(self, service_id):
"""List the domains within a service."""
content = self._fetch("/service/%s/domain" % service_id, method="GET")
return map(lambda x: FastlyDomain(self, x), content)
def purge_service(self, service_id):
"""Purge everything from a service."""
content = self._fetch("/service/%s/purge_all" % service_id, method="POST")
return self._status(content)
def purge_service_by_key(self, service_id, key):
"""Purge a particular service by a key."""
content = self._fetch("/service/%s/purge/%s" % (service_id, key), method="POST")
return self._status(content)
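    # Example purge flow (illustrative):
    #
    #   purge = conn.purge_url("www.example.com", "/index.html")
    #   for status in conn.check_purge_status(purge.id):
    #       print status.server, status.timestamp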
def get_settings(self, service_id, version_number):
"""Get the settings for a particular service and version."""
content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number))
return FastlySettings(self, content)
    def update_settings(self, service_id, version_number, settings=None):
        """Update the settings for a particular service and version."""
        body = urllib.urlencode(settings or {})
content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number), method="PUT", body=body)
return FastlySettings(self, content)
def get_stats(self, service_id, stat_type=FastlyStatsType.ALL):
"""Get the stats from a service."""
content = self._fetch("/service/%s/stats/%s" % (service_id, stat_type))
return content
def list_syslogs(self, service_id, version_number):
"""List all of the Syslogs for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog" % (service_id, version_number))
return map(lambda x: FastlySyslog(self, x), content)
def create_syslog(self,
service_id,
version_number,
name,
address,
port=514,
use_tls="0",
tls_ca_cert=None,
token=None,
_format=None,
response_condition=None):
"""Create a Syslog for a particular service and version."""
body = self._formdata({
"name": name,
"address": address,
"port": port,
"use_tls": use_tls,
"tls_ca_cert": tls_ca_cert,
"token": token,
"format": _format,
"response_condition": response_condition,
}, FastlySyslog.FIELDS)
content = self._fetch("/service/%s/version/%d/syslog" % (service_id, version_number), method="POST", body=body)
return FastlySyslog(self, content)
def get_syslog(self, service_id, version_number, name):
"""Get the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, name))
return FastlySyslog(self, content)
def update_syslog(self, service_id, version_number, name_key, **kwargs):
"""Update the Syslog for a particular service and version."""
body = self._formdata(kwargs, FastlySyslog.FIELDS)
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlySyslog(self, content)
def delete_syslog(self, service_id, version_number, name):
"""Delete the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def change_password(self, old_password, new_password):
"""Update the user's password to a new one."""
body = self._formdata({
"old_password": old_password,
"password": new_password,
}, ["old_password", "password"])
content = self._fetch("/current_user/password", method="POST", body=body)
return FastlyUser(self, content)
def get_current_user(self):
"""Get the logged in user."""
content = self._fetch("/current_user")
return FastlyUser(self, content)
def get_user(self, user_id):
"""Get a specific user."""
content = self._fetch("/user/%s" % user_id)
return FastlyUser(self, content)
def create_user(self, customer_id, name, login, password, role=FastlyRoles.USER, require_new_password=True):
"""Create a user."""
body = self._formdata({
"customer_id": customer_id,
"name": name,
"login": login,
"password": password,
"role": role,
"require_new_password": require_new_password,
}, FastlyUser.FIELDS)
content = self._fetch("/user", method="POST", body=body)
return FastlyUser(self, content)
def update_user(self, user_id, **kwargs):
"""Update a user."""
body = self._formdata(kwargs, FastlyUser.FIELDS)
content = self._fetch("/user/%s" % user_id, method="PUT", body=body)
return FastlyUser(self, content)
def delete_user(self, user_id):
"""Delete a user."""
content = self._fetch("/user/%s" % user_id, method="DELETE")
return self._status(content)
def request_password_reset(self, user_id):
"""Requests a password reset for the specified user."""
content = self._fetch("/user/%s/password/request_reset" % (user_id), method="POST")
return FastlyUser(self, content)
def list_vcls(self, service_id, version_number):
"""List the uploaded VCLs for a particular service and version."""
content = self._fetch("/service/%s/version/%d/vcl" % (service_id, version_number))
return map(lambda x: FastlyVCL(self, x), content)
def upload_vcl(self, service_id, version_number, name, content, main=None, comment=None):
"""Upload a VCL for a particular service and version."""
body = self._formdata({
"name": name,
"content": content,
"comment": comment,
"main": main,
}, FastlyVCL.FIELDS)
content = self._fetch("/service/%s/version/%d/vcl" % (service_id, version_number), method="POST", body=body)
return FastlyVCL(self, content)
def download_vcl(self, service_id, version_number, name):
"""Download the specified VCL."""
# TODO: Not sure what to do here, the documentation shows invalid response. Will have to test.
raise Exception("Not implemented")
def get_vcl(self, service_id, version_number, name, include_content=True):
"""Get the uploaded VCL for a particular service and version."""
content = self._fetch("/service/%s/version/%d/vcl/%s?include_content=%d" % (service_id, version_number, name, int(include_content)))
return FastlyVCL(self, content)
def get_vcl_html(self, service_id, version_number, name):
"""Get the uploaded VCL for a particular service and version with HTML syntax highlighting."""
content = self._fetch("/service/%s/version/%d/vcl/%s/content" % (service_id, version_number, name))
return content.get("content", None)
def get_generated_vcl(self, service_id, version_number):
"""Display the generated VCL for a particular service and version."""
content = self._fetch("/service/%s/version/%d/generated_vcl" % (service_id, version_number))
return FastlyVCL(self, content)
def get_generated_vcl_html(self, service_id, version_number):
"""Display the content of generated VCL with HTML syntax highlighting."""
content = self._fetch("/service/%s/version/%d/generated_vcl/content" % (service_id, version_number))
return content.get("content", None)
def set_main_vcl(self, service_id, version_number, name):
"""Set the specified VCL as the main."""
content = self._fetch("/service/%s/version/%d/vcl/%s/main" % (service_id, version_number, name), method="PUT")
return FastlyVCL(self, content)
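    # Example (illustrative): upload custom VCL and make it the entry point:
    #
    #   vcl_source = open("main.vcl").read()
    #   conn.upload_vcl(service_id, version, "main", vcl_source)
    #   conn.set_main_vcl(service_id, version, "main")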
def update_vcl(self, service_id, version_number, name_key, **kwargs):
"""Update the uploaded VCL for a particular service and version."""
body = self._formdata(kwargs, FastlyVCL.FIELDS)
content = self._fetch("/service/%s/version/%d/vcl/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyVCL(self, content)
def delete_vcl(self, service_id, version_number, name):
"""Delete the uploaded VCL for a particular service and version."""
content = self._fetch("/service/%s/version/%d/vcl/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
def create_version(self, service_id, inherit_service_id=None, comment=None):
"""Create a version for a particular service."""
body = self._formdata({
"service_id": service_id,
"inherit_service_id": inherit_service_id,
"comment": comment,
}, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version" % service_id, method="POST", body=body)
return FastlyVersion(self, content)
    def list_versions(self, service_id):
        """List the versions for a particular service."""
        content = self._fetch("/service/%s/version" % service_id)
return map(lambda x: FastlyVersion(self, x), content)
def get_version(self, service_id, version_number):
"""Get the version for a particular service."""
content = self._fetch("/service/%s/version/%d" % (service_id, version_number))
return FastlyVersion(self, content)
def update_version(self, service_id, version_number, **kwargs):
"""Update a particular version for a particular service."""
body = self._formdata(kwargs, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version/%d/" % (service_id, version_number), method="PUT", body=body)
return FastlyVersion(self, content)
def clone_version(self, service_id, version_number):
"""Clone the current configuration into a new version."""
content = self._fetch("/service/%s/version/%d/clone" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content)
def activate_version(self, service_id, version_number):
"""Activate the current version."""
content = self._fetch("/service/%s/version/%d/activate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content)
def deactivate_version(self, service_id, version_number):
"""Deactivate the current version."""
content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content)
def validate_version(self, service_id, version_number):
"""Validate the version for a particular service and version."""
content = self._fetch("/service/%s/version/%d/validate" % (service_id, version_number))
return self._status(content)
def lock_version(self, service_id, version_number):
"""Locks the specified version."""
content = self._fetch("/service/%s/version/%d/lock" % (service_id, version_number))
return self._status(content)
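    # Typical version lifecycle (illustrative): clone the active version,
    # edit it, validate, then activate:
    #
    #   version = conn.clone_version(service_id, active_number)
    #   conn.create_domain(service_id, int(version.number), "www.example.com")
    #   conn.validate_version(service_id, int(version.number))  # raises on error
    #   conn.activate_version(service_id, int(version.number))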
    def list_wordpresses(self, service_id, version_number):
"""Get all of the wordpresses for a specified service and version."""
content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number))
return map(lambda x: FastlyWordpress(self, x), content)
def create_wordpress(self,
service_id,
version_number,
name,
path,
comment=None):
"""Create a wordpress for the specified service and version."""
body = self._formdata({
"name": name,
"path": path,
"comment": comment,
}, FastlyWordpress.FIELDS)
content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number), method="POST", body=body)
return FastlyWordpress(self, content)
def get_wordpress(self, service_id, version_number, name):
"""Get information on a specific wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name))
return FastlyWordpress(self, content)
def update_wordpress(self, service_id, version_number, name_key, **kwargs):
"""Update a specified wordpress."""
body = self._formdata(kwargs, FastlyWordpress.FIELDS)
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyWordpress(self, content)
def delete_wordpress(self, service_id, version_number, name):
"""Delete a specified wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content)
# TODO: Is this broken?
    def delete_version(self, service_id, version_number):
        """Delete the specified version for a particular service."""
        content = self._fetch("/service/%s/version/%d" % (service_id, version_number), method="DELETE")
return self._status(content)
def _status(self, status):
if not isinstance(status, FastlyStatus):
status = FastlyStatus(self, status)
if status.status != "ok":
raise FastlyError("FastlyError: %s" % status.msg)
return True
    def _formdata(self, fields, valid=None):
        valid = valid or []
        data = {}
        for key in fields.keys():
            if key in valid and fields[key] is not None:
                data[key] = fields[key]
                # The API expects booleans as "0"/"1" form values.
                if isinstance(data[key], bool):
                    data[key] = str(int(data[key]))
        return urllib.urlencode(data)
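    # e.g. _formdata({"name": "origin0", "use_ssl": False, "comment": None},
    #                valid=["name", "use_ssl"])
    # drops the None value and any unlisted keys, coerces the boolean, and
    # returns "name=origin0&use_ssl=0" (pair order may vary).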
def _fetch(self, url, method="GET", body=None, headers={}):
hdrs = {}
hdrs.update(headers)
print "Fetch: %s %s" % (method, url)
if body:
print "Body: %s" % body
if self._fully_authed:
hdrs["Cookie"] = self._session
else:
hdrs["X-Fastly-Key"] = self._api_key
hdrs["Content-Accept"] = "application/json"
if not hdrs.has_key("Content-Type") and method in ["POST", "PUT"]:
hdrs["Content-Type"] = "application/x-www-form-urlencoded"
conn = httplib2.Http(disable_ssl_certificate_validation=True)
endpoint = "%s://%s%s" % (FASTLY_SCHEME, FASTLY_HOST, url)
return self._check(*conn.request(endpoint, method, body=body, headers=hdrs))
def _check(self, resp, content):
status = resp.status
payload = None
if content:
try:
payload = json.loads(content)
except ValueError: # Could not decode, usually HTML
payload = content
if status == 200:
# Keep track of the session. Only really set during /login
if resp.has_key("set-cookie"):
set_cookie = resp["set-cookie"]
match = FASTLY_SESSION_REGEX.search(set_cookie)
if match is not None:
self._session = match.group(1)
return payload
if payload is None:
raise Exception("HTTP Error %d occurred." % status)
elif isinstance(payload, basestring):
raise Exception("HTTP Error %d occurred. { %s }" % (status, payload))
else:
payload["status"] = "error"
status = FastlyStatus(self, payload)
raise FastlyError(status)
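    # Failures surface in two ways (illustrative):
    #
    #   try:
    #       conn.get_service("no-such-service")
    #   except FastlyError as e:      # structured API error payloads
    #       print e
    #   except Exception as e:        # non-JSON HTTP failures
    #       print e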
class IDateStampedObject(object):
@property
def created_date(self):
if hasattr(self, "created_at"):
return self._parse_date(self.created_at)
else:
return self._parse_date(self.created)
@property
def updated_date(self):
if hasattr(self, "updated_at"):
return self._parse_date(self.updated_at)
else:
return self._parse_date(self.updated)
@property
def deleted_date(self):
if hasattr(self, "deleted_at"):
return self._parse_date(self.deleted_at)
else:
return self._parse_date(self.deleted)
class IServiceObject(object):
@property
def service(self):
return self._conn.get_service(self.service_id)
class IServiceVersionObject(IServiceObject):
@property
def service_version(self):
return self._conn.get_service_version(self.service_id, self.version)
class FastlyObject(object):
def __init__(self, conn, data):
self._conn = conn
self._data = data or {}
def __getattr__(self, name):
cls = self.__class__
if name in cls.FIELDS:
return self._data.get(name, None)
        raise AttributeError(name)
def __str__(self):
return str(self._data)
def __repr__(self):
return repr(self._data)
def _parse_date(self, _date):
return datetime.strptime(_date, "%Y-%m-%dT%H:%M:%S+00:00")
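# Fields listed in a subclass's FIELDS are resolved lazily by __getattr__, so
# response attributes read straight from the parsed JSON (illustrative):
#
#   backend = conn.get_backend(service_id, version, "origin0")
#   print backend.address   # _data["address"], or None if the key is absent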
class FastlyStatus(FastlyObject):
FIELDS = [
"msg",
"detail",
"status",
]
class FastlyError(Exception):
def __init__(self, status):
if isinstance(status, FastlyStatus):
Exception.__init__(self, "FastlyError: %s (%s)" % (status.msg, status.detail))
return
Exception.__init__(self, status)
class FastlySession(FastlyObject):
FIELDS = []
@property
def customer(self):
return FastlyCustomer(self._conn, self._data["customer"])
@property
def user(self):
return FastlyUser(self._conn, self._data["user"])
class FastlyBackend(FastlyObject, IServiceVersionObject):
"""A Backend is an address (ip or domain) from which Fastly pulls content. There can be multiple Backends for a Service."""
FIELDS = [
"service_id",
"version",
"name",
"address",
"port",
"use_ssl",
"connect_timeout",
"first_byte_timeout",
"between_bytes_timeout",
"error_threshold",
"max_conn",
"weight",
"auto_loadbalance",
"shield",
"request_condition",
"healthcheck",
"comment",
]
    @property
    def healthcheck(self):
        # Read the raw field from _data; referencing self.healthcheck here
        # would re-enter this property and recurse indefinitely.
        return self._conn.get_healthcheck(self.service_id, self.version, self._data.get("healthcheck"))
class FastlyCacheSettings(FastlyObject, IServiceVersionObject):
"""Controls how caching is performed on Fastly. When used in conjunction with Conditions the Cache Settings provide you with fine grain control over how long content persists in the cache."""
FIELDS = [
"service_id",
"version",
"name",
"action",
"ttl",
"stale_ttl",
"cache_condition",
]
class FastlyCondition(FastlyObject, IServiceVersionObject):
"""Conditions are used to control when and how other objects are used in a service configuration. They contain a statement that evaluates to either true or false and is used to determine whether the condition is met.
    Depending on the type of the condition, the statement field can make reference to the Varnish variables req, resp, and/or beresp."""
FIELDS = [
"name",
"service_id",
"version",
"type",
"statement",
"priority",
]
class FastlyCustomer(FastlyObject, IDateStampedObject):
"""A Customer is the base object which owns your Users and Services."""
FIELDS = [
"can_configure_wordpress",
"can_edit_matches",
"name",
"created_at",
"updated_at",
"can_stream_syslog",
"id",
"pricing_plan",
"can_upload_vcl",
"has_config_panel",
"raw_api_key",
"has_billing_panel",
"can_reset_passwords",
"owner_id",
]
@property
def owner(self):
return self._conn.get_user(self.owner_id)
class FastlyDirector(FastlyObject, IServiceVersionObject, IDateStampedObject):
"""A Director is responsible for balancing requests among a group of Backends. In addition to simply balancing, Directors can be configured to attempt retrying failed requests. Additionally, Directors have a quorum setting which can be used to determine when the Director as a whole is considered "up", in order to prevent "server whack-a-mole" following an outage as servers come back up."""
FIELDS = [
"name",
"service_id",
"version",
"quorum",
"type",
"retries",
"shield",
"created",
"updated",
"deleted",
"capacity",
"comment",
]
class FastlyDirectorBackend(FastlyObject, IServiceVersionObject, IDateStampedObject):
"""Maps and relates backends as belonging to directors. Backends can belong to any number of directors but directors can only hold one reference to a specific backend."""
FIELDS = [
"service_id",
"version",
"director",
"backend",
"created",
"updated",
"deleted",
]
class FastlyDomain(FastlyObject, IServiceVersionObject):
"""A Domain represents the domain name through which visitors will retrieve content. There can be multiple Domains for a Service."""
FIELDS = [
"name",
"comment",
"service_id",
"version",
]
class FastlyDomainCheck(FastlyObject):
@property
def domain(self):
return FastlyDomain(self._conn, self._data[0])
@property
def cname(self):
return self._data[1]
@property
def success(self):
return self._data[2]
class FastlyEventLog(FastlyObject):
"""EventLogs keep track of things that occur within your services or organization. Currently we track events such as activation and deactivation of Versions and mass purges. In the future we intend to track more events and let you trigger EventLog creation as well."""
FIELDS = [
"object_type",
"id",
"message",
"details",
"level",
"timestamp",
"system",
"subsystem",
]
class FastlyHeader(FastlyObject, IServiceVersionObject):
"""Header objects are used to add, modify, or delete headers from requests and responses. The header content can be simple strings or be derived from variables inside Varnish. Regular expressions can be used to customize the headers even further."""
FIELDS = [
"name",
"service_id",
"version",
"dst",
"src",
"type",
"action",
"regex",
"substitution",
"ignore_if_set",
"priority",
"response_condition",
"request_condition",
"cache_condition",
]
class FastlyHealthCheck(FastlyObject, IServiceVersionObject):
"""Healthchecks are used to customize the way Fastly checks on your Backends. Only Backends that have successful Healthchecks will be sent traffic, thus assuring that the failure of one server does not affect visitors."""
FIELDS = [
"service_id",
"version",
"name",
"method",
"host",
"path",
"http_version",
"timeout",
"check_interval",
"expected_response",
"window",
"threshold",
"initial",
"comment",
]
class FastlyPurge(FastlyObject):
"""Purging removes content from Fastly so it can be refreshed from your origin servers."""
FIELDS = [
"status",
"id",
]
class FastlyPurgeStatus(FastlyObject):
"""The status of a given purge request."""
FIELDS = [
"timestamp",
"server",
]
class FastlyRequestSetting(FastlyObject, IServiceVersionObject):
"""Settings used to customize Fastly's request handling. When used with Conditions the Request Settings object allows you to fine tune how specific types of requests are handled."""
FIELDS = [
"service_id",
"version",
"name",
"default_host",
"force_miss",
"force_ssl",
"action",
"bypass_busy_wait",
"max_stale_age",
"hash_keys",
"xff",
"timer_support",
"geo_headers",
"request_condition",
]
class FastlyResponseObject(FastlyObject, IServiceVersionObject):
"""Allows you to create synthetic responses that exist entirely on the varnish machine. Useful for creating error or maintainence pages that exists outside the scope of your datacenter. Best when used with Condition objects."""
FIELDS = [
"name",
"service_id",
"version",
"status",
"response",
"content",
"cache_condition",
"request_condition",
]
class FastlyService(FastlyObject):
"""A Service represents the configuration for a website, app, api, or anything else to be served through Fastly. A Service can have many Versions, through which Backends, Domains, and more can be configured."""
    FIELDS = [
        "id",
        "name",
        "customer_id",
        "publish_key",
        "active_version",
        "versions",
        "comment",
    ]
    @property
    def active_version(self):
        # "versions" arrives as raw JSON (it may be a dict keyed by version
        # number or a plain list); normalize and wrap each entry so the
        # "active" flag can be read uniformly.
        versions = self.versions or {}
        if isinstance(versions, dict):
            versions = versions.values()
        for data in versions:
            version = FastlyVersion(self._conn, data)
            if version.active:
                return version
        return None
class FastlySettings(FastlyObject, IServiceVersionObject):
"""Handles default settings for a particular version of a service."""
FIELDS = [
"service_id",
"version",
]
class FastlySyslog(FastlyObject, IServiceVersionObject, IDateStampedObject):
"""Fastly will stream log messages to the location, and in the format, specified in the Syslog object."""
FIELDS = [
"name",
"service_id",
"version",
"address",
"port",
"use_tls",
"tls_ca_cert",
"token",
"format",
"response_condition",
"created",
"updated",
"deleted",
]
class FastlyUser(FastlyObject, IDateStampedObject):
FIELDS = [
"name",
"created_at",
"updated_at",
"role",
"id",
"email_hash",
"customer_id",
"require_new_password",
"login",
]
@property
def customer(self):
return self._conn.get_customer(self.customer_id)
class FastlyVCL(FastlyObject, IServiceVersionObject):
"""A VCL is a Varnish configuration file used to customize the configuration for a Service."""
FIELDS = [
"name",
"service_id",
"version",
"generation",
"md5",
"content",
"main",
"vcl",
]
class FastlyVersion(FastlyObject, IServiceObject, IDateStampedObject):
"""A Version represents a specific instance of the configuration for a Service. A Version can be cloned, locked, activated, or deactivated."""
FIELDS = [
"comment",
"staging",
"locked",
"created_at",
"testing",
"number",
"updated_at",
"active",
"service_id",
"deleted_at",
"deployed",
"inherit_service_id",
]
    @property
    def settings(self):
        # FastlyConnection defines get_settings(), not
        # get_service_version_settings(); fetch and return the raw settings dict.
        result = self._conn.get_settings(self.service_id, int(self.number))
        return result._data if result else {}
@property
def backends(self):
return dict([ (b.name, b) for b in self._conn.list_backends(self.service_id, int(self.number))])
@property
def healthchecks(self):
return dict([ (h.name, h) for h in self._conn.list_healthchecks(self.service_id, int(self.number))])
@property
def domains(self):
return dict([ (d.name, d) for d in self._conn.list_domains(self.service_id, int(self.number))])
@property
def directors(self):
return dict([ (d.name, d) for d in self._conn.list_directors(self.service_id, int(self.number))])
    @property
    def origins(self):
        # TODO: FastlyConnection does not define list_origins(); this property
        # raises AttributeError until such a method exists.
        return dict([(o.name, o) for o in self._conn.list_origins(self.service_id, int(self.number))])
@property
def syslogs(self):
return dict([ (s.name, s) for s in self._conn.list_syslogs(self.service_id, int(self.number))])
@property
def vcls(self):
return dict([ (v.name, v) for v in self._conn.list_vcls(self.service_id, int(self.number))])
class FastlyWordpress(FastlyObject, IServiceVersionObject):
"""The Wordpress object applies configuration optimized for Wordpress to a given path."""
FIELDS = [
"service_id",
"version",
"name",
"path",
"comment",
]
def connect(api_key, username=None, password=None):
conn = FastlyConnection(api_key)
if username is not None and password is not None:
conn.login(username, password)
return conn
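# Example usage (illustrative; requires a real Fastly API key):
#
#   conn = connect("MY_FASTLY_API_KEY")
#   for service in conn.list_services():
#       print service.name
#
# Account-level endpoints additionally need a username/password login:
#
#   conn = connect("MY_FASTLY_API_KEY", username="me@example.com",
#                  password="s3cret")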