# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services to operate on app feedback report app_feedback_report_models."""
from __future__ import annotations
from core import feconf
from core import utils
from core.domain import app_feedback_report_constants
from core.domain import app_feedback_report_domain
from core.platform import models
from typing import Any, Dict, List, Optional, cast
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import app_feedback_report_models
from mypy_imports import transaction_services
(app_feedback_report_models,) = models.Registry.import_models(
[models.NAMES.app_feedback_report])
transaction_services = models.Registry.import_transaction_services()
PLATFORM_ANDROID = app_feedback_report_constants.PLATFORM_CHOICE_ANDROID
PLATFORM_WEB = app_feedback_report_constants.PLATFORM_CHOICE_WEB
def get_report_models(
report_ids: List[str]
) -> List[Optional[app_feedback_report_models.AppFeedbackReportModel]]:
"""Fetches and returns the AppFeedbackReportModels with the given ids.
Args:
report_ids: list(str). The ids for the models to fetch.
Returns:
list(AppFeedbackReportModel|None). A list of models corresponding to the
requested reports, with None for any id that has no stored model.
"""
return (
app_feedback_report_models.AppFeedbackReportModel.get_multi(report_ids))
def create_report_from_json(
report_json: Dict[str, Any]
) -> app_feedback_report_domain.AppFeedbackReport:
"""Creates an AppFeedbackReport domain object instance from the incoming
JSON request.
Args:
report_json: dict. The JSON for the app feedback report.
Returns:
AppFeedbackReport. The domain object for an Android feedback report.
"""
return app_feedback_report_domain.AppFeedbackReport.from_dict(report_json)
def store_incoming_report_stats(
report_obj: app_feedback_report_domain.AppFeedbackReport
) -> None:
"""Adds a new report's stats to the aggregate stats model.
Args:
report_obj: AppFeedbackReport. AppFeedbackReport domain object.
"""
if report_obj.platform == PLATFORM_WEB:
raise NotImplementedError(
'Stats aggregation for incoming web reports has not been '
'implemented yet.')
platform = PLATFORM_ANDROID
unticketed_id = (
app_feedback_report_constants.UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID) # pylint: disable=line-too-long
all_reports_id = (
app_feedback_report_constants.ALL_ANDROID_REPORTS_STATS_TICKET_ID)
stats_date = report_obj.submitted_on_timestamp.date()
_update_report_stats_model_in_transaction(
unticketed_id, platform, stats_date, report_obj, 1)
_update_report_stats_model_in_transaction(
all_reports_id, platform, stats_date, report_obj, 1)
@transaction_services.run_in_transaction_wrapper
def _update_report_stats_model_in_transaction( # type: ignore[no-untyped-def]
ticket_id, platform, date, report_obj, delta):
"""Adds a new report's stats to the stats model for a specific ticket's
stats. Note that this currently only supports Android reports.
Args:
ticket_id: str. The id of the ticket that we want to update stats for.
platform: str. The platform of the report being aggregated.
date: datetime.date. The date of the stats.
report_obj: AppFeedbackReport. AppFeedbackReport domain object.
delta: int. The amount to increment the stats by, depending on if the
report is added or removed from the model.
"""
# The stats we want to aggregate on.
report_type = report_obj.user_supplied_feedback.report_type.name
country_locale_code = (
report_obj.device_system_context.device_country_locale_code)
entry_point_name = report_obj.app_context.entry_point.entry_point_name
text_language_code = report_obj.app_context.text_language_code
audio_language_code = report_obj.app_context.audio_language_code
# All the keys in the stats dict must be strings. Note that the following
# parameters are only aggregated for Android reports.
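# Reassigning __class__ below acts as a runtime cast so that the
# Android-only fields (sdk_version, version_name) accessed next are
# recognized; only Android reports reach this point, since web reports
# raise NotImplementedError earlier.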
report_obj.device_system_context.__class__ = (
app_feedback_report_domain.AndroidDeviceSystemContext)
sdk_version = str(report_obj.device_system_context.sdk_version)
version_name = report_obj.device_system_context.version_name
stats_id = (
app_feedback_report_models.AppFeedbackReportStatsModel.calculate_id(
platform, ticket_id, date))
stats_model = (
app_feedback_report_models.AppFeedbackReportStatsModel.get_by_id(
stats_id))
stats_parameter_names = (
app_feedback_report_constants.STATS_PARAMETER_NAMES)
if stats_model is None:
assert delta > 0
# Create new stats model entity. These are the individual report fields
# that we will want to splice aggregate stats by and they will each have
# a count of 1 since this is the first report added for this entity.
stats_dict = {
stats_parameter_names.report_type.name: {
report_type: 1
},
stats_parameter_names.country_locale_code.name: {
country_locale_code: 1
},
stats_parameter_names.entry_point_name.name: {
entry_point_name: 1
},
stats_parameter_names.text_language_code.name: {
text_language_code: 1
},
stats_parameter_names.audio_language_code.name: {
audio_language_code: 1
},
stats_parameter_names.android_sdk_version.name: {
sdk_version: 1
},
stats_parameter_names.version_name.name: {
version_name: 1
}
}
app_feedback_report_models.AppFeedbackReportStatsModel.create(
stats_id, platform, ticket_id, date, 0, stats_dict)
stats_model = (
app_feedback_report_models.AppFeedbackReportStatsModel.get_by_id(
stats_id))
else:
# Update existing stats model.
stats_dict = stats_model.daily_param_stats
stats_dict[
stats_parameter_names.report_type.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.report_type.name],
report_type, delta))
stats_dict[
stats_parameter_names.country_locale_code.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.country_locale_code.name],
country_locale_code, delta))
stats_dict[
stats_parameter_names.entry_point_name.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.entry_point_name.name],
entry_point_name, delta))
stats_dict[
stats_parameter_names.audio_language_code.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.audio_language_code.name],
audio_language_code, delta))
stats_dict[
stats_parameter_names.text_language_code.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.text_language_code.name],
text_language_code, delta))
stats_dict[
stats_parameter_names.android_sdk_version.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.android_sdk_version.name],
sdk_version, delta))
stats_dict[
stats_parameter_names.version_name.name] = (
calculate_new_stats_count_for_parameter(
stats_dict[
stats_parameter_names.version_name.name],
version_name, delta))
stats_model.daily_param_stats = stats_dict
stats_model.total_reports_submitted += delta
stats_model.update_timestamps()
stats_model.put()
def calculate_new_stats_count_for_parameter(
current_stats_map: Dict[str, int],
current_value: str,
delta: int
) -> Dict[Any, int]:
"""Helper to increment or initialize the stats count for a parameter.
Args:
current_stats_map: dict. The current stats map for the parameter we are
updating; keys correspond to the possible value for a single
parameter.
current_value: str. The value for the parameter that we are updating
the stats of.
delta: int. The amount to increment the current count by, either -1 or
+1.
Returns:
dict. The new stats values for the given parameter.
"""
if current_value in current_stats_map:
current_stats_map[current_value] += delta
else:
# The stats did not previously have this parameter value.
if delta < 0:
raise utils.InvalidInputException(
'Cannot decrement a count for a parameter value that does not '
'exist for this stats model.')
# Update the stats so that it now contains this new value.
current_stats_map[current_value] = 1
return current_stats_map
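# Illustrative behaviour of the helper above (values are examples):
#   calculate_new_stats_count_for_parameter({'en': 2}, 'en', 1)   -> {'en': 3}
#   calculate_new_stats_count_for_parameter({'en': 2}, 'fr', 1)   -> {'en': 2, 'fr': 1}
#   calculate_new_stats_count_for_parameter({'en': 2}, 'fr', -1)  -> raises
#       utils.InvalidInputException, since 'fr' has no existing count.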
def get_report_from_model(
report_model: app_feedback_report_models.AppFeedbackReportModel
) -> app_feedback_report_domain.AppFeedbackReport:
"""Create and return a domain object AppFeedbackReport given a model loaded
from the the data.
Args:
report_model: AppFeedbackReportModel. The model loaded from the
datastore.
Returns:
AppFeedbackReport. An AppFeedbackReport domain object corresponding to
the given model.
Raises:
NotImplementedError. The web report domain object needs to be
implemented.
"""
if report_model.platform == PLATFORM_ANDROID:
return get_android_report_from_model(report_model)
else:
raise NotImplementedError(
'Web app feedback report domain objects must be defined.')
def get_ticket_from_model(
ticket_model: app_feedback_report_models.AppFeedbackReportTicketModel
) -> app_feedback_report_domain.AppFeedbackReportTicket:
"""Create and return a domain object AppFeedbackReportTicket given a model
loaded from the the data.
Args:
ticket_model: AppFeedbackReportTicketModel. The model loaded from the
datastore.
Returns:
AppFeedbackReportTicket. An AppFeedbackReportTicket domain object
corresponding to the given model.
"""
return app_feedback_report_domain.AppFeedbackReportTicket(
ticket_model.id, ticket_model.ticket_name, ticket_model.platform,
ticket_model.github_issue_repo_name, ticket_model.github_issue_number,
ticket_model.archived, ticket_model.newest_report_timestamp,
ticket_model.report_ids)
def get_stats_from_model(
stats_model: app_feedback_report_models.AppFeedbackReportStatsModel
) -> app_feedback_report_domain.AppFeedbackReportDailyStats:
"""Create and return a domain object AppFeedbackReportDailyStats given a
model loaded from the the storage.
Args:
stats_model: AppFeedbackReportStatsModel. The model loaded from the
datastore.
Returns:
AppFeedbackReportDailyStats. An AppFeedbackReportDailyStats domain
object corresponding to the given model.
"""
ticket_model = (
app_feedback_report_models.AppFeedbackReportTicketModel.get_by_id(
stats_model.ticket_id))
ticket_obj = get_ticket_from_model(ticket_model)
param_stats = create_app_daily_stats_from_model_json(
stats_model.daily_param_stats)
return app_feedback_report_domain.AppFeedbackReportDailyStats(
stats_model.id, ticket_obj, stats_model.platform,
stats_model.stats_tracking_date, stats_model.total_reports_submitted,
param_stats)
def create_app_daily_stats_from_model_json(
daily_param_stats: Dict[str, Dict[str, int]]
) -> Dict[str, app_feedback_report_domain.ReportStatsParameterValueCounts]:
"""Create and return a dict representing the AppFeedbackReportDailyStats
domain object's daily_param_stats.
Args:
daily_param_stats: dict. The stats data from the model.
Returns:
dict. A dict mapping param field names to
ReportStatsParameterValueCounts domain objects.
"""
stats_dict = {}
for (stats_name, stats_values_dict) in daily_param_stats.items():
# For each parameter possible, create a
# ReportStatsParameterValueCounts domain object of possible parameter
# values and number of reports with that value.
counts_obj = (
app_feedback_report_domain.ReportStatsParameterValueCounts(
stats_values_dict))
stats_dict[stats_name] = counts_obj
return stats_dict
def get_android_report_from_model(
android_report_model: app_feedback_report_models.AppFeedbackReportModel
) -> app_feedback_report_domain.AppFeedbackReport:
"""Creates a domain object that represents an Android feedback report from
the given model.
Args:
android_report_model: AppFeedbackReportModel. The model to convert to a
domain object.
Returns:
AppFeedbackReport. The corresponding AppFeedbackReport domain object.
"""
feedback_report = app_feedback_report_domain.AppFeedbackReport
if android_report_model.android_report_info_schema_version < (
feconf.CURRENT_ANDROID_REPORT_SCHEMA_VERSION):
raise NotImplementedError(
'Android app feedback report migrations must be added for new '
'report schemas implemented.')
report_info_dict = android_report_model.android_report_info
user_supplied_feedback = app_feedback_report_domain.UserSuppliedFeedback(
feedback_report.get_report_type_from_string(
android_report_model.report_type),
feedback_report.get_category_from_string(
android_report_model.category),
report_info_dict['user_feedback_selected_items'],
report_info_dict['user_feedback_other_text_input'])
device_system_context = (
app_feedback_report_domain.AndroidDeviceSystemContext(
android_report_model.platform_version,
report_info_dict['package_version_code'],
android_report_model.android_device_country_locale_code,
report_info_dict['android_device_language_locale_code'],
android_report_model.android_device_model,
android_report_model.android_sdk_version,
report_info_dict['build_fingerprint'],
feedback_report.get_android_network_type_from_string(
report_info_dict['network_type'])))
entry_point = feedback_report.get_entry_point_from_json(
{
'entry_point_name': android_report_model.entry_point,
'entry_point_topic_id': android_report_model.entry_point_topic_id,
'entry_point_story_id': android_report_model.entry_point_story_id,
'entry_point_exploration_id': (
android_report_model.entry_point_exploration_id),
'entry_point_subtopic_id': (
android_report_model.entry_point_subtopic_id)
})
app_context = app_feedback_report_domain.AndroidAppContext(
entry_point, android_report_model.text_language_code,
android_report_model.audio_language_code,
feedback_report.get_android_text_size_from_string(
report_info_dict['text_size']),
report_info_dict['only_allows_wifi_download_and_update'],
report_info_dict['automatically_update_topics'],
report_info_dict['account_is_profile_admin'],
report_info_dict['event_logs'], report_info_dict['logcat_logs'])
return app_feedback_report_domain.AppFeedbackReport(
android_report_model.id,
android_report_model.android_report_info_schema_version,
android_report_model.platform, android_report_model.submitted_on,
android_report_model.local_timezone_offset_hrs,
android_report_model.ticket_id, android_report_model.scrubbed_by,
user_supplied_feedback, device_system_context, app_context)
def scrub_all_unscrubbed_expiring_reports(scrubbed_by: str) -> None:
"""Fetches the reports that are expiring and must be scrubbed.
Args:
scrubbed_by: str. The ID of the user initiating scrubbing or
feconf.APP_FEEDBACK_REPORT_SCRUBBER_BOT_ID if scrubbed by the cron
job.
"""
reports_to_scrub = get_all_expiring_reports_to_scrub()
for report in reports_to_scrub:
scrub_single_app_feedback_report(report, scrubbed_by)
def get_all_expiring_reports_to_scrub() -> List[
app_feedback_report_domain.AppFeedbackReport]:
"""Fetches the reports that are expiring and must be scrubbed.
Returns:
list(AppFeedbackReport). The list of AppFeedbackReport domain
objects that need to be scrubbed.
"""
model_class = app_feedback_report_models.AppFeedbackReportModel
model_entities = model_class.get_all_unscrubbed_expiring_report_models()
return [
get_report_from_model(model_entity) for model_entity in model_entities]
def scrub_single_app_feedback_report(
report: app_feedback_report_domain.AppFeedbackReport,
scrubbed_by: str
) -> None:
"""Scrubs the instance of AppFeedbackReportModel with given ID, removing
any user-entered input in the entity.
Args:
report: AppFeedbackReport. The domain object of the report to scrub.
scrubbed_by: str. The id of the user that is initiating scrubbing of
this report, or a constant
feconf.APP_FEEDBACK_REPORT_SCRUBBER_BOT_ID if scrubbed by the cron
job.
"""
report.scrubbed_by = scrubbed_by
report.user_supplied_feedback.user_feedback_other_text_input = ''
if report.platform == PLATFORM_ANDROID:
report.app_context = cast(
app_feedback_report_domain.AndroidAppContext, report.app_context)
report.app_context.event_logs = []
report.app_context.logcat_logs = []
save_feedback_report_to_storage(report)
def save_feedback_report_to_storage(
report: app_feedback_report_domain.AppFeedbackReport,
new_incoming_report: bool=False
) -> None:
"""Saves the AppFeedbackReport domain object to persistent storage.
Args:
report: AppFeedbackReport. The domain object of the report to save.
new_incoming_report: bool. Whether the report is a new incoming report
that does not have a corresponding model entity.
"""
if report.platform == PLATFORM_WEB:
raise utils.InvalidInputException(
'Web report domain objects have not been defined.')
report.validate()
user_supplied_feedback = report.user_supplied_feedback
device_system_context = cast(
app_feedback_report_domain.AndroidDeviceSystemContext,
report.device_system_context)
app_context = cast(
app_feedback_report_domain.AndroidAppContext, report.app_context)
entry_point = app_context.entry_point
report_info_json = {
'user_feedback_selected_items': (
user_supplied_feedback.user_feedback_selected_items),
'user_feedback_other_text_input': (
user_supplied_feedback.user_feedback_other_text_input),
'event_logs': app_context.event_logs,
'logcat_logs': app_context.logcat_logs,
'package_version_code': str(device_system_context.package_version_code),
'android_device_language_locale_code': (
device_system_context.device_language_locale_code),
'build_fingerprint': device_system_context.build_fingerprint,
'network_type': device_system_context.network_type.name,
'text_size': app_context.text_size.name,
'only_allows_wifi_download_and_update': str(
app_context.only_allows_wifi_download_and_update),
'automatically_update_topics': str(
app_context.automatically_update_topics),
'account_is_profile_admin': str(app_context.account_is_profile_admin)
}
if new_incoming_report:
app_feedback_report_models.AppFeedbackReportModel.create(
report.report_id, report.platform,
report.submitted_on_timestamp,
report.local_timezone_offset_hrs,
user_supplied_feedback.report_type.name,
user_supplied_feedback.category.name,
device_system_context.version_name,
device_system_context.device_country_locale_code,
device_system_context.sdk_version,
device_system_context.device_model,
entry_point.entry_point_name, entry_point.topic_id,
entry_point.story_id, entry_point.exploration_id,
entry_point.subtopic_id, app_context.text_language_code,
app_context.audio_language_code, None, None)
model_entity = app_feedback_report_models.AppFeedbackReportModel.get_by_id(
report.report_id)
model_entity.android_report_info = report_info_json
model_entity.ticket_id = report.ticket_id
model_entity.scrubbed_by = report.scrubbed_by
model_entity.update_timestamps()
model_entity.put()
def get_all_filter_options() -> List[
app_feedback_report_domain.AppFeedbackReportFilter]:
"""Fetches all the possible values that moderators can filter reports or
tickets by.
Returns:
list(AppFeedbackReportFilter). A list of filters and the possible values
they can have.
"""
filter_list = []
model_class = app_feedback_report_models.AppFeedbackReportModel
for filter_field in app_feedback_report_constants.ALLOWED_FILTERS:
filter_values = model_class.get_filter_options_for_field(filter_field)
filter_list.append(app_feedback_report_domain.AppFeedbackReportFilter(
filter_field, filter_values))
return filter_list
def reassign_ticket(
report: app_feedback_report_domain.AppFeedbackReport,
new_ticket: Optional[app_feedback_report_domain.AppFeedbackReportTicket]
) -> None:
"""Reassign the ticket the report is associated with.
Args:
report: AppFeedbackReport. The report being assigned to a new ticket.
new_ticket: AppFeedbackReportTicket|None. The ticket domain object to
reassign the report to, or None if removing the report from a ticket
without reassigning it.
"""
if report.platform == PLATFORM_WEB:
raise NotImplementedError(
'Assigning web reports to tickets has not been implemented yet.')
platform = report.platform
stats_date = report.submitted_on_timestamp.date()
# Remove the report from the stats model associated with the old ticket.
old_ticket_id = report.ticket_id
if old_ticket_id is None:
_update_report_stats_model_in_transaction(
app_feedback_report_constants.UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID, # pylint: disable=line-too-long
platform, stats_date, report, -1)
else:
# The report was ticketed, so it needs to be removed from its old
# ticket in storage.
old_ticket_model = (
app_feedback_report_models.AppFeedbackReportTicketModel.get_by_id(
old_ticket_id))
if old_ticket_model is None:
raise utils.InvalidInputException(
'The report is being removed from an invalid ticket id: %s.'
% old_ticket_id)
old_ticket_obj = get_ticket_from_model(old_ticket_model)
old_ticket_obj.reports.remove(report.report_id)
if len(old_ticket_obj.reports) == 0:
# We are removing the only report associated with this ticket.
old_ticket_obj.newest_report_creation_timestamp = None # type: ignore[assignment]
else:
if old_ticket_obj.newest_report_creation_timestamp == (
report.submitted_on_timestamp):
# Update the newest report timestamp.
optional_report_models = get_report_models(
old_ticket_obj.reports)
report_models = cast(
List[app_feedback_report_models.AppFeedbackReportModel],
optional_report_models)
latest_timestamp = report_models[0].submitted_on
for index in range(1, len(report_models)):
if report_models[index].submitted_on > (
latest_timestamp):
latest_timestamp = (
report_models[index].submitted_on)
old_ticket_obj.newest_report_creation_timestamp = (
latest_timestamp)
_save_ticket(old_ticket_obj)
_update_report_stats_model_in_transaction(
old_ticket_id, platform, stats_date, report, -1)
# Add the report to the new ticket.
new_ticket_id = (
app_feedback_report_constants.UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID) # pylint: disable=line-too-long
if new_ticket is not None:
new_ticket_id = new_ticket.ticket_id
new_ticket_model = (
app_feedback_report_models.AppFeedbackReportTicketModel.get_by_id(
new_ticket_id))
new_ticket_obj = get_ticket_from_model(new_ticket_model)
new_ticket_obj.reports.append(report.report_id)
if report.submitted_on_timestamp > (
new_ticket_obj.newest_report_creation_timestamp):
new_ticket_obj.newest_report_creation_timestamp = (
report.submitted_on_timestamp)
_save_ticket(new_ticket_obj)
# Update the stats model for the new ticket.
platform = report.platform
stats_date = report.submitted_on_timestamp.date()
_update_report_stats_model_in_transaction(
new_ticket_id, platform, stats_date, report, 1)
# Update the report model to the new ticket id.
report.ticket_id = new_ticket_id
save_feedback_report_to_storage(report)
def edit_ticket_name(
ticket: app_feedback_report_domain.AppFeedbackReportTicket,
new_name: str
) -> None:
"""Updates the ticket name.
Args:
ticket: AppFeedbackReportTicket. The domain object for a ticket.
new_name: str. The new name to assign the ticket.
"""
ticket.ticket_name = new_name
_save_ticket(ticket)
def _save_ticket(
ticket: app_feedback_report_domain.AppFeedbackReportTicket
) -> None:
"""Saves the ticket to persistent storage.
Args:
ticket: AppFeedbackReportTicket. The domain object to save to storage.
"""
model_class = app_feedback_report_models.AppFeedbackReportTicketModel
ticket_model = model_class.get_by_id(ticket.ticket_id)
ticket_model.ticket_name = ticket.ticket_name
ticket_model.platform = ticket.platform
ticket_model.github_issue_repo_name = ticket.github_issue_repo_name
ticket_model.github_issue_number = ticket.github_issue_number
ticket_model.archived = ticket.archived
ticket_model.newest_report_timestamp = (
ticket.newest_report_creation_timestamp)
ticket_model.report_ids = ticket.reports
ticket_model.update_timestamps()
ticket_model.put()
# ---------------------------------------------------------------------------
import itertools
import json
import re
import subprocess
import time
from datetime import timedelta
import requests
from celery.exceptions import SoftTimeLimitExceeded
from celery.utils.log import get_task_logger
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.utils import timezone
from polymorphic.models import PolymorphicModel
from ..alert import AlertPluginUserData, send_alert, send_alert_update
from ..calendar import get_events
from ..graphite import parse_metric
from ..tasks import update_instance, update_service
RAW_DATA_LIMIT = 5000
logger = get_task_logger(__name__)
CHECK_TYPES = (
('>', 'Greater than'),
('>=', 'Greater than or equal'),
('<', 'Less than'),
('<=', 'Less than or equal'),
('==', 'Equal to'),
)
def serialize_recent_results(recent_results):
if not recent_results:
return ''
def result_to_value(result):
if result.succeeded:
return '1'
else:
return '-1'
vals = [result_to_value(r) for r in recent_results]
vals.reverse()
return ','.join(vals)
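# Example: results ordered newest-first [passing, failing] serialize to
# "-1,1", i.e. the string reads oldest result first.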
def calculate_debounced_passing(recent_results, debounce=0):
"""
`debounce` is the number of consecutive previous failures required (not
including the current one) before a check is marked as failing.
Returns:
True if passing given debounce factor
False if failing
"""
if not recent_results:
return True
debounce_window = recent_results[:debounce + 1]
for r in debounce_window:
if r.succeeded:
return True
return False
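# Example with debounce=1 (results ordered newest-first): [fail, fail, pass]
# -> False, since both results in the debounce window failed; [fail, pass]
# -> True, as one pass within the window keeps the check passing.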
def get_custom_check_plugins():
custom_check_types = []
check_subclasses = StatusCheck.__subclasses__()
# Checks that aren't using the plugin system
legacy_checks = [
"JenkinsStatusCheck",
"HttpStatusCheck",
"ICMPStatusCheck",
"GraphiteStatusCheck",
]
for check in check_subclasses:
if check.__name__ in legacy_checks:
continue
check_name = check.check_name
custom_check = {}
custom_check['creation_url'] = "create-" + check_name + "-check"
custom_check['check_name'] = check_name
custom_check['icon_class'] = getattr(check, "icon_class", "glyphicon-ok")
custom_check['objects'] = check.objects
custom_check_types.append(custom_check)
return custom_check_types
class CheckGroupMixin(models.Model):
class Meta:
abstract = True
PASSING_STATUS = 'PASSING'
WARNING_STATUS = 'WARNING'
ERROR_STATUS = 'ERROR'
CRITICAL_STATUS = 'CRITICAL'
CALCULATED_PASSING_STATUS = 'passing'
CALCULATED_INTERMITTENT_STATUS = 'intermittent'
CALCULATED_FAILING_STATUS = 'failing'
STATUSES = (
(CALCULATED_PASSING_STATUS, CALCULATED_PASSING_STATUS),
(CALCULATED_INTERMITTENT_STATUS, CALCULATED_INTERMITTENT_STATUS),
(CALCULATED_FAILING_STATUS, CALCULATED_FAILING_STATUS),
)
IMPORTANCES = (
(WARNING_STATUS, 'Warning'),
(ERROR_STATUS, 'Error'),
(CRITICAL_STATUS, 'Critical'),
)
name = models.TextField()
users_to_notify = models.ManyToManyField(
User,
blank=True,
help_text='Users who should receive alerts.',
)
alerts_enabled = models.BooleanField(
default=True,
help_text='Alert when this service is not healthy.',
)
status_checks = models.ManyToManyField(
'StatusCheck',
blank=True,
help_text='Checks used to calculate service status.',
)
last_alert_sent = models.DateTimeField(
null=True,
blank=True,
)
alerts = models.ManyToManyField(
'AlertPlugin',
blank=True,
help_text='Alerts channels through which you wish to be notified'
)
email_alert = models.BooleanField(default=False)
hipchat_alert = models.BooleanField(default=True)
sms_alert = models.BooleanField(default=False)
telephone_alert = models.BooleanField(
default=False,
help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.',
)
overall_status = models.TextField(default=PASSING_STATUS)
old_overall_status = models.TextField(default=PASSING_STATUS)
hackpad_id = models.TextField(
null=True,
blank=True,
verbose_name='Embedded recovery instructions',
help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. '
'https://you.hackpad.com/some_document.js'
)
runbook_link = models.TextField(
blank=True,
help_text='Link to the service runbook on your wiki.'
)
def __unicode__(self):
return self.name
def most_severe(self, check_list):
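# Returns the worst importance present in check_list, e.g. a mix of
# WARNING and CRITICAL checks yields CRITICAL_STATUS; an empty list
# yields PASSING_STATUS.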
failures = [c.importance for c in check_list]
if self.CRITICAL_STATUS in failures:
return self.CRITICAL_STATUS
if self.ERROR_STATUS in failures:
return self.ERROR_STATUS
if self.WARNING_STATUS in failures:
return self.WARNING_STATUS
return self.PASSING_STATUS
@property
def is_critical(self):
"""
Broken out separately because it's easy to get wrong.
"""
if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS:
return True
return False
def alert(self):
if not self.alerts_enabled:
return
if self.overall_status != self.PASSING_STATUS:
# Don't alert every time
if self.overall_status == self.WARNING_STATUS:
if self.last_alert_sent and (
timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent:
return
elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS):
more_important = self.old_overall_status == self.WARNING_STATUS or \
(self.old_overall_status == self.ERROR_STATUS and self.overall_status == self.CRITICAL_STATUS)
if not more_important and self.last_alert_sent and (
timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent:
return
self.last_alert_sent = timezone.now()
else:
# We don't count "back to normal" as an alert
self.last_alert_sent = None
self.save()
if self.unexpired_acknowledgement():
send_alert_update(self, duty_officers=get_duty_officers())
else:
self.snapshot.did_send_alert = True
self.snapshot.save()
send_alert(self, duty_officers=get_duty_officers())
def unexpired_acknowledgements(self):
acknowledgements = self.alertacknowledgement_set.all().filter(
time__gte=timezone.now() - timedelta(minutes=settings.ACKNOWLEDGEMENT_EXPIRY),
cancelled_time__isnull=True,
).order_by('-time')
return acknowledgements
def acknowledge_alert(self, user):
if self.unexpired_acknowledgements(): # Don't allow users to jump on each other
return None
acknowledgement = AlertAcknowledgement.objects.create(
user=user,
time=timezone.now(),
service=self,
)
return acknowledgement
def remove_acknowledgement(self, user):
self.unexpired_acknowledgements().update(
cancelled_time=timezone.now(),
cancelled_user=user,
)
def unexpired_acknowledgement(self):
try:
return self.unexpired_acknowledgements()[0]
except IndexError:
return None
@property
def recent_snapshots(self):
snapshots = self.snapshots.filter(
time__gt=(timezone.now() - timedelta(minutes=60 * 24))).order_by("time")
snapshots = list(snapshots.values())
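# Convert each snapshot's datetime to a Unix timestamp so the dicts can
# be serialized (e.g. to JSON) without custom encoders.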
for s in snapshots:
s['time'] = time.mktime(s['time'].timetuple())
return snapshots
def graphite_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck')
def http_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck')
def jenkins_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck')
def active_graphite_status_checks(self):
return self.graphite_status_checks().filter(active=True)
def active_http_status_checks(self):
return self.http_status_checks().filter(active=True)
def active_jenkins_status_checks(self):
return self.jenkins_status_checks().filter(active=True)
def active_status_checks(self):
return self.status_checks.filter(active=True)
def inactive_status_checks(self):
return self.status_checks.filter(active=False)
def all_passing_checks(self):
return self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS)
def all_failing_checks(self):
return self.active_status_checks().exclude(calculated_status=self.CALCULATED_PASSING_STATUS)
class Service(CheckGroupMixin):
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = ServiceStatusSnapshot(
service=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
if not (self.overall_status == Service.PASSING_STATUS and self.old_overall_status == Service.PASSING_STATUS):
self.alert()
instances = models.ManyToManyField(
'Instance',
blank=True,
help_text='Instances this service is running on.',
)
url = models.TextField(
blank=True,
help_text="URL of service."
)
is_public = models.BooleanField(
verbose_name='Is Public',
default=False,
help_text='The service will be shown in the public home'
)
class Meta:
ordering = ['name']
class Instance(CheckGroupMixin):
def duplicate(self):
checks = self.status_checks.all()
new_instance = self
new_instance.pk = None
new_instance.id = None
new_instance.name = u"Copy of %s" % self.name
new_instance.save()
for check in checks:
check.duplicate(inst_set=(new_instance,), serv_set=())
return new_instance.pk
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = InstanceStatusSnapshot(
instance=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
class Meta:
ordering = ['name']
address = models.TextField(
blank=True,
help_text="Address (IP/Hostname) of service."
)
def icmp_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck')
def active_icmp_status_checks(self):
return self.icmp_status_checks().filter(active=True)
def delete(self, *args, **kwargs):
self.icmp_status_checks().delete()
return super(Instance, self).delete(*args, **kwargs)
class Snapshot(models.Model):
class Meta:
abstract = True
time = models.DateTimeField(db_index=True)
num_checks_active = models.IntegerField(default=0)
num_checks_passing = models.IntegerField(default=0)
num_checks_failing = models.IntegerField(default=0)
overall_status = models.TextField(default=Service.PASSING_STATUS)
did_send_alert = models.BooleanField(default=False)
class ServiceStatusSnapshot(Snapshot):
service = models.ForeignKey(Service, related_name='snapshots')
def __unicode__(self):
return u"%s: %s" % (self.service.name, self.overall_status)
class InstanceStatusSnapshot(Snapshot):
instance = models.ForeignKey(Instance, related_name='snapshots')
def __unicode__(self):
return u"%s: %s" % (self.instance.name, self.overall_status)
class StatusCheck(PolymorphicModel):
"""
Base class for polymorphic models. We're going to use
proxy models for inheriting because it makes life much simpler,
but this allows us to stick different methods etc on subclasses.
You can work out what (sub)class a model is an instance of by accessing `instance.polymorphic_ctype.model`
We are using django-polymorphic for polymorphism
"""
# Common attributes to all
name = models.TextField()
active = models.BooleanField(
default=True,
help_text='If not active, check will not be used to calculate service status and will not trigger alerts.',
)
importance = models.CharField(
max_length=30,
choices=Service.IMPORTANCES,
default=Service.ERROR_STATUS,
help_text='Severity level of a failure. Critical alerts are for failures you want to wake you up at 2am, '
'Errors are things you can sleep through but need to fix in the morning, and warnings for less '
'important things.'
)
frequency = models.IntegerField(
default=5,
help_text='Minutes between each check.',
)
debounce = models.IntegerField(
default=0,
null=True,
help_text='Number of successive failures permitted before check will be marked as failed. Default is 0, '
'i.e. fail on first failure.'
)
created_by = models.ForeignKey(User, null=True)
calculated_status = models.CharField(
max_length=50, choices=Service.STATUSES, default=Service.CALCULATED_PASSING_STATUS, blank=True)
last_run = models.DateTimeField(null=True)
cached_health = models.TextField(editable=False, null=True)
# Graphite checks
metric = models.TextField(
null=True,
help_text='fully.qualified.name of the Graphite metric you want to watch. This can be any valid Graphite '
'expression, including wildcards, multiple hosts, etc.',
)
check_type = models.CharField(
choices=CHECK_TYPES,
max_length=100,
null=True,
)
value = models.TextField(
null=True,
help_text='If this expression evaluates to true, the check will fail (possibly triggering an alert).',
)
expected_num_hosts = models.IntegerField(
default=0,
null=True,
help_text='The minimum number of data series (hosts) you expect to see.',
)
allowed_num_failures = models.IntegerField(
default=0,
null=True,
help_text='The maximum number of data series (metrics) you expect to fail. For example, you might be OK with '
'2 out of 3 webservers having OK load (1 failing), but not 1 out of 3 (2 failing).',
)
# HTTP checks
endpoint = models.TextField(
null=True,
help_text='HTTP(S) endpoint to poll.',
)
username = models.TextField(
blank=True,
null=True,
help_text='Basic auth username.',
)
password = models.TextField(
blank=True,
null=True,
help_text='Basic auth password.',
)
text_match = models.TextField(
blank=True,
null=True,
help_text='Regex to match against source of page.',
)
status_code = models.TextField(
default=200,
null=True,
help_text='Status code expected from endpoint.'
)
timeout = models.IntegerField(
default=30,
null=True,
help_text='Time out after this many seconds.',
)
verify_ssl_certificate = models.BooleanField(
default=True,
help_text='Set to false to skip SSL certificate verification (default True).',
)
# Jenkins checks
max_queued_build_time = models.IntegerField(
null=True,
blank=True,
help_text='Alert if build queued for more than this many minutes.',
)
class Meta(PolymorphicModel.Meta):
ordering = ['name']
def __unicode__(self):
return self.name
def recent_results(self):
# Not great to use id but we are getting lockups, possibly because of something to do with index
# on time_complete
return StatusCheckResult.objects.filter(status_check=self).order_by('-id').defer('raw_data')[:10]
def last_result(self):
try:
return StatusCheckResult.objects.filter(status_check=self).order_by('-id').defer('raw_data')[0]
except IndexError:
return None
def run(self):
start = timezone.now()
try:
result = self._run()
except SoftTimeLimitExceeded as e:
result = StatusCheckResult(status_check=self)
result.error = u'Error in performing check: Celery soft time limit exceeded'
result.succeeded = False
except Exception as e:
result = StatusCheckResult(status_check=self)
logger.error(u"Error performing check: %s" % (e.message,))
result.error = u'Error in performing check: %s' % (e.message,)
result.succeeded = False
finish = timezone.now()
result.time = start
result.time_complete = finish
result.save()
self.last_run = finish
self.save()
def _run(self):
"""
Implement on subclasses. Should return a `CheckResult` instance.
"""
raise NotImplementedError('Subclasses should implement')
def save(self, *args, **kwargs):
if self.last_run:
recent_results = list(self.recent_results())
if calculate_debounced_passing(recent_results, self.debounce):
self.calculated_status = Service.CALCULATED_PASSING_STATUS
else:
self.calculated_status = Service.CALCULATED_FAILING_STATUS
self.cached_health = serialize_recent_results(recent_results)
try:
updated = StatusCheck.objects.get(pk=self.pk)
except StatusCheck.DoesNotExist as e:
logger.error('Cannot find myself (check %s) in the database, presumably have been deleted' % self.pk)
return
else:
self.cached_health = ''
self.calculated_status = Service.CALCULATED_PASSING_STATUS
ret = super(StatusCheck, self).save(*args, **kwargs)
self.update_related_services()
self.update_related_instances()
return ret
def duplicate(self, inst_set=(), serv_set=()):
new_check = self
new_check.pk = None
new_check.id = None
new_check.last_run = None
new_check.save()
for linked in list(inst_set) + list(serv_set):
linked.status_checks.add(new_check)
return new_check.pk
def update_related_services(self):
services = self.service_set.all()
for service in services:
update_service.delay(service.id)
def update_related_instances(self):
instances = self.instance_set.all()
for instance in instances:
update_instance.delay(instance.id)
class ICMPStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "ICMP/Ping Check"
def _run(self):
result = StatusCheckResult(status_check=self)
target = self.instance_set.get().address
# We need to read both STDOUT and STDERR because ping can write to both, depending on the kind of error.
# Thanks a lot, ping.
ping_process = subprocess.Popen("ping -c 1 " + target, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True)
response = ping_process.wait()
if response == 0:
result.succeeded = True
else:
output = ping_process.stdout.read()
result.succeeded = False
result.error = output
return result
def minimize_targets(targets):
split = [target.split(".") for target in targets]
prefix_nodes_in_common = 0
for i, nodes in enumerate(itertools.izip(*split)):
if any(node != nodes[0] for node in nodes):
prefix_nodes_in_common = i
break
split = [nodes[prefix_nodes_in_common:] for nodes in split]
suffix_nodes_in_common = 0
for i, nodes in enumerate(reversed(zip(*split))):
if any(node != nodes[0] for node in nodes):
suffix_nodes_in_common = i
break
if suffix_nodes_in_common:
split = [nodes[:-suffix_nodes_in_common] for nodes in split]
return [".".join(nodes) for nodes in split]
class GraphiteStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "Metric check"
def format_error_message(self, failures, actual_hosts, hosts_by_target):
if actual_hosts < self.expected_num_hosts:
return "Hosts missing | %d/%d hosts" % (
actual_hosts, self.expected_num_hosts)
elif actual_hosts > 1:
threshold = float(self.value)
failures_by_host = ["%s: %s %s %0.1f" % (
hosts_by_target[target], value, self.check_type, threshold)
for target, value in failures]
return ", ".join(failures_by_host)
else:
target, value = failures[0]
return "%s %s %0.1f" % (value, self.check_type, float(self.value))
def _run(self):
if not hasattr(self, 'utcnow'):
self.utcnow = None
result = StatusCheckResult(status_check=self)
failures = []
last_result = self.last_result()
if last_result:
last_result_started = last_result.time
time_to_check = max(self.frequency, ((timezone.now() - last_result_started).total_seconds() / 60) + 1)
else:
time_to_check = self.frequency
graphite_output = parse_metric(self.metric, mins_to_check=time_to_check, utcnow=self.utcnow)
try:
result.raw_data = json.dumps(graphite_output['raw'])
except (TypeError, ValueError):
result.raw_data = graphite_output['raw']
if graphite_output["error"]:
result.succeeded = False
result.error = graphite_output["error"]
return result
if graphite_output['num_series_with_data'] > 0:
result.average_value = graphite_output['average_value']
for s in graphite_output['series']:
if not s["values"]:
continue
failure_value = None
if self.check_type == '<':
if float(s['min']) < float(self.value):
failure_value = s['min']
elif self.check_type == '<=':
if float(s['min']) <= float(self.value):
failure_value = s['min']
elif self.check_type == '>':
if float(s['max']) > float(self.value):
failure_value = s['max']
elif self.check_type == '>=':
if float(s['max']) >= float(self.value):
failure_value = s['max']
elif self.check_type == '==':
if float(self.value) in s['values']:
failure_value = float(self.value)
else:
raise Exception(u'Check type %s not supported' %
self.check_type)
if failure_value is not None:
failures.append((s["target"], failure_value))
if len(failures) > self.allowed_num_failures:
result.succeeded = False
elif graphite_output['num_series_with_data'] < self.expected_num_hosts:
result.succeeded = False
else:
result.succeeded = True
if not result.succeeded:
targets = [s["target"] for s in graphite_output["series"]]
hosts = minimize_targets(targets)
hosts_by_target = dict(zip(targets, hosts))
result.error = self.format_error_message(
failures,
graphite_output['num_series_with_data'],
hosts_by_target,
)
return result
class HttpStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "HTTP check"
@classmethod
def _check_content_pattern(cls, text_match, content):
content = content if isinstance(content, unicode) else unicode(content, "UTF-8")
return re.search(text_match, content)
def _run(self):
result = StatusCheckResult(status_check=self)
auth = None
if self.username or self.password:
auth = (self.username, self.password)
try:
resp = requests.get(
self.endpoint,
timeout=self.timeout,
verify=self.verify_ssl_certificate,
auth=auth,
headers={
"User-Agent": settings.HTTP_USER_AGENT,
},
)
except requests.RequestException as e:
result.error = u'Request error occurred: %s' % (e.message,)
result.succeeded = False
else:
if self.status_code and resp.status_code != int(self.status_code):
result.error = u'Wrong code: got %s (expected %s)' % (
resp.status_code, int(self.status_code))
result.succeeded = False
result.raw_data = resp.content
elif self.text_match:
if not self._check_content_pattern(self.text_match, resp.content):
result.error = u'Failed to find match regex /%s/ in response body' % self.text_match
result.raw_data = resp.content
result.succeeded = False
else:
result.succeeded = True
else:
result.succeeded = True
return result
class StatusCheckResult(models.Model):
"""
We use the same StatusCheckResult model for all check types,
because really they are not so very different.
Checks don't have to use all the fields, so most should be
nullable
"""
status_check = models.ForeignKey(StatusCheck)
time = models.DateTimeField(null=False, db_index=True)
time_complete = models.DateTimeField(null=True, db_index=True)
raw_data = models.TextField(null=True)
succeeded = models.BooleanField(default=False)
error = models.TextField(null=True)
# Jenkins specific
job_number = models.PositiveIntegerField(null=True)
class Meta:
ordering = ['-time_complete']
index_together = (
('status_check', 'time_complete'),
('status_check', 'id'), # used to speed up StatusCheck.last_result
)
def __unicode__(self):
return '%s: %s @%s' % (self.status, self.status_check.name, self.time)
@property
def status(self):
if self.succeeded:
return 'succeeded'
else:
return 'failed'
@property
def took(self):
"""
Time taken by check in ms
"""
try:
diff = self.time_complete - self.time
return (diff.microseconds + (diff.seconds + diff.days * 24 * 3600) * 10 ** 6) / 1000
except TypeError:
return None
@property
def short_error(self):
snippet_len = 30
if self.error and len(self.error) > snippet_len:
return u"%s..." % self.error[:snippet_len - 3]
else:
return self.error
def save(self, *args, **kwargs):
if isinstance(self.raw_data, basestring):
self.raw_data = self.raw_data[:RAW_DATA_LIMIT]
return super(StatusCheckResult, self).save(*args, **kwargs)
class AlertAcknowledgement(models.Model):
time = models.DateTimeField()
user = models.ForeignKey(settings.AUTH_USER_MODEL)
service = models.ForeignKey(Service)
cancelled_time = models.DateTimeField(null=True, blank=True)
cancelled_user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
related_name='cancelleduser_set'
)
def unexpired(self):
return self.expires() > timezone.now()
def expires(self):
return self.time + timedelta(minutes=settings.ACKNOWLEDGEMENT_EXPIRY)
class UserProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='profile')
def user_data(self):
for user_data_subclass in AlertPluginUserData.__subclasses__():
user_data_subclass.objects.get_or_create(user=self, title=user_data_subclass.name)
return AlertPluginUserData.objects.filter(user=self)
def __unicode__(self):
return 'User profile: %s' % self.user.username
def save(self, *args, **kwargs):
# Enforce uniqueness
if self.fallback_alert_user:
profiles = UserProfile.objects.exclude(id=self.id)
profiles.update(fallback_alert_user=False)
return super(UserProfile, self).save(*args, **kwargs)
@property
def prefixed_mobile_number(self):
return '+%s' % self.mobile_number
mobile_number = models.CharField(max_length=20, blank=True, default='')
hipchat_alias = models.CharField(max_length=50, blank=True, default='')
fallback_alert_user = models.BooleanField(default=False)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=settings.AUTH_USER_MODEL)
class Shift(models.Model):
start = models.DateTimeField()
end = models.DateTimeField()
user = models.ForeignKey(settings.AUTH_USER_MODEL)
uid = models.TextField()
last_modified = models.DateTimeField()
deleted = models.BooleanField(default=False)
def __unicode__(self):
deleted = ''
if self.deleted:
deleted = ' (deleted)'
return "%s: %s to %s%s" % (self.user.username, self.start, self.end, deleted)
def get_duty_officers(at_time=None):
"""Returns a list of duty officers for a given time or now if none given"""
duty_officers = []
if not at_time:
at_time = timezone.now()
current_shifts = Shift.objects.filter(
deleted=False,
start__lt=at_time,
end__gt=at_time,
)
if current_shifts:
duty_officers = [shift.user for shift in current_shifts]
return duty_officers
else:
try:
u = UserProfile.objects.get(fallback_alert_user=True)
return [u.user]
except UserProfile.DoesNotExist:
return []
def update_shifts():
events = get_events()
users = User.objects.filter(is_active=True)
user_lookup = {}
for u in users:
user_lookup[u.username.lower()] = u
future_shifts = Shift.objects.filter(start__gt=timezone.now())
future_shifts.update(deleted=True)
for event in events:
e = event['summary'].lower().strip()
if e in user_lookup:
user = user_lookup[e]
# Delete any events that have been updated in ical
Shift.objects.filter(uid=event['uid'],
last_modified__lt=event['last_modified']).delete()
Shift.objects.get_or_create(
uid=event['uid'],
start=event['start'],
end=event['end'],
last_modified=event['last_modified'],
user=user,
deleted=False)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
import os
import sys
import gzip
import json
import hashlib
import resource
import datetime
from CacheBuckets import CacheBuckets
from StorageSystem import StorageSystem
# add ecmwf_utils to python path
util_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
print (util_path)
sys.path.append(util_path)
from ecmwf_util import Stats
START_TIME = 0
USER_ID = 1
HOST_ID = 2
PROCESS_ID = 3
REQUEST = 4
PARAMS = 5
FILE_SIZE = 6
EXECUTION_TIME = 7
ADDITIONAL_INFO = 8
NEXT_LINE = 9
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
TB = 1024 * GB
import time
class Timer():
def __init__(self, s):
self.s = s
def __enter__(self):
self.start = time.time()
def __exit__(self, *args):
print ("%s: %fs" % (self.s, (time.time() - self.start)))
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
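# e.g. unix_time(datetime.datetime(1970, 1, 2)) == 86400.0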
def get_md5(s, hexdigest=False):
# return s
m = hashlib.md5()
m.update(s)
if hexdigest:
return m.hexdigest()
else:
return m.digest()
def main(results_dir, test_config_file):
# h = hpy()
stats = Stats.Stats()
with open(test_config_file, 'r') as f:
config = json.load(f)
cache_buckets = config["buckets"]
# for bucket_name, bucket_config in cache_buckets.items():
# print (bucket_name, bucket_config)
cache = CacheBuckets(cache_buckets)
storage = StorageSystem(cache)
trace_file = os.path.abspath(config["trace"])
if not os.path.exists(trace_file):
print ("tracefile does not exist: %s" % (trace_file))
sys.exit(1)
processed_lines = 0
min_epoch = 99999999999999999999999
max_epoch = 0
lines_dump = 10000
with gzip.open(trace_file, 'r') as source_file:
with Timer("Analyzing file: %s" % (trace_file)):
epoch = 0
obj_id = ""
size = 0
t = time.time()
dt = None
last_day = None
current_day = None
next_day_epoch = 0
for line in source_file:
# print line
processed_lines += 1
if processed_lines % lines_dump == 0:
print ("processed: %d, cached_objects: %d, mem: %rMB, requests/s: %r" %
(processed_lines,
storage.get_cache().get_num_cached_objects(),
float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 1024,
int(lines_dump / (time.time() - t))
)
)
# print(json.dumps(storage.get_stats(), indent=2, sort_keys=True))
# print ("=================")
t = time.time()
# if not storage.check_cache_sanity(processed_lines):
# print ("Sanity Check failed")
# sys.exit(1)
elems = line.split('|')
if len(elems) > FILE_SIZE and elems[START_TIME].isdigit():
epoch = int(elems[START_TIME])
next_line = int(elems[NEXT_LINE])
# Check if a new day has started.
if next_day_epoch == 0 or epoch >= next_day_epoch:
# This is quite an expensive operation and does not need to be
# calculated more than once a day.
dt = datetime.datetime.utcfromtimestamp(epoch)
current_day = dt.strftime("%Y-%m-%d")
next_day_epoch = unix_time(dt + datetime.timedelta(days=1))
print("======================> NEW DAY!!: %s" % current_day)
if current_day != last_day:
# a new day! write some stats...
if last_day is not None:
# day has changed, write daily snapshot of storage system stats
day_stats = storage.get_stats_day()
total_stats = storage.get_stats_total()
stats.setDictDay(("stats",), epoch, day_stats)
stats.setDictDay(("ctotal",), epoch, total_stats)
last_day = current_day
min_epoch = min(min_epoch, epoch)
max_epoch = max(max_epoch, epoch)
obj_id = get_md5(elems[PARAMS].strip(), hexdigest=True)
if elems[REQUEST] == 'GET':
if elems[FILE_SIZE].isdigit():
size = int(elems[FILE_SIZE])
uid = elems[USER_ID]
storage.get_object(obj_id, uid, size, epoch, next_line)
else:
print("bad line: %s" % (line))
elif elems[REQUEST] == 'PUT':
if elems[FILE_SIZE].isdigit():
uid = elems[USER_ID]
size = int(elems[FILE_SIZE])
storage.put_object(obj_id, uid, size, epoch, next_line)
else:
print("bad line: %s" % (line))
elif elems[REQUEST] == 'DEL':
uid = elems[USER_ID]
storage.del_object(obj_id, uid, epoch)
elif elems[REQUEST] == 'RENAME':
uid = elems[USER_ID]
from_obj_id = get_md5(elems[PARAMS].split(" ")[0].strip(), hexdigest=True)
to_obj_id = get_md5(elems[PARAMS].split(" ")[1].strip(), hexdigest=True)
storage.rename_object(from_obj_id, to_obj_id)
else:
print("bad line: %s" % (line))
# write stats for ongoing day / month /year
s = storage.get_stats_day()
stats.setDictDay(("stats",), epoch, s)
results = dict()
results["totals"] = storage.get_stats_total()
# also store the initial config
results["config"] = config
results["epoch_start_ts"] = min_epoch
results["epoch_start"] = datetime.datetime.utcfromtimestamp(min_epoch).strftime("%Y-%m-%d")
results["epoch_end_ts"] = max_epoch
results["epoch_end"] = datetime.datetime.utcfromtimestamp(max_epoch).strftime("%Y-%m-%d")
results["stats"] = stats.to_dict()
print(json.dumps(results, indent=4, sort_keys=True))
results_file = os.path.join(results_dir, "results.json")
with open(results_file, 'w') as f:
json.dump(results, f, indent=4, sort_keys=True)
return 0
if __name__ == "__main__":
name = sys.argv[0]
if len(sys.argv) == 3:
results_dir = sys.argv[1]
test_config_file = sys.argv[2]
if not os.path.exists(results_dir):
print ("results_dir %r does not exist.")
sys.exit(1)
if not os.path.exists(test_config_file):
print("test_config_file %r does not exist.")
sys.exit(1)
else:
print ("usage: %s results_dir test_config_file" % name)
sys.exit(1)
sys.exit(main(results_dir, test_config_file))
# ---------------------------------------------------------------------------
from six.moves import xrange
from six import StringIO
import csv
import six
import sys
import time
import select
import struct
import tinylink
import argparse
try:
import serial
except ImportError:
serial = None
def run():
"""
Entry point for console script.
"""
sys.exit(main())
def parse_arguments():
"""
Create and parse command line arguments.
"""
parser = argparse.ArgumentParser()
    # Add options
    parser.add_argument("port", type=str, help="serial port")
    parser.add_argument(
        "baudrate", type=int, nargs="?", default=9600,
        help="serial baudrate")
    parser.add_argument(
        "--length", type=int, default=2**16, help="maximum length of frame")
    parser.add_argument(
        "--endianness", type=str, default="little", choices=["big", "little"],
        help="endianness of the link")
# Parse command line
return parser.parse_args(), parser
def dump(prefix, data):
"""
Dump data as two hex columns.
"""
result = []
length = len(data)
for i in xrange(0, length, 16):
hexstr = ""
bytestr = b""
for j in xrange(0, 16):
if i + j < length:
b = six.indexbytes(data, i + j)
hexstr += "%02x " % b
bytestr += six.int2byte(b) if 0x20 <= b < 0x7F else b"."
else:
hexstr += " "
if (j % 4) == 3:
hexstr += " "
result.append(prefix + " " + hexstr + bytestr.decode("ascii"))
# Return concatenated string
return "\n".join(result)
def process_link(link):
"""
Process incoming link data.
"""
frames = link.read()
# Print received frames
for frame in frames:
sys.stdout.write("### Type = %s\n" % frame.__class__.__name__)
sys.stdout.write("### Flags = 0x%04x\n" % frame.flags)
sys.stdout.write("### Length = %d\n" % len(frame.data))
sys.stdout.write(dump("<<<", frame.data) + "\n\n")
def process_stdin(link):
"""
Process stdin commands.
"""
command = sys.stdin.readline()
# End of file.
if len(command) == 0:
return False
    # Reuse the CSV module as a command parser, so quoted (CSV-style)
    # arguments are handled correctly.
items = list(csv.reader(StringIO(command.strip()), delimiter=" "))
if not items:
return
# Initialize state and start parsing.
frame = tinylink.Frame()
repeat = 1
pack = "B"
try:
for item in items[0]:
if item[0] == "\\":
k, v = item[1:].split("=")
if k == "flags":
frame.flags = int(v, 0)
elif k == "pack":
pack = v
elif k == "wait":
time.sleep(float(v))
elif k == "repeat":
repeat = int(v)
else:
raise ValueError("Unkown option: %s" % k)
else:
                try:
                    # Assume it is a float.
                    value = struct.pack(link.endianness + pack, float(item))
                except (ValueError, struct.error):
try:
# Assume it is an int.
value = struct.pack(
link.endianness + pack, int(item, 0))
except ValueError:
# Assume it is a byte string.
item = item.encode("ascii")
value = struct.pack(
link.endianness + str(len(item)) + "s", item)
# Concat to frame.
frame.data = (frame.data or bytes()) + value
except Exception as e:
sys.stdout.write("Parse exception: %s\n" % e)
# Output the data.
for i in xrange(repeat):
sys.stdout.write("### Flags = 0x%04x\n" % frame.flags)
if frame.data:
sys.stdout.write("### Length = %d\n" % len(frame.data))
sys.stdout.write(dump(">>>", frame.data) + "\n\n")
# Send the frame.
try:
link.write_frame(frame)
except ValueError as e:
sys.stdout.write("Could not send frame: %s\n" % e)
return
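# A hedged sketch of the stdin command syntax accepted by process_stdin()
# above: backslash tokens set options, everything else is packed into the
# frame payload (tried as float, then int, then raw bytes):
#
#     \flags=0x01 \pack=H 1000 2000   -> frame with flags 0x01 and two
#                                        unsigned shorts as payload
#     \repeat=3 hello                 -> send the 5-byte payload "hello"
#                                        three times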
def main():
"""
Main entry point.
"""
if serial is None:
sys.stdout.write(
"TinyLink CLI uses PySerial, but it is not installed. Please "
"install this first.\n")
return 1
# Parse arguments
arguments, parser = parse_arguments()
if arguments.endianness == "little":
endianness = tinylink.LITTLE_ENDIAN
else:
endianness = tinylink.BIG_ENDIAN
# Open serial port and create link
handle = serial.Serial(arguments.port, baudrate=arguments.baudrate)
link = tinylink.TinyLink(
handle, max_length=arguments.length, endianness=endianness)
# Loop until finished
try:
# Input indicator
sys.stdout.write("--> ")
sys.stdout.flush()
while True:
readables, _, _ = select.select([handle, sys.stdin], [], [])
# Read from serial port
if handle in readables:
process_link(link)
# Read from stdin
if sys.stdin in readables:
if process_stdin(link) is False:
break
# Input indicator
sys.stdout.write("--> ")
sys.stdout.flush()
    except KeyboardInterrupt:
        pass
    finally:
        # Close the serial port on normal exit and on Ctrl+C alike.
        handle.close()
    # Done
    return 0
# E.g. `python tinylink_cli.py /dev/tty.usbmodem1337 9600'
if __name__ == "__main__":
run()
|
|
"""Color util methods."""
import math
import colorsys
from typing import Tuple, List, Optional
import attr
# Official CSS3 colors from w3.org:
# https://www.w3.org/TR/2010/PR-css3-color-20101028/#html4
# names do not have spaces in them so that we can compare against
# requests more easily (by removing spaces from the requests as well).
# This lets "dark seagreen" and "dark sea green" both match the same
# color "darkseagreen".
COLORS = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
    'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'navyblue': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
@attr.s()
class XYPoint:
"""Represents a CIE 1931 XY coordinate pair."""
x = attr.ib(type=float)
y = attr.ib(type=float)
@attr.s()
class GamutType:
"""Represents the Gamut of a light."""
# ColorGamut = gamut(xypoint(xR,yR),xypoint(xG,yG),xypoint(xB,yB))
red = attr.ib(type=XYPoint)
green = attr.ib(type=XYPoint)
blue = attr.ib(type=XYPoint)
def color_name_to_rgb(color_name: str) -> Tuple[int, int, int]:
    """Convert a color name to an RGB tuple."""
    # COLORS map has no spaces in it, so make the color_name have no
    # spaces in it as well for matching purposes
    rgb_value = COLORS.get(color_name.replace(' ', '').lower())
    if not rgb_value:
        raise ValueError('Unknown color')
    return rgb_value
# pylint: disable=invalid-name
def color_RGB_to_xy(iR: int, iG: int, iB: int,
Gamut: Optional[GamutType] = None) -> Tuple[float, float]:
"""Convert from RGB color to XY color."""
return color_RGB_to_xy_brightness(iR, iG, iB, Gamut)[:2]
# Taken from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
# License: Code is given as is. Use at your own risk and discretion.
# pylint: disable=invalid-name
def color_RGB_to_xy_brightness(
iR: int, iG: int, iB: int,
Gamut: Optional[GamutType] = None) -> Tuple[float, float, int]:
"""Convert from RGB color to XY color."""
if iR + iG + iB == 0:
return 0.0, 0.0, 0
R = iR / 255
B = iB / 255
G = iG / 255
# Gamma correction
R = pow((R + 0.055) / (1.0 + 0.055),
2.4) if (R > 0.04045) else (R / 12.92)
G = pow((G + 0.055) / (1.0 + 0.055),
2.4) if (G > 0.04045) else (G / 12.92)
B = pow((B + 0.055) / (1.0 + 0.055),
2.4) if (B > 0.04045) else (B / 12.92)
# Wide RGB D65 conversion formula
X = R * 0.664511 + G * 0.154324 + B * 0.162028
Y = R * 0.283881 + G * 0.668433 + B * 0.047685
Z = R * 0.000088 + G * 0.072310 + B * 0.986039
# Convert XYZ to xy
x = X / (X + Y + Z)
y = Y / (X + Y + Z)
# Brightness
Y = 1 if Y > 1 else Y
brightness = round(Y * 255)
# Check if the given xy value is within the color-reach of the lamp.
if Gamut:
in_reach = check_point_in_lamps_reach((x, y), Gamut)
if not in_reach:
xy_closest = get_closest_point_to_point((x, y), Gamut)
x = xy_closest[0]
y = xy_closest[1]
return round(x, 3), round(y, 3), brightness
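# A hedged usage sketch (values computed from the conversion above, rounded
# as the function rounds):
#     >>> color_RGB_to_xy_brightness(255, 0, 0)
#     (0.701, 0.299, 72)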
def color_xy_to_RGB(
vX: float, vY: float,
Gamut: Optional[GamutType] = None) -> Tuple[int, int, int]:
"""Convert from XY to a normalized RGB."""
return color_xy_brightness_to_RGB(vX, vY, 255, Gamut)
# Converted to Python from Obj-C, original source from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
def color_xy_brightness_to_RGB(
vX: float, vY: float, ibrightness: int,
Gamut: Optional[GamutType] = None) -> Tuple[int, int, int]:
"""Convert from XYZ to RGB."""
if Gamut:
if not check_point_in_lamps_reach((vX, vY), Gamut):
xy_closest = get_closest_point_to_point((vX, vY), Gamut)
vX = xy_closest[0]
vY = xy_closest[1]
brightness = ibrightness / 255.
if brightness == 0:
return (0, 0, 0)
Y = brightness
if vY == 0:
vY += 0.00000000001
X = (Y / vY) * vX
Z = (Y / vY) * (1 - vX - vY)
# Convert to RGB using Wide RGB D65 conversion.
r = X * 1.656492 - Y * 0.354851 - Z * 0.255038
g = -X * 0.707196 + Y * 1.655397 + Z * 0.036152
b = X * 0.051713 - Y * 0.121364 + Z * 1.011530
# Apply reverse gamma correction.
r, g, b = map(
lambda x: (12.92 * x) if (x <= 0.0031308) else
((1.0 + 0.055) * pow(x, (1.0 / 2.4)) - 0.055),
[r, g, b]
)
# Bring all negative components to zero.
r, g, b = map(lambda x: max(0, x), [r, g, b])
# If one component is greater than 1, weight components by that value.
max_component = max(r, g, b)
if max_component > 1:
r, g, b = map(lambda x: x / max_component, [r, g, b])
ir, ig, ib = map(lambda x: int(x * 255), [r, g, b])
return (ir, ig, ib)
def color_hsb_to_RGB(fH: float, fS: float, fB: float) -> Tuple[int, int, int]:
"""Convert a hsb into its rgb representation."""
if fS == 0:
fV = int(fB * 255)
return fV, fV, fV
r = g = b = 0
h = fH / 60
f = h - float(math.floor(h))
p = fB * (1 - fS)
q = fB * (1 - fS * f)
t = fB * (1 - (fS * (1 - f)))
if int(h) == 0:
r = int(fB * 255)
g = int(t * 255)
b = int(p * 255)
elif int(h) == 1:
r = int(q * 255)
g = int(fB * 255)
b = int(p * 255)
elif int(h) == 2:
r = int(p * 255)
g = int(fB * 255)
b = int(t * 255)
elif int(h) == 3:
r = int(p * 255)
g = int(q * 255)
b = int(fB * 255)
elif int(h) == 4:
r = int(t * 255)
g = int(p * 255)
b = int(fB * 255)
elif int(h) == 5:
r = int(fB * 255)
g = int(p * 255)
b = int(q * 255)
return (r, g, b)
def color_RGB_to_hsv(
iR: float, iG: float, iB: float) -> Tuple[float, float, float]:
"""Convert an rgb color to its hsv representation.
Hue is scaled 0-360
Sat is scaled 0-100
Val is scaled 0-100
"""
fHSV = colorsys.rgb_to_hsv(iR/255.0, iG/255.0, iB/255.0)
return round(fHSV[0]*360, 3), round(fHSV[1]*100, 3), round(fHSV[2]*100, 3)
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> Tuple[float, float]:
"""Convert an rgb color to its hs representation."""
return color_RGB_to_hsv(iR, iG, iB)[:2]
def color_hsv_to_RGB(iH: float, iS: float, iV: float) -> Tuple[int, int, int]:
"""Convert an hsv color into its rgb representation.
Hue is scaled 0-360
Sat is scaled 0-100
Val is scaled 0-100
"""
fRGB = colorsys.hsv_to_rgb(iH/360, iS/100, iV/100)
return (int(fRGB[0]*255), int(fRGB[1]*255), int(fRGB[2]*255))
def color_hs_to_RGB(iH: float, iS: float) -> Tuple[int, int, int]:
"""Convert an hsv color into its rgb representation."""
return color_hsv_to_RGB(iH, iS, 100)
def color_xy_to_hs(vX: float, vY: float,
Gamut: Optional[GamutType] = None) -> Tuple[float, float]:
"""Convert an xy color to its hs representation."""
h, s, _ = color_RGB_to_hsv(*color_xy_to_RGB(vX, vY, Gamut))
return h, s
def color_hs_to_xy(iH: float, iS: float,
Gamut: Optional[GamutType] = None) -> Tuple[float, float]:
"""Convert an hs color to its xy representation."""
return color_RGB_to_xy(*color_hs_to_RGB(iH, iS), Gamut)
def _match_max_scale(input_colors: Tuple, output_colors: Tuple) -> Tuple:
"""Match the maximum value of the output to the input."""
max_in = max(input_colors)
max_out = max(output_colors)
if max_out == 0:
factor = 0.0
else:
factor = max_in / max_out
return tuple(int(round(i * factor)) for i in output_colors)
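# A hedged illustration of the scaling above: the output tuple is rescaled
# so that its maximum matches the input's maximum.
#     >>> _match_max_scale((255, 0, 0), (2, 1, 0))
#     (255, 128, 0)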
def color_rgb_to_rgbw(r: int, g: int, b: int) -> Tuple[int, int, int, int]:
"""Convert an rgb color to an rgbw representation."""
# Calculate the white channel as the minimum of input rgb channels.
# Subtract the white portion from the remaining rgb channels.
w = min(r, g, b)
rgbw = (r - w, g - w, b - w, w)
# Match the output maximum value to the input. This ensures the full
# channel range is used.
return _match_max_scale((r, g, b), rgbw) # type: ignore
def color_rgbw_to_rgb(r: int, g: int, b: int, w: int) -> Tuple[int, int, int]:
"""Convert an rgbw color to an rgb representation."""
# Add the white channel back into the rgb channels.
rgb = (r + w, g + w, b + w)
# Match the output maximum value to the input. This ensures the
# output doesn't overflow.
return _match_max_scale((r, g, b, w), rgb) # type: ignore
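# A hedged round-trip sketch: a pure gray moves entirely into the white
# channel and converts back unchanged.
#     >>> color_rgb_to_rgbw(128, 128, 128)
#     (0, 0, 0, 128)
#     >>> color_rgbw_to_rgb(0, 0, 0, 128)
#     (128, 128, 128)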
def color_rgb_to_hex(r: int, g: int, b: int) -> str:
"""Return a RGB color from a hex color string."""
return '{0:02x}{1:02x}{2:02x}'.format(round(r), round(g), round(b))
def rgb_hex_to_rgb_list(hex_string: str) -> List[int]:
"""Return an RGB color value list from a hex color string."""
return [int(hex_string[i:i + len(hex_string) // 3], 16)
for i in range(0,
len(hex_string),
len(hex_string) // 3)]
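# A hedged usage sketch: the string is split into three equal-width fields,
# so six hex digits yield three 8-bit channels.
#     >>> rgb_hex_to_rgb_list('ff8000')
#     [255, 128, 0]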
def color_temperature_to_hs(
color_temperature_kelvin: float) -> Tuple[float, float]:
"""Return an hs color from a color temperature in Kelvin."""
return color_RGB_to_hs(*color_temperature_to_rgb(color_temperature_kelvin))
def color_temperature_to_rgb(
color_temperature_kelvin: float) -> Tuple[float, float, float]:
"""
Return an RGB color from a color temperature in Kelvin.
This is a rough approximation based on the formula provided by T. Helland
http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
"""
# range check
if color_temperature_kelvin < 1000:
color_temperature_kelvin = 1000
elif color_temperature_kelvin > 40000:
color_temperature_kelvin = 40000
tmp_internal = color_temperature_kelvin / 100.0
red = _get_red(tmp_internal)
green = _get_green(tmp_internal)
blue = _get_blue(tmp_internal)
return red, green, blue
def _bound(color_component: float, minimum: float = 0,
maximum: float = 255) -> float:
"""
Bound the given color component value between the given min and max values.
The minimum and maximum values will be included in the valid output.
i.e. Given a color_component of 0 and a minimum of 10, the returned value
will be 10.
"""
color_component_out = max(color_component, minimum)
return min(color_component_out, maximum)
def _get_red(temperature: float) -> float:
"""Get the red component of the temperature in RGB space."""
if temperature <= 66:
return 255
tmp_red = 329.698727446 * math.pow(temperature - 60, -0.1332047592)
return _bound(tmp_red)
def _get_green(temperature: float) -> float:
"""Get the green component of the given color temp in RGB space."""
if temperature <= 66:
green = 99.4708025861 * math.log(temperature) - 161.1195681661
else:
green = 288.1221695283 * math.pow(temperature - 60, -0.0755148492)
return _bound(green)
def _get_blue(temperature: float) -> float:
"""Get the blue component of the given color temperature in RGB space."""
if temperature >= 66:
return 255
if temperature <= 19:
return 0
blue = 138.5177312231 * math.log(temperature - 10) - 305.0447927307
return _bound(blue)
def color_temperature_mired_to_kelvin(mired_temperature: float) -> float:
    """Convert an absolute mired shift to a color temperature in Kelvin."""
    return math.floor(1000000 / mired_temperature)
def color_temperature_kelvin_to_mired(kelvin_temperature: float) -> float:
    """Convert a color temperature in Kelvin to a mired shift."""
    return math.floor(1000000 / kelvin_temperature)
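# A hedged usage sketch: mireds are one million divided by the Kelvin value,
# so the conversion is its own inverse only up to the floor() rounding.
#     >>> color_temperature_kelvin_to_mired(6500)
#     153
#     >>> color_temperature_mired_to_kelvin(153)
#     6535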
# The following 5 functions are adapted from rgbxy provided by Benjamin Knight
# License: The MIT License (MIT), 2014.
# https://github.com/benknight/hue-python-rgb-converter
def cross_product(p1: XYPoint, p2: XYPoint) -> float:
"""Calculate the cross product of two XYPoints."""
return float(p1.x * p2.y - p1.y * p2.x)
def get_distance_between_two_points(one: XYPoint, two: XYPoint) -> float:
"""Calculate the distance between two XYPoints."""
dx = one.x - two.x
dy = one.y - two.y
return math.sqrt(dx * dx + dy * dy)
def get_closest_point_to_line(A: XYPoint, B: XYPoint, P: XYPoint) -> XYPoint:
"""
Find the closest point from P to a line defined by A and B.
This point will be reproducible by the lamp
as it is on the edge of the gamut.
"""
AP = XYPoint(P.x - A.x, P.y - A.y)
AB = XYPoint(B.x - A.x, B.y - A.y)
ab2 = AB.x * AB.x + AB.y * AB.y
ap_ab = AP.x * AB.x + AP.y * AB.y
t = ap_ab / ab2
if t < 0.0:
t = 0.0
elif t > 1.0:
t = 1.0
return XYPoint(A.x + AB.x * t, A.y + AB.y * t)
def get_closest_point_to_point(xy_tuple: Tuple[float, float],
Gamut: GamutType) -> Tuple[float, float]:
"""
Get the closest matching color within the gamut of the light.
Should only be used if the supplied color is outside of the color gamut.
"""
xy_point = XYPoint(xy_tuple[0], xy_tuple[1])
# find the closest point on each line in the CIE 1931 'triangle'.
pAB = get_closest_point_to_line(Gamut.red, Gamut.green, xy_point)
pAC = get_closest_point_to_line(Gamut.blue, Gamut.red, xy_point)
pBC = get_closest_point_to_line(Gamut.green, Gamut.blue, xy_point)
# Get the distances per point and see which point is closer to our Point.
dAB = get_distance_between_two_points(xy_point, pAB)
dAC = get_distance_between_two_points(xy_point, pAC)
dBC = get_distance_between_two_points(xy_point, pBC)
lowest = dAB
closest_point = pAB
if dAC < lowest:
lowest = dAC
closest_point = pAC
if dBC < lowest:
lowest = dBC
closest_point = pBC
# Change the xy value to a value which is within the reach of the lamp.
cx = closest_point.x
cy = closest_point.y
return (cx, cy)
def check_point_in_lamps_reach(p: Tuple[float, float],
Gamut: GamutType) -> bool:
"""Check if the provided XYPoint can be recreated by a Hue lamp."""
v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
q = XYPoint(p[0] - Gamut.red.x, p[1] - Gamut.red.y)
s = cross_product(q, v2) / cross_product(v1, v2)
t = cross_product(v1, q) / cross_product(v1, v2)
return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)
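# A hedged usage sketch with a made-up triangular gamut (not a real lamp's):
# the point (0.3, 0.3) lies inside the triangle, so the barycentric
# coordinates s and t computed above are both non-negative and sum to <= 1.
#     >>> sample = GamutType(XYPoint(0.7, 0.3), XYPoint(0.17, 0.7),
#     ...                    XYPoint(0.15, 0.05))
#     >>> check_point_in_lamps_reach((0.3, 0.3), sample)
#     True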
def check_valid_gamut(Gamut: GamutType) -> bool:
"""Check if the supplied gamut is valid."""
# Check if the three points of the supplied gamut are not on the same line.
v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
not_on_line = cross_product(v1, v2) > 0.0001
# Check if all six coordinates of the gamut lie between 0 and 1.
red_valid = Gamut.red.x >= 0 and Gamut.red.x <= 1 and \
Gamut.red.y >= 0 and Gamut.red.y <= 1
green_valid = Gamut.green.x >= 0 and Gamut.green.x <= 1 and \
Gamut.green.y >= 0 and Gamut.green.y <= 1
blue_valid = Gamut.blue.x >= 0 and Gamut.blue.x <= 1 and \
Gamut.blue.y >= 0 and Gamut.blue.y <= 1
return not_on_line and red_valid and green_valid and blue_valid
|
|
import pytest
import json
import numpy as np
from keras.layers import Dense, Dropout, InputLayer
from keras import layers
from keras.engine import Input, get_source_inputs
from keras.models import Model, Sequential
from keras import backend as K
from keras.models import model_from_json, model_from_yaml
from keras.utils.test_utils import keras_test
@keras_test
def test_get_updates_for():
a = Input(shape=(2,))
dense_layer = Dense(1)
dense_layer.add_update(0, inputs=a)
dense_layer.add_update(1, inputs=None)
assert dense_layer.get_updates_for(a) == [0]
assert dense_layer.get_updates_for(None) == [1]
@keras_test
def test_get_losses_for():
a = Input(shape=(2,))
dense_layer = Dense(1)
dense_layer.add_loss(0, inputs=a)
dense_layer.add_loss(1, inputs=None)
assert dense_layer.get_losses_for(a) == [0]
assert dense_layer.get_losses_for(None) == [1]
@keras_test
def test_trainable_weights():
a = Input(shape=(2,))
b = Dense(1)(a)
model = Model(a, b)
weights = model.weights
assert model.trainable_weights == weights
assert model.non_trainable_weights == []
model.trainable = False
assert model.trainable_weights == []
assert model.non_trainable_weights == weights
model.trainable = True
assert model.trainable_weights == weights
assert model.non_trainable_weights == []
model.layers[1].trainable = False
assert model.trainable_weights == []
assert model.non_trainable_weights == weights
# sequential model
model = Sequential()
model.add(Dense(1, input_dim=2))
weights = model.weights
assert model.trainable_weights == weights
assert model.non_trainable_weights == []
model.trainable = False
assert model.trainable_weights == []
assert model.non_trainable_weights == weights
model.trainable = True
assert model.trainable_weights == weights
assert model.non_trainable_weights == []
model.layers[0].trainable = False
assert model.trainable_weights == []
assert model.non_trainable_weights == weights
@keras_test
def test_learning_phase():
a = Input(shape=(32,), name='input_a')
b = Input(shape=(32,), name='input_b')
a_2 = Dense(16, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
assert not a_2._uses_learning_phase
assert b_2._uses_learning_phase
# test merge
m = layers.concatenate([a_2, b_2])
assert m._uses_learning_phase
# Test recursion
model = Model([a, b], [a_2, b_2])
print(model.input_spec)
assert model.uses_learning_phase
c = Input(shape=(32,), name='input_c')
d = Input(shape=(32,), name='input_d')
c_2, b_2 = model([c, d])
assert c_2._uses_learning_phase
assert b_2._uses_learning_phase
# try actually running graph
fn = K.function(model.inputs + [K.learning_phase()], model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
fn_outputs_dp = fn([input_a_np, input_b_np, 1])
# output a: nothing changes
assert fn_outputs_no_dp[0].sum() == fn_outputs_dp[0].sum()
# output b: dropout applied
assert fn_outputs_no_dp[1].sum() != fn_outputs_dp[1].sum()
@keras_test
def test_layer_call_arguments():
# Test the ability to pass and serialize arguments to `call`.
inp = layers.Input(shape=(2,))
x = layers.Dense(3)(inp)
x = layers.Dropout(0.5)(x, training=True)
model = Model(inp, x)
assert not model.uses_learning_phase
# Test that argument is kept when applying the model
inp2 = layers.Input(shape=(2,))
out2 = model(inp2)
assert not out2._uses_learning_phase
# Test that argument is kept after loading a model
config = model.get_config()
model = Model.from_config(config)
assert not model.uses_learning_phase
@keras_test
def test_node_construction():
####################################################
# test basics
a = Input(shape=(32,), name='input_a')
b = Input(shape=(32,), name='input_b')
assert a._keras_shape == (None, 32)
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, b_node_index, b_tensor_index = b._keras_history
assert len(a_layer.inbound_nodes) == 1
    assert a_tensor_index == 0
node = a_layer.inbound_nodes[a_node_index]
assert node.outbound_layer == a_layer
assert type(node.inbound_layers) is list
assert node.inbound_layers == []
assert type(node.input_tensors) is list
assert node.input_tensors == [a]
assert type(node.input_masks) is list
assert node.input_masks == [None]
assert type(node.input_shapes) is list
assert node.input_shapes == [(None, 32)]
assert type(node.output_tensors) is list
assert node.output_tensors == [a]
assert type(node.output_shapes) is list
assert node.output_shapes == [(None, 32)]
assert type(node.output_masks) is list
assert node.output_masks == [None]
dense = Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
assert len(dense.inbound_nodes) == 2
assert len(dense.outbound_nodes) == 0
assert dense.inbound_nodes[0].inbound_layers == [a_layer]
assert dense.inbound_nodes[0].outbound_layer == dense
assert dense.inbound_nodes[1].inbound_layers == [b_layer]
assert dense.inbound_nodes[1].outbound_layer == dense
assert dense.inbound_nodes[0].input_tensors == [a]
assert dense.inbound_nodes[1].input_tensors == [b]
# test layer properties
test_layer = Dense(16, name='test_layer')
a_test = test_layer(a)
assert K.int_shape(test_layer.kernel) == (32, 16)
assert test_layer.input == a
assert test_layer.output == a_test
assert test_layer.input_mask is None
assert test_layer.output_mask is None
assert test_layer.input_shape == (None, 32)
assert test_layer.output_shape == (None, 16)
with pytest.raises(Exception):
dense.input
with pytest.raises(Exception):
dense.output
with pytest.raises(Exception):
dense.input_mask
with pytest.raises(Exception):
dense.output_mask
assert dense.get_input_at(0) == a
assert dense.get_input_at(1) == b
assert dense.get_output_at(0) == a_2
assert dense.get_output_at(1) == b_2
assert dense.get_input_shape_at(0) == (None, 32)
assert dense.get_input_shape_at(1) == (None, 32)
assert dense.get_output_shape_at(0) == (None, 16)
assert dense.get_output_shape_at(1) == (None, 16)
assert dense.get_input_mask_at(0) is None
assert dense.get_input_mask_at(1) is None
assert dense.get_output_mask_at(0) is None
assert dense.get_output_mask_at(1) is None
@keras_test
def test_multi_input_layer():
####################################################
# test multi-input layer
a = Input(shape=(32,), name='input_a')
b = Input(shape=(32,), name='input_b')
dense = Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
assert merged._keras_shape == (None, 16 * 2)
merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
assert merge_node_index == 0
assert merge_tensor_index == 0
assert len(merge_layer.inbound_nodes) == 1
assert len(merge_layer.outbound_nodes) == 0
assert len(merge_layer.inbound_nodes[0].input_tensors) == 2
assert len(merge_layer.inbound_nodes[0].inbound_layers) == 2
c = Dense(64, name='dense_2')(merged)
d = Dense(5, name='dense_3')(c)
model = Model(inputs=[a, b], outputs=[c, d], name='model')
assert len(model.layers) == 6
print('model.input_layers:', model.input_layers)
print('model.input_layers_node_indices:', model.input_layers_node_indices)
print('model.input_layers_tensor_indices:', model.input_layers_tensor_indices)
print('model.output_layers', model.output_layers)
print('output_shape:', model.compute_output_shape([(None, 32), (None, 32)]))
assert model.compute_output_shape([(None, 32), (None, 32)]) == [(None, 64), (None, 5)]
print('mask:', model.compute_mask([a, b], [None, None]))
assert model.compute_mask([a, b], [None, None]) == [None, None]
print('output_shape:', model.compute_output_shape([(None, 32), (None, 32)]))
assert model.compute_output_shape([(None, 32), (None, 32)]) == [(None, 64), (None, 5)]
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
print('layers:', [layer.name for layer in model.layers])
assert [l.name for l in model.layers][2:] == ['dense_1', 'merge', 'dense_2', 'dense_3']
print('input_layers:', [l.name for l in model.input_layers])
assert [l.name for l in model.input_layers] == ['input_a', 'input_b']
print('output_layers:', [l.name for l in model.output_layers])
assert [l.name for l in model.output_layers] == ['dense_2', 'dense_3']
# actually run model
fn = K.function(model.inputs, model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
assert [x.shape for x in fn_outputs] == [(10, 64), (10, 5)]
# test get_source_inputs
print(get_source_inputs(c))
assert get_source_inputs(c) == [a, b]
# serialization / deserialization
json_config = model.to_json()
recreated_model = model_from_json(json_config)
recreated_model.compile('rmsprop', 'mse')
print('recreated:')
print([layer.name for layer in recreated_model.layers])
print([layer.name for layer in recreated_model.input_layers])
print([layer.name for layer in recreated_model.output_layers])
assert [l.name for l in recreated_model.layers][2:] == ['dense_1', 'merge', 'dense_2', 'dense_3']
assert [l.name for l in recreated_model.input_layers] == ['input_a', 'input_b']
assert [l.name for l in recreated_model.output_layers] == ['dense_2', 'dense_3']
fn = K.function(recreated_model.inputs, recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
assert [x.shape for x in fn_outputs] == [(10, 64), (10, 5)]
@keras_test
def test_recursion():
####################################################
# test recursion
a = Input(shape=(32,), name='input_a')
b = Input(shape=(32,), name='input_b')
dense = Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
c = Dense(64, name='dense_2')(merged)
d = Dense(5, name='dense_3')(c)
model = Model(inputs=[a, b], outputs=[c, d], name='model')
e = Input(shape=(32,), name='input_e')
f = Input(shape=(32,), name='input_f')
g, h = model([e, f])
# g2, h2 = model([e, f])
assert g._keras_shape == c._keras_shape
assert h._keras_shape == d._keras_shape
# test separate manipulation of different layer outputs
i = Dense(7, name='dense_4')(h)
final_model = Model(inputs=[e, f], outputs=[i, g], name='final')
assert len(final_model.inputs) == 2
assert len(final_model.outputs) == 2
assert len(final_model.layers) == 4
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
print('final_model layers:', [layer.name for layer in final_model.layers])
assert [layer.name for layer in final_model.layers][2:] == ['model', 'dense_4']
print(model.compute_mask([e, f], [None, None]))
assert model.compute_mask([e, f], [None, None]) == [None, None]
print(final_model.compute_output_shape([(10, 32), (10, 32)]))
assert final_model.compute_output_shape([(10, 32), (10, 32)]) == [(10, 7), (10, 64)]
# run recursive model
fn = K.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]
# test serialization
model_config = final_model.get_config()
print(json.dumps(model_config, indent=4))
recreated_model = Model.from_config(model_config)
fn = K.function(recreated_model.inputs, recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]
####################################################
# test multi-input multi-output
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
o = Input(shape=(32,), name='input_o')
p = Input(shape=(32,), name='input_p')
q, r = model([o, p])
assert n._keras_shape == (None, 5)
assert q._keras_shape == (None, 64)
s = layers.concatenate([n, q], name='merge_nq')
assert s._keras_shape == (None, 64 + 5)
# test with single output as 1-elem list
multi_io_model = Model([j, k, o, p], [s])
fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))])
assert [x.shape for x in fn_outputs] == [(10, 69)]
# test with single output as tensor
multi_io_model = Model([j, k, o, p], s)
fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))])
# note that the output of the K.function will still be a 1-elem list
assert [x.shape for x in fn_outputs] == [(10, 69)]
# test serialization
print('multi_io_model.layers:', multi_io_model.layers)
print('len(model.inbound_nodes):', len(model.inbound_nodes))
print('len(model.outbound_nodes):', len(model.outbound_nodes))
model_config = multi_io_model.get_config()
print(model_config)
print(json.dumps(model_config, indent=4))
recreated_model = Model.from_config(model_config)
fn = K.function(recreated_model.inputs, recreated_model.outputs)
fn_outputs = fn([np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))])
# note that the output of the K.function will still be a 1-elem list
assert [x.shape for x in fn_outputs] == [(10, 69)]
config = model.get_config()
Model.from_config(config)
model.summary()
json_str = model.to_json()
model_from_json(json_str)
yaml_str = model.to_yaml()
model_from_yaml(yaml_str)
####################################################
# test invalid graphs
# input is not an Input tensor
j = Input(shape=(32,), name='input_j')
j = Dense(32)(j)
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
with pytest.raises(Exception):
Model([j, k], [m, n])
# disconnected graph
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
with pytest.raises(Exception) as e:
Model([j], [m, n])
    # redundant outputs
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
    # this should work: duplicated outputs are currently allowed
    # TODO: raise a warning
Model([j, k], [m, n, n])
# redundant inputs
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
with pytest.raises(Exception):
Model([j, k, j], [m, n])
    # garbage as inputs/outputs
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
with pytest.raises(Exception):
Model([j, k], [m, n, 0])
####################################################
# test calling layers/models on TF tensors
if K._BACKEND == 'tensorflow':
import tensorflow as tf
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
tf_model = Model([j, k], [m, n])
j_tf = tf.placeholder(dtype=K.floatx())
k_tf = tf.placeholder(dtype=K.floatx())
m_tf, n_tf = tf_model([j_tf, k_tf])
assert m_tf.get_shape().as_list() == [None, 64]
assert n_tf.get_shape().as_list() == [None, 5]
# test merge
layers.concatenate([j_tf, k_tf], axis=1)
layers.add([j_tf, k_tf])
# test tensor input
x = tf.placeholder(shape=(None, 2), dtype=K.floatx())
InputLayer(input_tensor=x)
x = Input(tensor=x)
Dense(2)(x)
@keras_test
def test_load_layers():
from keras.layers import ConvLSTM2D, TimeDistributed, Bidirectional, Conv2D, Input
from keras.models import Model
from keras.engine.topology import preprocess_weights_for_loading
if K.backend() == 'tensorflow':
inputs = Input(shape=(10, 20, 20, 1))
else:
inputs = Input(shape=(10, 1, 20, 20))
td_conv = TimeDistributed(Conv2D(15, (5, 5)))(inputs)
bi_convlstm2d = Bidirectional(ConvLSTM2D(10, (3, 3)), merge_mode='concat')(td_conv)
model = Model(inputs=inputs, outputs=bi_convlstm2d)
weight_value_tuples = []
# TimeDistributed Conv2D layer
# use 'channels_first' data format to check that the function is being called correctly for Conv2D
# old: (filters, stack_size, kernel_rows, kernel_cols)
# new: (kernel_rows, kernel_cols, stack_size, filters)
weight_tensor_td_conv_old = list()
weight_tensor_td_conv_old.append(np.zeros((15, 1, 5, 5)))
weight_tensor_td_conv_old.append(np.zeros((15,)))
td_conv_layer = model.layers[1]
td_conv_layer.layer.data_format = 'channels_first'
weight_tensor_td_conv_new = preprocess_weights_for_loading(td_conv_layer,
weight_tensor_td_conv_old,
original_keras_version='1')
symbolic_weights = td_conv_layer.weights
assert (len(symbolic_weights) == len(weight_tensor_td_conv_new))
weight_value_tuples += zip(symbolic_weights, weight_tensor_td_conv_new)
# Bidirectional ConvLSTM2D layer
# old ConvLSTM2D took a list of 12 weight tensors, returns a list of 3 concatenated larger tensors.
weight_tensor_bi_convlstm_old = []
for j in range(2): # bidirectional
for i in range(4):
weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 15, 10))) # kernel
weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 10, 10))) # recurrent kernel
weight_tensor_bi_convlstm_old.append(np.zeros((10,))) # bias
bi_convlstm_layer = model.layers[2]
weight_tensor_bi_convlstm_new = preprocess_weights_for_loading(bi_convlstm_layer,
weight_tensor_bi_convlstm_old,
original_keras_version='1')
symbolic_weights = bi_convlstm_layer.weights
assert (len(symbolic_weights) == len(weight_tensor_bi_convlstm_new))
weight_value_tuples += zip(symbolic_weights, weight_tensor_bi_convlstm_new)
K.batch_set_value(weight_value_tuples)
assert np.all(K.eval(model.layers[1].weights[0]) == weight_tensor_td_conv_new[0])
assert np.all(K.eval(model.layers[1].weights[1]) == weight_tensor_td_conv_new[1])
assert np.all(K.eval(model.layers[2].weights[0]) == weight_tensor_bi_convlstm_new[0])
assert np.all(K.eval(model.layers[2].weights[1]) == weight_tensor_bi_convlstm_new[1])
assert np.all(K.eval(model.layers[2].weights[2]) == weight_tensor_bi_convlstm_new[2])
assert np.all(K.eval(model.layers[2].weights[3]) == weight_tensor_bi_convlstm_new[3])
assert np.all(K.eval(model.layers[2].weights[4]) == weight_tensor_bi_convlstm_new[4])
assert np.all(K.eval(model.layers[2].weights[5]) == weight_tensor_bi_convlstm_new[5])
def test_recursion_with_bn_and_loss():
model1 = Sequential([
layers.Dense(5, input_dim=5, activity_regularizer='l1'),
layers.BatchNormalization(),
layers.Dense(5),
])
print('NEW MODEL')
inputs = layers.Input(shape=(5,))
outputs = model1(inputs)
model2 = Model(inputs=inputs, outputs=outputs)
assert len(model1.updates) == 2
assert len(model2.updates) == 2
assert len(model1.losses) == 1
assert len(model2.losses) == 1, model2.layers[1]._per_input_losses
model1.compile(optimizer='sgd', loss='categorical_crossentropy')
model2.compile(optimizer='sgd', loss='categorical_crossentropy')
x = np.ones((3, 5))
y = np.ones((3, 5))
model1.fit(x, y, verbose=0, epochs=1)
model2.fit(x, y, verbose=0, epochs=1)
if __name__ == '__main__':
pytest.main([__file__])
|
|
from sklearn.base import is_classifier
from .base import _get_response
from .. import average_precision_score
from .. import precision_recall_curve
from .._base import _check_pos_label_consistency
from .._classification import check_consistent_length
from ...utils import check_matplotlib_support, deprecated
class PrecisionRecallDisplay:
"""Precision Recall visualization.
    It is recommended to use
    :func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator` or
    :func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions` to create
    a :class:`~sklearn.metrics.PrecisionRecallDisplay`. All parameters are
    stored as attributes.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
    ----------
precision : ndarray
Precision values.
recall : ndarray
Recall values.
average_precision : float, default=None
Average precision. If None, the average precision is not shown.
estimator_name : str, default=None
Name of estimator. If None, then the estimator name is not shown.
pos_label : str or int, default=None
The class considered as the positive class. If None, the class will not
be shown in the legend.
.. versionadded:: 0.24
Attributes
----------
line_ : matplotlib Artist
Precision recall curve.
ax_ : matplotlib Axes
Axes with precision recall curve.
figure_ : matplotlib Figure
Figure containing the curve.
See Also
--------
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given
a binary classifier.
PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve
using predictions from a binary classifier.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import (precision_recall_curve,
... PrecisionRecallDisplay)
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> predictions = clf.predict(X_test)
>>> precision, recall, _ = precision_recall_curve(y_test, predictions)
>>> disp = PrecisionRecallDisplay(precision=precision, recall=recall)
>>> disp.plot()
<...>
>>> plt.show()
"""
def __init__(
self,
precision,
recall,
*,
average_precision=None,
estimator_name=None,
pos_label=None,
):
self.estimator_name = estimator_name
self.precision = precision
self.recall = recall
self.average_precision = average_precision
self.pos_label = pos_label
def plot(self, ax=None, *, name=None, **kwargs):
"""Plot visualization.
Extra keyword arguments will be passed to matplotlib's `plot`.
Parameters
----------
ax : Matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
name : str, default=None
Name of precision recall curve for labeling. If `None`, use
`estimator_name` if not `None`, otherwise no labeling is shown.
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
Object that stores computed values.
"""
check_matplotlib_support("PrecisionRecallDisplay.plot")
name = self.estimator_name if name is None else name
line_kwargs = {"drawstyle": "steps-post"}
if self.average_precision is not None and name is not None:
line_kwargs["label"] = f"{name} (AP = {self.average_precision:0.2f})"
elif self.average_precision is not None:
line_kwargs["label"] = f"AP = {self.average_precision:0.2f}"
elif name is not None:
line_kwargs["label"] = name
line_kwargs.update(**kwargs)
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots()
(self.line_,) = ax.plot(self.recall, self.precision, **line_kwargs)
info_pos_label = (
f" (Positive label: {self.pos_label})" if self.pos_label is not None else ""
)
xlabel = "Recall" + info_pos_label
ylabel = "Precision" + info_pos_label
ax.set(xlabel=xlabel, ylabel=ylabel)
if "label" in line_kwargs:
ax.legend(loc="lower left")
self.ax_ = ax
self.figure_ = ax.figure
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
sample_weight=None,
pos_label=None,
response_method="auto",
name=None,
ax=None,
**kwargs,
):
"""Plot precision-recall curve given an estimator and some data.
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : str or int, default=None
The class considered as the positive class when computing the
            precision and recall metrics. By default, `estimator.classes_[1]`
is considered as the positive class.
response_method : {'predict_proba', 'decision_function', 'auto'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name for labeling curve. If `None`, no name is used.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
See Also
--------
PrecisionRecallDisplay.from_predictions : Plot precision-recall curve
using estimated probabilities or output of decision function.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import PrecisionRecallDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression()
>>> clf.fit(X_train, y_train)
LogisticRegression()
>>> PrecisionRecallDisplay.from_estimator(
... clf, X_test, y_test)
<...>
>>> plt.show()
"""
method_name = f"{cls.__name__}.from_estimator"
check_matplotlib_support(method_name)
if not is_classifier(estimator):
raise ValueError(f"{method_name} only supports classifiers")
y_pred, pos_label = _get_response(
X,
estimator,
response_method,
pos_label=pos_label,
)
name = name if name is not None else estimator.__class__.__name__
return cls.from_predictions(
y,
y_pred,
sample_weight=sample_weight,
name=name,
pos_label=pos_label,
ax=ax,
**kwargs,
)
@classmethod
def from_predictions(
cls,
y_true,
y_pred,
*,
sample_weight=None,
pos_label=None,
name=None,
ax=None,
**kwargs,
):
"""Plot precision-recall curve given binary class predictions.
Parameters
----------
y_true : array-like of shape (n_samples,)
True binary labels.
y_pred : array-like of shape (n_samples,)
Estimated probabilities or output of decision function.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : str or int, default=None
The class considered as the positive class when computing the
precision and recall metrics.
name : str, default=None
Name for labeling curve. If `None`, name will be set to
`"Classifier"`.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
See Also
--------
PrecisionRecallDisplay.from_estimator : Plot precision-recall curve
using an estimator.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import PrecisionRecallDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression()
>>> clf.fit(X_train, y_train)
LogisticRegression()
>>> y_pred = clf.predict_proba(X_test)[:, 1]
>>> PrecisionRecallDisplay.from_predictions(
... y_test, y_pred)
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_predictions")
check_consistent_length(y_true, y_pred, sample_weight)
pos_label = _check_pos_label_consistency(pos_label, y_true)
precision, recall, _ = precision_recall_curve(
y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight
)
average_precision = average_precision_score(
y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight
)
name = name if name is not None else "Classifier"
viz = PrecisionRecallDisplay(
precision=precision,
recall=recall,
average_precision=average_precision,
estimator_name=name,
pos_label=pos_label,
)
return viz.plot(ax=ax, name=name, **kwargs)
@deprecated(
"Function `plot_precision_recall_curve` is deprecated in 1.0 and will be "
"removed in 1.2. Use one of the class methods: "
"PrecisionRecallDisplay.from_predictions or "
"PrecisionRecallDisplay.from_estimator."
)
def plot_precision_recall_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs,
):
"""Plot Precision Recall Curve for binary classifiers.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
.. deprecated:: 1.0
`plot_precision_recall_curve` is deprecated in 1.0 and will be removed in
1.2. Use one of the following class methods:
:func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions` or
:func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator`.
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Binary target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name for labeling curve. If `None`, the name of the
estimator is used.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The class considered as the positive class when computing the precision
        and recall metrics. By default, `estimator.classes_[1]` is considered
as the positive class.
.. versionadded:: 0.24
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
Object that stores computed values.
See Also
--------
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
PrecisionRecallDisplay : Precision Recall visualization.
"""
check_matplotlib_support("plot_precision_recall_curve")
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
precision, recall, _ = precision_recall_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight
)
average_precision = average_precision_score(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight
)
name = name if name is not None else estimator.__class__.__name__
viz = PrecisionRecallDisplay(
precision=precision,
recall=recall,
average_precision=average_precision,
estimator_name=name,
pos_label=pos_label,
)
return viz.plot(ax=ax, name=name, **kwargs)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is an implementation of the Port interface that overrides other
ports and changes the Driver binary to "MockDRT".
The MockDRT objects emulate what a real DRT would do. In particular, they
return the output a real DRT would return for a given test, assuming that
test actually passes (except for reftests, which currently cause the
MockDRT to crash).
"""
import base64
import logging
import optparse
import os
import sys
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput, DriverProxy
from webkitpy.layout_tests.port.factory import PortFactory
_log = logging.getLogger(__name__)
class MockDRTPort(object):
port_name = 'mock'
@classmethod
def determine_full_port_name(cls, host, options, port_name):
return port_name
def __init__(self, host, port_name, **kwargs):
self.__delegate = PortFactory(host).get(port_name.replace('mock-', ''), **kwargs)
def __getattr__(self, name):
return getattr(self.__delegate, name)
def check_build(self, needs_http):
return True
def check_sys_deps(self, needs_http):
return True
def create_driver(self, worker_number, no_timeout=False):
# The magic of the MockDRTPort is that we create a driver that has a
# cmd_line() method monkey-patched to invoke this script instead of DRT.
return DriverProxy(self, worker_number, self._mocked_driver_maker, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
@staticmethod
def _mocked_driver_maker(port, worker_number, pixel_tests, no_timeout=False):
path_to_this_file = port.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
driver = port.__delegate._driver_class()(port, worker_number, pixel_tests, no_timeout)
driver.cmd_line = port._overriding_cmd_line(driver.cmd_line,
port.__delegate._path_to_driver(),
sys.executable,
path_to_this_file,
port.__delegate.name())
return driver
@staticmethod
def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
def new_cmd_line(pixel_tests, per_test_args):
cmd_line = original_cmd_line(pixel_tests, per_test_args)
index = cmd_line.index(driver_path)
cmd_line[index:index + 1] = [python_exe, this_file, '--platform', port_name]
return cmd_line
return new_cmd_line
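    # A hedged illustration (hypothetical paths): given an original driver
    # command line such as
    #     ['/build/Release/DumpRenderTree', '--pixel-tests']
    # new_cmd_line() splices this script in, yielding something like
    #     ['/usr/bin/python', '.../mock_drt.py', '--platform', 'mock-mac',
    #      '--pixel-tests']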
def start_helper(self):
pass
def start_http_server(self, number_of_servers):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_helper(self):
pass
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def main(argv, host, stdin, stdout, stderr):
"""Run the tests."""
options, args = parse_options(argv)
if options.test_shell:
drt = MockTestShell(options, args, host, stdin, stdout, stderr)
else:
drt = MockDRT(options, args, host, stdin, stdout, stderr)
return drt.run()
def parse_options(argv):
# FIXME: We have to do custom arg parsing instead of using the optparse
# module. First, Chromium and non-Chromium DRTs have a different argument
# syntax. Chromium uses --pixel-tests=<path>, and non-Chromium uses
# --pixel-tests as a boolean flag. Second, we don't want to have to list
# every command line flag DRT accepts, but optparse complains about
# unrecognized flags. At some point it might be good to share a common
# DRT options class between this file and webkit.py and chromium.py
# just to get better type checking.
platform_index = argv.index('--platform')
platform = argv[platform_index + 1]
pixel_tests = False
pixel_path = None
test_shell = '--test-shell' in argv
if test_shell:
for arg in argv:
if arg.startswith('--pixel-tests'):
pixel_tests = True
pixel_path = arg[len('--pixel-tests='):]
else:
pixel_tests = '--pixel-tests' in argv
options = optparse.Values({'test_shell': test_shell, 'platform': platform, 'pixel_tests': pixel_tests, 'pixel_path': pixel_path})
return (options, argv)
class MockDRT(object):
def __init__(self, options, args, host, stdin, stdout, stderr):
self._options = options
self._args = args
self._host = host
self._stdout = stdout
self._stdin = stdin
self._stderr = stderr
port_name = None
if options.platform:
port_name = options.platform
self._port = PortFactory(host).get(port_name=port_name, options=options)
self._driver = self._port.create_driver(0)
def run(self):
while True:
line = self._stdin.readline()
if not line:
return 0
driver_input = self.input_from_line(line)
dirname, basename = self._port.split_test(driver_input.test_name)
is_reftest = (self._port.reference_files(driver_input.test_name) or
self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
output = self.output_for_test(driver_input, is_reftest)
self.write_test_output(driver_input, output, is_reftest)
def input_from_line(self, line):
vals = line.strip().split("'")
if len(vals) == 1:
uri = vals[0]
checksum = None
else:
uri = vals[0]
checksum = vals[1]
if uri.startswith('http://') or uri.startswith('https://'):
test_name = self._driver.uri_to_test(uri)
else:
test_name = self._port.relative_test_filename(uri)
return DriverInput(test_name, 0, checksum, self._options.pixel_tests)
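# Example input lines this parser handles (forms assumed from the split
# on the single-quote character):
##   fast/dom/example.html
##   fast/canvas/example.html'mock-checksum
# The optional text after the quote is the expected pixel checksum; URIs
# beginning with http:// or https:// are mapped back to test names first.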
def output_for_test(self, test_input, is_reftest):
port = self._port
actual_text = port.expected_text(test_input.test_name)
actual_audio = port.expected_audio(test_input.test_name)
actual_image = None
actual_checksum = None
if is_reftest:
# Make up some output for reftests.
actual_text = 'reference text\n'
actual_checksum = 'mock-checksum'
actual_image = 'blank'
if test_input.test_name.endswith('-mismatch.html'):
actual_text = 'not reference text\n'
actual_checksum = 'not-mock-checksum'
actual_image = 'not blank'
elif self._options.pixel_tests and test_input.image_hash:
actual_checksum = port.expected_checksum(test_input.test_name)
actual_image = port.expected_image(test_input.test_name)
return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
def write_test_output(self, test_input, output, is_reftest):
if output.audio:
self._stdout.write('Content-Type: audio/wav\n')
self._stdout.write('Content-Transfer-Encoding: base64\n')
self._stdout.write(base64.b64encode(output.audio))
else:
self._stdout.write('Content-Type: text/plain\n')
# FIXME: Note that we don't ensure there is a trailing newline!
# This mirrors actual (Mac) DRT behavior but is a bug.
if output.text:
self._stdout.write(output.text)
self._stdout.write('#EOF\n')
if self._options.pixel_tests and output.image_hash:
self._stdout.write('\n')
self._stdout.write('ActualHash: %s\n' % output.image_hash)
self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
if output.image_hash != test_input.image_hash:
self._stdout.write('Content-Type: image/png\n')
self._stdout.write('Content-Length: %s\n' % len(output.image))
self._stdout.write(output.image)
self._stdout.write('#EOF\n')
self._stdout.flush()
self._stderr.write('#EOF\n')
self._stderr.flush()
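# A sketch (values assumed) of the stdout protocol this produces for a
# text test with pixel tests enabled and matching hashes:
#
##   Content-Type: text/plain
##   <expected text output>
##   #EOF
##
##   ActualHash: mock-checksum
##   ExpectedHash: mock-checksum
##   #EOF
#
# The image/png block (Content-Type, Content-Length, raw bytes) is only
# emitted when the actual and expected hashes differ, and stderr gets its
# own closing #EOF marker.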
class MockTestShell(MockDRT):
def input_from_line(self, line):
vals = line.strip().split()
if len(vals) == 3:
uri, timeout, checksum = vals
else:
uri, timeout = vals
checksum = None
test_name = self._driver.uri_to_test(uri)
return DriverInput(test_name, timeout, checksum, self._options.pixel_tests)
def output_for_test(self, test_input, is_reftest):
# FIXME: This is a hack to make virtual tests work. Need something more general.
original_test_name = test_input.test_name
if '--enable-accelerated-2d-canvas' in self._args and 'canvas' in test_input.test_name:
test_input.test_name = 'platform/chromium/virtual/gpu/' + test_input.test_name
output = super(MockTestShell, self).output_for_test(test_input, is_reftest)
test_input.test_name = original_test_name
return output
def write_test_output(self, test_input, output, is_reftest):
self._stdout.write("#URL:%s\n" % self._driver.test_to_uri(test_input.test_name))
if self._options.pixel_tests and output.image_hash:
self._stdout.write("#MD5:%s\n" % output.image_hash)
if output.image:
self._host.filesystem.maybe_make_directory(self._host.filesystem.dirname(self._options.pixel_path))
self._host.filesystem.write_binary_file(self._options.pixel_path, output.image)
if output.text:
self._stdout.write(output.text)
if output.text and not output.text.endswith('\n'):
self._stdout.write('\n')
self._stdout.write('#EOF\n')
self._stdout.flush()
if __name__ == '__main__':
# Note that the Mock in MockDRT refers to the fact that it is emulating a
# real DRT, and as such, it needs access to a real SystemHost, not a MockSystemHost.
sys.exit(main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr))
|
|
# VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Oct 28 21:10:10 2014 by generateDS.py version 2.12e.
#
# Command line options:
# ('-o', 'schema/vcim/serviceType.py')
#
# Command line arguments:
# schema/vcim/service.xsd
#
# Command line:
# ./generateDS-2.12e/generateDS.py -o "schema/vcim/serviceType.py" schema/vcim/service.xsd
#
# Current working directory (os.getcwd()):
# vchs-api-cli-cli
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
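# Usage sketch (assumed): parsexml_ is a thin wrapper over whichever
# ElementTree implementation was imported above, e.g.
##   doc = parsexml_('service.xsd')
##   root = doc.getroot()
# When lxml is available it substitutes ETCompatXMLParser so that comment
# nodes are skipped, matching plain ElementTree behaviour.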
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%06d' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
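# Worked example (values assumed) of the round trip between xsd:dateTime
# strings and timezone-aware datetime objects:
##   dt = GeneratedsSuper.gds_parse_datetime('2014-10-28T21:10:10.500000+05:30')
##   dt.utcoffset()                 # 5 hours 30 minutes
##   obj = GeneratedsSuper()
##   obj.gds_format_datetime(dt)    # '2014-10-28T21:10:10.500000+05:30'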
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
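# Examples of the quoting helpers above (outputs assumed):
##   quote_xml('a < b & c')              # 'a &lt; b &amp; c'
##   quote_attrib('say "hi"')            # wrapped in single quotes: 'say "hi"'
##   quote_attrib('say "hi" and \'bye\'')  # "say &quot;hi&quot; and 'bye'"
# quote_attrib prefers single quotes when the value contains only double
# quotes, and falls back to &quot; escaping when both kinds appear.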
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class EntityType(GeneratedsSuper):
"""5.6 The base type for all objects in the VCHS model. Has an optional
list of links and href and type attributes. always The URI of
the entity. falsealways The MIME type of the entity. falsealways
The name type of the entity. false"""
subclass = None
superclass = None
def __init__(self, href=None, type_=None, name=None, Link=None, extensiontype_=None):
self.original_tagname_ = None
self.href = _cast(None, href)
self.type_ = _cast(None, type_)
self.name = _cast(None, name)
if Link is None:
self.Link = []
else:
self.Link = Link
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if EntityType.subclass:
return EntityType.subclass(*args_, **kwargs_)
else:
return EntityType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Link(self): return self.Link
def set_Link(self, Link): self.Link = Link
def add_Link(self, value): self.Link.append(value)
def insert_Link_at(self, index, value): self.Link.insert(index, value)
def replace_Link_at(self, index, value): self.Link[index] = value
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.Link
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='EntityType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='EntityType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityType'):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='EntityType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Link_ in self.Link:
Link_.export(outfile, level, namespace_, name_='Link', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='EntityType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
showIndent(outfile, level)
outfile.write('href="%s",\n' % (self.href,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Link=[\n')
level += 1
for Link_ in self.Link:
showIndent(outfile, level)
outfile.write('model_.LinkType(\n')
Link_.exportLiteral(outfile, level, name_='LinkType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.add('href')
self.href = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Link':
obj_ = LinkType.factory()
obj_.build(child_)
self.Link.append(obj_)
obj_.original_tagname_ = 'Link'
# end class EntityType
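# Usage sketch for the generated classes (attribute values hypothetical):
##   link = LinkType(href='https://vchs.example.com/api/services',
##                   type_='application/xml', rel='down')
##   entity = EntityType(href='https://vchs.example.com/api', Link=[link])
##   entity.export(sys.stdout, 0, name_='Entity')
# export() writes the object back out as XML; build() does the reverse,
# populating an instance from a parsed element node.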
class ReferenceType(GeneratedsSuper):
"""5.6 A reference to a entity. Contains an href attribute and optional
name and type attributes. always Contains the URI to the entity.
truealways The resource identifier, expressed in URN format. The
value of this attribute uniquely identifies the resource,
persists for the life of the entity, and is never reused.
falsealways Contains the type of the the entity. falsealways
Contains the name of the the entity. false"""
subclass = None
superclass = None
def __init__(self, href=None, type_=None, id=None, name=None, extensiontype_=None):
self.original_tagname_ = None
self.href = _cast(None, href)
self.type_ = _cast(None, type_)
self.id = _cast(None, id)
self.name = _cast(None, name)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if ReferenceType.subclass:
return ReferenceType.subclass(*args_, **kwargs_)
else:
return ReferenceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ReferenceType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ReferenceType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferenceType'):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='ReferenceType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='ReferenceType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
showIndent(outfile, level)
outfile.write('href="%s",\n' % (self.href,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id="%s",\n' % (self.id,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.add('href')
self.href = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ReferenceType
class LinkType(ReferenceType):
"""5.6 Extends reference type by adding relation attribute. Defines a
hyper-link with a relationship, hyper-link reference, and an
optional MIME type. always Defines the relationship of the link
to the object that contains it. A relationship can be the name
of an operation on the object, a reference to a contained or
containing object, or a reference to an alternate representation
of the object. The relationship value implies the HTTP verb to
use when you use the link's href as a request URL. See the VCHS
API Programming Guide for a list of links and link relations.
true"""
subclass = None
superclass = ReferenceType
def __init__(self, href=None, type_=None, id=None, name=None, rel=None):
self.original_tagname_ = None
super(LinkType, self).__init__(href, type_, id, name, )
self.rel = _cast(None, rel)
def factory(*args_, **kwargs_):
if LinkType.subclass:
return LinkType.subclass(*args_, **kwargs_)
else:
return LinkType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_rel(self): return self.rel
def set_rel(self, rel): self.rel = rel
def hasContent_(self):
if (
super(LinkType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='LinkType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='LinkType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LinkType'):
super(LinkType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
if self.rel is not None and 'rel' not in already_processed:
already_processed.add('rel')
outfile.write(' rel=%s' % (self.gds_format_string(quote_attrib(self.rel).encode(ExternalEncoding), input_name='rel'), ))
def exportChildren(self, outfile, level, namespace_='', name_='LinkType', fromsubclass_=False, pretty_print=True):
super(LinkType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='LinkType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.rel is not None and 'rel' not in already_processed:
already_processed.add('rel')
showIndent(outfile, level)
outfile.write('rel="%s",\n' % (self.rel,))
super(LinkType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(LinkType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('rel', node)
if value is not None and 'rel' not in already_processed:
already_processed.add('rel')
self.rel = value
super(LinkType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(LinkType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class LinkType
class ServiceType(ReferenceType):
"""5.6 Represents a reference to VCHS service meta-data. 5.6 Defines
the type of service this object refers to. Possible values are:
compute:dedicatedcloud : reference to an instance of a Dedicated
Cloud. compute:vpc: reference to a Virtual Private Cloud. 5.6
Provides the Service ID, assigned by VCHS for this service. 5.6
Provides the geographical region name for this service."""
subclass = None
superclass = ReferenceType
def __init__(self, href=None, type_=None, id=None, name=None, serviceType=None, serviceId=None, region=None):
self.original_tagname_ = None
super(ServiceType, self).__init__(href, type_, id, name, )
self.serviceType = _cast(None, serviceType)
self.serviceId = _cast(None, serviceId)
self.region = _cast(None, region)
def factory(*args_, **kwargs_):
if ServiceType.subclass:
return ServiceType.subclass(*args_, **kwargs_)
else:
return ServiceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_serviceType(self): return self.serviceType
def set_serviceType(self, serviceType): self.serviceType = serviceType
def get_serviceId(self): return self.serviceId
def set_serviceId(self, serviceId): self.serviceId = serviceId
def get_region(self): return self.region
def set_region(self, region): self.region = region
def hasContent_(self):
if (
super(ServiceType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ServiceType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ServiceType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceType'):
super(ServiceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceType')
if self.serviceType is not None and 'serviceType' not in already_processed:
already_processed.add('serviceType')
outfile.write(' serviceType=%s' % (self.gds_format_string(quote_attrib(self.serviceType).encode(ExternalEncoding), input_name='serviceType'), ))
if self.serviceId is not None and 'serviceId' not in already_processed:
already_processed.add('serviceId')
outfile.write(' serviceId=%s' % (self.gds_format_string(quote_attrib(self.serviceId).encode(ExternalEncoding), input_name='serviceId'), ))
if self.region is not None and 'region' not in already_processed:
already_processed.add('region')
outfile.write(' region=%s' % (self.gds_format_string(quote_attrib(self.region).encode(ExternalEncoding), input_name='region'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ServiceType', fromsubclass_=False, pretty_print=True):
super(ServiceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='ServiceType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.serviceType is not None and 'serviceType' not in already_processed:
already_processed.add('serviceType')
showIndent(outfile, level)
outfile.write('serviceType="%s",\n' % (self.serviceType,))
if self.serviceId is not None and 'serviceId' not in already_processed:
already_processed.add('serviceId')
showIndent(outfile, level)
outfile.write('serviceId="%s",\n' % (self.serviceId,))
if self.region is not None and 'region' not in already_processed:
already_processed.add('region')
showIndent(outfile, level)
outfile.write('region="%s",\n' % (self.region,))
super(ServiceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ServiceType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('serviceType', node)
if value is not None and 'serviceType' not in already_processed:
already_processed.add('serviceType')
self.serviceType = value
value = find_attr_value_('serviceId', node)
if value is not None and 'serviceId' not in already_processed:
already_processed.add('serviceId')
self.serviceId = value
value = find_attr_value_('region', node)
if value is not None and 'region' not in already_processed:
already_processed.add('region')
self.region = value
super(ServiceType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ServiceType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ServiceType
class ServiceListType(EntityType):
"""5.6 Represents a list of VCHS Services."""
subclass = None
superclass = EntityType
def __init__(self, href=None, type_=None, name=None, Link=None, Service=None):
self.original_tagname_ = None
super(ServiceListType, self).__init__(href, type_, name, Link, )
if Service is None:
self.Service = []
else:
self.Service = Service
def factory(*args_, **kwargs_):
if ServiceListType.subclass:
return ServiceListType.subclass(*args_, **kwargs_)
else:
return ServiceListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Service(self): return self.Service
def set_Service(self, Service): self.Service = Service
def add_Service(self, value): self.Service.append(value)
def insert_Service_at(self, index, value): self.Service.insert(index, value)
def replace_Service_at(self, index, value): self.Service[index] = value
def hasContent_(self):
if (
self.Service or
super(ServiceListType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ServiceListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ServiceListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceListType'):
super(ServiceListType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceListType')
def exportChildren(self, outfile, level, namespace_='', name_='ServiceListType', fromsubclass_=False, pretty_print=True):
super(ServiceListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Service_ in self.Service:
Service_.export(outfile, level, namespace_, name_='Service', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ServiceListType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ServiceListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ServiceListType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('Service=[\n')
level += 1
for Service_ in self.Service:
showIndent(outfile, level)
outfile.write('model_.ServiceType(\n')
Service_.exportLiteral(outfile, level, name_='ServiceType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(ServiceListType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Service':
obj_ = ServiceType.factory()
obj_.build(child_)
self.Service.append(obj_)
obj_.original_tagname_ = 'Service'
super(ServiceListType, self).buildChildren(child_, node, nodeName_, True)
# end class ServiceListType
GDSClassesMapping = {
'Services': ServiceListType,
'Link': LinkType,
'Service': ServiceType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'ServiceListType'
rootClass = ServiceListType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'ServiceListType'
rootClass = ServiceListType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'ServiceListType'
rootClass = ServiceListType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
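# Example (input assumed) of parsing a Services document from a string:
##   xml = '<Services href="https://vchs.example.com/api/services"/>'
##   services = parseString(xml, silence=True)
##   services.get_href()   # 'https://vchs.example.com/api/services'
# The root tag 'Services' is resolved to ServiceListType through
# GDSClassesMapping above.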
def parseLiteral(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'ServiceListType'
rootClass = ServiceListType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from serviceType import *\n\n')
sys.stdout.write('import serviceType as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"EntityType",
"LinkType",
"ReferenceType",
"ServiceListType",
"ServiceType"
]
|
|
import os
import sys
from importlib import import_module, reload
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from .exceptions import (
AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
NodeNotFoundError,
)
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader:
"""
Load migration files from disk and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the Python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
"""
Return the path to the migrations module for the specified app_label
and a boolean indicating if the module is specified in
settings.MIGRATION_MODULES.
"""
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label], True
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False
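# For example (a hypothetical settings override), pointing an app at a
# custom migrations package makes this method return (module, True):
##   MIGRATION_MODULES = {'blog': 'blog.db_migrations'}
# while apps without an override resolve to '<app_package>.migrations'
# with explicit=False.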
def load_disk(self):
"""Load the migrations from all INSTALLED_APPS from disk."""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name, explicit = self.migrations_module(app_config.label)
if module_name is None:
self.unmigrated_apps.add(app_config.label)
continue
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if ((explicit and self.ignore_no_migrations) or (
not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
self.unmigrated_apps.add(app_config.label)
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
self.unmigrated_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
reload(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
for migration_name in migration_names:
migration_module = import_module("%s.%s" % (module_name, migration_name))
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
migration_name,
app_config.label,
)
def get_migration(self, app_label, name_prefix):
"""Return the named migration or raise NodeNotFoundError."""
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"""
Return the migration(s) which match the given app label and name_prefix.
"""
# Do the search
results = []
for migration_app_label, migration_name in self.disk_migrations:
if migration_app_label == app_label and migration_name.startswith(name_prefix):
results.append((migration_app_label, migration_name))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif not results:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return self.graph.root_nodes(key[0])[0]
else: # "__latest__"
return self.graph.leaf_nodes(key[0])[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def add_internal_dependencies(self, key, migration):
"""
Internal dependencies need to be added first to ensure `__first__`
dependencies find the correct root node.
"""
for parent in migration.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325).
continue
self.graph.add_dependency(migration, key, parent, skip_validation=True)
def add_external_dependencies(self, key, migration):
for parent in migration.dependencies:
# Skip internal dependencies
if key[0] == parent[0]:
continue
parent = self.check_key(parent, key[0])
if parent is not None:
self.graph.add_dependency(migration, key, parent, skip_validation=True)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
self.graph.add_dependency(migration, child, key, skip_validation=True)
def build_graph(self):
"""
Build a migration dependency graph using both the disk and database.
        The graph must be rebuilt after migrations are applied; this is rarely
        an issue in practice, since migrations normally run in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# To start, populate the migration graph with nodes for ALL migrations
# and their dependencies. Also make note of replacing migrations at this step.
self.graph = MigrationGraph()
self.replacements = {}
for key, migration in self.disk_migrations.items():
self.graph.add_node(key, migration)
# Internal (aka same-app) dependencies.
self.add_internal_dependencies(key, migration)
# Replacing migrations.
if migration.replaces:
self.replacements[key] = migration
# Add external dependencies now that the internal ones have been resolved.
for key, migration in self.disk_migrations.items():
self.add_external_dependencies(key, migration)
# Carry out replacements where possible.
for key, migration in self.replacements.items():
# Get applied status of each of this migration's replacement targets.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
# Ensure the replacing migration is only marked as applied if all of
# its replacement targets are.
if all(applied_statuses):
self.applied_migrations.add(key)
else:
self.applied_migrations.discard(key)
# A replacing migration can be used if either all or none of its
# replacement targets have been applied.
if all(applied_statuses) or (not any(applied_statuses)):
self.graph.remove_replaced_nodes(key, migration.replaces)
else:
# This replacing migration cannot be used because it is partially applied.
# Remove it from the graph and remap dependencies to it (#25945).
self.graph.remove_replacement_node(key, migration.replaces)
# Ensure the graph is consistent.
try:
self.graph.validate_consistency()
except NodeNotFoundError as exc:
# Check if the missing node could have been replaced by any squash
# migration but wasn't because the squash migration was partially
# applied before. In that case raise a more understandable exception
# (#23556).
# Get reverse replacements.
reverse_replacements = {}
for key, migration in self.replacements.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Try to reraise exception with more detail.
if exc.node in reverse_replacements:
candidates = reverse_replacements.get(exc.node, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
raise NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
exc.origin, exc.node[0], exc.node[1], tries
),
exc.node
) from exc
raise exc
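    # Replacement semantics in the loop above, for a squashed migration S
    # declaring replaces = [0001, 0002] (illustrative):
    #
    #     both 0001 and 0002 applied  -> S marked applied; 0001/0002 removed
    #     neither applied             -> S usable; 0001/0002 removed
    #     only 0001 applied (partial) -> S itself is removed from the graph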
def check_consistent_history(self, connection):
"""
Raise InconsistentMigrationHistory if any applied migrations have
unapplied dependencies.
"""
recorder = MigrationRecorder(connection)
applied = recorder.applied_migrations()
for migration in applied:
# If the migration is unknown, skip it.
if migration not in self.graph.nodes:
continue
for parent in self.graph.node_map[migration].parents:
if parent not in applied:
# Skip unapplied squashed migrations that have all of their
# `replaces` applied.
if parent in self.replacements:
if all(m in applied for m in self.replacements[parent].replaces):
continue
raise InconsistentMigrationHistory(
"Migration {}.{} is applied before its dependency "
"{}.{} on database '{}'.".format(
migration[0], migration[1], parent[0], parent[1],
connection.alias,
)
)
def detect_conflicts(self):
"""
Look through the loaded graph and detect any conflicts - apps
with more than one leaf migration. Return a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
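    # Example return value (hypothetical): two leaf migrations in 'polls'
    # created on parallel branches would be reported as
    #
    #     {'polls': {'0003_branch_a', '0003_branch_b'}}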
def project_state(self, nodes=None, at_end=True):
"""
Return a ProjectState object representing the most recent state
that the loaded migrations represent.
See graph.make_state() for the meaning of "nodes" and "at_end".
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
|
|
import typing
import pyecore.ecore as Ecore
from pyecore.resources import ResourceSet, URI, Resource
import functools
from pyecore.notification import EObserver
import inspect
from . import TransformationTrace as trace
class ResultObserver(EObserver):
def notifyChanged(self, notif):
print(notif)
class EObjectProxy(object):
def __init__(self, instance):
object.__setattr__(self, 'wrapped', instance)
object.__setattr__(self, 'wrapped_eClass', instance.eClass)
def __getattribute__(self, name):
wrapped = object.__getattribute__(self, 'wrapped')
eClass = object.__getattribute__(self, 'wrapped_eClass')
result = getattr(wrapped, name)
if eClass.findEStructuralFeature(name):
print('access', name, ':', result, 'for', wrapped)
return result
def __eq__(self, other):
return object.__getattribute__(self, 'wrapped').__eq__(other)
def __hash__(self):
return object.__getattribute__(self, 'wrapped').__hash__()
def __setattr__(self, name, value):
wrapped = object.__getattribute__(self, 'wrapped')
if isinstance(value, EObjectProxy):
value = object.__getattribute__(value, 'wrapped')
return setattr(wrapped, name, value)
def __str__(self):
wrapped = object.__getattribute__(self, 'wrapped')
return wrapped.__str__()
def objects(resource):
for elt in resource.contents:
yield elt
yield from elt.eAllContents()
def objects_of_kind(resource, type):
for elt in resource.contents:
if isinstance(elt, type):
yield elt
for x in elt.eAllContents():
if isinstance(x, type):
yield x
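# Usage sketch (assumes a 'my_model.xmi' resource exists; not part of the
# library API): iterate every EClass contained in a loaded model.
#
#     rset = ResourceSet()
#     resource = rset.get_resource(URI('my_model.xmi'))
#     for eclass in objects_of_kind(resource, Ecore.EClass):
#         print(eclass.name)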
class Parameters(object):
def __init__(self, transformation, parameter_names):
self.transformation = transformation
self.parameter_names = parameter_names
def __getitem__(self, item):
if type(item) is str:
return getattr(self, item)
return getattr(self, self.parameter_names[item])
def load_model(model_path):
rset = ResourceSet()
resource = rset.get_resource(model_path)
return resource
class Transformation(object):
def __init__(self, name, inputs, outputs):
self.name = name
self.inputs_def = inputs if inputs else []
self.outputs_def = outputs if outputs else []
        self.registered_mappings = []
self._main = None
@property
def inouts(self):
return [k for k in self.inputs_def if k in self.outputs_def]
def main(self, fun):
self._main = fun
return fun
def run(self, clean_mappings_cache=True, resource_set=None, **kwargs):
        # Stash the execution context in this module's frame globals so that
        # the mapping wrappers (which run in the same module) can retrieve it.
        sp = inspect.currentframe()
        context = TransformationExecution(self, resource_set)
        sp.f_globals["mycontext"] = context
params = {}
for in_model in self.inputs_def:
try:
param = kwargs.pop(in_model)
if isinstance(param, Ecore.EObject):
if param.eResource:
resource = param.eResource
else:
rset = context.resource_set
resource = rset.create_resource(URI(in_model))
resource.append(param)
elif isinstance(param, Resource):
resource = param
else:
resource = load_model(param)
setattr(context.inputs, in_model, resource)
params[in_model] = resource
if in_model in self.inouts:
setattr(context.outputs, in_model, resource)
params[in_model] = resource
            except KeyError as e:
                raise type(e)("'{}' is a missing input model"
                              .format(in_model)) from None
for out_model in list(set(self.outputs_def) - set(self.inouts)):
resource = context.resource_set.create_resource(URI(out_model))
setattr(context.outputs, out_model, resource)
params[out_model] = resource
context.primary_output = context.outputs[0]
self._main(**params)
if clean_mappings_cache:
            for mapping in self.registered_mappings:
mapping.cache.cache_clear()
return context
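    # Usage sketch (hypothetical names): declaring and running a
    # transformation with one input and one output model.
    #
    #     t = Transformation('copy', inputs=['in_model'], outputs=['out_model'])
    #
    #     @t.main
    #     def start(in_model=None, out_model=None):
    #         ...  # call mappings here
    #
    #     context = t.run(in_model='my_model.xmi')
    #     context.outputs.out_model  # the Resource created for the output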
def mapping(self, f=None, output_model=None, when=None):
if not f:
return functools.partial(self.mapping,
output_model=output_model,
when=when)
        self.registered_mappings.append(f)
f.__mapping__ = True
result_var_name = 'result'
self_var_name = 'self'
f.self_eclass = typing.get_type_hints(f).get(self_var_name)
if f.self_eclass is None:
raise ValueError("Missing 'self' parameter for mapping: '{}'"
.format(f.__name__))
f.result_eclass = typing.get_type_hints(f).get('return')
f.inout = f.result_eclass is None
output_model_name = output_model or self.outputs_def[0]
f.output_def = None if f.inout else output_model_name
@functools.wraps(f)
def inner(*args, **kwargs):
if f.inout:
index = f.__code__.co_varnames.index(self_var_name)
result = kwargs.get(self_var_name, args[index])
elif f.result_eclass is Ecore.EClass:
result = f.result_eclass('')
else:
result = f.result_eclass()
inputs = [a for a in args if isinstance(a, Ecore.EObject)]
print('CREATE', result, 'FROM', inputs, 'BY', f.__name__)
# Create object for the trace
sp = inspect.currentframe()
context = sp.f_globals["mycontext"]
# try:
# rule = context.trace[f.__name__]
# except Exception:
# rule = trace.Rule(transformation=context.trace, name=f.__name__)
# context.trace.rules.append(rule)
# record = trace.Record()
# for element in args:
# if isinstance(element, Ecore.EObject):
# record.inputs.append(trace.Attribute(old_value=element))
# else:
# record.inputs.append(trace.ObjectReference(element))
# record.outputs.append(trace.ObjectReference(old_value=result))
# rule.records.append(record)
# Inject new parameter
g = f.__globals__
marker = object()
oldvalue = g.get(result_var_name, marker)
g[result_var_name] = result
observer = ResultObserver(notifier=result)
new_args = [EObjectProxy(obj)
if isinstance(obj, Ecore.EObject)
else obj
for obj in args]
for key, value in kwargs.items():
if isinstance(value, Ecore.EObject):
kwargs[key] = EObjectProxy(value)
try:
f(*new_args, **kwargs)
finally:
if oldvalue is marker:
del g[result_var_name]
else:
g[result_var_name] = oldvalue
result.listeners.remove(observer)
if f.output_def and \
result not in context.outputs[f.output_def].contents:
context.outputs[f.output_def].append(result)
return result
if when:
@functools.wraps(inner)
def when_inner(*args, **kwargs):
if when(*args, **kwargs):
return inner(*args, **kwargs)
return when_inner
cached_fun = functools.lru_cache()(inner)
f.cache = cached_fun
return cached_fun
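    # Mapping sketch (hypothetical): the 'self' annotation selects the input
    # element type, the return annotation the type of the created output, and
    # the injected 'result' global refers to the object being produced.
    #
    #     @t.mapping
    #     def copy_class(self: Ecore.EClass) -> Ecore.EClass:
    #         result.name = self.name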
def disjunct(self, f=None, mappings=None):
if not f:
return functools.partial(self.disjunct, mappings=mappings)
@functools.wraps(f)
def inner(*args, **kwargs):
for fun in mappings:
result = fun(*args, **kwargs)
if result is not None:
break
f(*args, **kwargs)
return result
return inner
class TransformationExecution(object):
def __init__(self, transfo, resource_set=None):
# self.trace = trace.TransformationTrace()
self.trace = None # not yet supported
self.inputs = Parameters(transfo, transfo.inputs_def)
self.outputs = Parameters(transfo, transfo.outputs_def)
self.transformation = transfo
self.resource_set = resource_set if resource_set else ResourceSet()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to read data in the graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
# Default name for key in the feature dict.
KEY_FEATURE_NAME = '__key__'
def read_batch_examples(file_pattern, batch_size, reader,
randomize_input=True, num_epochs=None,
queue_capacity=10000, num_threads=1,
read_batch_size=1, parse_fn=None,
name=None):
"""Adds operations to read, queue, batch `Example` protos.
  Given a file pattern (or list of files), this sets up a queue of file names,
  reads `Example` protos using the provided `reader`, and uses a batch queue
  to create batches of examples of size `batch_size`.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.initialize_all_variables()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
_, examples = read_keyed_batch_examples(
file_pattern=file_pattern, batch_size=batch_size, reader=reader,
randomize_input=randomize_input, num_epochs=num_epochs,
queue_capacity=queue_capacity, num_threads=num_threads,
read_batch_size=read_batch_size, parse_fn=parse_fn, name=name)
return examples
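# Usage sketch (assumes matching TFRecord files exist; names are illustrative,
# not part of this module):
#
#     examples = read_batch_examples(
#         '/data/train-*.tfrecord', batch_size=128,
#         reader=io_ops.TFRecordReader, num_epochs=5)
#     # `examples` is a string Tensor of serialized Example protos; start the
#     # queue runners before evaluating it.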
def read_keyed_batch_examples(
file_pattern, batch_size, reader,
randomize_input=True, num_epochs=None,
queue_capacity=10000, num_threads=1,
read_batch_size=1, parse_fn=None,
name=None):
"""Adds operations to read, queue, batch `Example` protos.
  Given a file pattern (or list of files), this sets up a queue of file names,
  reads `Example` protos using the provided `reader`, and uses a batch queue
  to create batches of examples of size `batch_size`.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.initialize_all_variables()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
# Retrieve files to read.
if isinstance(file_pattern, list):
file_names = file_pattern
if not file_names:
      raise ValueError('No files given to read_keyed_batch_examples.')
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError('No files match %s.' % file_pattern)
# Sort files so it will be deterministic for unit tests. They'll be shuffled
# in `string_input_producer` if `randomize_input` is enabled.
if not randomize_input:
file_names = sorted(file_names)
# Check input parameters are given and reasonable.
if (not queue_capacity) or (queue_capacity <= 0):
raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
if (batch_size is None) or (
(not isinstance(batch_size, ops.Tensor)) and
(batch_size <= 0 or batch_size > queue_capacity)):
raise ValueError(
'Invalid batch_size %s, with queue_capacity %s.' %
(batch_size, queue_capacity))
if (read_batch_size is None) or (
(not isinstance(read_batch_size, ops.Tensor)) and
(read_batch_size <= 0)):
raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
if (not num_threads) or (num_threads <= 0):
raise ValueError('Invalid num_threads %s.' % num_threads)
if (num_epochs is not None) and (num_epochs <= 0):
raise ValueError('Invalid num_epochs %s.' % num_epochs)
with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
# Setup filename queue with shuffling.
with ops.name_scope('file_name_queue') as file_name_queue_scope:
file_name_queue = input_ops.string_input_producer(
constant_op.constant(file_names, name='input'),
shuffle=randomize_input, num_epochs=num_epochs,
name=file_name_queue_scope)
# Create readers, one per thread and set them to read from filename queue.
with ops.name_scope('read'):
example_list = []
for _ in range(num_threads):
if read_batch_size > 1:
keys, examples_proto = reader().read_up_to(file_name_queue,
read_batch_size)
else:
keys, examples_proto = reader().read(file_name_queue)
if parse_fn:
parsed_examples = parse_fn(examples_proto)
# Map keys into example map because batch_join doesn't support
# tuple of Tensor + dict.
if isinstance(parsed_examples, dict):
parsed_examples[KEY_FEATURE_NAME] = keys
example_list.append(parsed_examples)
else:
example_list.append((keys, parsed_examples))
else:
example_list.append((keys, examples_proto))
    enqueue_many = read_batch_size > 1
    allow_smaller_final_batch = num_epochs is not None
# Setup batching queue given list of read example tensors.
if randomize_input:
if isinstance(batch_size, ops.Tensor):
min_after_dequeue = int(queue_capacity * 0.4)
else:
min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
queued_examples_with_keys = input_ops.shuffle_batch_join(
example_list, batch_size, capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=enqueue_many, name=scope,
allow_smaller_final_batch=allow_smaller_final_batch)
else:
queued_examples_with_keys = input_ops.batch_join(
example_list, batch_size, capacity=queue_capacity,
enqueue_many=enqueue_many, name=scope,
allow_smaller_final_batch=allow_smaller_final_batch)
if parse_fn and isinstance(queued_examples_with_keys, dict):
queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME)
return queued_keys, queued_examples_with_keys
return queued_examples_with_keys
def read_keyed_batch_features(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
reader_num_threads=1,
feature_queue_capacity=100,
num_queue_runners=2,
parser_num_threads=None,
parse_fn=None,
name=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
  Given a file pattern (or list of files), this sets up a queue of file names,
  reads `Example` protos using the provided `reader`, uses a batch queue to
  create batches of examples of size `batch_size`, and parses each example
  according to the `features` specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.initialize_local_variables() as shown in the tests.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Number of queue runners to start for the feature queue,
Adding multiple queue runners for the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing.
parser_num_threads: (Deprecated) The number of threads to parse examples.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
if parser_num_threads:
# TODO(sibyl-Aix6ihai): Remove on Sept 3 2016.
    logging.warning('parser_num_threads is deprecated, it will be removed on '
                    'Sept 3 2016')
with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
keys, examples = read_keyed_batch_examples(
file_pattern, batch_size, reader, randomize_input=randomize_input,
num_epochs=num_epochs, queue_capacity=queue_capacity,
num_threads=reader_num_threads, read_batch_size=batch_size,
parse_fn=parse_fn, name=scope)
# Parse the example.
feature_map = parsing_ops.parse_example(examples, features)
return queue_parsed_features(
feature_map,
keys=keys,
feature_queue_capacity=feature_queue_capacity,
num_queue_runners=num_queue_runners,
name=scope)
def queue_parsed_features(parsed_features,
keys=None,
feature_queue_capacity=100,
num_queue_runners=2,
name=None):
"""Speeds up parsing by using queues to do it asynchronously.
  This function adds the tensors in `parsed_features` to a queue, which allows
  the parsing (or any other expensive op before it) to run asynchronously with
  respect to the rest of the training graph. This greatly improves read latency
  and speeds up training, since the data will already be parsed and ready when
  each training step needs it.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects.
keys: `Tensor` of string keys.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Number of queue runners to start for the feature queue,
Adding multiple queue runners for the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` corresponding to `keys` if provided, otherwise `None`.
- A dict of string key to `Tensor` or `SparseTensor` objects corresponding
to `parsed_features`.
"""
args = list(parsed_features.values())
if keys is not None:
args += [keys]
with ops.name_scope(name, 'queue_parsed_features', args):
# Lets also add preprocessed tensors into the queue types for each item of
# the queue.
tensors_to_enqueue = []
# Each entry contains the key, and a boolean which indicates whether the
# tensor was a sparse tensor.
tensors_mapping = []
# TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
# tensors into a queue. This could be taken care in somewhere else so others
# can reuse it. Also, QueueBase maybe extended to handle sparse tensors
# directly.
for key in sorted(parsed_features.keys()):
tensor = parsed_features[key]
if isinstance(tensor, ops.SparseTensor):
tensors_mapping.append((key, True))
tensors_to_enqueue.extend([tensor.indices, tensor.values, tensor.shape])
else:
tensors_mapping.append((key, False))
tensors_to_enqueue.append(tensor)
if keys is not None:
tensors_to_enqueue.append(keys)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
# Add a summary op to debug if our feature queue is full or not.
logging_ops.scalar_summary('queue/parsed_features/%s/fraction_of_%d_full' %
(input_queue.name, feature_queue_capacity),
math_ops.cast(input_queue.size(), dtypes.float32)
* (1. / feature_queue_capacity))
# Add multiple queue runners so that the queue is always full. Adding more
# than two queue-runners may hog the cpu on the worker to fill up the queue.
for _ in range(num_queue_runners):
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)],
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
dequeued_tensors = input_queue.dequeue()
# Reset shapes on dequeued tensors.
for i in range(len(tensors_to_enqueue)):
dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape())
# Recreate feature mapping according to the original dictionary.
dequeued_parsed_features = {}
index = 0
for key, is_sparse_tensor in tensors_mapping:
if is_sparse_tensor:
# Three tensors are (indices, values, shape).
dequeued_parsed_features[key] = ops.SparseTensor(
dequeued_tensors[index], dequeued_tensors[index + 1],
dequeued_tensors[index + 2])
index += 3
else:
dequeued_parsed_features[key] = dequeued_tensors[index]
index += 1
dequeued_keys = None
if keys is not None:
dequeued_keys = dequeued_tensors[-1]
return dequeued_keys, dequeued_parsed_features
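# Round-trip note for the queue above: a `SparseTensor` is enqueued as three
# dense tensors (indices, values, shape) and reassembled after dequeue, so a
# hypothetical sparse feature 'tags' occupies three queue slots but reappears
# as a single `SparseTensor` under the 'tags' key.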
def read_batch_features(file_pattern, batch_size, features, reader,
randomize_input=True, num_epochs=None,
queue_capacity=10000, feature_queue_capacity=100,
reader_num_threads=1, parser_num_threads=1,
parse_fn=None, name=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
  Given a file pattern (or list of files), this sets up a queue of file names,
  reads `Example` protos using the provided `reader`, uses a batch queue to
  create batches of examples of size `batch_size`, and parses each example
  according to the `features` specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.initialize_local_variables() as shown in the tests.
queue_capacity: Capacity for input queue.
feature_queue_capacity: Capacity of the parsed features queue. Set this
value to a small number, for example 5 if the parsed features are large.
reader_num_threads: The number of threads to read examples.
parser_num_threads: The number of threads to parse examples.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
_, features = read_keyed_batch_features(
file_pattern, batch_size, features, reader,
randomize_input=randomize_input, num_epochs=num_epochs,
queue_capacity=queue_capacity,
feature_queue_capacity=feature_queue_capacity,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
parse_fn=parse_fn, name=name)
return features
def read_batch_record_features(file_pattern, batch_size, features,
randomize_input=True, num_epochs=None,
queue_capacity=10000, reader_num_threads=1,
parser_num_threads=1,
name='dequeue_record_examples'):
"""Reads TFRecord, queues, batches and parses `Example` proto.
See more detailed description in `read_examples`.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.initialize_local_variables() as shown in the tests.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples.
parser_num_threads: The number of threads to parse examples.
name: Name of resulting op.
Returns:
A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
return read_batch_features(
file_pattern=file_pattern, batch_size=batch_size, features=features,
reader=io_ops.TFRecordReader,
randomize_input=randomize_input, num_epochs=num_epochs,
queue_capacity=queue_capacity, reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads, name=name)
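# Usage sketch (hypothetical feature spec; not part of this module):
#
#     features = read_batch_record_features(
#         '/data/train-*.tfrecord', batch_size=64,
#         features={'label': parsing_ops.FixedLenFeature([], dtypes.int64)})
#     # -> dict mapping 'label' to a batched int64 Tensor.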
|
|
# Copyright 2020-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
def main():
settings = get_settings_from_env()
server = server_factory(**settings)
server.serve_forever()
def get_settings_from_env(controller_port=None,
visualization_server_image=None, frontend_image=None,
visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None,
minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None):
"""
    Returns a dict of settings from environment variables relevant to the
    controller. Environment settings can be overridden by passing them here
    as arguments.
Settings are pulled from the all-caps version of the setting name. The
following defaults are used if those environment variables are not set
to enable backwards compatibility with previous versions of this script:
visualization_server_image: gcr.io/ml-pipeline/visualization-server
visualization_server_tag: value of KFP_VERSION environment variable
frontend_image: gcr.io/ml-pipeline/frontend
frontend_tag: value of KFP_VERSION environment variable
disable_istio_sidecar: Required (no default)
minio_access_key: Required (no default)
minio_secret_key: Required (no default)
"""
settings = dict()
settings["controller_port"] = \
controller_port or \
os.environ.get("CONTROLLER_PORT", "8080")
settings["visualization_server_image"] = \
visualization_server_image or \
os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server")
settings["frontend_image"] = \
frontend_image or \
os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend")
# Look for specific tags for each image first, falling back to
# previously used KFP_VERSION environment variable for backwards
# compatibility
settings["visualization_server_tag"] = \
visualization_server_tag or \
os.environ.get("VISUALIZATION_SERVER_TAG") or \
os.environ["KFP_VERSION"]
settings["frontend_tag"] = \
frontend_tag or \
os.environ.get("FRONTEND_TAG") or \
os.environ["KFP_VERSION"]
settings["disable_istio_sidecar"] = \
disable_istio_sidecar if disable_istio_sidecar is not None \
else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
settings["minio_access_key"] = \
minio_access_key or \
base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
settings["minio_secret_key"] = \
minio_secret_key or \
base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
# KFP_DEFAULT_PIPELINE_ROOT is optional
settings["kfp_default_pipeline_root"] = \
kfp_default_pipeline_root or \
os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
return settings
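# Resolution order illustrated (hypothetical environment): with
# VISUALIZATION_SERVER_TAG unset and KFP_VERSION=1.7.0, the visualization
# server tag falls back to '1.7.0'; passing visualization_server_tag='dev' to
# this function takes precedence over both environment variables.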
def server_factory(visualization_server_image,
visualization_server_tag, frontend_image, frontend_tag,
disable_istio_sidecar, minio_access_key,
minio_secret_key, kfp_default_pipeline_root=None,
url="", controller_port=8080):
"""
Returns an HTTPServer populated with Handler with customized settings
"""
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
desired_configmap_count = 1
desired_resources = []
if kfp_default_pipeline_root:
desired_configmap_count = 2
desired_resources += [{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "kfp-launcher",
"namespace": namespace,
},
"data": {
"defaultPipelineRoot": kfp_default_pipeline_root,
},
}]
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready":
len(children["Secret.v1"]) == 1 and
len(children["ConfigMap.v1"]) == desired_configmap_count and
len(children["Deployment.apps/v1"]) == 2 and
len(children["Service.v1"]) == 2 and
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and
len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and
"True" or "False"
}
# Generate the desired child object(s).
desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image": f"{visualization_server_image}:{visualization_server_tag}",
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
"resources": {
"requests": {
"cpu": "50m",
"memory": "200Mi"
},
"limits": {
"cpu": "500m",
"memory": "1Gi"
},
}
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "security.istio.io/v1beta1",
"kind": "AuthorizationPolicy",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
}
},
"rules": [{
"from": [{
"source": {
"principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
}
}]
}]
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image": f"{frontend_image}:{frontend_tag}",
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}],
"env": [
{
"name": "MINIO_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"key": "accesskey",
"name": "mlpipeline-minio-artifact"
}
}
},
{
"name": "MINIO_SECRET_KEY",
"valueFrom": {
"secretKeyRef": {
"key": "secretkey",
"name": "mlpipeline-minio-artifact"
}
}
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "70Mi"
},
"limits": {
"cpu": "100m",
"memory": "500Mi"
},
}
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
]
print('Received request:\n', json.dumps(parent, indent=2, sort_keys=True))
print('Desired resources except secrets:\n', json.dumps(desired_resources, indent=2, sort_keys=True))
            # Appended after the prints above so this sensitive Secret data is
            # not logged.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": minio_access_key,
"secretkey": minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
return HTTPServer((url, int(controller_port)), Controller)
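# Request/response sketch for the webhook above (shapes per the sync() hook;
# values illustrative): Metacontroller POSTs the observed state and expects
# the desired state back.
#
#     request  -> {"parent": {"metadata": {...}}, "children": {...}}
#     response <- {"status": {...}, "children": [...desired resources...]}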
if __name__ == "__main__":
main()
|
|
"""TestCase and TestSuite artifacts and testing decorators."""
import itertools
import re
import sys
import types
import warnings
from cStringIO import StringIO
from test.bootstrap import config
from test.lib import assertsql, util as testutil
from sqlalchemy.util import decorator
from engines import drop_all_tables
from sqlalchemy import exc as sa_exc, util, types as sqltypes, schema, \
pool, orm
from sqlalchemy.engine import default
from exclusions import db_spec, _is_excluded, fails_if, skip_if, future,\
fails_on, fails_on_everything_except, skip, only_on, exclude, against,\
_server_version
crashes = skip
# sugar ('testing.db'); set here by config() at runtime
db = None
# more sugar, installed by __init__
requires = None
def emits_warning(*messages):
"""Mark a test as emitting a warning.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
"""
# TODO: it would be nice to assert that a named warning was
# emitted. should work with some monkeypatching of warnings,
# and may work on non-CPython if they keep to the spirit of
# warnings.showwarning's docstring.
# - update: jython looks ok, it uses cpython's module
@decorator
def decorate(fn, *args, **kw):
# todo: should probably be strict about this, too
filters = [dict(action='ignore',
category=sa_exc.SAPendingDeprecationWarning)]
if not messages:
filters.append(dict(action='ignore',
category=sa_exc.SAWarning))
else:
filters.extend(dict(action='ignore',
message=message,
category=sa_exc.SAWarning)
for message in messages)
for f in filters:
warnings.filterwarnings(**f)
try:
return fn(*args, **kw)
finally:
resetwarnings()
return decorate
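# Usage sketch (hypothetical test): squelch one specific SAWarning emitted by
# the body of a test method.
#
#     @emits_warning('Unicode type received non-unicode bind param value')
#     def test_non_unicode_bind(self):
#         ...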
def emits_warning_on(db, *warnings):
"""Mark a test as emitting a warning on a specific dialect.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
"""
spec = db_spec(db)
@decorator
def decorate(fn, *args, **kw):
if isinstance(db, basestring):
if not spec(config.db):
return fn(*args, **kw)
else:
wrapped = emits_warning(*warnings)(fn)
return wrapped(*args, **kw)
else:
if not _is_excluded(*db):
return fn(*args, **kw)
else:
wrapped = emits_warning(*warnings)(fn)
return wrapped(*args, **kw)
return decorate
def assert_warnings(fn, warnings):
"""Assert that each of the given warnings are emitted by fn."""
canary = []
orig_warn = util.warn
def capture_warnings(*args, **kw):
orig_warn(*args, **kw)
popwarn = warnings.pop(0)
canary.append(popwarn)
eq_(args[0], popwarn)
util.warn = util.langhelpers.warn = capture_warnings
result = emits_warning()(fn)()
assert canary, "No warning was emitted"
return result
def uses_deprecated(*messages):
"""Mark a test as immune from fatal deprecation warnings.
With no arguments, squelches all SADeprecationWarning failures.
Or pass one or more strings; these will be matched to the root
of the warning description by warnings.filterwarnings().
As a special case, you may pass a function name prefixed with //
and it will be re-written as needed to match the standard warning
verbiage emitted by the sqlalchemy.util.deprecated decorator.
"""
@decorator
def decorate(fn, *args, **kw):
# todo: should probably be strict about this, too
filters = [dict(action='ignore',
category=sa_exc.SAPendingDeprecationWarning)]
if not messages:
filters.append(dict(action='ignore',
category=sa_exc.SADeprecationWarning))
else:
filters.extend(
[dict(action='ignore',
message=message,
category=sa_exc.SADeprecationWarning)
for message in
[ (m.startswith('//') and
('Call to deprecated function ' + m[2:]) or m)
for m in messages] ])
for f in filters:
warnings.filterwarnings(**f)
try:
return fn(*args, **kw)
finally:
resetwarnings()
return decorate
def testing_warn(msg, stacklevel=3):
"""Replaces sqlalchemy.util.warn during tests."""
filename = "test.lib.testing"
lineno = 1
if isinstance(msg, basestring):
warnings.warn_explicit(msg, sa_exc.SAWarning, filename, lineno)
else:
warnings.warn_explicit(msg, filename, lineno)
def resetwarnings():
"""Reset warning behavior to testing defaults."""
util.warn = util.langhelpers.warn = testing_warn
warnings.filterwarnings('ignore',
category=sa_exc.SAPendingDeprecationWarning)
warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning)
warnings.filterwarnings('error', category=sa_exc.SAWarning)
def global_cleanup_assertions():
"""Check things that have to be finalized at the end of a test suite.
Hardcoded at the moment, a modular system can be built here
to support things like PG prepared transactions, tables all
dropped, etc.
"""
testutil.lazy_gc()
assert not pool._refs, str(pool._refs)
def run_as_contextmanager(ctx, fn, *arg, **kw):
"""Run the given function under the given contextmanager,
simulating the behavior of 'with' to support older
Python versions.
"""
obj = ctx.__enter__()
try:
result = fn(obj, *arg, **kw)
ctx.__exit__(None, None, None)
return result
except:
exc_info = sys.exc_info()
raise_ = ctx.__exit__(*exc_info)
if raise_ is None:
raise
else:
return raise_
def rowset(results):
"""Converts the results of sql execution into a plain set of column tuples.
Useful for asserting the results of an unordered query.
"""
return set([tuple(row) for row in results])
def eq_(a, b, msg=None):
"""Assert a == b, with repr messaging on failure."""
assert a == b, msg or "%r != %r" % (a, b)
def ne_(a, b, msg=None):
"""Assert a != b, with repr messaging on failure."""
assert a != b, msg or "%r == %r" % (a, b)
def is_(a, b, msg=None):
"""Assert a is b, with repr messaging on failure."""
assert a is b, msg or "%r is not %r" % (a, b)
def is_not_(a, b, msg=None):
"""Assert a is not b, with repr messaging on failure."""
assert a is not b, msg or "%r is %r" % (a, b)
def startswith_(a, fragment, msg=None):
"""Assert a.startswith(fragment), with repr messaging on failure."""
assert a.startswith(fragment), msg or "%r does not start with %r" % (
a, fragment)
def assert_raises(except_cls, callable_, *args, **kw):
try:
callable_(*args, **kw)
success = False
except except_cls, e:
success = True
    # assert outside the except block so this also works for AssertionError
assert success, "Callable did not raise an exception"
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
try:
callable_(*args, **kwargs)
assert False, "Callable did not raise an exception"
except except_cls, e:
assert re.search(msg, unicode(e), re.UNICODE), u"%r !~ %s" % (msg, e)
print unicode(e).encode('utf-8')
def fail(msg):
assert False, msg
@decorator
def provide_metadata(fn, *args, **kw):
"""Provide bound MetaData for a single test, dropping afterwards."""
metadata = schema.MetaData(db)
self = args[0]
prev_meta = getattr(self, 'metadata', None)
self.metadata = metadata
try:
return fn(*args, **kw)
finally:
metadata.drop_all()
self.metadata = prev_meta
class adict(dict):
"""Dict keys available as attributes. Shadows."""
def __getattribute__(self, key):
try:
return self[key]
except KeyError:
return dict.__getattribute__(self, key)
def get_all(self, *keys):
return tuple([self[key] for key in keys])
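# Example (hypothetical usage): adict lets a test write cfg.host instead of
# cfg['host'] while still behaving like a dict.
#
#     cfg = adict(host='localhost', port=5432)
#     cfg.host                      # -> 'localhost'
#     cfg.get_all('host', 'port')   # -> ('localhost', 5432)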
class AssertsCompiledSQL(object):
def assert_compile(self, clause, result, params=None,
checkparams=None, dialect=None,
checkpositional=None,
use_default_dialect=False,
allow_dialect_select=False):
if use_default_dialect:
dialect = default.DefaultDialect()
elif dialect == None and not allow_dialect_select:
dialect = getattr(self, '__dialect__', None)
if dialect == 'default':
dialect = default.DefaultDialect()
elif dialect is None:
dialect = db.dialect
kw = {}
if params is not None:
kw['column_keys'] = params.keys()
if isinstance(clause, orm.Query):
context = clause._compile_context()
context.statement.use_labels = True
clause = context.statement
c = clause.compile(dialect=dialect, **kw)
param_str = repr(getattr(c, 'params', {}))
# Py3K
#param_str = param_str.encode('utf-8').decode('ascii', 'ignore')
print "\nSQL String:\n" + str(c) + param_str
cc = re.sub(r'[\n\t]', '', str(c))
eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))
if checkparams is not None:
eq_(c.construct_params(params), checkparams)
if checkpositional is not None:
p = c.construct_params(params)
eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
class ComparesTables(object):
def assert_tables_equal(self, table, reflected_table, strict_types=False):
assert len(table.c) == len(reflected_table.c)
for c, reflected_c in zip(table.c, reflected_table.c):
eq_(c.name, reflected_c.name)
assert reflected_c is reflected_table.c[c.name]
eq_(c.primary_key, reflected_c.primary_key)
eq_(c.nullable, reflected_c.nullable)
if strict_types:
assert type(reflected_c.type) is type(c.type), \
"Type '%s' doesn't correspond to type '%s'" % (reflected_c.type, c.type)
else:
self.assert_types_base(reflected_c, c)
if isinstance(c.type, sqltypes.String):
eq_(c.type.length, reflected_c.type.length)
eq_(set([f.column.name for f in c.foreign_keys]), set([f.column.name for f in reflected_c.foreign_keys]))
if c.server_default:
assert isinstance(reflected_c.server_default,
schema.FetchedValue)
assert len(table.primary_key) == len(reflected_table.primary_key)
for c in table.primary_key:
assert reflected_table.primary_key.columns[c.name] is not None
def assert_types_base(self, c1, c2):
assert c1.type._compare_type_affinity(c2.type),\
"On column %r, type '%s' doesn't correspond to type '%s'" % \
(c1.name, c1.type, c2.type)
class AssertsExecutionResults(object):
def assert_result(self, result, class_, *objects):
result = list(result)
print repr(result)
self.assert_list(result, class_, objects)
def assert_list(self, result, class_, list):
self.assert_(len(result) == len(list),
"result list is not the same size as test list, " +
"for class " + class_.__name__)
for i in range(0, len(list)):
self.assert_row(class_, result[i], list[i])
def assert_row(self, class_, rowobj, desc):
self.assert_(rowobj.__class__ is class_,
"item class is not " + repr(class_))
for key, value in desc.iteritems():
if isinstance(value, tuple):
if isinstance(value[1], list):
self.assert_list(getattr(rowobj, key), value[0], value[1])
else:
self.assert_row(value[0], getattr(rowobj, key), value[1])
else:
self.assert_(getattr(rowobj, key) == value,
"attribute %s value %s does not match %s" % (
key, getattr(rowobj, key), value))
def assert_unordered_result(self, result, cls, *expected):
"""As assert_result, but the order of objects is not considered.
The algorithm is very expensive but not a big deal for the small
numbers of rows that the test suite manipulates.
"""
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = set([immutabledict(e) for e in expected])
for wrong in itertools.ifilterfalse(lambda o: type(o) == cls, found):
fail('Unexpected type "%s", expected "%s"' % (
type(wrong).__name__, cls.__name__))
if len(found) != len(expected):
fail('Unexpected object count "%s", expected "%s"' % (
len(found), len(expected)))
NOVALUE = object()
def _compare_item(obj, spec):
for key, value in spec.iteritems():
if isinstance(value, tuple):
try:
self.assert_unordered_result(
getattr(obj, key), value[0], *value[1])
except AssertionError:
return False
else:
if getattr(obj, key, NOVALUE) != value:
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(
"Expected %s instance with attributes %s not found." % (
cls.__name__, repr(expected_item)))
return True
def assert_sql_execution(self, db, callable_, *rules):
assertsql.asserter.add_rules(rules)
try:
callable_()
assertsql.asserter.statement_complete()
finally:
assertsql.asserter.clear_rules()
def assert_sql(self, db, callable_, list_, with_sequences=None):
if with_sequences is not None and config.db.name in ('firebird', 'oracle', 'postgresql'):
rules = with_sequences
else:
rules = list_
newrules = []
for rule in rules:
if isinstance(rule, dict):
newrule = assertsql.AllOf(*[
assertsql.ExactSQL(k, v) for k, v in rule.iteritems()
])
else:
newrule = assertsql.ExactSQL(*rule)
newrules.append(newrule)
self.assert_sql_execution(db, callable_, *newrules)
def assert_sql_count(self, db, callable_, count):
self.assert_sql_execution(db, callable_, assertsql.CountStatements(count))
|
|
__author__ = "UShareSoft"
from texttable import Texttable
from ussclicore.argumentParser import ArgumentParser, ArgumentParserError
from uforgecli.utils.uforgecli_utils import *
from ussclicore.cmd import Cmd, CoreGlobal
from uforgecli.utils import org_utils
from ussclicore.utils.generics_utils import order_list_object_by
from ussclicore.utils import printer
from uforge.objects import uforge
from ussclicore.utils import generics_utils
from uforgecli.utils import uforgecli_utils
from uforgecli.utils.org_utils import org_get
from usergrp_user import UserGroup_User_Cmd
import pyxb
import datetime
import shlex
class Usergrp_Cmd(Cmd, CoreGlobal):
"""user group administration (list/info/create/delete etc)"""
cmd_name = "usergrp"
def __init__(self):
self.generate_sub_commands()
super(Usergrp_Cmd, self).__init__()
def generate_sub_commands(self):
if not hasattr(self, 'subCmds'):
self.subCmds = {}
user = UserGroup_User_Cmd()
self.subCmds[user.cmd_name] = user
def arg_list(self):
doParser = ArgumentParser(add_help=True, description="List all the user groups for a given organization. If not organization is provided the default organization is used.")
optional = doParser.add_argument_group("optional arguments")
optional.add_argument('--org', dest='org', type=str, required=False, help="The organization name. If no organization is provided, then the default organization is used.")
return doParser
def do_list(self, args):
try:
doParser = self.arg_list()
doArgs = doParser.parse_args(shlex.split(args))
org = org_get(self.api, doArgs.org)
allUsergrp = self.api.Usergroups.Getall(Name=org.name)
if allUsergrp is None:
printer.out("No user groups found.")
return 0
allUsergrp = allUsergrp.userGroups.userGroup
table = Texttable(200)
table.set_cols_align(["l", "r"])
table.header(["Name", "# Members"])
for item in allUsergrp:
table.add_row([item.admin.name, str(len(item.members.member))])
print table.draw() + "\n"
printer.out("Found " + str(len(allUsergrp)) + " user group in [" + org.name + "].")
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
self.help_list()
except Exception as e:
return handle_uforge_exception(e)
def help_list(self):
doParser = self.arg_list()
doParser.print_help()
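	# CLI sketch (hypothetical session): from the uforgecli shell,
	#
	#     usergrp list --org MyOrg
	#
	# prints a table of user groups and member counts for organization MyOrg.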
def arg_info(self):
doParser = ArgumentParser(add_help=True, description="Prints out all the information for a user group within an organization")
mandatory = doParser.add_argument_group("mandatory arguments")
optional = doParser.add_argument_group("optional arguments")
mandatory.add_argument('--name', dest='name', type=str, required=True, help="Name of the user group")
optional.add_argument('--org', dest='org', type=str, required=False, help="The organization name. If no organization is provided, then the default organization is used.")
return doParser
def do_info(self, args):
try:
doParser = self.arg_info()
doArgs = doParser.parse_args(shlex.split(args))
org = org_get(self.api, doArgs.org)
printer.out("Getting user group [" + org.name + "] from the default organization ...")
allUsergrp = self.api.Usergroups.Getall(Name=org.name)
if allUsergrp is None:
printer.out("No user groups found in [" + org.name + "].")
return 0
allUsergrp = allUsergrp.userGroups.userGroup
for item in allUsergrp:
if item.admin.name == doArgs.name:
printer.out("Displaying info on [" + item.admin.name + "]:\n")
printer.out("Name : " + item.admin.name)
if len(item.members.member) > 0:
table = Texttable(200)
table.set_cols_align(["l"])
table.header(["Members"])
for item2 in item.members.member:
table.add_row([item2.name])
print table.draw() + "\n"
return 0
else:
printer.out("No members found in this user group.")
return 0
printer.out("The user group [" + doArgs.name + " was not found in [" + org.name + "].")
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
self.help_info()
except Exception as e:
return handle_uforge_exception(e)
def help_info(self):
doParser = self.arg_info()
doParser.print_help()
def arg_create(self):
doParser = ArgumentParser(add_help=True, description="Create a new user group in the specified organization")
mandatory = doParser.add_argument_group("mandatory arguments")
optional = doParser.add_argument_group("optional arguments")
mandatory.add_argument('--name', dest='name', type=str, required=True, help="Name of the user group")
mandatory.add_argument('--email', dest='email', type=str, required=True, help="The email address associated to this user group (the email cannot be used by another user in the platform)")
mandatory.add_argument('--usergrpPassword', dest='usergrpPassword', type=str, required=True, help="The password of the user group administrator")
optional.add_argument('--org', dest='org', type=str, required=False, help="The organization name. If no organization is provided, then the default organization is used.")
optional.add_argument('--accounts', dest='accounts', nargs='+', required=False, help="A list of users to be added to this user group during creation (example: --accounts userA userB userC).")
return doParser
def do_create(self, args):
try:
doParser = self.arg_create()
doArgs = doParser.parse_args(shlex.split(args))
org = org_get(self.api, doArgs.org)
newUsergrp = userGroup()
newUser = user()
newUser.loginName = doArgs.name
newUser.email = doArgs.email
newUser.password = doArgs.usergrpPassword
newUsergrp.admin = newUser
newUsergrp.members = pyxb.BIND()
if doArgs.accounts is not None:
for item in doArgs.accounts:
addNewUser = user()
addNewUser.loginName = item
newUsergrp.members.append(addNewUser)
printer.out("[" + addNewUser.loginName + "] has been added to user group.")
result = self.api.Usergroups.Create(Org=org.name,body=newUsergrp)
printer.out("User group [" + newUser.loginName + "] has been successfully created", printer.OK)
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
self.help_create()
except Exception as e:
return handle_uforge_exception(e)
def help_create(self):
doParser = self.arg_create()
doParser.print_help()
def arg_delete(self):
doParser = ArgumentParser(add_help=True, description="Delete an user group from the specified organization")
mandatory = doParser.add_argument_group("mandatory arguments")
optional = doParser.add_argument_group("optional arguments")
mandatory.add_argument('--name', dest='name', type=str, required=True, help="Name of the user group")
optional.add_argument('--org', dest='org', type=str, required=False, help="The organization name. If no organization is provided, then the default organization is used.")
return doParser
def do_delete(self, args):
try:
doParser = self.arg_delete()
doArgs = doParser.parse_args(shlex.split(args))
org = org_get(self.api, doArgs.org)
allUsergrp = self.api.Usergroups.Getall(Name=org.name)
if allUsergrp is None:
printer.out("No user groups found in [" + org.name + "].")
return 0
allUsergrp = allUsergrp.userGroups.userGroup
for item in allUsergrp:
if item.admin.name == doArgs.name:
result = self.api.Usergroups(item.dbId).Delete()
printer.out("[" + item.admin.name + "] has been successfully deleted.", printer.OK)
return 0
printer.out("[" + doArgs.name + "] was not found in [" + org.name + "].")
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
self.help_delete()
except Exception as e:
return handle_uforge_exception(e)
def help_delete(self):
doParser = self.arg_delete()
doParser.print_help()
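# Minimal usage sketch (hypothetical; assumes an authenticated uforge `api`
# handle wired up by the uforgecli entry point):
#   cmd = Usergrp_Cmd()
#   cmd.api = api
#   cmd.do_list("--org MyOrg")  # lists the user groups of "MyOrg"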
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from past.utils import old_div
from math import *
import proteus.MeshTools
from proteus import Domain
from proteus.default_n import *
from proteus.Profiling import logEvent
import os
import sys
import numpy as np
try:
from .parameters import *
except:
from parameters import *
AUTOMATED_TEST=True
#ct.test_case=1 #1 or 2
# see parameters.py
# 1: falling drop in 2D
# 2: falling drop in 3D
# ----- PARAMETERS FOR CLSVOF ----- #
useCLSVOF=True
epsFactHeaviside_clsvof=1.5 #epsilon parameter on heaviside functions
lambdaFact_clsvof=10.0 #lambda parameter in paper
computeMetrics_clsvof=0 #0: no metrics, 1: at EOS (needs exact solution) or 2: EOS and ETS
eps_tolerance_clsvof=True #Set tol on nonlinear solver to machine zero?
#clsvof_nl_atol_res # tol on nonlinear solver. If eps_tolerance=False. Search below.
# ----- PARAMETERS FOR ELLIPTIC REDISTANCING ----- #
EXPLICIT_VOF=True
EXPLICIT_NCLS=True
ELLIPTIC_REDISTANCING=2
alpha_REDISTANCING=1.0E6 #'inf'
# ----- PARAMETERS FOR STABILIZATION OF NS ----- #
USE_SUPG_NS=0
ARTIFICIAL_VISCOSITY_NS=2
# ----- DIMENSIONS AND REFINEMENT ----- #
if ct.test_case==1:
nd=2
else:
nd=3
if ct.test_case==1:
structured = False#True
else:
structured = False
# refinement
if AUTOMATED_TEST:
Refinement = 1
else:
Refinement = 2
# ----- PHYSICAL PARAMETERS ----- #
# Water
rho_0 = 998.2
nu_0 = 1.004e-6
# Air
rho_1 = 1.205
nu_1 = 1.500e-5
# Surface tension
sigma_01 = 72.8E-3
# Gravity
if ct.test_case==1:
g = [0.0, -9.8, 0.0]
else:
g = [0.0, 0.0, -9.8]
# ----- Discretization -- input options ----- #
genMesh = False#True
movingDomain = False
applyRedistancing = True
useOldPETSc = False
useSuperlu = True
timeDiscretization = 'vbdf'  # 'vbdf', 'be', 'flcbdf'
spaceOrder = 2
pspaceOrder = 1
useHex = False
useRBLES = 0.0
useMetrics = 1.0
applyCorrection = True
useVF = 0.0
useOnlyVF = False
openTop=True#False
# Input checks
if spaceOrder not in [1, 2]:
    print("INVALID: spaceOrder " + str(spaceOrder))
    sys.exit()
if useRBLES not in [0.0, 1.0]:
    print("INVALID: useRBLES " + str(useRBLES))
    sys.exit()
if useMetrics not in [0.0, 1.0]:
    print("INVALID: useMetrics")
    sys.exit()
if spaceOrder == 1:
hFactor = 1.0
if useHex:
quad=True
basis = C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd, 2)
elementBoundaryQuadrature = CubeGaussQuadrature(nd - 1, 2)
else:
basis = C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd, 3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd - 1, 3)
elif spaceOrder == 2:
hFactor = 0.5
if useHex:
quad=True
basis = C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd, 4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd - 1, 4)
else:
basis = C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd, 5)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd - 1, 5)
if pspaceOrder == 1:
if useHex:
pbasis = C0_AffineLinearOnCubeWithNodalBasis
else:
pbasis = C0_AffineLinearOnSimplexWithNodalBasis
elif pspaceOrder == 2:
if useHex:
pbasis = C0_AffineLagrangeOnCubeWithNodalBasis
else:
pbasis = C0_AffineQuadraticOnSimplexWithNodalBasis
# Domain and mesh
if ct.test_case==1: #2D
L = (1.0 , 2.0)
#he = old_div(L[0],float(4*Refinement-1))
he = 0.25
#he*=0.5
#he*=0.5
elif ct.test_case==2: #3D
L = (1.0, 1.0, 2.0)
he = 0.5#old_div(L[0],float(4*Refinement-1))
weak_bc_penalty_constant = 1.0E6
nLevels = 1
#parallelPartitioningType = proteus.MeshTools.MeshParallelPartitioningTypes.element
parallelPartitioningType = proteus.MeshTools.MeshParallelPartitioningTypes.node
nLayersOfOverlapForParallel = 0
if useHex:
raise("Not implemented")
else:
boundaries = ['bottom', 'right', 'top', 'left', 'front', 'back']
boundaryTags = dict([(key, i + 1) for (i, key) in enumerate(boundaries)])
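    # enumerate is zero-based, so the tags run 1..6:
    # bottom=1, right=2, top=3, left=4, front=5, back=6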
if structured:
nnx = 4 * Refinement**2 + 1
if nd==2:
nny = 2*nnx
else:
nnx = int(old_div((nnx - 1),2)) + 1
nny = nnx
nnz = 2*nnx
triangleFlag=1
domain = Domain.RectangularDomain(L)
domain.boundaryTags = boundaryTags
he = old_div(L[0],(nnx - 1))
else:
if nd==2:
vertices = [[0.0, 0.0], #0
[L[0], 0.0], #1
[L[0], L[1]], #2
[0.0, L[1]]] #3
vertexFlags = [boundaryTags['bottom'],
boundaryTags['bottom'],
boundaryTags['top'],
boundaryTags['top']]
segments = [[0, 1],
[1, 2],
[2, 3],
[3, 0]]
segmentFlags = [boundaryTags['bottom'],
boundaryTags['right'],
boundaryTags['top'],
boundaryTags['left']]
regions = [[1.2, 0.6]]
regionFlags = [1]
domain = Domain.PlanarStraightLineGraphDomain(vertices=vertices,
vertexFlags=vertexFlags,
segments=segments,
segmentFlags=segmentFlags,
regions=regions,
regionFlags=regionFlags)
else:
vertices=[[0.0,0.0,0.0],#0
[L[0],0.0,0.0],#1
[L[0],L[1],0.0],#2
[0.0,L[1],0.0],#3
[0.0,0.0,L[2]],#4
[L[0],0.0,L[2]],#5
[L[0],L[1],L[2]],#6
[0.0,L[1],L[2]]]#7
vertexFlags=[boundaryTags['left'],
boundaryTags['right'],
boundaryTags['right'],
boundaryTags['left'],
boundaryTags['left'],
boundaryTags['right'],
boundaryTags['right'],
boundaryTags['left']]
facets=[[[0,1,2,3]],
[[0,1,5,4]],
[[1,2,6,5]],
[[2,3,7,6]],
[[3,0,4,7]],
[[4,5,6,7]]]
facetFlags=[boundaryTags['bottom'],
boundaryTags['front'],
boundaryTags['right'],
boundaryTags['back'],
boundaryTags['left'],
boundaryTags['top']]
regions=[[0.5*L[0],0.5*L[1],0.5*L[2]]]
regionFlags=[1]
domain = Domain.PiecewiseLinearComplexDomain(vertices=vertices,
vertexFlags=vertexFlags,
facets=facets,
facetFlags=facetFlags,
regions=regions,
regionFlags=regionFlags)
#go ahead and add a boundary tags member
domain.boundaryTags = boundaryTags
if nd==2:
#domain.writePoly("mesh2D")
#domain.writePLY("mesh2D")
#domain.writeAsymptote("mesh2D")
domain.polyfile=os.path.dirname(os.path.abspath(__file__))+"/"+"mesh2D"
domain.MeshOptions.triangleOptions = "VApq30Dena%8.8f" % (old_div((he ** 2), 2.0),)
#triangleOptions = "VApen"
else:
#domain.writePoly("mesh3D")
#domain.writePLY("mesh3D")
#domain.writeAsymptote("mesh3D")
domain.polyfile=os.path.dirname(os.path.abspath(__file__))+"/"+"mesh3D"
domain.MeshOptions.triangleOptions="VApq1.4q12feena%21.16e" % (old_div((he**3),6.0),)
#triangleOptions="VApfeena0.002"
logEvent("""Mesh generated using: tetgen -%s %s""" % (triangleOptions, domain.polyfile + ".poly"))
domain.MeshOptions.genMesh=genMesh
# Numerical parameters
ns_forceStrongDirichlet = False
ns_sed_forceStrongDirichlet = False
if useMetrics:
ns_shockCapturingFactor = 0.5
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = 0.5
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.5
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
rd_shockCapturingFactor = 0.5
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 10.0
redist_Newton = True
kappa_shockCapturingFactor = 0.25
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.25
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
else:
ns_shockCapturingFactor = 0.9
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ns_sed_shockCapturingFactor = 0.9
ns_sed_lag_shockCapturing = True
ns_sed_lag_subgridError = True
ls_shockCapturingFactor = 0.9
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.9
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
vos_shockCapturingFactor = 0.9
vos_lag_shockCapturing = True
vos_sc_uref = 1.0
vos_sc_beta = 1.0
rd_shockCapturingFactor = 0.9
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_vos = epsFact_consrv_heaviside = epsFact_consrv_dirac = \
epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 0.1
redist_Newton = False
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True #False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True #False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
ns_nl_atol_res = 1.0e-12#max(1.0e-10, 0.01 * he ** 2)
ns_sed_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
vof_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
vos_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
ls_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
rd_nl_atol_res = max(1.0e-10, 0.05 * he)
mcorr_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
clsvof_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
kappa_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
dissipation_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
phi_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
pressure_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
#turbulence
ns_closure = 0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
ns_sed_closure = 0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
# Sediment
rho_s = rho_0
nu_s = 10000.0*nu_0
dragAlpha = 0.0
# Time stepping
if AUTOMATED_TEST:
T=0.1
dt_fixed = 0.1
else:
T=5.0
dt_fixed = 0.01
dt_init = min(0.1*dt_fixed,0.001)
runCFL=0.33
nDTout = int(round(old_div(T,dt_fixed)))
##########################################
# Signed Distance #
##########################################
def signedDistance(x):
xB = 0.5
if ct.test_case==1:
yB = 1.5
rB = 0.25
zB = 0.0
else:
yB = 0.5
rB = 0.362783167859781
zB = 1.5
# dist to center of bubble
if nd==2:
r = np.sqrt((x[0]-xB)**2 + (x[1]-yB)**2)
else:
r = np.sqrt((x[0]-xB)**2 + (x[1]-yB)**2 + (x[2]-zB)**2)
# dist to surface of bubble
dB = -(rB - r)
return dB
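# Illustrative sanity check (not part of the original setup): at the bubble
# center the signed distance equals -rB by construction.
if __name__ == '__main__':
    _center = [0.5, 1.5, 0.0] if ct.test_case == 1 else [0.5, 0.5, 1.5]
    _rB = 0.25 if ct.test_case == 1 else 0.362783167859781
    assert abs(signedDistance(_center) + _rB) < 1e-12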
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Scripts for the Brapa bites paper
Tang et al. (2012) Altered Patterns of Fractionation and Exon Deletions in
Brassica rapa Support a Two-Step Model of Paleohexaploidy. Genetics.
<http://www.genetics.org/content/190/4/1563.short>
"""
from jcvi.graphics.base import plt, Rectangle, Polygon, CirclePolygon, savefig
from jcvi.graphics.glyph import RoundLabel, arrowprops, TextCircle
from jcvi.graphics.chromosome import Chromosome
from jcvi.utils.iter import pairwise
from jcvi.apps.base import OptionParser, ActionDispatcher, fname
def main():
actions = (
('excision', 'show intra-chromosomal recombination'),
('bites', 'show the bites calling pipeline'),
('scenario', 'show step-wise genome merger events in brapa'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
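# Each action name maps to a same-named function in this module; e.g.
# `python <this script> excision` (invocation sketch) renders the
# illegitimate-recombination figure defined below.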
def excision(args):
"""
%prog excision
Illustrate the mechanism of illegitimate recombination.
"""
    p = OptionParser(excision.__doc__)
opts, args = p.parse_args(args)
fig = plt.figure(1, (5, 5))
root = fig.add_axes([0, 0, 1, 1])
plt.plot((.2, .8), (.6, .6), 'r-', lw=3)
plt.plot((.4, .6), (.6, .6), 'b>-', mfc='g', mec='w', ms=12, lw=3)
plt.plot((.3, .7), (.5, .5), 'r-', lw=3)
plt.plot((.5, ), (.5, ), 'b>-', mfc='g', mec='w', ms=12, lw=3)
# Circle excision
plt.plot((.5, ), (.45, ), 'b>-', mfc='g', mec='w', ms=12, lw=3)
circle = CirclePolygon((.5, .4), .05, fill=False, lw=3, ec="b")
root.add_patch(circle)
arrow_dist = .07
ar_xpos, ar_ypos = .5, .52
root.annotate(" ", (ar_xpos, ar_ypos),
(ar_xpos, ar_ypos + arrow_dist),
arrowprops=arrowprops)
RoundLabel(root, .2, .64, "Gene")
RoundLabel(root, .3, .54, "Excision")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
figname = fname() + ".pdf"
savefig(figname, dpi=300)
def bites(args):
"""
%prog bites
Illustrate the pipeline for automated bite discovery.
"""
    p = OptionParser(bites.__doc__)
    opts, args = p.parse_args(args)
fig = plt.figure(1, (6, 6))
root = fig.add_axes([0, 0, 1, 1])
# HSP pairs
hsps = (((50, 150), (60, 180)),
((190, 250), (160, 235)),
((300, 360), (270, 330)),
((430, 470), (450, 490)),
((570, 620), (493, 543)),
((540, 555), (370, 385)), # non-collinear hsps
)
titlepos = (.9, .65, .4)
titles = ("Compare orthologous region",
"Find collinear HSPs",
"Scan paired gaps")
ytip = .01
mrange = 650.
m = lambda x: x / mrange * .7 + .1
for i, (ya, title) in enumerate(zip(titlepos, titles)):
yb = ya - .1
plt.plot((.1, .8), (ya, ya), "-", color="gray", lw=2, zorder=1)
plt.plot((.1, .8), (yb, yb), "-", color="gray", lw=2, zorder=1)
RoundLabel(root, .5, ya + 4 * ytip, title)
root.text(.9, ya, "A. thaliana", ha="center", va="center")
root.text(.9, yb, "B. rapa", ha="center", va="center")
myhsps = hsps
if i >= 1:
myhsps = hsps[:-1]
for (a, b), (c, d) in myhsps:
a, b, c, d = [m(x) for x in (a, b, c, d)]
r1 = Rectangle((a, ya - ytip), b - a, 2 * ytip, fc='r', lw=0, zorder=2)
r2 = Rectangle((c, yb - ytip), d - c, 2 * ytip, fc='r', lw=0, zorder=2)
r3 = Rectangle((a, ya - ytip), b - a, 2 * ytip, fill=False, zorder=3)
r4 = Rectangle((c, yb - ytip), d - c, 2 * ytip, fill=False, zorder=3)
r5 = Polygon(((a, ya - ytip), (c, yb + ytip),
(d, yb + ytip), (b, ya - ytip)),
fc='r', alpha=.2)
rr = (r1, r2, r3, r4, r5)
if i == 2:
rr = rr[:-1]
for r in rr:
root.add_patch(r)
# Gap pairs
hspa, hspb = zip(*myhsps)
gapa, gapb = [], []
for (a, b), (c, d) in pairwise(hspa):
gapa.append((b + 1, c - 1))
for (a, b), (c, d) in pairwise(hspb):
gapb.append((b + 1, c - 1))
gaps = zip(gapa, gapb)
tpos = titlepos[-1]
yy = tpos - .05
for i, ((a, b), (c, d)) in enumerate(gaps):
i += 1
a, b, c, d = [m(x) for x in (a, b, c, d)]
xx = (a + b + c + d) / 4
TextCircle(root, xx, yy, str(i))
# Bites
ystart = .24
ytip = .05
bites = (("Bite(40=>-15)", True),
("Bite(50=>35)", False),
("Bite(70=>120)", False),
("Bite(100=>3)", True))
for i, (bite, selected) in enumerate(bites):
xx = .15 if (i % 2 == 0) else .55
yy = ystart - i / 2 * ytip
i += 1
TextCircle(root, xx, yy, str(i))
color = "k" if selected else "gray"
root.text(xx + ytip, yy, bite, size=10, color=color, va="center")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
figname = fname() + ".pdf"
savefig(figname, dpi=300)
def scenario(args):
"""
%prog scenario
    Illustration of the two-step genome merger process for the B. rapa companion paper.
"""
    p = OptionParser(scenario.__doc__)
    opts, args = p.parse_args(args)
fig = plt.figure(1, (5, 5))
root = fig.add_axes([0, 0, 1, 1])
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
# Layout format: (x, y, label, (chr lengths))
anc = (.5, .9, "Ancestor", (1,))
s1 = (.2, .6, "Genome I", (1,))
s2 = (.5, .6, "Genome II", (1,))
s3 = (.8, .6, "Genome III", (1,))
tetra = (.35, .4, "Tetraploid I / II", (.5, .9))
hexa = (.5, .1, "Hexaploid I / II / III", (.36, .46, .9))
labels = (anc, s1, s2, s3, tetra, hexa)
    connections = ((anc, s1), (anc, s2), (anc, s3),
(s1, tetra), (s2, tetra),
(tetra, hexa), (s3, hexa))
xinterval = .02
yratio = .05
for xx, yy, label, chrl in labels:
#RoundLabel(root, xx, yy, label)
root.text(xx, yy, label, ha="center", va="center")
offset = len(label) * .012
for i, c in enumerate(chrl):
ya = yy + yratio * c
yb = yy - yratio * c
Chromosome(root, xx - offset + i * xinterval, ya, yb, width=.01)
# Comments
comments = ((.15, .33, "II dominant"),
(.25, .03, "III dominant"))
for xx, yy, c in comments:
root.text(xx, yy, c, size=9, ha="center", va="center")
# Branches
tip = .04
for a, b in connections:
xa, ya, la, chra = a
xb, yb, lb, chrb = b
plt.plot((xa, xb), (ya - tip, yb + 2 * tip), 'k-', lw=2, alpha=.5)
figname = fname() + ".pdf"
savefig(figname, dpi=300)
if __name__ == '__main__':
main()
|
|
import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from survey.models import Survey, Questionnaire
from . import create_surveys, create_questionnaires, login
class QuestionnaireTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.client = login(user="questionnaire-user", email="questionnaire-user@example.com", password="password")
        with open('survey/tests/resources/survey.json') as f:
            cls.contents = f.read()
def setUp(self):
create_surveys()
create_questionnaires()
def test_questionnaire(self):
questionnaire1 = Questionnaire.objects.get(questionnaire_id='1')
questionnaire2 = Questionnaire.objects.get(questionnaire_id='2')
self.assertEqual("Test Questionnaire 1", questionnaire1.title)
self.assertEqual("Test Questionnaire 2", questionnaire2.title)
self.assertEqual("questionnaire overview 1", questionnaire1.overview)
self.assertEqual("questionnaire overview 2", questionnaire2.overview)
self.assertEqual(Survey.objects.get(survey_id='1'), questionnaire1.survey)
self.assertEqual(Survey.objects.get(survey_id='2'), questionnaire2.survey)
self.assertFalse(questionnaire1.reviewed)
self.assertFalse(questionnaire2.reviewed)
def test_check_question_count(self):
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
# check that survey one has a single questionnaire with id 1
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 1)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='1'), questionnaire_set[0])
# check that survey two has a single questionnaire with id 2
survey = response.context['object_list'][1]
self.assertEqual(Survey.objects.get(survey_id='2'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 1)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='2'), questionnaire_set[0])
def test_add_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
# now check that survey 1 has two questionnaires
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 2)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='3'), questionnaire_set[0])
self.assertEqual(Questionnaire.objects.get(questionnaire_id='1'), questionnaire_set[1])
def test_add_questionnaire_fails_when_overview_is_missing(self):
# attempt to add an invalid questionnaire (i.e. missing the overview field)
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title': 'Test Questionnaire 4', 'questionnaire_id': '4'}, follow=True)
self.assertContains(response, "This field is required")
def test_add_questionnaire_fails_when_title_is_missing(self):
        # attempt to add an invalid questionnaire (i.e. missing the title field)
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'questionnaire_id': '4', 'overview': 'questionnaire overview 4'}, follow=True)
self.assertContains(response, "This field is required")
def test_add_questionnaire_fails_when_id_is_missing(self):
        # attempt to add an invalid questionnaire (i.e. missing the questionnaire_id field)
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title': 'Test Questionnaire 4', 'overview': 'questionnaire overview 4'}, follow=True)
self.assertContains(response, "This field is required")
def test_reviewed(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
# now check that survey 1 has two questionnaires and the reviewed state is correct
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 2)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='3'), questionnaire_set[0])
self.assertEqual(Questionnaire.objects.get(questionnaire_id='1'), questionnaire_set[1])
self.assertFalse(questionnaire_set[0].reviewed)
self.assertFalse(questionnaire_set[1].reviewed)
questionnaire = Questionnaire.objects.get(questionnaire_id=1)
response = QuestionnaireTestCase.client.get(reverse("survey:review-questionnaire", kwargs={'slug': questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='1')
self.assertTrue(questionnaire.reviewed)
def test_reviewed_false_after_add_question(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
response = QuestionnaireTestCase.client.get(reverse("survey:review-questionnaire", kwargs={'slug': questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='3')
self.assertTrue(questionnaire.reviewed)
# check the reviewed status is true
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertTrue(questionnaire_set[0].reviewed)
# now add a question
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Your questionnaire has been saved")
# and check the reviewed status is false
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertFalse(questionnaire_set[0].reviewed)
def test_published_a_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Your questionnaire has been saved")
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-summary", kwargs={'slug': questionnaire.id}),follow=True)
# check we cannot make it live
self.assertNotContains(response, 'publish')
response = QuestionnaireTestCase.client.get(reverse("survey:review-questionnaire", kwargs={'slug': questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='3')
self.assertTrue(questionnaire.reviewed)
# check we can publish
self.assertContains(response, 'publish')
response = QuestionnaireTestCase.client.get(reverse("survey:publish-questionnaire", kwargs={'slug' :questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='3')
self.assertTrue(questionnaire.published)
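    # The locking tests below exercise the builder's edit-lock protocol:
    # a GET on survey:questionnaire-builder takes the lock, POSTs from other
    # users are rejected with "Locked for editing", and the lock holder
    # releases it by POSTing {"unlock": "true"}.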
def test_locked_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# lock the questionnaire
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Your questionnaire has been saved")
# log in as a new user
new_user = login(user="new-user", email="new-user@example.com", password="password")
# check we can't modify the questionnaire
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Locked for editing")
def test_unlocked_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# lock the questionnaire
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Your questionnaire has been saved")
# log in as a new user
new_user = login(user="new-user", email="new-user@example.com", password="password")
# check we can't modify the questionnaire
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Locked for editing")
# unlock the questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), '{"unlock":"true"}', content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Unlocked")
# check the new user can modify it now
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Your questionnaire has been saved")
    def test_user_cannot_unlock_another_users_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# lock the questionnaire
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Your questionnaire has been saved")
# log in as a new user
new_user = login(user="new-user", email="new-user@example.com", password="password")
# attempt to unlock the questionnaire
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), '{"unlock":"true"}', content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Locked for editing")
|
|
"""The tests for mqtt camera component."""
import json
from unittest.mock import patch
import pytest
from homeassistant.components import camera
from homeassistant.components.mqtt.camera import MQTT_CAMERA_ATTRIBUTES_BLOCKED
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
camera.DOMAIN: {"platform": "mqtt", "name": "test", "topic": "test_topic"}
}
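# DEFAULT_CONFIG above is the minimal camera config shared by the
# help_test_* helpers below.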
async def test_run_camera_setup(hass, hass_client_no_auth, mqtt_mock):
"""Test that it fetches the given payload."""
topic = "test/camera"
await async_setup_component(
hass,
"camera",
{"camera": {"platform": "mqtt", "topic": topic, "name": "Test Camera"}},
)
await hass.async_block_till_done()
url = hass.states.get("camera.test_camera").attributes["entity_picture"]
async_fire_mqtt_message(hass, topic, "beer")
client = await hass_client_no_auth()
resp = await client.get(url)
assert resp.status == 200
body = await resp.text()
assert body == "beer"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, MQTT_CAMERA_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one camera per unique_id."""
config = {
camera.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, camera.DOMAIN, config)
async def test_discovery_removal_camera(hass, mqtt_mock, caplog):
"""Test removal of discovered camera."""
data = json.dumps(DEFAULT_CONFIG[camera.DOMAIN])
await help_test_discovery_removal(hass, mqtt_mock, caplog, camera.DOMAIN, data)
async def test_discovery_update_camera(hass, mqtt_mock, caplog):
"""Test update of discovered camera."""
data1 = '{ "name": "Beer", "topic": "test_topic"}'
data2 = '{ "name": "Milk", "topic": "test_topic"}'
await help_test_discovery_update(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, data2
)
async def test_discovery_update_unchanged_camera(hass, mqtt_mock, caplog):
"""Test update of discovered camera."""
data1 = '{ "name": "Beer", "topic": "test_topic"}'
with patch(
"homeassistant.components.mqtt.camera.MqttCamera.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "topic": "test_topic"}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT camera device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT camera device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, ["test_topic"]
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, "test_topic", b"ON"
)
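# Run note: these tests lean on Home Assistant's shared pytest fixtures
# (hass, mqtt_mock, caplog); invoke them with pytest from a core checkout,
# e.g. `pytest tests/components/mqtt/test_camera.py` (path assumed).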
|
|
"""
#;+
#; NAME:
#; absline
#; Version 1.0
#;
#; PURPOSE:
#; Module for absorption line kinematics
#; 29-Nov-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, imp
from astropy.io import fits, ascii
from astropy import units as u
from astropy.convolution import convolve, Box1DKernel
#from astropy import constants as const
from xastropy.xutils import xdebug as xdb
from xastropy import spec as xspec
########################## ##########################
########################## ##########################
class Kin_Abs(object):
""" Class for kinematics on an absorption line
Attributes
----------
wrest: float
Rest wavelength of line analyzed
vmnx: tuple (vmin,vmax)
Velocity range for analysis
JXP on 11 Dec 2014
"""
# Initialize
def __init__(self, wrest, vmnx):
# Absorption system
self.wrest = wrest
self.vmnx = vmnx
        # Data
        self.kin_data = {}
        self.stau = None  # smoothed tau array, filled by mk_pix_stau
        self.pix = None  # pixel window used for the analysis
self.keys = ['flg', 'Dv', 'fedg', 'fmm', 'delta_v', 'X_fcover',
'v_peak', 'zero_pk', 'JF_fcover']
self.key_dtype = ['i4', 'f4', 'f4', 'f4', 'f4', 'f4',
'f4', 'f4', 'f4']
# Init
for key in self.keys:
self.kin_data[key] = 0
#xdb.set_trace()
# Access the data and return the value
    def __getitem__(self, item):
        return self.kin_data[item]
########################## ##########################
def mk_pix_stau(self, spec, kbin=22.*u.km/u.s, debug=False, **kwargs):
""" Generate the smoothed tau array for kinematic tests
        Parameters
        ----------
        spec: Spectrum1D class
          Input spectrum
          velo is expected to have been filled already
        kbin: Quantity, optional (22 km/s)
          Kernel width for boxcar smoothing of the tau array
        Returns
        -------
        Nothing; fills self.stau (smoothed tau) and self.pix (pixel window).
        JXP on 11 Dec 2014
        """
        # Calculate dv
imn = np.argmin( np.fabs(spec.velo) )
dv = np.abs( spec.velo[imn] - spec.velo[imn+1] )
# Test for bad pixels
pixmin = np.argmin( np.fabs( spec.velo-self.vmnx[0] ) )
pixmax = np.argmin( np.fabs( spec.velo-self.vmnx[1] ) )
pix = np.arange(pixmin, pixmax+1)
npix = len(pix)
        badzero = np.where((spec.flux[pix] == 0) & (spec.sig[pix] <= 0))[0]
        if len(badzero) > 0:
            if np.max(badzero)-np.min(badzero) >= 5:
                raise ValueError('orig_kin: too many or too large sections of bad data')
            spec.flux[pix[badzero]] = np.mean(np.array([spec.flux[pix[np.min(badzero)-1]],
                                                        spec.flux[pix[np.max(badzero)+1]]]))
            # xdb.set_trace()  # Should patch sig here too
# Generate the tau array
tau = np.zeros(npix)
gd = np.where((spec.flux[pix] > spec.sig[pix]/2.) &
(spec.sig[pix] > 0.) )
        if len(gd[0]) == 0:
raise ValueError('orig_kin: Profile too saturated.')
tau[gd] = np.log(1./spec.flux[pix[gd]])
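        # Pixels outside `gd` are treated as saturated: assign the
        # lower-limit optical depth ln(2/sigma) instead of ln(1/flux)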
sat = (pix == pix)
sat[gd] = False
tau[sat] = np.log(2./spec.sig[pix[sat]])
# Smooth
nbin = (np.round(kbin/dv)).value
kernel = Box1DKernel(nbin, mode='center')
stau = convolve(tau, kernel, boundary='fill', fill_value=0.)
if debug is True:
xdb.xplot(spec.velo[pix], tau, stau)
# Fill
self.stau = stau
self.pix = pix
########################## ##########################
def orig_kin(self, spec, kbin=22., per=0.05, get_stau=False, debug=False, **kwargs):
""" Measure a standard suite of absorption line kinematics
        Parameters
        ----------
        spec: Spectrum1D class
          Input spectrum
          velo is expected to have been filled already
        kbin: float (22.)
          Kernel width (km/s) passed to mk_pix_stau when stau is regenerated
        per: float (0.05)
          Percentile defining the velocity interval Dv (e.g. dv90 for 0.05)
        Returns
        -------
        Nothing; fills self.kin_data ('Dv', 'fmm', 'fedg') and sets 'flg'.
        JXP on 21 Nov 2014
        """
# Generate stau and pix?
if (self.stau is None) | (get_stau is True):
self.mk_pix_stau(spec, kbin=kbin)
# Dv (usually dv90)
tottau = np.sum( self.stau )
cumtau = np.cumsum(self.stau) / tottau
lft = (np.where(cumtau > per)[0])[0]
rgt = (np.where(cumtau > (1.-per))[0])[0] - 1
self.kin_data['Dv'] = np.round(np.abs(spec.velo[self.pix[rgt]]-spec.velo[self.pix[lft]]))
#xdb.set_trace()
# Mean/Median
vcen = (spec.velo[self.pix[rgt]]+spec.velo[self.pix[lft]])/2.
mean = self.kin_data['Dv']/2.
imn = np.argmin( np.fabs(cumtau-0.5) )
self.kin_data['fmm'] = np.abs( (spec.velo[self.pix[imn]]-vcen)/mean )
# fedg
imx = np.argmax(self.stau)
self.kin_data['fedg'] = np.abs( (spec.velo[self.pix[imx]]-vcen) / mean )
# Two-peak :: Not ported.. Not even to XIDL!
# Set flag
if (self.kin_data['flg'] % 2) < 1:
self.kin_data['flg'] = 1
########################## ##########################
def cgm_kin(self, spec, per=0.05, debug=False, cov_thresh=0.5,
dv_zeropk=15.*u.km/u.s, do_orig_kin=False, get_stau=False, **kwargs):
""" Some new tests, invented in the context of CGM studies.
Some are thanks to John Forbes.
This code is usually run after orig_kin. You should probably run them
separately if you plan to modify the default settings of either.
Parameters
----------
spec: Spectrum1D class
Input spectrum
velo is expected to have been filled already
cov_thresh: float (0.5)
Parameter for the X_fcover test
JXP on 11 Dec 2014
"""
# Generate stau and pix?
if (self.stau is None) | (get_stau is True):
self.mk_pix_stau(spec, **kwargs)
# Original kin?
if do_orig_kin is True:
self.orig_kin(spec)
# voff -- Velocity centroid of profile relative to zsys
self.kin_data['delta_v'] = np.sum(
spec.velo[self.pix] * self.stau ) / np.sum( self.stau )
# ###
# X "Covering" test
tottau = np.sum( self.stau )
cumtau = np.cumsum(self.stau) / tottau
lft = (np.where(cumtau > per)[0])[0]
rgt = (np.where(cumtau > (1.-per))[0])[0] - 1
inpix = range(lft,rgt+1)
tau_covering = np.mean( self.stau[inpix] )
i_cover = np.where( self.stau[inpix] > cov_thresh*tau_covering)[0]
self.kin_data['X_fcover'] = float(len(i_cover)) / float(len(inpix))
# ###
# Peak -- Peak optical depth velocity
imx = np.argmax(self.stau)
self.kin_data['v_peak'] = spec.velo[self.pix[imx]]
# ###
# Zero peak -- Ratio of peak optical depth to that within 15 km/s of zero
tau_zero = self.stau[imx]
if (self.vmnx[0] > 0.) | (self.vmnx[1] < 0.):
#; Not covered
#; Assuming zero value
self.kin_data['zero_pk'] = 0.
else:
zpix = np.where( np.abs(spec.velo[self.pix]) < dv_zeropk)[0]
if len(zpix) == 0:
raise ValueError('cgm_kin: Problem here..')
mx_ztau = np.max(self.stau[zpix])
self.kin_data['zero_pk'] = np.max([0. , np.min( [mx_ztau/tau_zero,1.])])
# ###
# Forbes "Covering"
dv = np.abs(spec.velo[self.pix[1]]-spec.velo[self.pix[0]])
forbes_fcover = dv * np.sum( self.stau ) / tau_zero
self.kin_data['JF_fcover'] = forbes_fcover
# Set flag
if (self.kin_data['flg'] % 4) < 2:
self.kin_data['flg'] += 2
# Perform all the measurements
def fill_kin(self, spec, **kwargs):
# Setup
self.mk_pix_stau(spec, **kwargs)
# Original kinematics
self.orig_kin(spec, **kwargs)
        # CGM kinematics
self.cgm_kin(spec, **kwargs)
# Output
def __repr__(self):
return ('[{:s}: {:g}]'.format(
self.__class__.__name__, self.wrest) )
#### ###############################
#### ###############################
# Testing
if __name__ == '__main__':
flg_test = 0
flg_test = 1 # First test of orig_kin
# First test
if (flg_test % 2**1) >= 2**0:
# Grab spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = xspec.readwrite.readspec(spec_fil)
vmnx = (-25., 80.)
wrest = 1741.5490
zabs = 2.309
# Generate velo
spec.velo = spec.relative_vel( (1+zabs)*wrest )
kin = Kin_Abs(wrest, vmnx)
# Call kin
kin.fill_kin(spec)
print('Kin results = {:g}, {:g}, {:g}'.format(kin['Dv'],
kin['fmm'],
kin['fedg'] ))
|
|
import numpy
from magma import pars
import os
from rdkit import Chem
class FragmentEngine(object):
def __init__(self, mol, max_broken_bonds, max_water_losses, ionisation_mode, skip_fragmentation, molcharge):
try:
self.mol = Chem.MolFromMolBlock(str(mol))
self.accept = True
self.natoms = self.mol.GetNumAtoms()
        except Exception:
self.accept = False
return
self.max_broken_bonds = max_broken_bonds
self.max_water_losses = max_water_losses
self.ionisation_mode = ionisation_mode
self.skip_fragmentation = skip_fragmentation
self.molcharge = molcharge
self.atom_masses = []
self.atomHs = []
self.neutral_loss_atoms = []
self.bonded_atoms = [] # [[list of atom numbers]]
self.bonds = set([])
self.bondscore = {}
self.new_fragment = 0
self.template_fragment = 0
self.fragment_masses = ((max_broken_bonds + max_water_losses) * 2 + 1) * [0]
self.fragment_info = [[0, 0, 0]]
self.avg_score = None
for x in range(self.natoms):
self.bonded_atoms.append([])
atom = self.mol.GetAtomWithIdx(x)
self.atomHs.append(atom.GetNumImplicitHs() + atom.GetNumExplicitHs())
self.atom_masses.append(pars.mims[atom.GetSymbol()] + pars.Hmass * (self.atomHs[x]))
if atom.GetSymbol() == 'O' and self.atomHs[x] == 1 and len(atom.GetBonds()) == 1:
self.neutral_loss_atoms.append(x)
if atom.GetSymbol() == 'N' and self.atomHs[x] == 2 and len(atom.GetBonds()) == 1:
self.neutral_loss_atoms.append(x)
for bond in self.mol.GetBonds():
a1, a2 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
self.bonded_atoms[a1].append(a2)
self.bonded_atoms[a2].append(a1)
bondbits = 1 << a1 | 1 << a2
bondscore = pars.typew[bond.GetBondType()] * \
pars.heterow[bond.GetBeginAtom().GetSymbol() != 'C' or bond.GetEndAtom().GetSymbol() != 'C']
self.bonds.add(bondbits)
self.bondscore[bondbits] = bondscore
def extend(self, atom):
for a in self.bonded_atoms[atom]:
atombit = 1 << a
if atombit & self.template_fragment and not atombit & self.new_fragment:
self.new_fragment = self.new_fragment | atombit
self.extend(a)
def generate_fragments(self):
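        # Fragments are encoded as atom bitmasks: bit i set means atom i is
        # in the fragment, so (1 << natoms) - 1 is the intact molecule.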
frag = (1 << self.natoms) - 1
all_fragments = set([frag])
total_fragments = set([frag])
current_fragments = set([frag])
new_fragments = set([frag])
self.add_fragment(frag, self.calc_fragment_mass(frag), 0, 0)
if self.skip_fragmentation:
self.convert_fragments_table()
return len(self.fragment_info)
# generate fragments for max_broken_bond steps
for step in range(self.max_broken_bonds):
# loop over all fragments to be fragmented
for fragment in current_fragments:
# loop over all atoms
for atom in range(self.natoms):
# in the fragment
if (1 << atom) & fragment:
# remove the atom
self.template_fragment = fragment ^ (1 << atom)
list_ext_atoms = set([])
extended_fragments = set([])
# find all its neighbor atoms
for a in self.bonded_atoms[atom]:
# present in the fragment
if (1 << a) & self.template_fragment:
list_ext_atoms.add(a)
# in case of one bonded atom, the new fragment is the remainder of the old fragment
if len(list_ext_atoms) == 1:
extended_fragments.add(self.template_fragment)
else:
# otherwise extend each neighbor atom to a complete fragment
for a in list_ext_atoms:
# except when deleted atom is in a ring and a previous extended
# fragment already contains this neighbor atom, then
# calculate fragment only once
for frag in extended_fragments:
if (1 << a) & frag:
break
else:
# extend atom to complete fragment
self.new_fragment = 1 << a
self.extend(a)
extended_fragments.add(self.new_fragment)
for frag in extended_fragments:
# add extended fragments, if not yet present, to the collection
if frag not in all_fragments:
all_fragments.add(frag)
bondbreaks, score = self.score_fragment(frag)
if bondbreaks <= self.max_broken_bonds and score < (pars.missingfragmentpenalty + 5):
new_fragments.add(frag)
total_fragments.add(frag)
self.add_fragment(
frag, self.calc_fragment_mass(frag), score, bondbreaks)
current_fragments = new_fragments
new_fragments = set([])
# number of OH losses
for step in range(self.max_water_losses):
# loop of all fragments
for fi in self.fragment_info:
# on which to apply neutral loss rules
if fi[2] == self.max_broken_bonds + step:
fragment = fi[0]
# loop over all atoms in the fragment
for atom in self.neutral_loss_atoms:
if (1 << atom) & fragment:
frag = fragment ^ (1 << atom)
# add extended fragments, if not yet present, to the collection
if frag not in total_fragments:
total_fragments.add(frag)
bondbreaks, score = self.score_fragment(frag)
if score < (pars.missingfragmentpenalty + 5):
self.add_fragment(
frag, self.calc_fragment_mass(frag), score, bondbreaks)
self.convert_fragments_table()
return len(self.fragment_info)
def score_fragment(self, fragment):
score = 0
bondbreaks = 0
for bond in self.bonds:
if 0 < (fragment & bond) < bond:
score += self.bondscore[bond]
bondbreaks += 1
if score == 0:
print("score=0: ", fragment, bondbreaks)
return bondbreaks, score
def score_fragment_rel2parent(self, fragment, parent):
score = 0
for bond in self.bonds:
if 0 < (fragment & bond) < (bond & parent):
score += self.bondscore[bond]
return score
def calc_fragment_mass(self, fragment):
fragment_mass = 0.0
for atom in range(self.natoms):
if fragment & (1 << atom):
fragment_mass += self.atom_masses[atom]
return fragment_mass
def add_fragment(self, fragment, fragmentmass, score, bondbreaks):
mass_range = ((self.max_broken_bonds + self.max_water_losses - bondbreaks) * [0] +
list(numpy.arange(-bondbreaks + self.ionisation_mode * (1 - self.molcharge),
bondbreaks + self.ionisation_mode * (1 - self.molcharge) + 1) * pars.Hmass + fragmentmass) +
(self.max_broken_bonds + self.max_water_losses - bondbreaks) * [0])
if bondbreaks == 0:
# make sure that fragmentmass is included
mass_range[self.max_broken_bonds + self.max_water_losses -
self.ionisation_mode] = fragmentmass
self.fragment_masses += mass_range
self.fragment_info.append([fragment, score, bondbreaks])
def convert_fragments_table(self):
self.fragment_masses_np = numpy.array(self.fragment_masses).reshape(
len(self.fragment_info), (self.max_broken_bonds + self.max_water_losses) * 2 + 1)
    def calc_avg_score(self):
        # average the per-fragment scores accumulated in fragment_info
        self.avg_score = numpy.average([score for frag, score, bondbreaks in self.fragment_info])
def get_avg_score(self):
return self.avg_score
def find_fragments(self, mass, parent, precision, mz_precision_abs):
result = numpy.where(numpy.where(self.fragment_masses_np < max(mass * precision, mass + mz_precision_abs),
self.fragment_masses_np, 0) > min(mass / precision, mass - mz_precision_abs))
fragment_set = []
for i in range(len(result[0])):
fid = result[0][i]
fragment_set.append(self.fragment_info[fid] +
[self.fragment_masses_np[fid][self.max_broken_bonds + self.max_water_losses - self.ionisation_mode * (1 - self.molcharge)]] +
[self.ionisation_mode * (1 - self.molcharge) + result[1][i] - self.max_broken_bonds - self.max_water_losses])
return fragment_set
def get_fragment_info(self, fragment, deltaH):
atomlist = []
elements = {'C': 0, 'H': 0, 'N': 0, 'O': 0, 'F': 0,
'P': 0, 'S': 0, 'Cl': 0, 'Br': 0, 'I': 0}
for atom in range(self.natoms):
if ((1 << atom) & fragment):
atomlist.append(atom)
elements[self.mol.GetAtomWithIdx(atom).GetSymbol()] += 1
elements['H'] += self.atomHs[atom]
formula = ''
for el in ('C', 'H', 'N', 'O', 'F', 'P', 'S', 'Cl', 'Br', 'I'):
nel = elements[el]
if nel > 0:
formula += el
if nel > 1:
formula += str(nel)
atomstring = ','.join(str(a) for a in atomlist)
return atomstring, atomlist, formula, fragment2inchikey(self.mol, atomlist)
def get_natoms(self):
return self.natoms
def accepted(self):
return self.accept
def fragment2inchikey(mol, atomlist):
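    # Builds a sub-molecule containing only the atoms in atomlist; note that
    # despite its name this returns a canonical SMILES, not an InChIKey.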
emol = Chem.EditableMol(mol)
for atom in reversed(range(mol.GetNumAtoms())):
if atom not in atomlist:
emol.RemoveAtom(atom)
frag = emol.GetMol()
return Chem.MolToSmiles(frag)
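# Minimal usage sketch (values hypothetical; `molblock` is an MDL mol block):
#   fe = FragmentEngine(molblock, max_broken_bonds=3, max_water_losses=1,
#                       ionisation_mode=1, skip_fragmentation=0, molcharge=0)
#   if fe.accepted():
#       nfrag = fe.generate_fragments()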
|
|
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER COMMITTEE ID NUMBER', 'number': '2'},
{'name': 'COMMITTEE NAME', 'number': '3'},
{'name': 'CHANGE OF ADDRESS', 'number': '4'},
{'name': 'STREET 1', 'number': '5'},
{'name': 'STREET 2', 'number': '6'},
{'name': 'CITY', 'number': '7'},
{'name': 'STATE', 'number': '8'},
{'name': 'ZIP', 'number': '9'},
{'name': 'ACTIVITY PRIMARY', 'number': '10'},
{'name': 'ACTIVITY GENERAL', 'number': '11'},
{'name': 'REPORT CODE', 'number': '12'},
{'name': 'ELECTION CODE', 'number': '13'},
{'name': 'DATE OF ELECTION', 'number': '14'},
{'name': 'STATE OF ELECTION', 'number': '15'},
{'name': 'COVERAGE FROM DATE', 'number': '16'},
{'name': 'COVERAGE THROUGH DATE', 'number': '17'},
{'name': 'TREASURER LAST NAME', 'number': '18'},
{'name': 'TREASURER FIRST NAME', 'number': '19'},
{'name': 'TREASURER MIDDLE NAME', 'number': '20'},
{'name': 'TREASURER PREFIX', 'number': '21'},
{'name': 'TREASURER SUFFIX', 'number': '22'},
{'name': 'DATE SIGNED', 'number': '23'},
{'name': 'Cash on Hand Beginning Period', 'number': '24-6.'},
{'name': 'Total Receipts', 'number': '25-7.'},
{'name': 'SubTotal', 'number': '26-8.'},
{'name': 'Total Disbursements', 'number': '27-9.'},
{'name': 'Cash on Hand Close of Period', 'number': '28-10.'},
{'name': 'Debts to', 'number': '29-11.'},
{'name': 'Debts by', 'number': '30-12.'},
{'name': 'Expenditures Subject to Limits', 'number': '31-13.'},
{'name': 'Net Contributions', 'number': '32-14.'},
{'name': 'Net Operating Expenditures', 'number': '33-15.'},
{'name': 'Federal Funds', 'number': '34-16.'},
{'name': 'Individuals', 'number': '35-17(a)'},
{'name': 'Political Party Committees', 'number': '36-17(b)'},
{'name': 'Other Political Committees (PACs)', 'number': '37-17(c)'},
{'name': 'The Candidate', 'number': '38-17(d)'},
{'name': 'Total Contributions', 'number': '39-17(e)'},
{'name': 'Transfers From Aff/Other Party Committees', 'number': '40-18.'},
{'name': 'Received from or Guaranteed by Cand.', 'number': '41-19(a)'},
{'name': 'Other Loans', 'number': '42-19(b)'},
{'name': 'Total Loans', 'number': '43-19(c)'},
{'name': 'Operating', 'number': '44-20(a)'},
{'name': 'Fundraising', 'number': '45-20(b)'},
{'name': 'Legal and Accounting', 'number': '46-20(c)'},
{'name': 'Total offsets to Expenditures', 'number': '47-20(d)'},
{'name': 'Other Receipts', 'number': '48-21.'},
{'name': 'Total Receipts', 'number': '49-22.'},
{'name': 'Operating Expenditures', 'number': '50-23.'},
{'name': 'Transfers to Other Authorized Committees', 'number': '51-24.'},
{'name': 'Fundraising Disbursements', 'number': '52-25.'},
{'name': 'Exempt Legal & Accounting Disbursement', 'number': '53-26.'},
{'name': 'Made or guaranteed by Candidate', 'number': '54-27(a)'},
{'name': 'Other Repayments', 'number': '55-27(b)'},
{'name': 'Total Loan Repayments Made', 'number': '56-27(c)'},
{'name': 'Individuals', 'number': '57-28(a)'},
{'name': 'Political Party Committees', 'number': '58-28(b)'},
{'name': 'Other Political Committees', 'number': '59-28(c)'},
{'name': 'Total Contributions Refunds', 'number': '60-28(d)'},
{'name': 'Other Disbursements', 'number': '61-29.'},
{'name': 'Total Disbursements', 'number': '62-30.'},
{'name': 'Items on Hand to be Liquidated', 'number': '63-31.'},
{'name': 'ALABAMA', 'number': '64'},
{'name': 'ALASKA', 'number': '65'},
{'name': 'ARIZONA', 'number': '66'},
{'name': 'ARKANSAS', 'number': '67'},
{'name': 'CALIFORNIA', 'number': '68'},
{'name': 'COLORADO', 'number': '69'},
{'name': 'CONNECTICUT', 'number': '70'},
{'name': 'DELAWARE', 'number': '71'},
{'name': 'DIST OF COLUMBIA', 'number': '72'},
{'name': 'FLORIDA', 'number': '73'},
{'name': 'GEORGIA', 'number': '74'},
{'name': 'HAWAII', 'number': '75'},
{'name': 'IDAHO', 'number': '76'},
{'name': 'ILLINOIS', 'number': '77'},
{'name': 'INDIANA', 'number': '78'},
{'name': 'IOWA', 'number': '79'},
{'name': 'KANSAS', 'number': '80'},
{'name': 'KENTUCKY', 'number': '81'},
{'name': 'LOUISIANA', 'number': '82'},
{'name': 'MAINE', 'number': '83'},
{'name': 'MARYLAND', 'number': '84'},
{'name': 'MASSACHUSETTS', 'number': '85'},
{'name': 'MICHIGAN', 'number': '86'},
{'name': 'MINNESOTA', 'number': '87'},
{'name': 'MISSISSIPPI', 'number': '88'},
{'name': 'MISSOURI', 'number': '89'},
{'name': 'MONTANA', 'number': '90'},
{'name': 'NEBRASKA', 'number': '91'},
{'name': 'NEVADA', 'number': '92'},
{'name': 'NEW HAMPSHIRE', 'number': '93'},
{'name': 'NEW JERSEY', 'number': '94'},
{'name': 'NEW MEXICO', 'number': '95'},
{'name': 'NEW YORK', 'number': '96'},
{'name': 'NORTH CAROLINA', 'number': '97'},
{'name': 'NORTH DAKOTA', 'number': '98'},
{'name': 'OHIO', 'number': '99'},
{'name': 'OKLAHOMA', 'number': '100'},
{'name': 'OREGON', 'number': '101'},
{'name': 'PENNSYLVANIA', 'number': '102'},
{'name': 'RHODE ISLAND', 'number': '103'},
{'name': 'SOUTH CAROLINA', 'number': '104'},
{'name': 'SOUTH DAKOTA', 'number': '105'},
{'name': 'TENNESSEE', 'number': '106'},
{'name': 'TEXAS', 'number': '107'},
{'name': 'UTAH', 'number': '108'},
{'name': 'VERMONT', 'number': '109'},
{'name': 'VIRGINIA', 'number': '110'},
{'name': 'WASHINGTON', 'number': '111'},
{'name': 'WEST VIRGINIA', 'number': '112'},
{'name': 'WISCONSIN', 'number': '113'},
{'name': 'WYOMING', 'number': '114'},
{'name': 'PUERTO RICO', 'number': '115'},
{'name': 'GUAM', 'number': '116'},
{'name': 'VIRGIN ISLANDS', 'number': '117'},
{'name': 'TOTALS', 'number': '118'},
{'name': 'Federal Funds', 'number': '119-16.'},
{'name': 'Individuals', 'number': '120-17(a)'},
{'name': 'Political Party Committees', 'number': '121-17(b)'},
{'name': 'Other Political Committees (PACs)', 'number': '122-17(c)'},
{'name': 'The Candidate', 'number': '123-17(d)'},
{'name': 'Total contributions (Other than Loans)', 'number': '124-17(e)'},
{'name': 'Transfers From Aff/Other Party Committees', 'number': '125-18.'},
{'name': 'Received from or Guaranteed by Cand.', 'number': '126-19(a)'},
{'name': 'Other Loans', 'number': '127-19(b)'},
{'name': 'Total Loans', 'number': '128-19(c)'},
{'name': 'Operating', 'number': '129-20(a)'},
{'name': 'Fundraising', 'number': '130-20(b)'},
{'name': 'Legal and Accounting', 'number': '131-20(c)'},
{'name': 'Total Offsets to Operating Expenditures', 'number': '132-20(d)'},
{'name': 'Other Receipts', 'number': '133-21.'},
{'name': 'Total Receipts', 'number': '134-22.'},
{'name': 'Operating Expenditures', 'number': '135-23.'},
{'name': 'Transfers to Other Authorized Committees', 'number': '136-24.'},
{'name': 'Fundraising Disbursements', 'number': '137-25.'},
{'name': 'Exempt Legal & Accounting Disbursement', 'number': '138-26.'},
{'name': 'Made or Guaranteed by the Candidate', 'number': '139-27(a)'},
{'name': 'Other Repayments', 'number': '140-27(b)'},
{'name': 'Total Loan Repayments Made', 'number': '141-27(c)'},
{'name': 'Individuals', 'number': '142-28(a)'},
{'name': 'Political Party Committees', 'number': '143-28(b)'},
{'name': 'Other Political Committees', 'number': '144-28(c)'},
{'name': 'Total Contributions Refunds', 'number': '145-28(d)'},
{'name': 'Other Disbursements', 'number': '146-29.'},
{'name': 'Total Disbursements', 'number': '147-30.'},
{'name': 'ALABAMA', 'number': '148'},
{'name': 'ALASKA', 'number': '149'},
{'name': 'ARIZONA', 'number': '150'},
{'name': 'ARKANSAS', 'number': '151'},
{'name': 'CALIFORNIA', 'number': '152'},
{'name': 'COLORADO', 'number': '153'},
{'name': 'CONNECTICUT', 'number': '154'},
{'name': 'DELAWARE', 'number': '155'},
{'name': 'DIST OF COLUMBIA', 'number': '156'},
{'name': 'FLORIDA', 'number': '157'},
{'name': 'GEORGIA', 'number': '158'},
{'name': 'HAWAII', 'number': '159'},
{'name': 'IDAHO', 'number': '160'},
{'name': 'ILLINOIS', 'number': '161'},
{'name': 'INDIANA', 'number': '162'},
{'name': 'IOWA', 'number': '163'},
{'name': 'KANSAS', 'number': '164'},
{'name': 'KENTUCKY', 'number': '165'},
{'name': 'LOUISIANA', 'number': '166'},
{'name': 'MAINE', 'number': '167'},
{'name': 'MARYLAND', 'number': '168'},
{'name': 'MASSACHUSETTS', 'number': '169'},
{'name': 'MICHIGAN', 'number': '170'},
{'name': 'MINNESOTA', 'number': '171'},
{'name': 'MISSISSIPPI', 'number': '172'},
{'name': 'MISSOURI', 'number': '173'},
{'name': 'MONTANA', 'number': '174'},
{'name': 'NEBRASKA', 'number': '175'},
{'name': 'NEVADA', 'number': '176'},
{'name': 'NEW HAMPSHIRE', 'number': '177'},
{'name': 'NEW JERSEY', 'number': '178'},
{'name': 'NEW MEXICO', 'number': '179'},
{'name': 'NEW YORK', 'number': '180'},
{'name': 'NORTH CAROLINA', 'number': '181'},
{'name': 'NORTH DAKOTA', 'number': '182'},
{'name': 'OHIO', 'number': '183'},
{'name': 'OKLAHOMA', 'number': '184'},
{'name': 'OREGON', 'number': '185'},
{'name': 'PENNSYLVANIA', 'number': '186'},
{'name': 'RHODE ISLAND', 'number': '187'},
{'name': 'SOUTH CAROLINA', 'number': '188'},
{'name': 'SOUTH DAKOTA', 'number': '189'},
{'name': 'TENNESSEE', 'number': '190'},
{'name': 'TEXAS', 'number': '191'},
{'name': 'UTAH', 'number': '192'},
{'name': 'VERMONT', 'number': '193'},
{'name': 'VIRGINIA', 'number': '194'},
{'name': 'WASHINGTON', 'number': '195'},
{'name': 'WEST VIRGINIA', 'number': '196'},
{'name': 'WISCONSIN', 'number': '197'},
{'name': 'WYOMING', 'number': '198'},
{'name': 'PUERTO RICO', 'number': '199'},
{'name': 'GUAM', 'number': '200'},
{'name': 'VIRGIN ISLANDS', 'number': '201'},
{'name': 'TOTALS', 'number': '202'},
]
self.fields_names = self.hash_names(self.fields)
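# Illustrative sketch only: fechbase.RecordsBase.hash_names is not shown here,
# but a minimal name-to-column-number lookup over `fields` could look like the
# helper below (duplicate names, e.g. the period-2 'Total Receipts', keep the
# last occurrence).
def _demo_field_lookup(fields):
    return dict((f['name'], f['number']) for f in fields)
# e.g. _demo_field_lookup(Records().fields)['REPORT CODE'] == '12'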
|
|
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
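# A small self-check for _iterate_sparse_X (an illustrative helper, not part
# of the module's API): each yielded row should equal the corresponding
# densified row of the CSR matrix.
def _demo_iterate_sparse_X():
    X = sparse.csr_matrix(np.array([[0., 2., 0.], [1., 0., 3.]]))
    rows = list(_iterate_sparse_X(X))
    assert np.array_equal(rows[0], np.array([0., 2., 0.]))
    assert np.array_equal(rows[1], np.array([1., 0., 3.]))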
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features
)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features
)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
# Rows of distances from each of the two farthest subclusters to all
# subclusters. list() avoids the deprecated list-of-tuple numpy indexing.
node1_dist, node2_dist = dist[list(farthest_idx)]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate init_centroids_ throughout rather than centroids_ since
the centroids are just a view of the init_centroids_ .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to init_centroids_.
centroids_ : ndarray
view of init_centroids_.
squared_norm_ : ndarray
view of init_sq_norm_.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids_ and squared_norm_ as views onto init_centroids_ and
# init_sq_norm_, so that updating the latter is sufficient.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
n_samples = len(self.subclusters_)
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# The child had to be split: redistribute its subclusters between
# two new nodes, and add a new subcluster in this node to
# accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
index : int, optional
Index of the array in the original data. This enables to
retrieve the final subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevents recomputing centroids when
CFNode.centroids_ is called.
child_ : _CFNode
Child node of the subcluster. Once a given _CFNode is set as the child
of a _CFSubcluster, it is stored in self.child_.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_
)
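# Numeric sanity check (illustrative only, not used by Birch): the closed form
# in `radius` equals the root-mean-square distance of the samples to their
# centroid, since sum ||x_i - c||^2 = sum ||x_i||^2 - 2 c.sum(x_i) + n ||c||^2.
def _demo_radius_identity():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    sub = _CFSubcluster(linear_sum=X[0].copy())  # copy: update() adds in place
    for x in X[1:]:
        sub.update(_CFSubcluster(linear_sum=x))
    explicit = np.sqrt(np.mean(np.sum((X - X.mean(axis=0)) ** 2, axis=1)))
    assert np.allclose(sub.radius, explicit)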
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the subcluster of the leaf of the tree has the closest centroid.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be smaller than the threshold. Otherwise a
new subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split, and if the number of subclusters in the parent is greater than
the branching factor, it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize; a strong argument for moving this loop to Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not has_partial_fit and not is_fitted:
raise ValueError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the centroids_ of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape (n_samples,)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
if not hasattr(self, 'subcluster_centers_'):
raise ValueError("Fit training data before predicting")
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than the number of clusters requested (%d). Decrease "
"the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
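# Hedged usage sketch (defined but not executed here): build the CF tree in
# two batches via partial_fit, then predict. This mirrors the class docstring
# above; the data values are illustrative.
def _demo_birch_partial_fit():
    X = np.array([[0., 1.], [0.3, 1.], [-0.3, 1.],
                  [0., -1.], [0.3, -1.], [-0.3, -1.]])
    brc = Birch(threshold=0.5, n_clusters=2)
    brc.partial_fit(X[:3])   # builds the tree
    brc.partial_fit(X[3:])   # extends it without rebuilding
    return brc.predict(X)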
|
|
# -*- coding: utf-8 -*-
""" Httplib2 threaded cookie layer
This class extends httplib2, adding support for:
- Cookies, guarded for cross-site redirects
- Thread safe ConnectionPool and LockableCookieJar classes
- HttpProcessor thread class
- HttpRequest object
"""
# (C) 2007 Pywikipedia bot team, 2007
# (C) 2006 Httplib 2 team, 2006
# (C) 2007 Metaweb Technologies, Inc.
#
# Partially distributed under the MIT license
# Partially distributed under Metaweb Technologies, Incs license
# which is compatible with the MIT license
__version__ = '$Id$'
__docformat__ = 'epytext'
# standard python libraries
import re
import threading
import time
import logging
import urllib
import cookielib
import sys
import pywikibot
from pywikibot import config
_logger = "comm.threadedhttp"
# easy_install safeguarded dependencies
try:
import httplib2
except ImportError:
try:
import pkg_resources
pkg_resources.require("httplib2")
except ImportError:
pywikibot.error(
u"Error: You need the python module setuptools to use this module")
sys.exit(1)
class ConnectionPool(object):
"""A thread-safe connection pool."""
def __init__(self, maxnum=5):
"""
@param maxnum: Maximum number of connections per identifier.
The pool drops excessive connections added.
"""
pywikibot.debug(u"Creating connection pool.", _logger)
self.connections = {}
self.lock = threading.Lock()
self.maxnum = maxnum
def __del__(self):
"""Destructor to close all connections in the pool."""
self.lock.acquire()
try:
pywikibot.debug(u"Closing connection pool (%s connections)"
% len(self.connections),
_logger)
for key in self.connections:
for connection in self.connections[key]:
connection.close()
except AttributeError:
pass # this shows up when logger has been destroyed first
finally:
self.lock.release()
def __repr__(self):
return self.connections.__repr__()
def pop_connection(self, identifier):
"""Get a connection from identifier's connection pool.
@param identifier: The pool identifier
@return: A connection object if found, None otherwise
"""
self.lock.acquire()
try:
if identifier in self.connections:
if len(self.connections[identifier]) > 0:
pywikibot.debug(u"Retrieved connection from '%s' pool."
% identifier,
_logger)
return self.connections[identifier].pop()
return None
finally:
self.lock.release()
def push_connection(self, identifier, connection):
"""Add a connection to identifier's connection pool.
@param identifier: The pool identifier
@param connection: The connection to add to the pool
"""
self.lock.acquire()
try:
if identifier not in self.connections:
self.connections[identifier] = []
if len(self.connections[identifier]) == self.maxnum:
pywikibot.debug(u"closing %s connection %r"
% (identifier, connection),
_logger)
connection.close()
del connection
else:
self.connections[identifier].append(connection)
finally:
self.lock.release()
class LockableCookieJar(cookielib.LWPCookieJar):
"""CookieJar with integrated Lock object."""
def __init__(self, *args, **kwargs):
cookielib.LWPCookieJar.__init__(self, *args, **kwargs)
self.lock = threading.Lock()
class Http(httplib2.Http):
"""Subclass of httplib2.Http that stores cookies.
Overrides httplib2's internal redirect support to prevent cookies being
eaten by the wrong sites.
"""
def __init__(self, *args, **kwargs):
"""
@param cookiejar: (optional) CookieJar to use. A new one will be
used when not supplied.
@param connection_pool: (optional) Connection pool to use. A new one
will be used when not supplied.
@param max_redirects: (optional) The maximum number of redirects to
follow. 5 is default.
"""
try:
self.cookiejar = kwargs.pop('cookiejar')
except KeyError:
self.cookiejar = LockableCookieJar()
try:
self.connection_pool = kwargs.pop('connection_pool')
except KeyError:
self.connection_pool = ConnectionPool()
self.max_redirects = kwargs.pop('max_redirects', 5)
if len(args) < 3:
kwargs.setdefault('proxy_info', config.proxy)
httplib2.Http.__init__(self, *args, **kwargs)
def request(self, uri, method="GET", body=None, headers=None,
max_redirects=None, connection_type=None):
"""Start an HTTP request.
@param uri: The uri to retrieve
@param method: (optional) The HTTP method to use. Default is 'GET'
@param body: (optional) The request body. Default is no body.
@param headers: (optional) Additional headers to send. Defaults
include C{connection: keep-alive}, C{user-agent} and
C{content-type}.
@param max_redirects: (optional) The maximum number of redirects to
use for this request. The class instance's max_redirects is
default
@param connection_type: (optional) see L{httplib2.Http.request}
@return: (response, content) tuple
"""
if max_redirects is None:
max_redirects = self.max_redirects
if headers is None:
headers = {}
# Prepare headers
headers.pop('cookie', None)
req = DummyRequest(uri, headers)
self.cookiejar.lock.acquire()
try:
self.cookiejar.add_cookie_header(req)
finally:
self.cookiejar.lock.release()
headers = req.headers
# Wikimedia squids: add connection: keep-alive to request headers
# unless overridden
headers['connection'] = headers.pop('connection', 'keep-alive')
# determine connection pool key and fetch connection
(scheme, authority, request_uri, defrag_uri) = httplib2.urlnorm(
httplib2.iri2uri(uri))
conn_key = scheme+":"+authority
connection = self.connection_pool.pop_connection(conn_key)
if connection is not None:
self.connections[conn_key] = connection
# Redirect hack: we want to regulate redirects
follow_redirects = self.follow_redirects
self.follow_redirects = False
pywikibot.debug(u"%r" % (
(uri.replace("%7C","|"), method, body,
headers, max_redirects,
connection_type),),
_logger)
try:
(response, content) = httplib2.Http.request(
self, uri, method, body, headers,
max_redirects, connection_type)
except Exception, e: # what types?
# return exception instance to be retrieved by the calling thread
return e
self.follow_redirects = follow_redirects
# return connection to pool
self.connection_pool.push_connection(conn_key,
self.connections[conn_key])
del self.connections[conn_key]
# First write cookies
self.cookiejar.lock.acquire()
try:
self.cookiejar.extract_cookies(DummyResponse(response), req)
finally:
self.cookiejar.lock.release()
# Check for possible redirects
redirectable_response = ((response.status == 303) or
(response.status in [300, 301, 302, 307] and
method in ["GET", "HEAD"]))
if self.follow_redirects and (max_redirects > 0) \
and redirectable_response:
(response, content) = self._follow_redirect(
uri, method, body, headers, response, content, max_redirects)
return (response, content)
def _follow_redirect(self, uri, method, body, headers, response,
content, max_redirects):
"""Internal function to follow a redirect recieved by L{request}"""
(scheme, authority, absolute_uri, defrag_uri) = httplib2.urlnorm(
httplib2.iri2uri(uri))
if self.cache:
cachekey = defrag_uri
else:
cachekey = None
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if "location" not in response and response.status != 300:
raise httplib2.RedirectMissingLocation(
"Redirected but the response is missing a Location: header.",
response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if "location" in response:
location = response['location']
(scheme, authority, path, query, fragment) = httplib2.parse_uri(
location)
if authority is None:
response['location'] = httplib2.urlparse.urljoin(uri, location)
pywikibot.debug(u"Relative redirect: changed [%s] to [%s]"
% (location, response['location']),
_logger)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if "content-location" not in response:
response['content-location'] = absolute_uri
httplib2._updateCache(headers, response, content, self.cache,
cachekey)
headers.pop('if-none-match', None)
headers.pop('if-modified-since', None)
if "location" in response:
location = response['location']
redirect_method = ((response.status == 303) and
(method not in ["GET", "HEAD"])
) and "GET" or method
return self.request(location, redirect_method, body=body,
headers=headers,
max_redirects=max_redirects - 1)
else:
raise RedirectLimit(
"Redirected more times than redirection_limit allows.",
response, content)
class HttpRequest(object):
"""Object wrapper for HTTP requests that need to block origin thread.
Usage:
>>> request = HttpRequest('http://www.google.com')
>>> queue.put(request)
>>> request.lock.acquire()
>>> print request.data
C{request.lock.acquire()} will block until the data is available.
"""
def __init__(self, *args, **kwargs):
"""See C{Http.request} for parameters."""
self.args = args
self.kwargs = kwargs
self.data = None
self.lock = threading.Semaphore(0)
class HttpProcessor(threading.Thread):
"""Thread object to spawn multiple HTTP connection threads."""
def __init__(self, queue, cookiejar, connection_pool):
"""
@param queue: The C{Queue.Queue} object that contains L{HttpRequest}
objects.
@param cookiejar: The C{LockableCookieJar} cookie object to share among
requests.
@param connection_pool: The C{ConnectionPool} object which contains
connections to share among requests.
"""
threading.Thread.__init__(self)
self.queue = queue
self.http = Http(cookiejar=cookiejar, connection_pool=connection_pool)
def run(self):
# The Queue item is expected to be either an HttpRequest object
# or None (to shut down the thread)
pywikibot.debug(u"Thread started, waiting for requests.", _logger)
while (True):
item = self.queue.get()
if item is None:
pywikibot.debug(u"Shutting down thread.", _logger)
return
try:
item.data = self.http.request(*item.args, **item.kwargs)
finally:
if item.lock:
item.lock.release()
# Metaweb Technologies, Inc. License:
# ========================================================================
# The following dummy classes are:
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
class DummyRequest(object):
"""Simulated urllib2.Request object for httplib2
implements only what's necessary for cookielib.CookieJar to work
"""
def __init__(self, url, headers=None):
self.url = url
self.headers = headers
self.origin_req_host = cookielib.request_host(self)
self.type, r = urllib.splittype(url)
self.host, r = urllib.splithost(r)
if self.host:
self.host = urllib.unquote(self.host)
def get_full_url(self):
return self.url
def get_origin_req_host(self):
# TODO to match urllib2 this should be different for redirects
return self.origin_req_host
def get_type(self):
return self.type
def get_host(self):
return self.host
def get_header(self, key, default=None):
return self.headers.get(key.lower(), default)
def has_header(self, key):
return key in self.headers
def add_unredirected_header(self, key, val):
# TODO this header should not be sent on redirect
self.headers[key.lower()] = val
def is_unverifiable(self):
# TODO to match urllib2, this should be set to True when the
# request is the result of a redirect
return False
class DummyResponse(object):
"""Simulated urllib2.Request object for httplib2
implements only what's necessary for cookielib.CookieJar to work
"""
def __init__(self, response):
self.response = response
def info(self):
return DummyMessage(self.response)
class DummyMessage(object):
"""Simulated mimetools.Message object for httplib2
implements only what's necessary for cookielib.CookieJar to work
"""
def __init__(self, response):
self.response = response
def getheaders(self, k):
k = k.lower()
if k not in self.response:
return []
#return self.response[k].split(re.compile(',\\s*'))
# httplib2 joins multiple values for the same header
# using ','. but the netscape cookie format uses ','
# as part of the expires= date format. so we have
# to split carefully here - header.split(',') won't do it.
HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')
return [h[0] for h in HEADERVAL.findall(self.response[k])]
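# Illustrative check of the splitting above (safe to run standalone): a ','
# inside an expires= date must not split the joined header value.
def _demo_headerval_split():
    HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')
    value = 'a=1; expires=Mon, 09-May-2016 00:00:00 GMT, b=2'
    # -> ['a=1; expires=Mon, 09-May-2016 00:00:00 GMT', 'b=2']
    return [h[0] for h in HEADERVAL.findall(value)]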
|
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import tractor.api.author as author
import IECore
import Gaffer
import GafferDispatch
class TractorDispatcher( GafferDispatch.Dispatcher ) :
def __init__( self, name = "TractorDispatcher" ) :
GafferDispatch.Dispatcher.__init__( self, name )
self["service"] = Gaffer.StringPlug( defaultValue = '"*"' )
self["envKey"] = Gaffer.StringPlug()
## Emitted prior to spooling the Tractor job, to allow
# custom modifications to be applied.
#
# Slots should have the signature `slot( dispatcher, job )`,
# where dispatcher is the TractorDispatcher and job will
# be the instance of tractor.api.author.Job that is about
# to be spooled.
@classmethod
def preSpoolSignal( cls ) :
return cls.__preSpoolSignal
__preSpoolSignal = Gaffer.Signal2()
def _doDispatch( self, rootBatch ) :
# Construct an object to track everything we need
# to generate the job. I have a suspicion that at
# some point we'll want a Dispatcher::Job base class
# which _doDispatch() must return, in which case this
# might just be member data for a subclass of one of those.
dispatchData = {}
dispatchData["scriptNode"] = rootBatch.preTasks()[0].node().scriptNode()
dispatchData["scriptFile"] = os.path.join( self.jobDirectory(), os.path.basename( dispatchData["scriptNode"]["fileName"].getValue() ) or "untitled.gfr" )
dispatchData["batchesToTasks"] = {}
dispatchData["scriptNode"].serialiseToFile( dispatchData["scriptFile"] )
# Create a Tractor job and set its basic properties.
context = Gaffer.Context.current()
job = author.Job(
## \todo Remove these manual substitutions once #887 is resolved.
title = context.substitute( self["jobName"].getValue() ) or "untitled",
service = context.substitute( self["service"].getValue() ),
envkey = context.substitute( self["envKey"].getValue() ).split(),
)
# Populate the job with tasks from the batch tree
# that was prepared by our base class. The batch-to-task
# mapping lives in dispatchData["batchesToTasks"].
for upstreamBatch in rootBatch.preTasks() :
self.__buildJobWalk( job, upstreamBatch, dispatchData )
# Signal anyone who might want to make just-in-time
# modifications to the job.
self.preSpoolSignal()( self, job )
# Save a copy of our job script to the job directory.
# This isn't strictly necessary because we'll spool via
# the python API, but it's useful for debugging and also
# means that the job directory provides a complete record
# of the job.
with open( self.jobDirectory() + "/job.alf", "w" ) as alf :
alf.write( "# Generated by Gaffer " + Gaffer.About.versionString() + "\n\n" )
alf.write( job.asTcl() )
# Finally, we can spool the job.
job.spool( block = True )
def __buildJobWalk( self, tractorParent, batch, dispatchData ) :
task = self.__acquireTask( batch, dispatchData )
tractorParent.addChild( task )
if batch.blindData().get( "tractorDispatcher:visited" ) :
return
for upstreamBatch in batch.preTasks() :
self.__buildJobWalk( task, upstreamBatch, dispatchData )
batch.blindData()["tractorDispatcher:visited"] = IECore.BoolData( True )
def __acquireTask( self, batch, dispatchData ) :
# If we've already created a task for this batch, then
# just return it. The Tractor API will take care of turning
# it into an Instance if we add it as a subtask of more than
# one parent.
task = dispatchData["batchesToTasks"].get( batch )
if task is not None :
return task
# Make a task.
nodeName = batch.node().relativeName( dispatchData["scriptNode"] )
frames = str( IECore.frameListFromList( [ int( x ) for x in batch.frames() ] ) )
task = author.Task( title = nodeName + " " + frames )
# Generate a `gaffer execute` command line suitable for
# executing the batch.
args = [
"gaffer", "execute",
"-script", dispatchData["scriptFile"],
"-nodes", nodeName,
"-frames", frames,
]
scriptContext = dispatchData["scriptNode"].context()
contextArgs = []
for entry in [ k for k in batch.context().keys() if k != "frame" and not k.startswith( "ui:" ) ] :
if entry not in scriptContext.keys() or batch.context()[entry] != scriptContext[entry] :
contextArgs.extend( [ "-" + entry, repr( batch.context()[entry] ) ] )
if contextArgs :
args.extend( [ "-context" ] + contextArgs )
# Create a Tractor command to execute that command line, and add
# it to the task.
command = author.Command( argv = args )
task.addCommand( command )
# Apply any custom dispatch settings to the command.
tractorPlug = batch.node()["dispatcher"].getChild( "tractor" )
if tractorPlug is not None :
## \todo Remove these manual substitutions once #887 is resolved.
# Note though that we will need to use `with batch.context()` to
# ensure the substitutions occur in the right context.
command.service = batch.context().substitute( tractorPlug["service"].getValue() )
command.tags = batch.context().substitute( tractorPlug["tags"].getValue() ).split()
# Remember the task for next time, and return it.
dispatchData["batchesToTasks"][batch] = task
return task
@staticmethod
def _setupPlugs( parentPlug ) :
if "tractor" in parentPlug :
return
parentPlug["tractor"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
parentPlug["tractor"]["service"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
parentPlug["tractor"]["tags"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
IECore.registerRunTimeTyped( TractorDispatcher, typeName = "GafferTractor::TractorDispatcher" )
GafferDispatch.Dispatcher.registerDispatcher( "Tractor", TractorDispatcher, TractorDispatcher._setupPlugs )
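# A minimal illustrative sketch (plain dicts, no Gaffer required) of the
# context-override logic in __acquireTask above: only keys that differ from
# the script's own context are forwarded on the command line.
def _demoContextArgs( scriptContext, batchContext ) :
    contextArgs = []
    for entry in [ k for k in batchContext.keys() if k != "frame" and not k.startswith( "ui:" ) ] :
        if entry not in scriptContext or batchContext[entry] != scriptContext[entry] :
            contextArgs.extend( [ "-" + entry, repr( batchContext[entry] ) ] )
    return [ "-context" ] + contextArgs if contextArgs else []
# e.g. _demoContextArgs( { "res" : 1 }, { "res" : 2, "frame" : 5 } )
# -> [ "-context", "-res", "2" ]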
|
|
from __future__ import unicode_literals
import time
import random
import statistics
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
stats = {}
class MyClientProtocol(WebSocketClientProtocol):
def __init__(self, *args, **kwargs):
WebSocketClientProtocol.__init__(self, *args, **kwargs)
self.fingerprint = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for i in range(16))
stats[self.fingerprint] = {}
def onConnect(self, response):
self.opened = time.time()
self.sent = 0
self.last_send = None
self.received = 0
self.corrupted = 0
self.out_of_order = 0
self.latencies = []
def onOpen(self):
def hello():
if self.last_send is None:
if self.sent >= self.factory.num_messages:
self.sendClose()
return
self.last_send = time.time()
self.sendMessage(("%s:%s" % (self.sent, self.fingerprint)).encode("ascii"))
self.sent += 1
else:
# Wait for receipt of ping
pass
self.factory.reactor.callLater(1.0 / self.factory.message_rate, hello)
hello()
def onMessage(self, payload, isBinary):
# Detect receive-before-send
if self.last_send is None:
self.corrupted += 1
print("CRITICAL: Socket %s received before sending: %s" % (self.fingerprint, payload))
return
num, fingerprint = payload.decode("ascii").split(":")
if fingerprint != self.fingerprint:
self.corrupted += 1
try:
if int(num) != self.received:
self.out_of_order += 1
except ValueError:
self.corrupted += 1
self.latencies.append(time.time() - self.last_send)
self.received += 1
self.last_send = None
def onClose(self, wasClean, code, reason):
if hasattr(self, "sent"):
stats[self.fingerprint] = {
"sent": self.sent,
"received": self.received,
"corrupted": self.corrupted,
"out_of_order": self.out_of_order,
"latencies": self.latencies,
"connect": True,
}
else:
stats[self.fingerprint] = {
"sent": 0,
"received": 0,
"corrupted": 0,
"out_of_order": 0,
"connect": False,
}
class Benchmarker(object):
"""
Performs benchmarks against WebSockets.
"""
def __init__(self, url, num, concurrency, rate, messages, spawn):
self.url = url
self.num = num
self.concurrency = concurrency
self.rate = rate
self.spawn = spawn
self.messages = messages
self.factory = WebSocketClientFactory(
self.url,
)
self.factory.protocol = MyClientProtocol
self.factory.num_messages = self.messages
self.factory.message_rate = self.rate
def loop(self):
self.spawn_loop()
self.progress_loop()
def spawn_loop(self):
self.spawn_connections()
reactor.callLater(0.1, self.spawn_loop)
def progress_loop(self):
self.print_progress()
reactor.callLater(1, self.progress_loop)
def spawn_connections(self):
# Stop spawning if we did the right total number
max_to_spawn = self.num - len(stats)
if max_to_spawn <= 0:
return
# Don't spawn too many at once
max_to_spawn = min(max_to_spawn, int(self.spawn / 10.0))
# Decode connection args
host, port = self.url.split("://")[1].split(":")
port = int(port)
# Only spawn enough to get up to concurrency
open_protocols = len([x for x in stats.values() if not x])
to_spawn = min(max(self.concurrency - open_protocols, 0), max_to_spawn)
for _ in range(to_spawn):
reactor.connectTCP(host, port, self.factory)
def print_progress(self):
open_protocols = len([x for x in stats.values() if not x])
print("%s open, %s total" % (
open_protocols,
len(stats),
))
if open_protocols == 0 and len(stats) >= self.num:
reactor.stop()
self.print_stats()
def percentile(self, values, fraction):
"""
Returns a percentile value (e.g. fraction = 0.95 -> 95th percentile)
"""
values = sorted(values)
stopat = int(len(values) * fraction)
if stopat == len(values):
stopat -= 1
return values[stopat]
def print_stats(self):
# Collect stats together
latencies = []
num_good = 0
num_incomplete = 0
num_failed = 0
num_corruption = 0
num_out_of_order = 0
for entry in stats.values():
latencies.extend(entry.get("latencies", []))
if not entry['connect']:
num_failed += 1
elif entry['sent'] != entry['received']:
num_incomplete += 1
elif entry['corrupted']:
num_corruption += 1
elif entry['out_of_order']:
num_out_of_order += 1
else:
num_good += 1
if latencies:
# Some analysis on latencies
latency_mean = statistics.mean(latencies)
latency_median = statistics.median(latencies)
latency_stdev = statistics.stdev(latencies)
latency_95 = self.percentile(latencies, 0.95)
latency_99 = self.percentile(latencies, 0.99)
# Print results
print("-------")
print("Sockets opened: %s" % len(stats))
if latencies:
print("Latency stats: Mean %.3fs Median %.3fs Stdev %.3f 95%% %.3fs 95%% %.3fs" % (
latency_mean,
latency_median,
latency_stdev,
latency_95,
latency_99,
))
print("Good sockets: %s (%.2f%%)" % (num_good, (float(num_good) / len(stats))*100))
print("Incomplete sockets: %s (%.2f%%)" % (num_incomplete, (float(num_incomplete) / len(stats))*100))
print("Corrupt sockets: %s (%.2f%%)" % (num_corruption, (float(num_corruption) / len(stats))*100))
print("Out of order sockets: %s (%.2f%%)" % (num_out_of_order, (float(num_out_of_order) / len(stats))*100))
print("Failed to connect: %s (%.2f%%)" % (num_failed, (float(num_failed) / len(stats))*100))
if __name__ == '__main__':
import sys
import argparse
from twisted.python import log
from twisted.internet import reactor
# log.startLogging(sys.stdout)
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-n", "--num", type=int, default=100, help="Total number of sockets to open")
parser.add_argument("-c", "--concurrency", type=int, default=10, help="Number of sockets to open at once")
parser.add_argument("-r", "--rate", type=float, default=1, help="Number of messages to send per socket per second")
parser.add_argument("-m", "--messages", type=int, default=5, help="Number of messages to send per socket before close")
parser.add_argument("-s", "--spawn", type=int, default=30, help="Number of sockets to spawn per second, max")
args = parser.parse_args()
benchmarker = Benchmarker(
url=args.url,
num=args.num,
concurrency=args.concurrency,
rate=args.rate,
messages=args.messages,
spawn=args.spawn,
)
benchmarker.loop()
reactor.run()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import re
import random
import logging
import threading
import datetime
import time
import six
import requests
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.utils import timezone
from django.conf import settings
from swat4tracker import celery_app as app
from tracker import models, utils, config
from tracker.signals import live_servers_detected, dead_servers_detected
logger = logging.getLogger(__name__)
class ServerQueryThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.server = kwargs.pop('server')
self.live = kwargs.pop('live') # online servers set
self.semaphore = kwargs.pop('semaphore')
super(ServerQueryThread, self).__init__(*args, **kwargs)
def run(self):
try:
self.server.query()
except Exception:
# the query failed; the server is simply not counted as live
pass
else:
# keep track of alive servers
self.live.add(self.server)
finally:
self.semaphore.release()
@app.task(bind=True, ignore_result=True, max_retries=1, default_retry_delay=60)
def fetch_server_list(self, url, pattern):
"""
Fetch a url ``url`` and parse the response contents using an uncompiled pattern ``pattern``.
The compiled pattern must yield match objects with the following named groups: addr, port.
"""
timeout = 2
headers = {
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
try:
response = requests.get(url, headers=headers, timeout=timeout)
except Exception as exc:
logger.debug('failed to fetch %s (%s, %s)' % (url, type(exc), exc))
raise self.retry(exc=exc)
# compile the pattern
pattern = re.compile(pattern, flags=re.M)
for match in pattern.finditer(response.text):
try:
obj, created = models.Server.objects.get_or_create(
ip=match.group('addr'),
port=match.group('port'),
defaults={'enabled': True, 'listed': True}
)
except Exception as e:
logger.debug('failed to add %s:%s (%s, %s)'
% (match.group('addr'), match.group('port'), type(e), e)
)
else:
# relist an existing server
if not created and obj.enabled and not obj.listed:
obj.listed = True
obj.save(update_fields=['listed'])
logger.debug('relisted %s' % obj)
if created:
logger.debug('added %s:%s from %s' % (match.group('addr'), match.group('port'), url))
@app.task(ignore_result=True)
def update_server_list():
"""
Fetch the URLs defined in config.SERVER_LIST_URLS,
then attempt to parse the result for ip:port pairs.
"""
for url, pattern in config.SERVER_LIST_URLS:
fetch_server_list.apply_async(kwargs={'url': url, 'pattern': pattern})
@app.task(ignore_result=True, expires=5)
def query_listed_servers(time_delta, interval):
"""
Attempt to query listed servers every ``interval`` for a total of ``time_delta``.
Args:
time_delta - execution time (seconds/timedelta obj)
interval - time between query cycles (seconds/timedelta obj)
"""
# list of available servers
servers_listed = set()
# list of servers that have responded to a query
servers_live = set()
# enforce max number of concurrently running threads
semaphore = threading.Semaphore(config.MAX_STATUS_CONNECTIONS)
interval = utils.force_timedelta(interval).seconds
# calculate the time the task must be stopped at
stop_time = time.time() + utils.force_timedelta(time_delta).seconds
# cache queryset
servers = models.Server.objects.listed()
while time.time() < stop_time:
threads = []
for server in servers:
# keep track of servers being queried
servers_listed.add(server)
semaphore.acquire()
thread = ServerQueryThread(server=server, semaphore=semaphore, live=servers_live)
threads.append(thread)
# queue the thread
thread.start()
# block until all threads have finished
[thread.join() for thread in threads]
# sleep for a while
time.sleep(interval)
# servers that have replied at least once
live_servers_detected.send(sender=None, servers=servers_live)
# servers that have never replied by the end of the task
dead_servers_detected.send(sender=None, servers=servers_listed-servers_live)
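def _demo_bounded_fanout(items, worker, max_concurrency=2):
    """Illustrative stdlib-only sketch (hypothetical helper, unused above) of
    the semaphore pattern in query_listed_servers: at most ``max_concurrency``
    workers run at once, and each releases its slot when done."""
    semaphore = threading.Semaphore(max_concurrency)
    def run(item):
        try:
            worker(item)
        finally:
            semaphore.release()
    threads = []
    for item in items:
        semaphore.acquire()
        thread = threading.Thread(target=run, args=(item,))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()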
@app.task(ignore_result=True)
def update_popular(time_delta):
"""
Update the profile popular fields such as name, country, loadout, etc
that belong to players who have played just now or ``time_delta`` ago.
Args:
time_delta - time in past relative to the current time (seconds/timedelta obj)
"""
min_date = timezone.now() - utils.force_timedelta(time_delta)
queryset = (
models.Profile.objects
.select_for_update()
.select_related('game_last')
.filter(game_last__date_finished__gte=min_date)
)
with transaction.atomic():
# update the popular fields
for profile in queryset:
profile.update_popular()
profile.save()
@app.task(ignore_result=True)
def update_ranks(time_delta):
"""
Update Rank entries that belong to players who have played just now or ``time_delta`` ago.
Args:
time_delta - time in past relative to the current time (seconds/timedelta obj)
"""
min_date = timezone.now() - utils.force_timedelta(time_delta)
queryset = (
models.Profile.objects
.popular()
.select_related('game_last')
.filter(game_last__date_finished__gte=min_date)
)
for profile in queryset:
# aggregate stats relative to the last game's date
year = profile.last_seen.year
period = models.Rank.get_period_for_year(year)
with transaction.atomic():
# aggregate stats for the specified period
stats = profile.aggregate_mode_stats(models.Profile.SET_STATS_ALL, *period)
models.Rank.objects.store_many(stats, year, profile)
@app.task(ignore_result=True)
def update_positions(*args):
"""
Rank up year specific leaderboards.
Args:
*args - years
        Zero and negative values are interpreted relative to the current year:
        if 2014 is the current year, then 0 is 2014, -1 is 2013, and so on.
"""
years = []
current_year = timezone.now().year
for arg in args:
# relative to the current year (0, -1, -2)
if arg <= 0:
years.append(arg + current_year)
# year as is (2013, 2014, 2015)
else:
years.append(arg)
# use the current year as fallback
if not years:
years.append(current_year)
# rank up all leaderboard entries for every listed year
for year in years:
models.Rank.objects.rank(year)
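# Usage sketch: with 2014 as the current year, ``update_positions(0, -1)``
# ranks the 2014 and 2013 leaderboards, while ``update_positions(2012)``
# ranks 2012 as-is; ``update_positions()`` falls back to the current year.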
@app.task(ignore_result=True)
def update_server_country(pk):
"""
Detect and update the server's country.
"""
obj = models.Server.objects.get(pk=pk)
isp, created = models.ISP.objects.match_or_create(obj.ip)
    # the matched isp may be None, and its country may be empty
    if isp is not None and isp.country:
        models.Server.objects.filter(pk=pk).update(country=isp.country)
|
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = [
"setup_plotting",
"test_emcee_functions",
"test_dynesty_functions",
"test_pymc3_model",
"Angle",
]
import sys
import traceback
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as tt
import pymc3 as pm
import pymc3.distributions.transforms as tr
from pymc3.distributions import generate_samples
import emcee
if not emcee.__version__.startswith("3"):
raise ImportError(
"For emcee, version 3.0 or greater is needed. "
"You can install that using: "
"'pip install emcee==3.0rc2'"
)
def setup_plotting():
plt.style.use("default")
plt.rcParams["savefig.dpi"] = 100
plt.rcParams["figure.dpi"] = 100
plt.rcParams["font.size"] = 16
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = ["Liberation Sans"]
plt.rcParams["mathtext.fontset"] = "custom"
def emcee_loglike_ref(params, x, y):
bperp, theta, logs = params
m = np.tan(theta)
b = bperp / np.cos(theta)
model = m * x + b
return -0.5 * np.sum((y - model) ** 2 / np.exp(2 * logs) + 2 * logs)
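# The reference above is the Gaussian log-likelihood with noise scale
# s = exp(logs), dropping the constant -0.5 * N * log(2 * pi) term.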
def dynesty_prior_transform_ref(u):
bperp = -10 + 20 * u[0]
theta = -0.5 * np.pi + np.pi * u[1]
logs = -10 + 20 * u[2]
return np.array([bperp, theta, logs])
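# Sanity check for the transform above: the unit-cube midpoint maps to the
# center of each prior range, i.e.
# dynesty_prior_transform_ref(np.array([0.5, 0.5, 0.5])) gives [0.0, 0.0, 0.0].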
def test_emcee_get_params(emcee_get_params_impl):
for m, b, logs in product(
[-3.5, 5.0, 0.0], [-9.7, 0.0, 1e-4, 5.0], [0.5, -2.0, 5.0]
):
theta = np.arctan(m)
bperp = b * np.cos(theta)
mtest, btest, logstest = emcee_get_params_impl(
np.array([bperp, theta, logs])
)
if not np.allclose(m, mtest):
raise ValueError("Incorrect m calculation")
if not np.allclose(b, btest):
raise ValueError("Incorrect b calculation")
if not np.allclose(logs, logstest):
raise ValueError("Incorrect logs calculation")
def test_emcee_logprior(emcee_logprior_impl):
ref = emcee_logprior_impl(np.zeros(3))
for i, name in enumerate(["bperp", "theta", "logs"]):
coord = np.zeros(3)
for delta in [0.1, -1.05]:
coord[i] = delta
val = emcee_logprior_impl(coord)
if not np.allclose(ref, val):
raise ValueError("Incorrect logprior for {0}".format(name))
for delta in [11.1, -15.05]:
coord[i] = delta
val = emcee_logprior_impl(coord)
if not (np.isinf(val) and val < 0):
raise ValueError(
(
"logprior for {0} should return -np.inf "
"outside of bounds"
).format(name)
)
val = emcee_logprior_impl(np.array([12.0, 0.5 * np.pi + 0.1, -11.6]))
if not (np.isinf(val) and val < 0):
raise ValueError("logprior should return -np.inf outside of bounds")
def test_emcee_loglike(emcee_loglike_impl, x, y):
ref_zero = emcee_loglike_ref(np.zeros(3), x, y)
impl_zero = emcee_loglike_impl(np.zeros(3))
for m, b, logs in product(
[-3.5, 5.0, 0.0], [-9.7, 0.0, 1e-4, 5.0], [0.5, -2.0, 5.0]
):
theta = np.arctan(m)
bperp = b * np.cos(theta)
coords = np.array([bperp, theta, logs])
ref_val = emcee_loglike_ref(coords, x, y)
impl_val = emcee_loglike_impl(coords)
if not (
np.isfinite(impl_val)
and np.allclose(ref_val - ref_zero, impl_val - impl_zero)
):
raise ValueError(
"Incorrect loglike for parameters: {0}".format(coords)
)
def test_emcee_logprob(emcee_logprob_impl, x, y):
ref_zero = emcee_loglike_ref(np.zeros(3), x, y)
impl_zero = emcee_logprob_impl(np.zeros(3))
for m, b, logs in product(
[-3.5, 5.0, 0.0], [-9.7, 0.0, 1e-4, 5.0], [0.5, -2.0, 5.0]
):
theta = np.arctan(m)
bperp = b * np.cos(theta)
coords = np.array([bperp, theta, logs])
ref_val = emcee_loglike_ref(coords, x, y)
impl_val = emcee_logprob_impl(coords)
if not (
np.isfinite(impl_val)
and np.allclose(ref_val - ref_zero, impl_val - impl_zero)
):
raise ValueError(
"Incorrect logprob for parameters: {0}".format(coords)
)
impl_val = emcee_logprob_impl(np.array([-12.0, 0.1, 5.6]))
if not (np.isinf(impl_val) and impl_val < 0):
raise ValueError("logprob should return -np.inf outside of bounds")
def test_dynesty_prior_transform(dynesty_prior_transform_impl):
for u in product(
np.linspace(0, 1, 5), np.linspace(0, 1, 10), np.linspace(0, 1, 12)
):
ref = dynesty_prior_transform_ref(np.array(u))
impl = dynesty_prior_transform_impl(np.array(u))
if not np.allclose(ref, impl):
raise ValueError(
"Invalid prior_transform for coordinates: {0}".format(u)
)
def test_function(name, test_func, *args):
sys.stderr.write("Testing '{0}'... ".format(name))
try:
test_func(*args)
except Exception:
sys.stderr.write("FAILED with the following error:\n")
traceback.print_exc()
else:
sys.stderr.write("PASSED! :)\n")
def test_emcee_functions(
emcee_get_params_impl,
emcee_logprior_impl,
emcee_loglike_impl,
emcee_logprob_impl,
x,
y,
):
test_function(
"emcee_get_params", test_emcee_get_params, emcee_get_params_impl
)
test_function("emcee_logprior", test_emcee_logprior, emcee_logprior_impl)
test_function(
"emcee_loglike", test_emcee_loglike, emcee_loglike_impl, x, y
)
test_function(
"emcee_logprob", test_emcee_logprob, emcee_logprob_impl, x, y
)
def test_dynesty_functions(
dynesty_get_params_impl,
dynesty_prior_transform_impl,
dynesty_loglike_impl,
x,
y,
):
test_function(
"dynesty_get_params", test_emcee_get_params, dynesty_get_params_impl
)
test_function(
"dynesty_prior_transform",
test_dynesty_prior_transform,
dynesty_prior_transform_impl,
)
test_function(
"dynesty_loglike", test_emcee_loglike, dynesty_loglike_impl, x, y
)
def get_args_for_theano_function(point=None, model=None):
model = pm.modelcontext(model)
if point is None:
point = model.test_point
return [point[k.name] for k in model.vars]
def get_theano_function_for_var(var, model=None, **kwargs):
model = pm.modelcontext(model)
kwargs["on_unused_input"] = kwargs.get("on_unused_input", "ignore")
return theano.function(model.vars, var, **kwargs)
def _test_pymc3_model(pymc3_model, x, y):
named_vars = pymc3_model.named_vars
for name in ["bperp", "theta", "logs"]:
if name not in named_vars:
raise ValueError("Variable {0} missing from model".format(name))
if name + "_interval__" not in named_vars:
raise ValueError(
"Variable {0} should be a pm.Uniform distribution".format(name)
)
for name in ["m", "b"]:
if name not in named_vars:
raise ValueError(
"pm.Deterministic variable {0} missing from model".format(name)
)
observed = []
for k, v in named_vars.items():
if isinstance(v, pm.model.ObservedRV):
observed.append((k, v))
if len(observed) != 1:
raise ValueError(
"There should be exactly one observed variable, not {0}".format(
len(observed)
)
)
with pm.Model() as pymc3_model_ref:
bperp = pm.Uniform("bperp", lower=-10, upper=10)
theta = pm.Uniform("theta", lower=-0.5 * np.pi, upper=0.5 * np.pi)
logs = pm.Uniform("logs", lower=-10, upper=10)
m = pm.Deterministic("m", pm.math.tan(theta))
b = pm.Deterministic("b", bperp / pm.math.cos(theta))
model = m * x + b
pm.Normal("loglike", mu=model, sd=pm.math.exp(logs), observed=y)
varlist_ref = pymc3_model_ref.unobserved_RVs
names_ref = [v.name for v in varlist_ref]
func_ref = get_theano_function_for_var(varlist_ref, model=pymc3_model_ref)
varlist_impl = pymc3_model.unobserved_RVs
names_impl = [v.name for v in varlist_impl]
func_impl = get_theano_function_for_var(varlist_impl, model=pymc3_model)
for vec in product(
np.linspace(-100, 100, 5),
np.linspace(-100, 100, 10),
np.linspace(-100, 100, 12),
):
args_ref = get_args_for_theano_function(
{
"bperp_interval__": vec[0],
"theta_interval__": vec[1],
"logs_interval__": vec[2],
},
model=pymc3_model_ref,
)
args_impl = get_args_for_theano_function(
{
"bperp_interval__": vec[0],
"theta_interval__": vec[1],
"logs_interval__": vec[2],
},
model=pymc3_model,
)
ref = dict(zip(names_ref, func_ref(*args_ref)))
impl = dict(zip(names_impl, func_impl(*args_impl)))
for k, v in ref.items():
if k not in impl:
raise ValueError("Parameter {0} not in model".format(k))
if not np.allclose(v, impl[k]):
raise ValueError(
"Invalid calculation of parameter {0}".format(k)
)
def test_pymc3_model(pymc3_model, x, y):
test_function("pymc3_model", _test_pymc3_model, pymc3_model, x, y)
class AngleTransform(tr.Transform):
"""Reference: exoplanet.dfm.io"""
name = "angle"
def __init__(self, *args, **kwargs):
self.regularized = kwargs.pop("regularized", 10.0)
super(AngleTransform, self).__init__(*args, **kwargs)
def backward(self, y):
return tt.arctan2(y[0], y[1])
def forward(self, x):
return tt.concatenate(
(tt.shape_padleft(tt.sin(x)), tt.shape_padleft(tt.cos(x))), axis=0
)
def forward_val(self, x, point=None):
return np.array([np.sin(x), np.cos(x)])
def jacobian_det(self, y):
sm = tt.sum(tt.square(y), axis=0)
if self.regularized is not None:
return self.regularized * tt.log(sm) - 0.5 * sm
return -0.5 * sm
class Angle(pm.Continuous):
"""An angle constrained to be in the range -pi to pi
The actual sampling is performed in the two dimensional vector space
``(sin(theta), cos(theta))`` so that the sampler doesn't see a
discontinuity at pi.
"""
def __init__(self, *args, **kwargs):
transform = kwargs.pop("transform", None)
if transform is None:
if "regularized" in kwargs:
transform = AngleTransform(
regularized=kwargs.pop("regularized")
)
else:
transform = AngleTransform()
kwargs["transform"] = transform
shape = kwargs.get("shape", None)
if shape is None:
testval = 0.0
else:
testval = np.zeros(shape)
kwargs["testval"] = kwargs.pop("testval", testval)
super(Angle, self).__init__(*args, **kwargs)
def _random(self, size=None):
return np.random.uniform(-np.pi, np.pi, size)
def random(self, point=None, size=None):
return generate_samples(
self._random,
dist_shape=self.shape,
broadcast_shape=self.shape,
size=size,
)
def logp(self, value):
return tt.zeros_like(tt.as_tensor_variable(value))
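def _example_angle_usage():  # pragma: no cover
    """A minimal usage sketch (not exercised by the tests): declare an
    ``Angle`` free variable so the sampler works in (sin, cos) space."""
    with pm.Model() as model:
        Angle("theta")
    return model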
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for XML reports from coverage.py."""
import os
import re
import coverage
from tests.coveragetest import CoverageTest
class XmlTestHelpers(CoverageTest):
"""Methods to use from XML tests."""
def run_mycode(self):
"""Run mycode.py, so we can report on it."""
self.make_file("mycode.py", "print('hello')\n")
self.run_command("coverage run mycode.py")
def run_doit(self):
"""Construct a simple sub-package."""
self.make_file("sub/__init__.py")
self.make_file("sub/doit.py", "print('doit!')")
self.make_file("main.py", "import sub.doit")
cov = coverage.Coverage()
self.start_import_stop(cov, "main")
return cov
def make_tree(self, width, depth, curdir="."):
"""Make a tree of packages.
Makes `width` directories, named d0 .. d{width-1}. Each directory has
__init__.py, and `width` files, named f0.py .. f{width-1}.py. Each
directory also has `width` sub-directories, in the same fashion, until
a depth of `depth` is reached.
"""
if depth == 0:
return
def here(p):
"""A path for `p` in our currently interesting directory."""
return os.path.join(curdir, p)
for i in range(width):
next_dir = here("d{0}".format(i))
self.make_tree(width, depth-1, next_dir)
if curdir != ".":
self.make_file(here("__init__.py"), "")
for i in range(width):
filename = here("f{0}.py".format(i))
self.make_file(filename, "# {0}\n".format(filename))
class XmlReportTest(XmlTestHelpers, CoverageTest):
"""Tests of the XML reports from coverage.py."""
def test_default_file_placement(self):
self.run_mycode()
self.run_command("coverage xml")
self.assert_exists("coverage.xml")
def test_argument_affects_xml_placement(self):
self.run_mycode()
self.run_command("coverage xml -o put_it_there.xml")
self.assert_doesnt_exist("coverage.xml")
self.assert_exists("put_it_there.xml")
def test_config_file_directory_does_not_exist(self):
self.run_mycode()
self.run_command("coverage xml -o nonexistent/put_it_there.xml")
self.assert_doesnt_exist("coverage.xml")
self.assert_doesnt_exist("put_it_there.xml")
self.assert_exists("nonexistent/put_it_there.xml")
def test_config_affects_xml_placement(self):
self.run_mycode()
self.make_file(".coveragerc", "[xml]\noutput = xml.out\n")
self.run_command("coverage xml")
self.assert_doesnt_exist("coverage.xml")
self.assert_exists("xml.out")
def test_no_data(self):
# https://bitbucket.org/ned/coveragepy/issue/210
self.run_command("coverage xml")
self.assert_doesnt_exist("coverage.xml")
def test_no_source(self):
# Written while investigating a bug, might as well keep it.
# https://bitbucket.org/ned/coveragepy/issue/208
self.make_file("innocuous.py", "a = 4")
cov = coverage.Coverage()
self.start_import_stop(cov, "innocuous")
os.remove("innocuous.py")
cov.xml_report(ignore_errors=True)
self.assert_exists("coverage.xml")
def test_filename_format_showing_everything(self):
cov = self.run_doit()
cov.xml_report(outfile="-")
xml = self.stdout()
doit_line = re_line(xml, "class.*doit")
self.assertIn('filename="sub/doit.py"', doit_line)
def test_filename_format_including_filename(self):
cov = self.run_doit()
cov.xml_report(["sub/doit.py"], outfile="-")
xml = self.stdout()
doit_line = re_line(xml, "class.*doit")
self.assertIn('filename="sub/doit.py"', doit_line)
def test_filename_format_including_module(self):
cov = self.run_doit()
import sub.doit # pylint: disable=import-error
cov.xml_report([sub.doit], outfile="-")
xml = self.stdout()
doit_line = re_line(xml, "class.*doit")
self.assertIn('filename="sub/doit.py"', doit_line)
def test_reporting_on_nothing(self):
# Used to raise a zero division error:
# https://bitbucket.org/ned/coveragepy/issue/250
self.make_file("empty.py", "")
cov = coverage.Coverage()
empty = self.start_import_stop(cov, "empty")
cov.xml_report([empty], outfile="-")
xml = self.stdout()
empty_line = re_line(xml, "class.*empty")
self.assertIn('filename="empty.py"', empty_line)
self.assertIn('line-rate="1"', empty_line)
def test_empty_file_is_100_not_0(self):
# https://bitbucket.org/ned/coveragepy/issue/345
cov = self.run_doit()
cov.xml_report(outfile="-")
xml = self.stdout()
init_line = re_line(xml, 'filename="sub/__init__.py"')
self.assertIn('line-rate="1"', init_line)
class XmlPackageStructureTest(XmlTestHelpers, CoverageTest):
"""Tests about the package structure reported in the coverage.xml file."""
def package_and_class_tags(self, cov):
"""Run an XML report on `cov`, and get the package and class tags."""
self.captured_stdout.truncate(0)
cov.xml_report(outfile="-")
packages_and_classes = re_lines(self.stdout(), r"<package |<class ")
scrubs = r' branch-rate="0"| complexity="0"| line-rate="[\d.]+"'
return clean("".join(packages_and_classes), scrubs)
def assert_package_and_class_tags(self, cov, result):
"""Check the XML package and class tags from `cov` match `result`."""
self.assertMultiLineEqual(
self.package_and_class_tags(cov),
clean(result)
)
def test_package_names(self):
self.make_tree(width=1, depth=3)
self.make_file("main.py", """\
from d0.d0 import f0
""")
cov = coverage.Coverage(source=".")
self.start_import_stop(cov, "main")
self.assert_package_and_class_tags(cov, """\
<package name=".">
<class filename="main.py" name="main.py">
<package name="d0">
<class filename="d0/__init__.py" name="__init__.py">
<class filename="d0/f0.py" name="f0.py">
<package name="d0.d0">
<class filename="d0/d0/__init__.py" name="__init__.py">
<class filename="d0/d0/f0.py" name="f0.py">
""")
def test_package_depth(self):
self.make_tree(width=1, depth=4)
self.make_file("main.py", """\
from d0.d0 import f0
""")
cov = coverage.Coverage(source=".")
self.start_import_stop(cov, "main")
cov.config["xml:package_depth"] = 1
self.assert_package_and_class_tags(cov, """\
<package name=".">
<class filename="main.py" name="main.py">
<package name="d0">
<class filename="d0/__init__.py" name="__init__.py">
<class filename="d0/d0/__init__.py" name="d0/__init__.py">
<class filename="d0/d0/d0/__init__.py" name="d0/d0/__init__.py">
<class filename="d0/d0/d0/f0.py" name="d0/d0/f0.py">
<class filename="d0/d0/f0.py" name="d0/f0.py">
<class filename="d0/f0.py" name="f0.py">
""")
cov.config["xml:package_depth"] = 2
self.assert_package_and_class_tags(cov, """\
<package name=".">
<class filename="main.py" name="main.py">
<package name="d0">
<class filename="d0/__init__.py" name="__init__.py">
<class filename="d0/f0.py" name="f0.py">
<package name="d0.d0">
<class filename="d0/d0/__init__.py" name="__init__.py">
<class filename="d0/d0/d0/__init__.py" name="d0/__init__.py">
<class filename="d0/d0/d0/f0.py" name="d0/f0.py">
<class filename="d0/d0/f0.py" name="f0.py">
""")
cov.config["xml:package_depth"] = 3
self.assert_package_and_class_tags(cov, """\
<package name=".">
<class filename="main.py" name="main.py">
<package name="d0">
<class filename="d0/__init__.py" name="__init__.py">
<class filename="d0/f0.py" name="f0.py">
<package name="d0.d0">
<class filename="d0/d0/__init__.py" name="__init__.py">
<class filename="d0/d0/f0.py" name="f0.py">
<package name="d0.d0.d0">
<class filename="d0/d0/d0/__init__.py" name="__init__.py">
<class filename="d0/d0/d0/f0.py" name="f0.py">
""")
def re_lines(text, pat):
"""Return a list of lines that match `pat` in the string `text`."""
lines = [l for l in text.splitlines(True) if re.search(pat, l)]
return lines
def re_line(text, pat):
"""Return the one line in `text` that matches regex `pat`."""
lines = re_lines(text, pat)
assert len(lines) == 1
return lines[0]
def clean(text, scrub=None):
"""Clean text to prepare it for comparison.
Remove text matching `scrub`, and leading whitespace. Convert backslashes
to forward slashes.
"""
if scrub:
text = re.sub(scrub, "", text)
text = re.sub(r"(?m)^\s+", "", text)
text = re.sub(r"\\", "/", text)
return text
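# For example, clean('    <class filename="sub\\doit.py">\n') returns
# '<class filename="sub/doit.py">\n': the indentation is stripped and the
# backslash becomes a forward slash.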
|
|
# -*- coding: utf-8 -*-
"""
Tests for the functions sta.spike_triggered_average and
sta.spike_field_coherence.
:copyright: Copyright 2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import math
import numpy as np
import scipy
from numpy.testing import assert_array_equal
from numpy.testing.utils import assert_array_almost_equal
from neo import AnalogSignalArray, SpikeTrain
from elephant.conversion import BinnedSpikeTrain
import quantities as pq
from quantities import ms, mV, Hz
import elephant.sta as sta
import warnings
class sta_TestCase(unittest.TestCase):
def setUp(self):
self.asiga0 = AnalogSignalArray(np.array([
np.sin(np.arange(0, 20 * math.pi, 0.1))]).T,
units='mV', sampling_rate=10 / ms)
self.asiga1 = AnalogSignalArray(np.array([
np.sin(np.arange(0, 20 * math.pi, 0.1)),
np.cos(np.arange(0, 20 * math.pi, 0.1))]).T,
units='mV', sampling_rate=10 / ms)
self.asiga2 = AnalogSignalArray(np.array([
np.sin(np.arange(0, 20 * math.pi, 0.1)),
np.cos(np.arange(0, 20 * math.pi, 0.1)),
np.tan(np.arange(0, 20 * math.pi, 0.1))]).T,
units='mV', sampling_rate=10 / ms)
self.st0 = SpikeTrain(
[9 * math.pi, 10 * math.pi, 11 * math.pi, 12 * math.pi],
units='ms', t_stop=self.asiga0.t_stop)
self.lst = [SpikeTrain(
[9 * math.pi, 10 * math.pi, 11 * math.pi, 12 * math.pi],
units='ms', t_stop=self.asiga1.t_stop),
SpikeTrain([30, 35, 40], units='ms', t_stop=self.asiga1.t_stop)]
#***********************************************************************
#************************ Test for typical values **********************
def test_spike_triggered_average_with_n_spikes_on_constant_function(self):
'''Signal should average to the input'''
const = 13.8
x = const * np.ones(201)
asiga = AnalogSignalArray(
np.array([x]).T, units='mV', sampling_rate=10 / ms)
st = SpikeTrain([3, 5.6, 7, 7.1, 16, 16.3], units='ms', t_stop=20)
window_starttime = -2 * ms
window_endtime = 2 * ms
STA = sta.spike_triggered_average(
asiga, st, (window_starttime, window_endtime))
a = int(((window_endtime - window_starttime) *
asiga.sampling_rate).simplified)
cutout = asiga[0: a]
cutout.t_start = window_starttime
assert_array_almost_equal(STA, cutout, 12)
def test_spike_triggered_average_with_shifted_sin_wave(self):
'''Signal should average to zero'''
STA = sta.spike_triggered_average(
self.asiga0, self.st0, (-4 * ms, 4 * ms))
target = 5e-2 * mV
self.assertEqual(np.abs(STA).max().dimensionality.simplified,
pq.Quantity(1, "V").dimensionality.simplified)
self.assertLess(np.abs(STA).max(), target)
def test_only_one_spike(self):
'''The output should be the same as the input'''
x = np.arange(0, 20, 0.1)
y = x**2
sr = 10 / ms
z = AnalogSignalArray(np.array([y]).T, units='mV', sampling_rate=sr)
spiketime = 8 * ms
spiketime_in_ms = int((spiketime / ms).simplified)
st = SpikeTrain([spiketime_in_ms], units='ms', t_stop=20)
window_starttime = -3 * ms
window_endtime = 5 * ms
STA = sta.spike_triggered_average(
z, st, (window_starttime, window_endtime))
cutout = z[int(((spiketime + window_starttime) * sr).simplified):
int(((spiketime + window_endtime) * sr).simplified)]
cutout.t_start = window_starttime
assert_array_equal(STA, cutout)
def test_usage_of_spikes(self):
st = SpikeTrain([16.5 * math.pi, 17.5 * math.pi,
18.5 * math.pi, 19.5 * math.pi], units='ms', t_stop=20 * math.pi)
STA = sta.spike_triggered_average(
self.asiga0, st, (-math.pi * ms, math.pi * ms))
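        # the spike at 19.5*pi ms is unused: its +/- pi window would extend
        # past the signal end at 20*pi ms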
self.assertEqual(STA.annotations['used_spikes'], 3)
self.assertEqual(STA.annotations['unused_spikes'], 1)
#***********************************************************************
#**** Test for an invalid value, to check that the function raises *****
#********* an exception or returns an error code ***********************
def test_analog_signal_of_wrong_type(self):
'''Analog signal given as list, but must be AnalogSignalArray'''
asiga = [0, 1, 2, 3, 4]
self.assertRaises(TypeError, sta.spike_triggered_average,
asiga, self.st0, (-2 * ms, 2 * ms))
def test_spiketrain_of_list_type_in_wrong_sense(self):
st = [10, 11, 12]
self.assertRaises(TypeError, sta.spike_triggered_average,
self.asiga0, st, (1 * ms, 2 * ms))
def test_spiketrain_of_nonlist_and_nonspiketrain_type(self):
st = (10, 11, 12)
self.assertRaises(TypeError, sta.spike_triggered_average,
self.asiga0, st, (1 * ms, 2 * ms))
def test_forgotten_AnalogSignalArray_argument(self):
self.assertRaises(TypeError, sta.spike_triggered_average,
self.st0, (-2 * ms, 2 * ms))
def test_one_smaller_nrspiketrains_smaller_nranalogsignals(self):
'''Number of spiketrains between 1 and number of analogsignals'''
self.assertRaises(ValueError, sta.spike_triggered_average,
self.asiga2, self.lst, (-2 * ms, 2 * ms))
def test_more_spiketrains_than_analogsignals_forbidden(self):
self.assertRaises(ValueError, sta.spike_triggered_average,
self.asiga0, self.lst, (-2 * ms, 2 * ms))
def test_spike_earlier_than_analogsignal(self):
st = SpikeTrain([-1 * math.pi, 2 * math.pi],
units='ms', t_start=-2 * math.pi, t_stop=20 * math.pi)
self.assertRaises(ValueError, sta.spike_triggered_average,
self.asiga0, st, (-2 * ms, 2 * ms))
def test_spike_later_than_analogsignal(self):
st = SpikeTrain(
[math.pi, 21 * math.pi], units='ms', t_stop=25 * math.pi)
self.assertRaises(ValueError, sta.spike_triggered_average,
self.asiga0, st, (-2 * ms, 2 * ms))
def test_impossible_window(self):
self.assertRaises(ValueError, sta.spike_triggered_average,
self.asiga0, self.st0, (-2 * ms, -5 * ms))
def test_window_larger_than_signal(self):
self.assertRaises(ValueError, sta.spike_triggered_average,
self.asiga0, self.st0, (-15 * math.pi * ms, 15 * math.pi * ms))
def test_wrong_window_starttime_unit(self):
self.assertRaises(TypeError, sta.spike_triggered_average,
self.asiga0, self.st0, (-2 * mV, 2 * ms))
def test_wrong_window_endtime_unit(self):
self.assertRaises(TypeError, sta.spike_triggered_average,
self.asiga0, self.st0, (-2 * ms, 2 * Hz))
def test_window_borders_as_complex_numbers(self):
self.assertRaises(TypeError, sta.spike_triggered_average, self.asiga0,
self.st0, ((-2 * math.pi + 3j) * ms, (2 * math.pi + 3j) * ms))
#***********************************************************************
#**** Test for an empty value (where the argument is a list, array, ****
#********* vector or other container datatype). ************************
def test_empty_analogsignal(self):
asiga = AnalogSignalArray([], units='mV', sampling_rate=10 / ms)
st = SpikeTrain([5], units='ms', t_stop=10)
self.assertRaises(ValueError, sta.spike_triggered_average,
asiga, st, (-1 * ms, 1 * ms))
def test_one_spiketrain_empty(self):
'''Test for one empty SpikeTrain, but existing spikes in other'''
st = [SpikeTrain(
[9 * math.pi, 10 * math.pi, 11 * math.pi, 12 * math.pi],
units='ms', t_stop=self.asiga1.t_stop),
SpikeTrain([], units='ms', t_stop=self.asiga1.t_stop)]
STA = sta.spike_triggered_average(self.asiga1, st, (-1 * ms, 1 * ms))
cmp_array = AnalogSignalArray(np.array([np.zeros(20, dtype=float)]).T,
units='mV', sampling_rate=10 / ms)
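        # dividing the zero-filled array by zero yields an all-NaN signal
        # (0. / 0. is NaN), matching the STA column for the empty spike train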
cmp_array = cmp_array / 0.
cmp_array.t_start = -1 * ms
assert_array_equal(STA[:, 1], cmp_array[:, 0])
def test_all_spiketrains_empty(self):
st = SpikeTrain([], units='ms', t_stop=self.asiga1.t_stop)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger warnings.
STA = sta.spike_triggered_average(
self.asiga1, st, (-1 * ms, 1 * ms))
self.assertEqual("No spike at all was either found or used "
"for averaging", str(w[-1].message))
nan_array = np.empty(20)
nan_array.fill(np.nan)
cmp_array = AnalogSignalArray(np.array([nan_array, nan_array]).T,
units='mV', sampling_rate=10 / ms)
assert_array_equal(STA, cmp_array)
# =========================================================================
# Tests for new scipy version (with scipy.signal.coherence)
# =========================================================================
@unittest.skipIf(not hasattr(scipy.signal, 'coherence'), "Please update scipy "
"to a version >= 0.16")
class sfc_TestCase_new_scipy(unittest.TestCase):
def setUp(self):
# standard testsignals
tlen0 = 100 * pq.s
f0 = 20. * pq.Hz
fs0 = 1 * pq.ms
t0 = np.arange(
0, tlen0.rescale(pq.s).magnitude,
fs0.rescale(pq.s).magnitude) * pq.s
self.anasig0 = AnalogSignalArray(
np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
self.st0 = SpikeTrain(
np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms,
t_start=0 * pq.ms, t_stop=tlen0)
self.bst0 = BinnedSpikeTrain(self.st0, binsize=fs0)
# shortened analogsignals
self.anasig1 = self.anasig0.time_slice(1 * pq.s, None)
self.anasig2 = self.anasig0.time_slice(None, 99 * pq.s)
# increased sampling frequency
fs1 = 0.1 * pq.ms
self.anasig3 = AnalogSignalArray(
np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
units=pq.mV, t_start=0 * pq.ms, sampling_period=fs1)
self.bst1 = BinnedSpikeTrain(
self.st0.time_slice(self.anasig3.t_start, self.anasig3.t_stop),
binsize=fs1)
# analogsignal containing multiple traces
self.anasig4 = AnalogSignalArray(
np.array([
np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
np.sin(4 * np.pi * (f0 * t0).simplified.magnitude)]).
transpose(),
units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
# shortened spike train
self.st3 = SpikeTrain(
np.arange(
(tlen0.rescale(pq.ms).magnitude * .25),
(tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms,
t_start=0 * pq.ms, t_stop=tlen0)
self.bst3 = BinnedSpikeTrain(self.st3, binsize=fs0)
self.st4 = SpikeTrain(np.arange(
(tlen0.rescale(pq.ms).magnitude * .25),
(tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms,
t_start=5 * fs0, t_stop=tlen0 - 5 * fs0)
self.bst4 = BinnedSpikeTrain(self.st4, binsize=fs0)
# spike train with incompatible binsize
self.bst5 = BinnedSpikeTrain(self.st3, binsize=fs0 * 2.)
# spike train with same binsize as the analog signal, but with
# bin edges not aligned to the time axis of the analog signal
self.bst6 = BinnedSpikeTrain(
self.st3, binsize=fs0, t_start=4.5 * fs0, t_stop=tlen0 - 4.5 * fs0)
# =========================================================================
# Tests for correct input handling
# =========================================================================
def test_wrong_input_type(self):
self.assertRaises(TypeError,
sta.spike_field_coherence,
np.array([1, 2, 3]), self.bst0)
self.assertRaises(TypeError,
sta.spike_field_coherence,
self.anasig0, [1, 2, 3])
self.assertRaises(ValueError,
sta.spike_field_coherence,
self.anasig0.duplicate_with_new_array([]), self.bst0)
def test_start_stop_times_out_of_range(self):
self.assertRaises(ValueError,
sta.spike_field_coherence,
self.anasig1, self.bst0)
self.assertRaises(ValueError,
sta.spike_field_coherence,
self.anasig2, self.bst0)
def test_non_matching_input_binning(self):
self.assertRaises(ValueError,
sta.spike_field_coherence,
self.anasig0, self.bst1)
def test_incompatible_spiketrain_analogsignal(self):
# These spike trains have incompatible binning (binsize or alignment to
# time axis of analog signal)
self.assertRaises(ValueError,
sta.spike_field_coherence,
self.anasig0, self.bst5)
self.assertRaises(ValueError,
sta.spike_field_coherence,
self.anasig0, self.bst6)
def test_signal_dimensions(self):
# single analogsignal trace and single spike train
s_single, f_single = sta.spike_field_coherence(self.anasig0, self.bst0)
self.assertEqual(len(f_single.shape), 1)
self.assertEqual(len(s_single.shape), 1)
# multiple analogsignal traces and single spike train
s_multi, f_multi = sta.spike_field_coherence(self.anasig4, self.bst0)
self.assertEqual(len(f_multi.shape), 1)
self.assertEqual(len(s_multi.shape), 2)
# frequencies are identical since same sampling frequency was used
# in both cases and data length is the same
assert_array_equal(f_single, f_multi)
# coherences of s_single and first signal in s_multi are identical,
# since first analogsignal trace in anasig4 is same as in anasig0
assert_array_equal(s_single, s_multi[:, 0])
def test_non_binned_spiketrain_input(self):
s, f = sta.spike_field_coherence(self.anasig0, self.st0)
f_ind = np.where(f >= 19.)[0][0]
max_ind = np.argmax(s[1:]) + 1
self.assertEqual(f_ind, max_ind)
self.assertAlmostEqual(s[f_ind], 1., delta=0.01)
# =========================================================================
# Tests for correct return values
# =========================================================================
def test_spike_field_coherence_perfect_coherence(self):
# check for detection of 20Hz peak in anasig0/bst0
s, f = sta.spike_field_coherence(
self.anasig0, self.bst0, window='boxcar')
f_ind = np.where(f >= 19.)[0][0]
max_ind = np.argmax(s[1:]) + 1
self.assertEqual(f_ind, max_ind)
self.assertAlmostEqual(s[f_ind], 1., delta=0.01)
def test_output_frequencies(self):
nfft = 256
_, f = sta.spike_field_coherence(self.anasig3, self.bst1, nfft=nfft)
# check number of frequency samples
self.assertEqual(len(f), nfft / 2 + 1)
# check values of frequency samples
assert_array_almost_equal(
f, np.linspace(
0, self.anasig3.sampling_rate.rescale('Hz').magnitude / 2,
nfft / 2 + 1) * pq.Hz)
def test_short_spiketrain(self):
# this spike train has the same length as anasig0
s1, f1 = sta.spike_field_coherence(
self.anasig0, self.bst3, window='boxcar')
# this spike train has the same spikes as above, but is shorter than
# anasig0
s2, f2 = sta.spike_field_coherence(
self.anasig0, self.bst4, window='boxcar')
# the results above should be the same, nevertheless
assert_array_equal(s1.magnitude, s2.magnitude)
assert_array_equal(f1.magnitude, f2.magnitude)
# =========================================================================
# Tests for old scipy version (without scipy.signal.coherence)
# =========================================================================
@unittest.skipIf(hasattr(scipy.signal, 'coherence'), 'Applies only for old '
'scipy versions (<0.16)')
class sfc_TestCase_old_scipy(unittest.TestCase):
def setUp(self):
# standard testsignals
tlen0 = 100 * pq.s
f0 = 20. * pq.Hz
fs0 = 1 * pq.ms
t0 = np.arange(
0, tlen0.rescale(pq.s).magnitude,
fs0.rescale(pq.s).magnitude) * pq.s
self.anasig0 = AnalogSignalArray(
np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
self.st0 = SpikeTrain(
np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms,
t_start=0 * pq.ms, t_stop=tlen0)
self.bst0 = BinnedSpikeTrain(self.st0, binsize=fs0)
def test_old_scipy_version(self):
self.assertRaises(AttributeError, sta.spike_field_coherence,
self.anasig0, self.bst0)
if __name__ == '__main__':
unittest.main()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools for deblending overlapping sources labeled in
a segmentation image.
"""
import warnings
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from .core import SegmentationImage
from .detect import _make_binary_structure, _detect_sources
from ..utils._convolution import _filter_data
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['deblend_sources']
def deblend_sources(data, segment_img, npixels, kernel=None, labels=None,
nlevels=32, contrast=0.001, mode='exponential',
connectivity=8, relabel=True):
"""
Deblend overlapping sources labeled in a segmentation image.
Sources are deblended using a combination of multi-thresholding and
`watershed segmentation
<https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_. In
order to deblend sources, there must be a saddle between them.
Parameters
----------
data : array_like
The data array.
segment_img : `~photutils.segmentation.SegmentationImage` or array_like (int)
A segmentation image, either as a
`~photutils.segmentation.SegmentationImage` object or an
`~numpy.ndarray`, with the same shape as ``data`` where sources
are labeled by different positive integer values. A value of
zero is reserved for the background.
npixels : int
The number of connected pixels, each greater than ``threshold``,
that an object must have to be detected. ``npixels`` must be a
positive integer.
kernel : array-like or `~astropy.convolution.Kernel2D`, optional
The array of the kernel used to filter the image before
thresholding. Filtering the image will smooth the noise and
maximize detectability of objects with a shape similar to the
kernel.
labels : int or array-like of int, optional
The label numbers to deblend. If `None` (default), then all
labels in the segmentation image will be deblended.
nlevels : int, optional
The number of multi-thresholding levels to use. Each source
will be re-thresholded at ``nlevels`` levels spaced
exponentially or linearly (see the ``mode`` keyword) between its
minimum and maximum values within the source segment.
contrast : float, optional
The fraction of the total (blended) source flux that a local
peak must have (at any one of the multi-thresholds) to be
considered as a separate object. ``contrast`` must be between 0
and 1, inclusive. If ``contrast = 0`` then every local peak
will be made a separate object (maximum deblending). If
``contrast = 1`` then no deblending will occur. The default is
0.001, which will deblend sources with a 7.5 magnitude
difference.
mode : {'exponential', 'linear'}, optional
The mode used in defining the spacing between the
multi-thresholding levels (see the ``nlevels`` keyword). The
default is 'exponential'.
connectivity : {8, 4}, optional
The type of pixel connectivity used in determining how pixels
are grouped into a detected source. The options are 8 (default)
or 4. 8-connected pixels touch along their edges or corners.
4-connected pixels touch along their edges. For reference,
SourceExtractor uses 8-connected pixels.
relabel : bool
If `True` (default), then the segmentation image will be
relabeled such that the labels are in consecutive order starting
from 1.
Returns
-------
segment_image : `~photutils.segmentation.SegmentationImage`
A segmentation image, with the same shape as ``data``, where
sources are marked by different positive integer values. A
value of zero is reserved for the background.
See Also
--------
:func:`photutils.segmentation.detect_sources`
"""
if not isinstance(segment_img, SegmentationImage):
segment_img = SegmentationImage(segment_img)
if segment_img.shape != data.shape:
raise ValueError('The data and segmentation image must have '
'the same shape')
if labels is None:
labels = segment_img.labels
labels = np.atleast_1d(labels)
segment_img.check_labels(labels)
if kernel is not None:
data = _filter_data(data, kernel, mode='constant', fill_value=0.0)
last_label = segment_img.max_label
segm_deblended = object.__new__(SegmentationImage)
segm_deblended._data = np.copy(segment_img.data)
for label in labels:
source_slice = segment_img.slices[segment_img.get_index(label)]
source_data = data[source_slice]
source_segm = object.__new__(SegmentationImage)
source_segm._data = np.copy(segment_img.data[source_slice])
source_segm.keep_labels(label) # include only one label
source_deblended = _deblend_source(
source_data, source_segm, npixels, nlevels=nlevels,
contrast=contrast, mode=mode, connectivity=connectivity)
if not np.array_equal(source_deblended.data.astype(bool),
source_segm.data.astype(bool)):
raise ValueError(f'Deblending failed for source "{label}". '
'Please ensure you used the same pixel '
'connectivity in detect_sources and '
'deblend_sources. If this issue persists, '
'then please inform the developers.')
if source_deblended.nlabels > 1:
source_deblended.relabel_consecutive(start_label=1)
# replace the original source with the deblended source
source_mask = (source_deblended.data > 0)
segm_tmp = segm_deblended.data
segm_tmp[source_slice][source_mask] = (
source_deblended.data[source_mask] + last_label)
segm_deblended.__dict__ = {} # reset cached properties
segm_deblended._data = segm_tmp
last_label += source_deblended.nlabels
if relabel:
segm_deblended.relabel_consecutive()
return segm_deblended
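# A minimal usage sketch (assumes ``detect_sources`` from
# ``photutils.segmentation`` and a background-subtracted ``data`` array);
# both calls should use the same ``npixels``, ``kernel`` and
# ``connectivity``:
#
#     from photutils.segmentation import detect_sources
#     segm = detect_sources(data, threshold, npixels=5)
#     segm_deblend = deblend_sources(data, segm, npixels=5,
#                                    nlevels=32, contrast=0.001)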
def _deblend_source(data, segment_img, npixels, nlevels=32, contrast=0.001,
mode='exponential', connectivity=8):
"""
Deblend a single labeled source.
Parameters
----------
data : array_like
The cutout data array for a single source. ``data`` should also
already be smoothed by the same filter used in
:func:`~photutils.segmentation.detect_sources`, if applicable.
segment_img : `~photutils.segmentation.SegmentationImage`
A cutout `~photutils.segmentation.SegmentationImage` object with
the same shape as ``data``. ``segment_img`` should contain only
*one* source label.
npixels : int
The number of connected pixels, each greater than ``threshold``,
that an object must have to be detected. ``npixels`` must be a
positive integer.
nlevels : int, optional
The number of multi-thresholding levels to use. Each source
will be re-thresholded at ``nlevels`` levels spaced
exponentially or linearly (see the ``mode`` keyword) between its
minimum and maximum values within the source segment.
contrast : float, optional
The fraction of the total (blended) source flux that a local
peak must have (at any one of the multi-thresholds) to be
considered as a separate object. ``contrast`` must be between 0
and 1, inclusive. If ``contrast = 0`` then every local peak
will be made a separate object (maximum deblending). If
``contrast = 1`` then no deblending will occur. The default is
0.001, which will deblend sources with a 7.5 magnitude
difference.
mode : {'exponential', 'linear'}, optional
The mode used in defining the spacing between the
multi-thresholding levels (see the ``nlevels`` keyword). The
default is 'exponential'.
connectivity : {8, 4}, optional
The type of pixel connectivity used in determining how pixels
are grouped into a detected source. The options are 8 (default)
or 4. 8-connected pixels touch along their edges or corners.
4-connected pixels touch along their edges. For reference,
SourceExtractor uses 8-connected pixels.
Returns
-------
segment_image : `~photutils.segmentation.SegmentationImage`
A segmentation image, with the same shape as ``data``, where
sources are marked by different positive integer values. A
value of zero is reserved for the background. Note that the
returned `SegmentationImage` may *not* have consecutive labels.
"""
from scipy.ndimage import label as ndilabel
from skimage.segmentation import watershed
if nlevels < 1:
raise ValueError(f'nlevels must be >= 1, got "{nlevels}"')
if contrast < 0 or contrast > 1:
raise ValueError(f'contrast must be >= 0 and <= 1, got "{contrast}"')
segm_mask = (segment_img.data > 0)
source_values = data[segm_mask]
source_sum = float(np.nansum(source_values))
source_min = np.nanmin(source_values)
source_max = np.nanmax(source_values)
if source_min == source_max:
return segment_img # no deblending
if mode == 'exponential' and source_min < 0:
warnings.warn(f'Source "{segment_img.labels[0]}" contains negative '
'values, setting deblending mode to "linear"',
AstropyUserWarning)
mode = 'linear'
steps = np.arange(1., nlevels + 1)
if mode == 'exponential':
if source_min == 0:
source_min = source_max * 0.01
thresholds = source_min * ((source_max / source_min) **
(steps / (nlevels + 1)))
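        # e.g. with nlevels=2, source_min=1 and source_max=100 the thresholds
        # are 100**(1/3) ~= 4.64 and 100**(2/3) ~= 21.5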
elif mode == 'linear':
thresholds = source_min + ((source_max - source_min) /
(nlevels + 1)) * steps
else:
raise ValueError(f'"{mode}" is an invalid mode; mode must be '
'"exponential" or "linear"')
# suppress NoDetectionsWarning during deblending
warnings.filterwarnings('ignore', category=NoDetectionsWarning)
mask = ~segm_mask
segments = _detect_sources(data, thresholds, npixels=npixels,
connectivity=connectivity, mask=mask,
deblend_skip=True)
selem = _make_binary_structure(data.ndim, connectivity)
# define the sources (markers) for the watershed algorithm
nsegments = len(segments)
if nsegments == 0: # no deblending
return segment_img
else:
for i in range(nsegments - 1):
segm_lower = segments[i].data
segm_upper = segments[i + 1].data
relabel = False
            # if there are more sources at the upper level, then
# remove the parent source(s) from the lower level,
# but keep any sources in the lower level that do not have
# multiple children in the upper level
for label in segments[i].labels:
mask = (segm_lower == label)
# checks for 1-to-1 label mapping n -> m (where m >= 0)
upper_labels = segm_upper[mask]
upper_labels = np.unique(upper_labels[upper_labels != 0])
if upper_labels.size >= 2:
relabel = True
segm_lower[mask] = segm_upper[mask]
if relabel:
segm_new = object.__new__(SegmentationImage)
segm_new._data = ndilabel(segm_lower, structure=selem)[0]
segments[i + 1] = segm_new
else:
segments[i + 1] = segments[i]
# Deblend using watershed. If any sources do not meet the
# contrast criterion, then remove the faintest such source and
# repeat until all sources meet the contrast criterion.
markers = segments[-1].data
mask = segment_img.data.astype(bool)
remove_marker = True
while remove_marker:
markers = watershed(-data, markers, mask=mask, connectivity=selem)
labels = np.unique(markers[markers != 0])
flux_frac = np.array([np.sum(data[markers == label])
for label in labels]) / source_sum
remove_marker = any(flux_frac < contrast)
if remove_marker:
# remove only the faintest source (one at a time)
# because several faint sources could combine to meet the
# contrast criterion
markers[markers == labels[np.argmin(flux_frac)]] = 0.
segm_new = object.__new__(SegmentationImage)
segm_new._data = markers
return segm_new
|
|
from __future__ import unicode_literals
import json
import csv
from modularodm import Q
from django.views.generic import ListView, DetailView, FormView, UpdateView
from django.views.defaults import permission_denied, bad_request
from django.core.urlresolvers import reverse
from django.http import JsonResponse, Http404, HttpResponse
from django.shortcuts import redirect
from admin.common_auth.logs import (
update_admin_log,
ACCEPT_PREREG,
REJECT_PREREG,
COMMENT_PREREG,
)
from admin.pre_reg import serializers
from admin.pre_reg.forms import DraftRegistrationForm
from admin.pre_reg.utils import sort_drafts, SORT_BY
from framework.exceptions import PermissionsError
from framework.guid.model import Guid
from website.exceptions import NodeStateError
from website.files.models import FileNode
from website.project.model import DraftRegistration, Node
from website.prereg.utils import get_prereg_schema
from website.project.metadata.schemas import from_json
from admin.base.utils import PreregAdmin
class DraftListView(PreregAdmin, ListView):
template_name = 'pre_reg/draft_list.html'
ordering = '-date'
context_object_name = 'draft'
def get_queryset(self):
query = (
Q('registration_schema', 'eq', get_prereg_schema()) &
Q('approval', 'ne', None)
)
ordering = self.get_ordering()
if 'initiator' in ordering:
return DraftRegistration.find(query).sort(ordering)
if ordering == SORT_BY['title']:
return DraftRegistration.find(query).sort(
'registration_metadata.q1.value')
if ordering == SORT_BY['n_title']:
return DraftRegistration.find(query).sort(
'-registration_metadata.q1.value')
return sort_drafts(DraftRegistration.find(query), ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'drafts': [
serializers.serialize_draft_registration(d, json_safe=False)
for d in query_set
],
'page': page,
'p': self.get_paginate_by(query_set),
'SORT_BY': SORT_BY,
'order': self.get_ordering(),
'status': self.request.GET.get('status', 'all'),
}
def get_paginate_by(self, queryset):
return int(self.request.GET.get('p', 10))
def get_paginate_orphans(self):
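        # allow roughly ten percent of the page size as orphan rows on the
        # final page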
return int(self.get_paginate_by(None) / 11.0) + 1
def get_ordering(self):
return self.request.GET.get('order_by', self.ordering)
class DraftDownloadListView(DraftListView):
def get(self, request, *args, **kwargs):
try:
queryset = map(serializers.serialize_draft_registration,
self.get_queryset())
except AttributeError:
raise Http404('A draft was malformed.')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=prereg.csv;'
response['Cache-Control'] = 'no-cache'
keys = queryset[0].keys()
keys.remove('registration_schema')
writer = csv.DictWriter(response, fieldnames=keys)
writer.writeheader()
for draft in queryset:
draft.pop('registration_schema')
draft.update({'initiator': draft['initiator']['username']})
writer.writerow(
{k: v.encode('utf8') if isinstance(v, unicode) else v
for k, v in draft.items()}
)
return response
class DraftDetailView(PreregAdmin, DetailView):
template_name = 'pre_reg/draft_detail.html'
context_object_name = 'draft'
def get_object(self, queryset=None):
draft = DraftRegistration.load(self.kwargs.get('draft_pk'))
self.checkout_files(draft)
try:
return serializers.serialize_draft_registration(draft)
except AttributeError:
raise Http404('{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('draft_pk')
))
def checkout_files(self, draft):
prereg_user = self.request.user.osf_user
for item in get_metadata_files(draft):
item.checkout = prereg_user
item.save()
class DraftFormView(PreregAdmin, FormView):
template_name = 'pre_reg/draft_form.html'
form_class = DraftRegistrationForm
context_object_name = 'draft'
def dispatch(self, request, *args, **kwargs):
self.draft = DraftRegistration.load(self.kwargs.get('draft_pk'))
if self.draft is None:
raise Http404('{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('draft_pk')
))
return super(DraftFormView, self).dispatch(request, *args, **kwargs)
def get_initial(self):
flags = self.draft.flags
self.initial = {
'notes': self.draft.notes,
'assignee': flags.get('assignee'),
'payment_sent': flags.get('payment_sent'),
'proof_of_publication': flags.get('proof_of_publication'),
}
return super(DraftFormView, self).get_initial()
def get_context_data(self, **kwargs):
kwargs.setdefault('draft', serializers.serialize_draft_registration(
self.draft,
json_safe=False
))
kwargs.setdefault('IMMEDIATE', serializers.IMMEDIATE)
return super(DraftFormView, self).get_context_data(**kwargs)
def form_valid(self, form):
if 'approve_reject' in form.changed_data:
osf_user = self.request.user.osf_user
try:
if form.cleaned_data.get('approve_reject') == 'approve':
flag = ACCEPT_PREREG
message = 'Approved'
self.draft.approve(osf_user)
else:
flag = REJECT_PREREG
message = 'Rejected'
self.draft.reject(osf_user)
except PermissionsError as e:
return permission_denied(self.request, e)
self.checkin_files(self.draft)
update_admin_log(self.request.user.id, self.kwargs.get('draft_pk'),
'Draft Registration', message, flag)
admin_settings = form.cleaned_data
self.draft.notes = admin_settings.get('notes', self.draft.notes)
del admin_settings['approve_reject']
del admin_settings['notes']
self.draft.flags = admin_settings
self.draft.save()
return super(DraftFormView, self).form_valid(form)
def checkin_files(self, draft):
for item in get_metadata_files(draft):
item.checkout = None
item.save()
def get_success_url(self):
return '{}?page={}'.format(reverse('pre_reg:prereg'),
self.request.POST.get('page', 1))
class CommentUpdateView(PreregAdmin, UpdateView):
context_object_name = 'draft'
def post(self, request, *args, **kwargs):
try:
data = json.loads(request.body).get('schema_data', {})
draft = DraftRegistration.load(self.kwargs.get('draft_pk'))
draft.update_metadata(data)
draft.save()
log_message = list()
for key, value in data.iteritems():
comments = data.get(key, {}).get('comments', [])
for comment in comments:
log_message.append('{}: {}'.format(key, comment['value']))
update_admin_log(
user_id=request.user.id,
object_id=draft._id,
object_repr='Draft Registration',
message='Comments: <p>{}</p>'.format('</p><p>'.join(log_message)),
action_flag=COMMENT_PREREG
)
return JsonResponse(serializers.serialize_draft_registration(draft))
except AttributeError:
raise Http404('{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('draft_pk')
))
except NodeStateError as e:
return bad_request(request, e)
def view_file(request, node_id, provider, file_id):
fp = FileNode.load(file_id)
wb_url = fp.generate_waterbutler_url()
return redirect(wb_url)
def get_metadata_files(draft):
data = draft.registration_metadata
    for q, question in get_file_questions('prereg-prize.json'):
        # file answers live either directly under 'extra' or nested under
        # the question's 'uploader' property
        if isinstance(data[q]['value'], dict):
            file_infos = data[q]['value']['uploader']['extra']
        else:
            file_infos = data[q]['extra']
        for file_info in file_infos:
            provider = file_info['data']['provider']
            if provider != 'osfstorage':
                raise Http404(
                    'File does not exist in OSFStorage ({}: {})'.format(
                        q, question
                    ))
            file_guid = file_info.get('fileId')
            if file_guid is None:
                # create a guid for the file and persist it in the draft
                node = Node.load(file_info.get('nodeId'))
                path = file_info['data'].get('path')
                item = FileNode.resolve_class(
                    provider,
                    FileNode.FILE
                ).get_or_create(node, path)
                file_guid = item.get_guid(create=True)._id
                file_info['fileId'] = file_guid
                draft.update_metadata(data)
                draft.save()
            else:
                guid = Guid.load(file_guid)
                item = guid.referent
                if item is None:
                    raise Http404(
                        'File with guid "{}" in "{}" does not exist'.format(
                            file_guid, question
                        ))
            yield item
def get_file_questions(json_file):
uploader = {
'id': 'uploader',
'type': 'osf-upload',
'format': 'osf-upload-toggle'
}
questions = []
schema = from_json(json_file)
for item in schema['pages']:
for question in item['questions']:
if question['type'] == 'osf-upload':
questions.append((question['qid'], question['title']))
continue
properties = question.get('properties')
if properties is None:
continue
if uploader in properties:
questions.append((question['qid'], question['title']))
return questions
|
|
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
import warnings
from sys import version_info
import numpy as np
from scipy import interpolate
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet
from sklearn.linear_model import LassoLarsCV
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
When validating this against glmnet notice that glmnet divides it
against nobs.
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
    Actually, the parameter alpha = 0 should not be allowed. However,
    we test it as a border case.
ElasticNet is tested with and without precomputed Gram matrix
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
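# Illustrative sketch (not part of the original suite): with the defaults
# above the problem is ill-posed because n_features (200) far exceeds
# n_samples (50), and only the first 10 coefficients of the true w are
# non-zero, so a sparse estimator can recover the signal where plain least
# squares cannot.
def _example_build_dataset_shapes():
    X, y, X_test, y_test = build_dataset()
    assert X.shape == (50, 200)
    assert y.shape == (50,)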
def test_lasso_path():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that their alphas differ by at most one position
# in clf's alpha grid
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_enet_path():
X, y, X_test, y_test = build_dataset()
max_iter = 150
with warnings.catch_warnings():
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
warnings.simplefilter("ignore", UserWarning)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.9, 0.95], cv=3,
max_iter=max_iter)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.002, 2)
assert_equal(clf.l1_ratio_, 0.95)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.9, 0.95], cv=3,
max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.002, 2)
assert_equal(clf.l1_ratio_, 0.95)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 50
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
# Test that explicit warm restart...
clf = ElasticNet(alpha=1.0, max_iter=50)
clf.fit(X, y)
clf2 = ElasticNet(alpha=0.1, max_iter=50)
clf2.fit(X, y, coef_init=clf.coef_.copy())
#... and implicit warm restart are equivalent.
clf3 = ElasticNet(alpha=1.0, max_iter=50, warm_start=True)
clf3.fit(X, y)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.1)
clf3.fit(X, y)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
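# Illustrative note (not part of the original suite): warm_start=True makes
# the next fit() start from the previous coef_, so refitting clf3 at
# alpha=0.1 is equivalent to passing coef_init=clf.coef_ explicitly, which
# is exactly what the two assertions above verify.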
def test_lasso_alpha_warning():
check_warnings() # Skip if unsupported Python version
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
clf.fit(X, Y)
assert_greater(len(w), 0) # warnings should be raised
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap, eps = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_, estimator.eps_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
assert_array_almost_equal(eps[k], estimator.eps_)
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
from __future__ import division, print_function, unicode_literals
# collision_model tests
# circles1 test
import cocos.collision_model as cm
import cocos.euclid as eu
from math import sin, cos, radians
class Obj_with_shape(object):
def __init__(self, name, cshape):
self.name = name
self.cshape = cshape
def create_obj_with_circle(name, center, r):
shape = cm.CircleShape(center, r)
obj = Obj_with_shape(name, shape)
return obj
def pprint_container(heading, container):
sl = [s.name for s in container]
sl.sort()
print(heading)
for s in sl:
print("\t%s"%s)
# See circle1_data.png for a visualization; it was plotted with the
# function plot_circle_data1.
def circle_data1(offset):
r1 = 1.5
center = eu.Vector2(0.0, 0.0) + offset
center_circle = create_obj_with_circle('center', center, r1)
r2 = 0.3
d1 = (r1 + r2) - 0.1
angles = [a for a in range(360, 1, -360//12)]
ring_touching = set()
for a in angles:
center = d1 * eu.Vector2(cos(radians(a)), sin(radians(a))) + offset
circle = create_obj_with_circle("ring_touching, distance 0.0, angle %3s"%a, center, r2)
ring_touching.add(circle)
near_distance = 0.1
d2 = (r1 + r2) + near_distance
angles = [a for a in range(360, 1, -360//12)]
ring_near = set()
for a in angles:
center = d2 * eu.Vector2(cos(radians(a)), sin(radians(a))) + offset
circle = create_obj_with_circle("ring_near, distance 0.1, angle %3s"%a,
center, r2)
ring_near.add(circle)
far_distance = 0.2
d3 = (r1 + r2) + far_distance
angles = [a for a in range(360, 1, -360//12)]
ring_far = set()
for a in angles:
center = d3 * eu.Vector2(cos(radians(a)), sin(radians(a))) + offset
circle = create_obj_with_circle("ring_far, distance 0.2, angle %3s"%a,
center, r2)
ring_far.add(circle)
return (center_circle, ring_touching, ring_near, ring_far,
near_distance, far_distance, angles)
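# Illustrative sketch (not part of the original tests): two circles overlap
# exactly when the distance between their centers is less than the sum of
# their radii; circle_data1 builds its three rings around that threshold.
def _example_circle_overlap_rule():
    r1, r2 = 1.5, 0.3
    touching_d = (r1 + r2) - 0.1  # centers closer than r1 + r2 -> overlap
    near_d = (r1 + r2) + 0.1      # gap of 0.1 -> disjoint but "near"
    far_d = (r1 + r2) + 0.2       # gap of 0.2 -> disjoint and farther away
    assert touching_d < (r1 + r2) < near_d < far_d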
def plot_circle_data1(offset):
(center_circle, ring_touching, ring_near, ring_far,
near_distance, far_distance, angles) = circle_data1(offset)
def get_params(obj):
x, y = obj.cshape.center
return obj.name, x, y, obj.cshape.r
import pylab
fig = pylab.figure(figsize=(6.0, 6.0)) #size in inches
fig.suptitle('circle_data1', fontsize=12)
pylab.axes().set_aspect('equal') # aspect ratio
name, x, y, r = get_params(center_circle)
cir = pylab.Circle((x,y), radius=r, ec='black', fc='none')
pylab.gca().add_patch(cir)
xmax = r
for ring, color in [ (ring_touching, 'r'), (ring_near, 'g'),
(ring_far, 'b') ]:
for obj in ring:
name, x, y, r = get_params(obj)
cir = pylab.Circle((x,y), radius=r, ec=color, fc='none')
pylab.gca().add_patch(cir)
ymin = -(xmax + (2*r + far_distance)*3)
xmax = xmax + (2*r + far_distance)*2
# axis: xmin, xmax, ymin, ymax
pylab.axis([-xmax, xmax, ymin, xmax])
# make legends
labels = ['center_circle', 'overlapping ring', 'near ring', 'far ring']
colors = ['black', 'r', 'g', 'b' ]
dummys = [ pylab.Line2D([0,1], [0,1], lw=2.0, c=e) for e in colors]
pylab.gca().legend(dummys, labels, ncol=2, loc='lower center')
#pylab.show()
pylab.savefig('circle1_data.png')
def pytest_generate_tests(metafunc):
param_names = ['variant', 'ctor_args', 'offset']
cases = {
"BruteForce":
('CollisionManagerBruteForce', [], (2.2, 3.7)),
"Grid, target don't crosses bucket edges":
('CollisionManagerGrid',
[0.0, 100.0, 0.0, 100.0, 4.0, 4.0],
(2.0, 2.0)
),
"Grid, target crosses horizontal bucket edge":
('CollisionManagerGrid',
[0.0, 100.0, 0.0, 100.0, 4.0, 4.0],
(2.0, 4.0)
),
"Grid, target crosses vertical bucket edge":
('CollisionManagerGrid',
[0.0, 100.0, 0.0, 100.0, 4.0, 4.0],
(4.0, 2.0)
),
"Grid, target crosses vertical and horizontal bucket edges":
('CollisionManagerGrid',
[0.0, 100.0, 0.0, 100.0, 4.0, 4.0],
(4.0, 4.0)
),
"Grid, target bigger than cell":
('CollisionManagerGrid',
[0.0, 100.0, 0.0, 100.0, 2.0, 2.0],
(2.0, 2.0)
),
}
for name in cases:
metafunc.addcall(id=name, funcargs=dict(zip(param_names, cases[name])))
fe = 1.0e-4
def test_collman_circles(variant, ctor_args, offset):
"""
Tests the collision manager's basic behavior when the shape is a circle.
Test data is defined by circle_data1(offset); the collision manager
variant is determined by 'variant', and 'ctor_args' provides the
parameters needed to instantiate it.
'variant' is the class name to use; the class definition is assumed
to be in the module cocos.collision_model.
"""
emptyset = set()
(center_circle, ring_touching, ring_near, ring_far,
near_distance, far_distance, angles) = circle_data1(eu.Vector2(*offset))
collman_cls = getattr(cm, variant)
collman = collman_cls(*ctor_args)
collman.add(center_circle)
for ring in [ ring_touching, ring_near, ring_far ]:
for circle in ring:
collman.add(circle)
# all objs in data1 are known
assert collman.knows(center_circle)
for ring in [ ring_touching, ring_near, ring_far ]:
for circle in ring:
assert collman.knows(circle)
# the set of known objects is exactly the set of objects in data1
assert collman.known_objs() == (set([center_circle]) | ring_touching |
ring_near | ring_far)
touching_result = collman.objs_colliding(center_circle)
# no repetitions
touching = set(touching_result)
assert len(touching) == len(touching_result)
# obj is not in objs_colliding_with(obj)
assert center_circle not in touching
# any in ring_touching is touching
assert ring_touching <= touching
# none of ring_near touches
assert (ring_near & touching) == emptyset
# none of ring_far touches
assert (ring_far & touching) == emptyset
# no extraneous values in touching
assert ring_touching == touching
# the generator form gives the same result as objs_colliding
touching_from_generator = [e for e in collman.iter_colliding(center_circle)]
assert len(touching_from_generator) == len(touching)
assert set(touching_from_generator) == touching
# test with short near_distance, should be same as before
short_distance = 0.9 * near_distance
nears1_result = collman.objs_near(center_circle, short_distance)
# no repetitions
nears1 = set(nears1_result)
assert len(nears1) == len(nears1_result)
# expected due to short near_distance
assert nears1 == ring_touching
# similar using objs_near_wdistance
od_list = collman.objs_near_wdistance(center_circle, short_distance)
assert len(od_list) == len(ring_touching)
assert set([a for a,b in od_list]) == ring_touching
# and distances are near 0.0
for a,d in od_list:
assert abs(d-0.0)<fe
# test with near distance to accept ring_near and reject ring_far
n_distance = 0.9*near_distance + 0.1*far_distance
nears2 = set(collman.objs_near(center_circle, n_distance))
assert (ring_far & nears2) == emptyset
pprint_container('nears2:', nears2)
pprint_container('ring_near', ring_near)
assert nears2 == (ring_touching | ring_near)
# similar using objs_near_wdistance
od_list = collman.objs_near_wdistance(center_circle, n_distance)
nears2 = set(od_list)
assert len(od_list) == len(nears2)
assert set([a for a,b in od_list]) == (ring_touching | ring_near)
# and distances match
for a,d in od_list:
assert ((a in ring_touching and abs(d) < fe) or
(a in ring_near and abs(d-near_distance)<fe))
# all_collisions
li_touching_center = []
d = {}
for a in angles:
d[" angle %3s"%a] = []
for obj, other in collman.iter_all_collisions():
# no collision with itself
assert obj is not other
if obj.name == 'center' or other.name == 'center':
if obj.name == 'center':
li_touching_center.append(other)
else:
li_touching_center.append(obj)
else:
obj_ring, obj_distance, obj_angle = obj.name.split(',')
other_ring, other_distance, other_angle = other.name.split(',')
assert obj_angle == other_angle
assert obj_angle in d
if id(obj)>id(other):
obj, other = other, obj
d[obj_angle].append((obj, other))
# the ones touching center are correct
assert len(li_touching_center) == len(touching)
assert set(li_touching_center) == touching
# all the others
for k in d:
# no repetitions
li = d[k]; si = set(li)
assert len(li) == len(si)
# all collisions for the angle
assert len(si) == 3
# removing center_circle
collman.remove_tricky(center_circle)
assert not collman.knows(center_circle)
assert center_circle not in collman.known_objs()
assert collman.known_objs() == (ring_touching | ring_near | ring_far)
# any_near, with obj not known
r = center_circle.cshape.r
small = create_obj_with_circle('small', center_circle.cshape.center,
r - near_distance*2.0)
# 'near_distance' chosen so that any_near returns None
assert collman.any_near(small, near_distance/2.0) is None
# 'near_distance' chosen so that any_near returns some known object (a weak check)
assert collman.any_near(small, near_distance*2.1) is not None
# any near with known object
collman.add(small)
assert collman.any_near(small, near_distance/2.0) is None
#plot_circle_data1(eu.Vector2(0.0, 0.0))
|
|
import unittest
from aiohttp import web
from aiohttp.test_utils import TestClient, TestServer, loop_context
from mocker.http_handlers import handle_factory
class TestHttpHandlers(unittest.TestCase):
def setUp(self):
self.DATA_PATH = './tests/data'
self.HOST = '127.0.0.1'
self.PORT = 8000
self.SERVER_ADDRESS = (self.HOST, self.PORT)
self.app = web.Application()
self.app.add_routes([
web.get('/{tail:.*}', handle_factory(self.DATA_PATH)),
web.post('/{tail:.*}', handle_factory(self.DATA_PATH)),
web.put('/{tail:.*}', handle_factory(self.DATA_PATH)),
web.patch('/{tail:.*}', handle_factory(self.DATA_PATH)),
web.head('/{tail:.*}', handle_factory(self.DATA_PATH)),
web.delete('/{tail:.*}', handle_factory(self.DATA_PATH)),
web.options('/{tail:.*}', handle_factory(self.DATA_PATH)),
])
self.test_server = TestServer(self.app, host=self.HOST, port=self.PORT)
# web.run_app(self.app, host=self.HOST, port=self.PORT)
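# Note (illustrative): every test below follows the same pattern: open a
# fresh event loop with loop_context(), run an async body that issues one
# request through TestClient against self.test_server, and assert on the
# response status, reason and headers. The commented web.run_app call above
# shows how the same app would be served standalone.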
def test_mock_exists(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
loop.run_until_complete(test())
def test_mock_exists_head(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.head('/test')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
self.assertIn('Date', headers)
loop.run_until_complete(test())
def test_mock_exists_post(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.post('/test')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
self.assertIn('Date', headers)
loop.run_until_complete(test())
def test_mock_exists_put(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.put('/test')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
self.assertIn('Date', headers)
loop.run_until_complete(test())
def test_mock_exists_patch(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.patch('/test')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
self.assertIn('Date', headers)
loop.run_until_complete(test())
def test_mock_exists_delete(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.delete('/test')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
self.assertIn('Date', headers)
loop.run_until_complete(test())
def test_mock_does_not_exist(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-not-exists')
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
loop.run_until_complete(test())
def test_mock_exists_with_no_response_key(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-no-response')
self.assertEqual(response.status, 500)
self.assertEqual(response.reason, 'Internal Server Error')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
response_json = await response.json()
self.assertDictEqual(
response_json,
{
'message': 'You must specify a "response" key in your mock'
}
)
loop.run_until_complete(test())
def test_mock_exists_but_invalid_json(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-invalid-json')
self.assertEqual(response.status, 500)
self.assertEqual(response.reason, 'Internal Server Error')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
response_json = await response.json()
self.assertDictEqual(
response_json,
{
'message': 'Mock file is not a valid JSON'
}
)
loop.run_until_complete(test())
def test_mock_exists_no_content_type(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-no-content-type')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/json; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
loop.run_until_complete(test())
def test_mock_exists_no_server_header(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-no-server-header')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertIn('Mocker/', headers['Server'])
loop.run_until_complete(test())
def test_mock_exists_no_date_header(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-no-server-header')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertIn('Date', headers)
loop.run_until_complete(test())
def test_mock_exists_custom_content_type(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-custom-content-type')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Content-Type'], 'application/mocker; charset=utf-8')
self.assertIn('Mocker/', headers['Server'])
loop.run_until_complete(test())
def test_mock_exists_custom_server_header(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-custom-server-header')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertIn('MockerCustom', headers['Server'])
loop.run_until_complete(test())
def test_mock_exists_custom_date_header(self):
with loop_context() as loop:
async def test():
async with TestClient(self.test_server, loop=loop) as client:
# nonlocal client
response = await client.get('/test-custom-date-header')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
headers = response.headers
self.assertEqual(headers['Date'], '20180201')
loop.run_until_complete(test())
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kudu.compat import unittest, long
from kudu.tests.common import KuduTestBase
from kudu.client import Partitioning
import kudu
import datetime
from pytz import utc
class TestClient(KuduTestBase, unittest.TestCase):
def setUp(self):
pass
def test_table_basics(self):
table = self.client.table(self.ex_table)
self.assertEqual(table.name, self.ex_table)
self.assertEqual(table.num_columns, len(self.schema))
self.assertIsNotNone(table.id)
def test_table_column(self):
table = self.client.table(self.ex_table)
cols = [(table['key'], 'key', 'int32'),
(table[1], 'int_val', 'int32'),
(table[-1], 'unixtime_micros_val', 'unixtime_micros')]
for col, name, type in cols:
assert col.name == name
assert col.parent is table
result_repr = repr(col)
expected_repr = ('Column({0}, parent={1}, type={2})'
.format(name, self.ex_table, type))
assert result_repr == expected_repr
def test_table_schema_retains_reference(self):
import gc
table = self.client.table(self.ex_table)
schema = table.schema
table = None
gc.collect()
repr(schema)
def test_table_exists(self):
self.assertFalse(self.client.table_exists('nonexistent-table'))
self.assertTrue(self.client.table_exists(self.ex_table))
def test_list_tables(self):
schema = self.example_schema()
partitioning = self.example_partitioning()
to_create = ['foo1', 'foo2', 'foo3']
for name in to_create:
self.client.create_table(name, schema, partitioning)
result = self.client.list_tables()
expected = [self.ex_table] + to_create
assert sorted(result) == expected
result = self.client.list_tables('foo')
assert sorted(result) == to_create
for name in to_create:
self.client.delete_table(name)
def test_is_multimaster(self):
assert self.client.is_multimaster
def test_delete_table(self):
name = "peekaboo"
self.client.create_table(name, self.schema, self.partitioning)
self.client.delete_table(name)
assert not self.client.table_exists(name)
# Should raise a more meaningful exception at some point
with self.assertRaises(kudu.KuduNotFound):
self.client.delete_table(name)
def test_table_nonexistent(self):
self.assertRaises(kudu.KuduNotFound, self.client.table,
'__donotexist__')
def test_create_table_with_different_replication_factors(self):
name = "different_replica_table"
# Test setting the number of replicas for 1, 3 and 5 provided that the
# number does not exceed the number of tservers
for n_replicas in [n for n in [1, 3, 5] if n <= self.NUM_TABLET_SERVERS]:
try:
self.client.create_table(
name, self.schema,
partitioning=Partitioning().add_hash_partitions(['key'], 2),
n_replicas=n_replicas)
assert n_replicas == self.client.table(name).num_replicas
finally:
try:
self.client.delete_table(name)
except:
pass
def test_create_partitioned_table(self):
name = 'partitioned_table'
try:
self.client.create_table(
name, self.schema,
partitioning=Partitioning().add_hash_partitions(['key'], 2))
# TODO: once the Python client can list partition info, assert that it was
# created successfully here.
self.client.delete_table(name)
self.client.create_table(
name, self.schema,
partitioning=Partitioning()
.set_range_partition_columns(['key'])
.add_range_partition_split({'key': 10})
.add_range_partition_split([20])
.add_range_partition_split((30,))
)
self.client.delete_table(name)
self.client.create_table(
name, self.schema,
partitioning=Partitioning().add_hash_partitions(['key'],
2,
seed=342310))
self.client.delete_table(name)
finally:
try:
self.client.delete_table(name)
except:
pass
def test_insert_nonexistent_field(self):
table = self.client.table(self.ex_table)
op = table.new_insert()
self.assertRaises(KeyError, op.__setitem__, 'doesntexist', 12)
def test_insert_and_mutate_rows(self):
nrows = 100
table = self.client.table(self.ex_table)
session = self.client.new_session()
for i in range(nrows):
op = table.new_insert((i, i*2, 'hello_%d' % i))
session.apply(op)
# Cannot apply the same insert twice, C++ client does not indicate an
# error
self.assertRaises(Exception, session.apply, op)
# synchronous
session.flush()
# Update a row, upsert another one
op = table.new_update()
op['key'] = 1
op['int_val'] = 111
op['string_val'] = 'updated'
# Insert datetime without timezone specified, will be assumed
# to be UTC
op['unixtime_micros_val'] = datetime.datetime(2016, 10, 30, 10, 12)
session.apply(op)
op = table.new_upsert({0: 2,
1: 222,
2: 'upserted'})
session.apply(op)
session.flush()
scanner = table.scanner().open()
rows = dict((t[0], t) for t in scanner.read_all_tuples())
assert len(rows) == nrows
assert rows[1] == (1, 111, 'updated',
datetime.datetime(2016, 10, 30, 10, 12)
.replace(tzinfo=utc))
assert rows[2] == (2, 222, 'upserted', None)
# Delete the rows we just wrote
for i in range(nrows):
op = table.new_delete({'key': i})
session.apply(op)
session.flush()
scanner = table.scanner().open()
assert len(scanner.read_all_tuples()) == 0
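# Note (illustrative): the write path above follows Kudu's session model:
# build an operation from the table (new_insert / new_update / new_upsert /
# new_delete), apply() it to the session, and flush() to send the batch
# synchronously.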
def test_failed_write_op(self):
# Insert row
table = self.client.table(self.ex_table)
session = self.client.new_session()
session.apply(table.new_insert({'key': 1}))
session.flush()
# Attempt to insert row again
session.apply(table.new_insert({'key': 1}))
self.assertRaises(kudu.KuduBadStatus, session.flush)
# Check errors
errors, overflowed = session.get_pending_errors()
self.assertFalse(overflowed)
self.assertEqual(len(errors), 1)
error = errors[0]
# This test passes because we currently always return
# True.
self.assertTrue(error.was_possibly_successful())
self.assertEqual(error.failed_op(), 'INSERT int32 key=1')
# Delete inserted row
session.apply(table.new_delete({'key': 1}))
session.flush()
def test_session_auto_open(self):
table = self.client.table(self.ex_table)
scanner = table.scanner()
result = scanner.read_all_tuples()
assert len(result) == 0
def test_session_open_idempotent(self):
table = self.client.table(self.ex_table)
scanner = table.scanner().open().open()
result = scanner.read_all_tuples()
assert len(result) == 0
def test_session_flush_modes(self):
self.client.new_session(flush_mode=kudu.FLUSH_MANUAL)
self.client.new_session(flush_mode=kudu.FLUSH_AUTO_SYNC)
self.client.new_session(flush_mode=kudu.FLUSH_AUTO_BACKGROUND)
self.client.new_session(flush_mode='manual')
self.client.new_session(flush_mode='sync')
self.client.new_session(flush_mode='background')
with self.assertRaises(ValueError):
self.client.new_session(flush_mode='foo')
def test_session_mutation_buffer_settings(self):
self.client.new_session(flush_mode=kudu.FLUSH_AUTO_BACKGROUND,
mutation_buffer_sz= 10*1024*1024,
mutation_buffer_watermark=0.5,
mutation_buffer_flush_interval=2000,
mutation_buffer_max_num=3)
session = self.client.new_session(flush_mode=kudu.FLUSH_AUTO_BACKGROUND)
session.set_mutation_buffer_space(10*1024*1024)
session.set_mutation_buffer_flush_watermark(0.5)
session.set_mutation_buffer_flush_interval(2000)
session.set_mutation_buffer_max_num(3)
def test_session_mutation_buffer_errors(self):
session = self.client.new_session(flush_mode=kudu.FLUSH_AUTO_BACKGROUND)
with self.assertRaises(OverflowError):
session.set_mutation_buffer_max_num(-1)
with self.assertRaises(kudu.errors.KuduInvalidArgument):
session.set_mutation_buffer_flush_watermark(1.2)
with self.assertRaises(OverflowError):
session.set_mutation_buffer_flush_interval(-1)
with self.assertRaises(OverflowError):
session.set_mutation_buffer_space(-1)
def test_connect_timeouts(self):
# Smoke test: just check that connect() succeeds with explicit timeouts;
# there is no other observable effect to assert on.
kudu.connect(self.master_hosts, self.master_ports,
admin_timeout_ms=1000,
rpc_timeout_ms=1000)
def test_capture_kudu_error(self):
pass
def test_list_tablet_server(self):
# Not confirming the number of tablet servers in this test because
# that is confirmed in the beginning of testing. This test confirms
# that all of the KuduTabletServer methods returned results and that
# a result is returned from the list_tablet_servers method.
tservers = self.client.list_tablet_servers()
self.assertTrue(tservers)
for tserver in tservers:
assert tserver.uuid() is not None
assert tserver.hostname() is not None
assert tserver.port() is not None
def test_bad_partialrow(self):
table = self.client.table(self.ex_table)
op = table.new_insert()
# Test bad keys or indexes
keys = [
('not-there', KeyError),
(len(self.schema) + 1, IndexError),
(-1, IndexError)
]
for key in keys:
with self.assertRaises(key[1]):
op[key[0]] = 'test'
# Test incorrectly typed data
with self.assertRaises(TypeError):
op['int_val'] = 'incorrect'
# Test setting NULL in a not-null column
with self.assertRaises(kudu.errors.KuduInvalidArgument):
op['key'] = None
def test_alter_table_rename(self):
try:
self.client.create_table('alter-rename',
self.schema,
self.partitioning)
table = self.client.table('alter-rename')
alterer = self.client.new_table_alterer(table)
table = alterer.rename('alter-newname').alter()
self.assertEqual(table.name, 'alter-newname')
finally:
self.client.delete_table('alter-newname')
def test_alter_column(self):
try:
self.client.create_table('alter-column',
self.schema,
self.partitioning)
table = self.client.table('alter-column')
alterer = self.client.new_table_alterer(table)
alterer.alter_column('string_val',rename_to='string_val_renamed')
table = alterer.alter()
# Confirm column rename
col = table['string_val_renamed']
finally:
self.client.delete_table('alter-column')
def test_alter_table_add_drop_column(self):
table = self.client.table(self.ex_table)
alterer = self.client.new_table_alterer(table)
alterer.add_column('added-column', type_='int64', default=0)
table = alterer.alter()
# Confirm column was added
expected_repr = 'Column(added-column, parent={0}, type=int64)'\
.format(self.ex_table)
self.assertEqual(expected_repr, repr(table['added-column']))
alterer = self.client.new_table_alterer(table)
alterer.drop_column('added-column')
table = alterer.alter()
# Confirm column has been dropped.
with self.assertRaises(KeyError):
col = table['added-column']
def test_alter_table_direct_instantiation(self):
# Run the add_drop_column test with direct instantiation of
# the TableAlterer
table = self.client.table(self.ex_table)
alterer = kudu.client.TableAlterer(table)
alterer.add_column('added-column', type_='int64', default=0)
table = alterer.alter()
# Confirm column was added
expected_repr = 'Column(added-column, parent={0}, type=int64)' \
.format(self.ex_table)
self.assertEqual(expected_repr, repr(table['added-column']))
alterer = self.client.new_table_alterer(table)
alterer.drop_column('added-column')
table = alterer.alter()
# Confirm column has been dropped.
with self.assertRaises(KeyError):
col = table['added-column']
def test_alter_table_add_drop_partition(self):
# Add Range Partition
table = self.client.table(self.ex_table)
alterer = self.client.new_table_alterer(table)
# Drop the unbounded range partition.
alterer.drop_range_partition()
# Add a partition from 0 to 100
alterer.add_range_partition(
lower_bound={'key': 0},
upper_bound={'key': 100}
)
table = alterer.alter()
# TODO(jtbirdsell): Once C++ client can list partition schema
# then this test should confirm that the partition was added.
alterer = self.client.new_table_alterer(table)
# Drop the partition from 0 to 100
alterer.drop_range_partition(
lower_bound={'key': 0},
upper_bound={'key': 100}
)
# Add back the unbounded range partition
alterer.add_range_partition()
table = alterer.alter()
class TestMonoDelta(unittest.TestCase):
def test_empty_ctor(self):
delta = kudu.TimeDelta()
assert repr(delta) == 'kudu.TimeDelta()'
def test_static_ctors(self):
delta = kudu.timedelta(3.5)
assert delta.to_seconds() == 3.5
delta = kudu.timedelta(millis=3500)
assert delta.to_millis() == 3500
delta = kudu.timedelta(micros=3500)
assert delta.to_micros() == 3500
delta = kudu.timedelta(micros=1000)
assert delta.to_nanos() == long(1000000)
delta = kudu.timedelta(nanos=3500)
assert delta.to_nanos() == 3500
|
|
from typing import List, Any
import urllib3
from CommonServerPython import *
from math import ceil
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
''' CLIENT CLASS '''
class Client(BaseClient):
def get_domain_data(self, domain: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'full/{domain}',
params={}
)
def get_search_data(self, field: str, value: str, limit: int, page: int) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'domains/{field}/{value}',
params={'limit': limit,
'page': page}
)
def test_module(self):
return self._http_request(
method='GET',
url_suffix='domains/ip/8.8.8.8',
params={}
)
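# Minimal usage sketch (illustrative; the base_url and token below are
# hypothetical placeholders, not values from this integration):
#
#   client = Client(
#       base_url='https://host.io/api',
#       headers={'Authorization': 'Bearer <API_KEY>'},
#       verify=True,
#       proxy=False,
#   )
#   data = client.get_domain_data('example.com')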
''' COMMAND FUNCTIONS '''
def test_module_command(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: the base client
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.test_module()
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise
return 'ok'
def domain_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param client: Hostio client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
:return:
A list of ``CommandResults`` objects that is then passed to
``return_results`` and contains the domain reputations.
:rtype: ``List[CommandResults]``
"""
domains = argToList(args.get('domain'))
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_data(domain)
if domain_data.get('web', {}).get('date'):
domain_date_dt = dateparser.parse(domain_data['web']['date'])
if domain_date_dt:
domain_data['updated_date'] = domain_date_dt.strftime(DATE_FORMAT)
score = Common.DBotScore.NONE
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HostIo',
indicator_type=DBotScoreType.DOMAIN,
score=score,
)
domain_standard_context = Common.Domain(
domain=domain,
updated_date=domain_data.get('updated_date', None),
name_servers=domain_data['web'].get('server', None),
registrant_name=domain_data['web'].get('title', None),
registrant_country=domain_data['web'].get('country', None),
registrant_email=domain_data['web'].get('email', None),
registrant_phone=domain_data['web'].get('phone', None),
dns=domain_data.get('dns', None),
dbot_score=dbot_score
)
readable_output = tableToMarkdown('Domain', domain_data)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HostIo.Domain',
outputs_key_field='domain',
outputs=domain_data,
indicator=domain_standard_context
))
return command_results
def search_command(client: Client, args: Dict[str, Any]) -> CommandResults:
field = args.get('field', None)
value = args.get('value', None)
limit = int(args.get('limit', 25))
data = client.get_search_data(field, value, limit, 0)
domains = data.get('domains', [])
total: int = data.get('total', 0)
read = tableToMarkdown(f'Domains associated with {field}: {value}', data)
if total == 0:
read = f'No Domains associated with {field}'
elif total > limit:
# page size is len(domains) rather than `limit`, since in trial mode it is always 5
pages = ceil((total - len(domains)) / len(domains))
page = 1
while page <= pages:
data = client.get_search_data(field, value, limit, page)
domains += data.get('domains', [])
page += 1
data['domains'] = domains
context = {
'Field': field,
'Value': value,
'Domains': domains,
'Total': total
}
return CommandResults(
readable_output=read,
outputs_prefix='HostIo.Search',
outputs_key_field=['Field', 'Value'],
outputs=context,
raw_response=data)
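# Worked example of the pagination above (illustrative, assuming the API
# returns len(domains) results per page): with total=12 and a first page of
# 5 domains, pages = ceil((12 - 5) / 5) = ceil(1.4) = 2, so pages 1 and 2
# are fetched to collect the remaining 7 domains.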
''' MAIN FUNCTION '''
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
params = demisto.params()
api_key = params.get('token')
base_url = urljoin(params['url'], '/api')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {
'Authorization': f'Bearer {api_key}'
}
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy,
)
if demisto.command() == 'test-module':
return_results(test_module_command(client))
elif demisto.command() == 'domain':
return_results(domain_command(client, demisto.args()))
elif demisto.command() == 'hostio-domain-search':
return_results(search_command(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""Tests for event related actions."""
from __future__ import with_statement
from StringIO import StringIO
import os
from jinja2 import DictLoader, Environment
from mock import patch
from chevah.utils import MODULE_PATH
from chevah.utils.testing import (
LogTestCase,
manufacture,
UtilsTestCase,
)
from chevah.utils.constants import (
CONFIGURATION_ALL_LOG_ENABLED_GROUPS,
)
from chevah.utils.event import (
Event,
EventDefinition,
EventGroupDefinition,
EventsDefinition,
EventsHandler,
)
from chevah.utils.exceptions import (
UtilsError,
)
from chevah.utils.interfaces import (
IEvent,
IEventDefinition,
IEventGroupDefinition,
IEventsDefinition,
)
class TestEventGroupDefinition(UtilsTestCase):
"""Unit tests for EventGroupDefinition."""
def test_init(self):
"""
Check EventGroupDefinition initialization.
"""
name = manufacture.getUniqueString()
description = manufacture.getUniqueString()
event_group = EventGroupDefinition(
name=name, description=description)
self.assertProvides(IEventGroupDefinition, event_group)
self.assertEqual(name, event_group.name)
self.assertEqual(description, event_group.description)
class TestEventDefinition(UtilsTestCase):
"""
Unit tests for EventDefinition.
"""
def test_init(self):
"""
Check EventDefinition initialization.
"""
event_id = manufacture.getUniqueString() + 'greater_than_5'
description = manufacture.getUniqueString()
message = manufacture.getUniqueString()
version_added = manufacture.getUniqueString()
version_removed = manufacture.getUniqueString()
groups = [
manufacture.makeEventGroupDefinition(),
manufacture.makeEventGroupDefinition(),
]
event_definition = EventDefinition(
id=event_id,
message=message,
description=description,
groups=groups,
version_added=version_added,
version_removed=version_removed,
)
self.assertProvides(IEventDefinition, event_definition)
self.assertEqual(event_id, event_definition.id)
self.assertEqual(message, event_definition.message)
self.assertEqual(groups, event_definition.groups)
self.assertEqual(description, event_definition.description)
self.assertEqual(version_added, event_definition.version_added)
self.assertEqual(version_removed, event_definition.version_removed)
self.assertEqual(
[groups[0].name, groups[1].name], event_definition.group_names)
def test_eventID_padding(self):
"""
IDs shorter than 5 characters are left-padded with zeros at the
start.
"""
event_id = '20'
event_definition = EventDefinition(
id=event_id,
message=u'don-t care',
)
self.assertEqual('20', event_definition.id)
self.assertEqual('00020', event_definition.id_padded)
event_id = '100320'
event_definition = EventDefinition(
id=event_id,
message=u'don-t care',
)
self.assertEqual('100320', event_definition.id)
self.assertEqual('100320', event_definition.id_padded)
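# A minimal sketch (assumption: id_padded left-pads with zeros to width 5,
# i.e. it behaves like str.zfill(5)) of the padding rule exercised above.
def _example_id_padding():
    assert '20'.zfill(5) == '00020'
    assert '100320'.zfill(5) == '100320'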
class TestEvent(UtilsTestCase):
"""Unit tests for Event."""
def test_init(self):
"""
Check Event initialization.
"""
event_id = manufacture.getUniqueString()
message = manufacture.getUniqueString()
data = {
'attr1': 'value1',
'attr2': 'value2',
'peer': manufacture.makeIPv4Address(),
'avatar': manufacture.makeFilesystemApplicationAvatar()}
event = Event(
id=event_id,
message=message,
data=data,
)
self.assertProvides(IEvent, event)
self.assertEqual(event_id, event.id)
self.assertEqual(message, event.message)
self.assertEqual(data, event.data)
class TestEventsDefinition(UtilsTestCase):
"""
Unit tests for EventsDefinition.
"""
def test_init(self):
"""
Check object initialization.
"""
events_file = StringIO()
definitions = EventsDefinition(file=events_file)
self.assertProvides(IEventsDefinition, definitions)
def test_load_default_events_file(self):
"""
Check that the events file is valid.
"""
path = os.path.join(
MODULE_PATH, 'static', 'events', 'events.json')
definitions = EventsDefinition(path=path)
definitions.load()
self.assertIsNotEmpty(definitions.getAllEventDefinitions())
self.assertIsNotEmpty(definitions.getAllEventGroupDefinitions())
def test_load_bad_config_file(self):
"""
Trying to configure from a badly formatted configuration file
will raise UtilsError.
"""
content = manufacture.getUniqueString()
definitions = manufacture.makeEventsDefinition(
content=content, load=False)
with self.assertRaises(UtilsError) as context:
definitions.load()
self.assertExceptionID(u'1028', context.exception)
def test_load_empty(self):
"""
Loading empty content succeeds, yielding no event and no event
group definitions.
"""
definitions = manufacture.makeEventsDefinition(content=u'')
definitions.load()
self.assertIsEmpty(definitions.getAllEventDefinitions())
self.assertIsEmpty(definitions.getAllEventGroupDefinitions())
def test_load_EventGroup_good(self):
"""
An EventGroup with either a non-empty or an empty description
loads just fine.
"""
name_1 = manufacture.getUniqueString()
name_2 = manufacture.getUniqueString()
description = manufacture.getUniqueString()
content = '''
{
"groups" : {
"%s": { "description": "%s"},
"%s": { "description": ""}
},
"events" : {}
}
''' % (name_1, description, name_2)
definitions = manufacture.makeEventsDefinition(content=content)
group = definitions.getEventGroupDefinition(name=name_1)
self.assertEqual(name_1, group.name)
self.assertEqual(description, group.description)
group = definitions.getEventGroupDefinition(name=name_2)
self.assertEqual(name_2, group.name)
def test_load_EventDefinition_good(self):
"""
An EventDefinition with a message and groups will load just fine.
"""
event_id = manufacture.getUniqueString()
group_1 = manufacture.getUniqueString()
group_2 = manufacture.getUniqueString()
message = manufacture.getUniqueString()
content = '''
{
"groups" : {
"%s": { "description": ""},
"%s": { "description": ""}
},
"events" : {
"%s": {
"message": "%s",
"groups": ["%s", "%s"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
''' % (group_1, group_2, event_id, message, group_1, group_2)
config = manufacture.makeEventsDefinition(
content=content, load=False)
config.load()
event_definition = config.getEventDefinition(id=event_id)
self.assertEqual(event_id, event_definition.id)
self.assertEqual(message, event_definition.message)
self.assertEqual(2, len(event_definition.groups))
self.assertIsNone(event_definition.version_added)
self.assertIsNone(event_definition.version_removed)
self.assertIsNone(event_definition.description)
def test_load_EventDefinition_missing_group(self):
"""
Loading an EventDefinition with a reference to a non-existent group
will raise UtilsError.
"""
event_id = manufacture.getUniqueString()
group_1 = manufacture.getUniqueString()
message = manufacture.getUniqueString()
content = '''
{
"groups" : {},
"events" : {
"%s": {
"message": "%s",
"groups": ["%s"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
''' % (event_id, message, group_1)
config = manufacture.makeEventsDefinition(
content=content, load=False)
with self.assertRaises(UtilsError):
config.load()
def test_getAllEventGroupDefinitions_good(self):
"""
getAllEventGroupDefinitions returns a dictionary of all defined
event groups, keyed by name.
"""
name_1 = manufacture.getUniqueString()
name_2 = manufacture.getUniqueString()
description = manufacture.getUniqueString()
content = '''
{
"groups" : {
"%s": { "description": "%s"},
"%s": { "description": ""}
},
"events" : {}
}
''' % (name_1, description, name_2)
config = manufacture.makeEventsDefinition(content=content)
result = config.getAllEventGroupDefinitions()
self.assertEqual(2, len(result))
self.assertTrue(name_1 in result)
self.assertTrue(name_2 in result)
def test_getAllEventDefinitions_good(self):
"""
getAllEventDefinitions returns a dictionary of all defined
EventDefinitions, keyed by event id.
"""
content = '''
{
"groups" : {
"group-1": { "description": ""}
},
"events" : {
"ev1": {
"message": "something",
"groups": ["group-1"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
},
"ev2": {
"message": "something",
"groups": ["group-1"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
config = manufacture.makeEventsDefinition(content=content)
result = config.getAllEventDefinitions()
self.assertEqual(2, len(result))
self.assertTrue(u'ev1' in result)
self.assertTrue(u'ev2' in result)
def test_getAllEventDefinitionsPadded(self):
"""
getAllEventDefinitionsPadded returns a dictionary of all defined
EventDefinitions, keyed by padded event id.
"""
content = '''
{
"groups" : {
"group-1": { "description": ""}
},
"events" : {
"ev1": {
"message": "something",
"groups": ["group-1"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
},
"event3": {
"message": "something",
"groups": ["group-1"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
config = manufacture.makeEventsDefinition(content=content)
result = config.getAllEventDefinitionsPadded()
self.assertTrue(u'00ev1' in result)
self.assertTrue(u'event3' in result)
def test_generateDocumentation_good(self):
"""
The handler can generate a reStructuredText document describing
the events and event groups.
"""
content = '''
{
"groups" : {
"group-1": { "description": ""},
"group-2": { "description": ""}
},
"events" : {
"ev1": {
"message": "something",
"groups": ["group-1", "group-2"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
},
"ev2": {
"message": "something",
"groups": ["group-1"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
},
"event3": {
"message": "something",
"groups": ["group-2"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
template_content = (
'Header\n'
'=======\n'
'{% for group_id in groups %}\n'
'{{ groups[group_id].name }}\n'
'----\n'
'description: {{ groups[group_id].description }}\n'
'\n'
'{% endfor %}\n'
'\n'
'{% for event_id in events %}\n'
'{{ events[event_id].id }}\n'
'----\n'
'message: {{ events[event_id].message }}\n'
'groups: {{ ", ".join(events[event_id].group_names) }}\n'
'\n'
'{% endfor %}\n'
)
config = manufacture.makeEventsDefinition(content=content)
templates_loader = DictLoader(
{'events_documentation.rst': template_content})
jinja_environment = Environment(loader=templates_loader)
template = jinja_environment.get_template('events_documentation.rst')
result = config.generateDocumentation(template=template)
self.assertTrue('ev1' in result)
self.assertTrue('groups: group-1, group-2' in result)
class TestEventsHandler(LogTestCase):
"""
Unit tests for EventsHandler.
"""
def test_init(self):
"""
EventsHandler can be initialized without arguments.
"""
handler = EventsHandler()
self.assertFalse(handler.configured)
with self.assertRaises(AssertionError):
handler.definitions
with self.assertRaises(AssertionError):
handler.enabled_groups
def test_configure(self):
"""
EventsHandler can be configured.
"""
handler = EventsHandler()
definitions = manufacture.makeEventsDefinition()
log_configuration_section = manufacture.makeLogConfigurationSection()
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
self.assertTrue(handler.configured)
self.assertIsNotNone(handler.definitions)
self.assertEqual(
[CONFIGURATION_ALL_LOG_ENABLED_GROUPS], handler.enabled_groups)
def test_removeConfiguration(self):
"""
EventsHandler configurations can be removed using removeConfiguration.
"""
handler = EventsHandler()
definitions = manufacture.makeEventsDefinition()
log_configuration_section = manufacture.makeLogConfigurationSection()
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
self.assertTrue(handler.configured)
handler.removeConfiguration()
self.assertFalse(handler.configured)
def test_emit_without_configuration(self):
"""
If the handler is not configured, all events are logged using only
the data from the event itself; event definitions and other
configuration are not used.
"""
handler = EventsHandler()
message = u'Some message ' + manufacture.getUniqueString()
handler.emit('100', message=message)
self.assertLog(100, regex=u'Some ')
def test_log(self):
"""
The `log` method is kept for the transition period and uses the old
Logger.log interface to emit an event.
"""
handler = EventsHandler()
message = u'Some message ' + manufacture.getUniqueString()
peer = manufacture.makeIPv4Address()
avatar = manufacture.makeFilesystemApplicationAvatar()
data = {}
handler.log(100, message, peer=peer, avatar=avatar, data=data)
log_message = self.popLog()
self.assertEqual(100, log_message[0])
self.assertTrue('Some message' in log_message[1])
self.assertEqual(avatar, log_message[2])
self.assertEqual(peer, log_message[3])
self.assertEqual(data, log_message[4])
def test_emit_with_int_id(self):
"""
When event id is an integer, it will be converted to string.
"""
handler = EventsHandler()
message = u'Some message ' + manufacture.getUniqueString()
with patch.object(handler, 'emitEvent') as patched:
handler.emit(100, message=message)
event = patched.call_args[0][0]
self.assertEqual(u'100', event.id)
def test_emit_with_unicode_id(self):
"""
Events can be emitted with unicode ids.
"""
handler = EventsHandler()
message = u'Some message ' + manufacture.getUniqueString()
with patch.object(handler, 'emitEvent') as patched:
handler.emit(u'100', message=message)
event = patched.call_args[0][0]
self.assertEqual(u'100', event.id)
def test_emit_with_string_id(self):
"""
Events can be emitted with string ids and are converted to unicode.
"""
handler = EventsHandler()
message = u'Some message ' + manufacture.getUniqueString()
with patch.object(handler, 'emitEvent') as patched:
handler.emit('100', message=message)
event = patched.call_args[0][0]
self.assertEqual(u'100', event.id)
def test_emit_unknown_id(self):
"""
Emitting an event with unknown ID will log an error containing
the text of the unknown id.
"""
handler = EventsHandler()
message = u'Some message ' + manufacture.getUniqueString()
definitions = manufacture.makeEventsDefinition()
log_configuration_section = manufacture.makeLogConfigurationSection()
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section,
)
handler.emit(u'100', message=message)
self.assertLog(
1024, regex='Unknown event with id "100"')
def test_emit_with_configuration(self):
"""
If the handler is configured, the logs can be filtered by group.
"""
handler = EventsHandler()
content = '''
{
"groups" : {
"enabled": { "description": ""},
"disabled": { "description": ""}
},
"events" : {
"100": {
"message": "some message",
"groups": ["enabled"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
},
"101": {
"message": "other message",
"groups": ["disabled"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
definitions = manufacture.makeEventsDefinition(content=content)
log_configuration_section = manufacture.makeLogConfigurationSection()
log_configuration_section.enabled_groups = ['enabled']
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
handler.emit('101', message='101some message')
handler.emit('100', message='100some message')
self.assertLog(100, regex="100some m")
def test_emit_with_all(self):
"""
When the 'all' group is enabled, logs are emitted even if their
groups are not explicitly enabled.
"""
handler = EventsHandler()
content = '''
{
"groups" : {
"disabled": { "description": ""}
},
"events" : {
"100": {
"message": "some message",
"groups": ["disabled"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
},
"101": {
"message": "other message",
"groups": ["disabled"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
definitions = manufacture.makeEventsDefinition(content=content)
log_configuration_section = manufacture.makeLogConfigurationSection()
log_configuration_section.enabled_groups = [
CONFIGURATION_ALL_LOG_ENABLED_GROUPS]
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
handler.emit('101', message='101some message')
handler.emit('100', message='100some message')
self.assertLog(101, regex="101some m")
self.assertLog(100, regex="100some m")
def test_emit_message_from_definition_no_data(self):
"""
If no message is passed to emit, the message from the event
definition is used, interpolated with the provided data.
"""
handler = EventsHandler()
content = '''
{
"groups" : {
"group": { "description": "something"}
},
"events" : {
"100": {
"message": "100 %(replace)s some message",
"groups": ["group"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
definitions = manufacture.makeEventsDefinition(content=content)
log_configuration_section = manufacture.makeLogConfigurationSection()
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
handler.emit('100', data={'replace': 'test'})
self.assertLog(100, regex="100 test some m")
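# Illustrative note (not part of the original tests): the interpolation
# checked above is plain Python %-formatting of the definition's message
# with the event data, e.g.
#   '100 %(replace)s some message' % {'replace': 'test'}
#   -> '100 test some message'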
def test_emit_message_from_definition_with_data(self):
"""
When data is provided, the message from the event definition is
interpolated with it.
"""
handler = EventsHandler()
content = '''
{
"groups" : {
"group": { "description": "something"}
},
"events" : {
"100": {
"message": "100 %(data)s some message",
"groups": ["group"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
definitions = manufacture.makeEventsDefinition(content=content)
log_configuration_section = manufacture.makeLogConfigurationSection()
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
data_string = manufacture.getUniqueString()
handler.emit('100', data={'data': data_string})
self.assertLog(100, regex="100 " + data_string + " some m")
def test_emit_message_from_definition_bad_interpolation(self):
"""
When the provided data is missing an interpolation key, an
additional error is logged together with the non-interpolated
message.
"""
handler = EventsHandler()
content = '''
{
"groups" : {
"group": { "description": "something"}
},
"events" : {
"100": {
"message": "100 %(unknown_data)s some message",
"groups": ["group"],
"description": "",
"version_removed": "",
"version_added": "",
"details": "",
"data": ""
}
}
}
'''
definitions = manufacture.makeEventsDefinition(content=content)
log_configuration_section = manufacture.makeLogConfigurationSection()
handler.configure(
definitions=definitions,
log_configuration_section=log_configuration_section)
handler.emit('100', data={'other': u'dontcare'})
self.assertLog(1025)
        self.assertLog(100, regex=r'100 %\(unknown_data\)s some message')
|
|
from __future__ import print_function, division, absolute_import
from ufo2ft.filters import (
getFilterClass,
BaseFilter,
loadFilters,
UFO2FT_FILTERS_KEY,
logger,
)
from fontTools.misc.py23 import SimpleNamespace
from fontTools.misc.loggingTools import CapturingLogHandler
import sys
import pytest
from ..testSupport import _TempModule
class FooBarFilter(BaseFilter):
"""A filter that does nothing."""
_args = ("a", "b")
_kwargs = {"c": 0}
def filter(self, glyph):
return False
@pytest.fixture(scope="module", autouse=True)
def fooBar():
"""Make a temporary 'ufo2ft.filters.fooBar' module containing a
'FooBarFilter' class for testing the filter loading machinery.
"""
with _TempModule("ufo2ft.filters.fooBar") as temp_module:
temp_module.module.__dict__["FooBarFilter"] = FooBarFilter
yield
def test_getFilterClass():
assert getFilterClass("Foo Bar") == FooBarFilter
assert getFilterClass("FooBar") == FooBarFilter
assert getFilterClass("fooBar") == FooBarFilter
with pytest.raises(ImportError):
getFilterClass("Baz")
with _TempModule("myfilters"), _TempModule("myfilters.fooBar") as temp_module:
with pytest.raises(AttributeError):
# this fails because `myfilters.fooBar` module does not
# have a `FooBarFilter` class
getFilterClass("Foo Bar", pkg="myfilters")
temp_module.module.__dict__["FooBarFilter"] = FooBarFilter
# this will attempt to import the `FooBarFilter` class from the
# `myfilters.fooBar` module
assert getFilterClass("Foo Bar", pkg="myfilters") == FooBarFilter
class MockFont(SimpleNamespace):
pass
class MockGlyph(SimpleNamespace):
pass
def test_loadFilters_empty():
ufo = MockFont(lib={})
assert UFO2FT_FILTERS_KEY not in ufo.lib
assert loadFilters(ufo) == ([], [])
@pytest.fixture
def ufo():
ufo = MockFont(lib={})
ufo.lib[UFO2FT_FILTERS_KEY] = [{"name": "Foo Bar", "args": ["foo", "bar"]}]
return ufo
def test_loadFilters_pre(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["pre"] = True
pre, post = loadFilters(ufo)
assert len(pre) == 1
assert not post
assert isinstance(pre[0], FooBarFilter)
def test_loadFilters_custom_namespace(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["name"] = "Self Destruct"
ufo.lib[UFO2FT_FILTERS_KEY][0]["namespace"] = "my_dangerous_filters"
class SelfDestructFilter(FooBarFilter):
        def filter(self, glyph):
# Don't try this at home!!! LOL :)
# shutil.rmtree(os.path.expanduser("~"))
return True
with _TempModule("my_dangerous_filters"), _TempModule(
"my_dangerous_filters.selfDestruct"
) as temp:
temp.module.__dict__["SelfDestructFilter"] = SelfDestructFilter
_, [filter_obj] = loadFilters(ufo)
assert isinstance(filter_obj, SelfDestructFilter)
def test_loadFilters_args_missing(ufo):
del ufo.lib[UFO2FT_FILTERS_KEY][0]["args"]
with pytest.raises(TypeError) as exc_info:
loadFilters(ufo)
assert exc_info.match("missing")
def test_loadFilters_args_unsupported(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["args"].append("baz")
with pytest.raises(TypeError) as exc_info:
loadFilters(ufo)
assert exc_info.match("unsupported")
def test_loadFilters_include_all(ufo):
_, [filter_obj] = loadFilters(ufo)
assert filter_obj.include(MockGlyph(name="hello"))
assert filter_obj.include(MockGlyph(name="world"))
def test_loadFilters_include_list(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["include"] = ["a", "b"]
_, [filter_obj] = loadFilters(ufo)
assert filter_obj.include(MockGlyph(name="a"))
assert filter_obj.include(MockGlyph(name="b"))
assert not filter_obj.include(MockGlyph(name="c"))
def test_loadFilters_exclude_list(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["exclude"] = ["a", "b"]
_, [filter_obj] = loadFilters(ufo)
assert not filter_obj.include(MockGlyph(name="a"))
assert not filter_obj.include(MockGlyph(name="b"))
assert filter_obj.include(MockGlyph(name="c"))
def test_loadFilters_both_include_exclude(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["include"] = ["a", "b"]
ufo.lib[UFO2FT_FILTERS_KEY][0]["exclude"] = ["c", "d"]
with pytest.raises(ValueError) as exc_info:
loadFilters(ufo)
assert exc_info.match("arguments are mutually exclusive")
def test_loadFilters_failed(ufo):
ufo.lib[UFO2FT_FILTERS_KEY].append(dict(name="Non Existent"))
with CapturingLogHandler(logger, level="ERROR") as captor:
loadFilters(ufo)
captor.assertRegex("Failed to load filter")
def test_loadFilters_kwargs_unsupported(ufo):
ufo.lib[UFO2FT_FILTERS_KEY][0]["kwargs"] = {}
ufo.lib[UFO2FT_FILTERS_KEY][0]["kwargs"]["c"] = 1
ufo.lib[UFO2FT_FILTERS_KEY][0]["kwargs"]["d"] = 2 # unknown
with pytest.raises(TypeError) as exc_info:
loadFilters(ufo)
assert exc_info.match("got an unsupported keyword")
def test_BaseFilter_repr():
class NoArgFilter(BaseFilter):
pass
assert repr(NoArgFilter()) == "NoArgFilter()"
assert repr(FooBarFilter("a", "b", c=1)) == ("FooBarFilter('a', 'b', c=1)")
assert (
repr(FooBarFilter("c", "d", include=["x", "y"]))
== "FooBarFilter('c', 'd', c=0, include=['x', 'y'])"
)
assert (
repr(FooBarFilter("e", "f", c=2.0, exclude=("z",)))
== "FooBarFilter('e', 'f', c=2.0, exclude=('z',))"
)
f = lambda g: False
assert repr(
FooBarFilter("g", "h", include=f)
) == "FooBarFilter('g', 'h', c=0, include={})".format(repr(f))
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 112100 if testnet else 12100
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # the fee is whatever the outputs don't claim
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
import sys
import os.path
from pypdevs.infinity import *
from pypdevs.DEVS import AtomicDEVS, CoupledDEVS
class Event(object):
def __init__(self, eventSize):
self.eventSize = eventSize
def __str__(self):
return "Eventsize = " + str(self.eventSize)
class ModelState(object):
def __init__(self):
self.counter = INFINITY
self.event = None
def __str__(self):
return str(self.counter)
def toXML(self):
return "<counter>" + str(self.counter) + "</counter>"
class ProcessorNPP(AtomicDEVS):
def __init__(self, name = "Processor", t_event1 = 1):
AtomicDEVS.__init__(self, name)
self.t_event1 = t_event1
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.state = ModelState()
def timeAdvance(self):
return self.state.counter
def intTransition(self):
self.state.counter -= self.timeAdvance()
if self.state.counter == 0:
self.state.counter = INFINITY
self.state.event = None
return self.state
def extTransition(self, inputs):
self.state.counter -= self.elapsed
ev1 = inputs[self.inport][0]
if ev1 != None:
self.state.event = ev1
self.state.counter = self.t_event1
return self.state
def outputFnc(self):
output = {}
mini = self.state.counter
if self.state.counter == mini:
output[self.outport] = [self.state.event]
return output
class RemoteDCProcessor(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "RemoteDCProcessor")
mod = self.addSubModel(CoupledProcessor(1, 1), 2)
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.connectPorts(self.inport, mod.inport)
self.connectPorts(mod.outport, self.outport)
class Processor(AtomicDEVS):
def __init__(self, name = "Processor", t_event1 = 1):
AtomicDEVS.__init__(self, name)
self.t_event1 = t_event1
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.state = ModelState()
def timeAdvance(self):
return self.state.counter
def intTransition(self):
self.state.counter -= self.timeAdvance()
if self.state.counter == 0:
self.state.counter = INFINITY
self.state.event = None
return self.state
def extTransition(self, inputs):
self.state.counter -= self.elapsed
ev1 = inputs[self.inport][0]
if ev1 != None:
self.state.event = ev1
self.state.counter = self.t_event1
return self.state
def outputFnc(self):
mini = self.state.counter
if self.state.counter == mini:
return {self.outport: [self.state.event]}
else:
return {}
class HeavyProcessor(AtomicDEVS):
def __init__(self, name = "Processor", t_event1 = 1, iterations = 0):
AtomicDEVS.__init__(self, name)
self.t_event1 = t_event1
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.state = ModelState()
self.iterations = iterations
def timeAdvance(self):
return self.state.counter
def intTransition(self):
self.state.counter -= self.timeAdvance()
# Do lots of work now
stupidcounter = 0
for _ in xrange(self.iterations):
pass
if self.state.counter == 0:
self.state.counter = INFINITY
self.state.event = None
return self.state
def extTransition(self, inputs):
self.state.counter -= self.elapsed
ev1 = inputs[self.inport][0]
stupidcounter = 0
for _ in xrange(self.iterations):
pass
if ev1 != None:
self.state.event = ev1
self.state.counter = self.t_event1
return self.state
def outputFnc(self):
mini = self.state.counter
stupidcounter = 0
for _ in xrange(self.iterations):
pass
        if self.state.counter == mini:
            return {self.outport: [self.state.event]}
        else:
            return {}
class NestedProcessor(Processor):
def __init__(self, name = "NestedProcessor"):
Processor.__init__(self, name)
self.state.processed = 0
self.state.event = Event(5)
def extTransition(self, inputs):
self.state = Processor.extTransition(self, inputs)
self.state.processed += 1
return self.state
def timeAdvance(self):
from pypdevs.simulator import Simulator
model = CoupledGenerator()
sim = Simulator(model)
sim.setTerminationTime(self.state.processed)
#sim.setVerbose(True)
sim.simulate()
result = max(sim.model.generator.state.generated, 1)
return result
class Generator(AtomicDEVS):
def __init__(self, name = "Generator", t_gen_event1 = 1.0, binary = False):
AtomicDEVS.__init__(self, name)
self.state = ModelState()
# Add an extra variable
self.state.generated = 0
self.state.counter = t_gen_event1
self.state.value = 1
self.t_gen_event1 = t_gen_event1
self.outport = self.addOutPort("outport")
self.inport = self.addInPort("inport")
self.binary = binary
def timeAdvance(self):
return self.state.counter
def intTransition(self):
self.state.generated += 1
return self.state
def extTransition(self, inputs):
self.state.counter -= self.elapsed
return self.state
def outputFnc(self):
if self.binary:
return {self.outport: ["b1"]}
else:
return {self.outport: [Event(self.state.value)]}
class GeneratorNPP(AtomicDEVS):
def __init__(self, name = "Generator", t_gen_event1 = 1.0):
AtomicDEVS.__init__(self, name)
self.state = ModelState()
# Add an extra variable
self.state.generated = 0
self.state.counter = t_gen_event1
self.t_gen_event1 = t_gen_event1
self.outport = self.addOutPort("outport")
self.inport = self.addInPort("inport")
def timeAdvance(self):
return self.state.counter
def intTransition(self):
self.state.generated += 1
return self.state
def extTransition(self, inputs):
self.state.counter -= self.elapsed
return self.state
def outputFnc(self):
return {self.outport: [Event(1)]}
class CoupledGenerator(CoupledDEVS):
def __init__(self, t_gen_event1 = 1, binary = False):
CoupledDEVS.__init__(self, "CoupledGenerator")
self.generator = self.addSubModel(Generator("Generator", t_gen_event1, binary))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.connectPorts(self.inport, self.generator.inport)
self.connectPorts(self.generator.outport, self.outport)
class CoupledGeneratorNPP(CoupledDEVS):
def __init__(self, t_gen_event1 = 1):
CoupledDEVS.__init__(self, "CoupledGenerator")
self.generator = self.addSubModel(GeneratorNPP("Generator", t_gen_event1))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.connectPorts(self.inport, self.generator.inport)
self.connectPorts(self.generator.outport, self.outport)
class CoupledHeavyProcessor(CoupledDEVS):
def __init__(self, t_event1_P1, levels, iterations, name = None):
if name == None:
name = "CoupledHeavyProcessor_" + str(levels)
CoupledDEVS.__init__(self, name)
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.coupled = []
for i in range(levels):
self.coupled.append(self.addSubModel(HeavyProcessor("Processor" + str(i), t_event1_P1, iterations)))
for i in range(levels-1):
self.connectPorts(self.coupled[i].outport, self.coupled[i+1].inport)
self.connectPorts(self.inport, self.coupled[0].inport)
self.connectPorts(self.coupled[-1].outport, self.outport)
class CoupledProcessorNPP(CoupledDEVS):
def __init__(self, t_event1_P1, levels):
CoupledDEVS.__init__(self, "CoupledProcessor_" + str(levels))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.coupled = []
for i in range(levels):
self.coupled.append(self.addSubModel(ProcessorNPP("Processor" + str(i), t_event1_P1)))
for i in range(levels-1):
self.connectPorts(self.coupled[i].outport, self.coupled[i+1].inport)
self.connectPorts(self.inport, self.coupled[0].inport)
self.connectPorts(self.coupled[-1].outport, self.outport)
class CoupledProcessor(CoupledDEVS):
def __init__(self, t_event1_P1, levels):
CoupledDEVS.__init__(self, "CoupledProcessor_" + str(levels))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.coupled = []
for i in range(levels):
self.coupled.append(self.addSubModel(Processor("Processor" + str(i), t_event1_P1)))
for i in range(levels-1):
self.connectPorts(self.coupled[i].outport, self.coupled[i+1].inport)
self.connectPorts(self.inport, self.coupled[0].inport)
self.connectPorts(self.coupled[-1].outport, self.outport)
class CoupledProcessorMP(CoupledDEVS):
def __init__(self, t_event1_P1):
CoupledDEVS.__init__(self, "CoupledProcessorMP")
self.inport = self.addInPort("inport")
p1 = self.addSubModel(Processor("Processor1", t_event1_P1))
p2 = self.addSubModel(Processor("Processor2", t_event1_P1))
p3 = self.addSubModel(Processor("Processor3", t_event1_P1))
p4 = self.addSubModel(Processor("Processor4", t_event1_P1))
self.connectPorts(self.inport, p1.inport)
self.connectPorts(self.inport, p3.inport)
self.connectPorts(p1.outport, p2.inport)
self.connectPorts(p3.outport, p4.inport)
class Binary(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Binary")
self.generator = self.addSubModel(CoupledGenerator(1.0, True))
self.processor1 = self.addSubModel(CoupledProcessor(0.6, 2), 2)
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3), 1)
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class Binary_local(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Binary")
self.generator = self.addSubModel(CoupledGenerator(1.0, True))
self.processor1 = self.addSubModel(CoupledProcessor(0.6, 2))
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class Chain_local(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "Chain")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor1 = self.addSubModel(CoupledProcessor(ta, 2))
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class Chain(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "Chain")
self.generator = self.addSubModel(CoupledGenerator(1.0), 1)
self.processor1 = self.addSubModel(CoupledProcessor(ta, 2), 2)
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class Boundary(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Boundary")
self.generator = self.addSubModel(CoupledGenerator(1.0), 1)
self.processor1 = self.addSubModel(CoupledProcessor(0.60, 2), 2)
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 4), 3)
self.processor3 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
self.connectPorts(self.processor1.outport, self.processor3.inport)
self.connectPorts(self.processor2.outport, self.processor3.inport)
class Two(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Two")
self.generator = self.addSubModel(CoupledGenerator(1.0), 1)
self.processor1 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
class DualChain_local(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualChain")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor1 = self.addSubModel(CoupledProcessor(ta, 2))
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.generator.outport, self.processor2.inport)
class DualChain(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualChain")
self.generator = self.addSubModel(CoupledGenerator(1.0), 1)
self.processor1 = self.addSubModel(CoupledProcessor(ta, 2), 2)
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.generator.outport, self.processor2.inport)
class DualChainMP_local(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualChainMP")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor1 = self.addSubModel(CoupledProcessorMP(0.66))
self.connectPorts(self.generator.outport, self.processor1.inport)
class DualChainMP(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualChainMP")
self.generator = self.addSubModel(CoupledGenerator(1.0), 1)
self.processor1 = self.addSubModel(CoupledProcessorMP(0.66), 2)
self.connectPorts(self.generator.outport, self.processor1.inport)
class DualDepthProcessor_local(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualDepthProcessor")
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.processor1 = self.addSubModel(CoupledProcessor(ta, 1))
self.processor2 = self.addSubModel(CoupledProcessor(ta, 2))
self.connectPorts(self.inport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
self.connectPorts(self.processor2.outport, self.outport)
class DualDepthProcessor(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualDepthProcessor")
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.processor1 = self.addSubModel(CoupledProcessor(ta, 1), 2)
self.processor2 = self.addSubModel(CoupledProcessor(ta, 2))
self.connectPorts(self.inport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
self.connectPorts(self.processor2.outport, self.outport)
class DualDepth_local(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualDepth")
self.generator = self.addSubModel(Generator("Generator", 1.0))
self.processor = self.addSubModel(DualDepthProcessor_local(ta))
self.connectPorts(self.generator.outport, self.processor.inport)
class DualDepth(CoupledDEVS):
def __init__(self, ta):
CoupledDEVS.__init__(self, "DualDepth")
self.generator = self.addSubModel(Generator("Generator", 1.0))
self.processor = self.addSubModel(DualDepthProcessor(ta), 1)
self.connectPorts(self.generator.outport, self.processor.inport)
class Nested_local(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Nested")
self.generator = self.addSubModel(Generator("Generator", 1))
self.processor = self.addSubModel(NestedProcessor("NProcessor"))
self.connectPorts(self.generator.outport, self.processor.inport)
class MultiNested(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "MultiNested")
self.generator = self.addSubModel(Generator("Generator", 1))
self.processor1 = self.addSubModel(NestedProcessor("NProcessor1"), 1)
self.processor2 = self.addSubModel(NestedProcessor("NProcessor2"), 2)
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class Local(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Local")
self.generator = self.addSubModel(Generator("Generator", 1))
self.processor1 = self.addSubModel(CoupledProcessor(1, 2))
self.processor2 = self.addSubModel(CoupledProcessor(1, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class OptimizableChain(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "OptimizableChain")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor1 = self.addSubModel(CoupledProcessor(0.66, 2), 1)
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3), 2)
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class HugeOptimizableChain(CoupledDEVS):
def __init__(self, iterations):
CoupledDEVS.__init__(self, "HugeOptimizableChain")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor0 = self.addSubModel(CoupledHeavyProcessor(0.77, 10, iterations))
self.processor1 = self.addSubModel(CoupledHeavyProcessor(0.66, 10, iterations), 1)
self.processor2 = self.addSubModel(CoupledHeavyProcessor(0.30, 10, iterations), 2)
self.connectPorts(self.generator.outport, self.processor0.inport)
self.connectPorts(self.processor0.outport, self.processor1.inport)
self.connectPorts(self.processor0.outport, self.processor2.inport)
class HugeOptimizableLocalChain(CoupledDEVS):
def __init__(self, iterations):
CoupledDEVS.__init__(self, "HugeOptimizableChain")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor0 = self.addSubModel(CoupledHeavyProcessor(0.77, 10, iterations))
self.processor1 = self.addSubModel(CoupledHeavyProcessor(0.66, 10, iterations))
self.processor2 = self.addSubModel(CoupledHeavyProcessor(0.30, 10, iterations))
self.connectPorts(self.generator.outport, self.processor0.inport)
self.connectPorts(self.processor0.outport, self.processor1.inport)
self.connectPorts(self.processor0.outport, self.processor2.inport)
class LocalLong(CoupledDEVS):
def __init__(self, name):
CoupledDEVS.__init__(self, name)
self.generator = self.addSubModel(Generator("Generator", 1))
self.processor1 = self.addSubModel(CoupledProcessor(0.66, 20))
self.connectPorts(self.generator.outport, self.processor1.inport)
class ParallelChain(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "ParallelChain")
self.processor1 = self.addSubModel(LocalLong('Local1'), 1)
self.processor2 = self.addSubModel(LocalLong('Local2'), 2)
class ParallelLocalChain(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "ParallelLocalChain")
self.processor1 = self.addSubModel(LocalLong('Local1'))
self.processor2 = self.addSubModel(LocalLong('Local2'))
class ChainNoPeekPoke(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "ChainNoPeekPoke")
self.generator = self.addSubModel(CoupledGeneratorNPP(1.0))
self.processor1 = self.addSubModel(CoupledProcessorNPP(0.66, 2))
self.processor2 = self.addSubModel(CoupledProcessorNPP(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class ChainPeekPoke(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "ChainPeekPoke")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor1 = self.addSubModel(CoupledProcessor(0.66, 2))
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class AutoDistChain(CoupledDEVS):
def __init__(self, nodes, totalAtomics, iterations):
CoupledDEVS.__init__(self, "AutoDistChain")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processors = []
have = 0
ta = 0.66
for i in range(nodes):
shouldhave = (float(i+1) / nodes) * totalAtomics
num = int(shouldhave - have)
have += num
if i == 0:
self.processors.append(self.addSubModel(CoupledHeavyProcessor(ta, num, iterations, "HeavyProcessor_" + str(i))))
else:
self.processors.append(self.addSubModel(CoupledHeavyProcessor(ta, num, iterations, "HeavyProcessor_" + str(i)), i))
self.connectPorts(self.generator.outport, self.processors[0].inport)
for i in range(len(self.processors)-1):
self.connectPorts(self.processors[i].outport, self.processors[i+1].inport)
class RemoteDC(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Root")
self.generator = self.addSubModel(CoupledGenerator(1.0))
self.processor1 = self.addSubModel(RemoteDCProcessor(), 1)
self.processor2 = self.addSubModel(CoupledProcessor(0.30, 3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
class MultipleInputs(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "MultipleInputs")
        self.generator = self.addSubModel(Generator("Generator", 1.0))
self.processors1 = []
for i in xrange(5):
self.processors1.append(self.addSubModel(Processor("1-" + str(i), 0.3), 1))
self.connectPorts(self.generator.outport, self.processors1[-1].inport)
self.processors2 = []
for i in xrange(2):
self.processors2.append(self.addSubModel(Processor("2-" + str(i), 0.3), 2))
for s in self.processors1:
self.connectPorts(s.outport, self.processors2[-1].inport)
class MultipleInputs_local(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "MultipleInputs")
        self.generator = self.addSubModel(Generator("Generator", 1.0))
self.processors1 = []
for i in xrange(5):
self.processors1.append(self.addSubModel(Processor("1-" + str(i), 0.3)))
self.connectPorts(self.generator.outport, self.processors1[-1].inport)
self.processors2 = []
for i in xrange(2):
self.processors2.append(self.addSubModel(Processor("2-" + str(i), 0.3)))
for s in self.processors1:
self.connectPorts(s.outport, self.processors2[-1].inport)
class DoubleLayer1(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Layer1")
self.inport = self.addInPort("inport")
self.processor = self.addSubModel(Processor("Processor", 0.3))
self.connectPorts(self.inport, self.processor.inport)
class DoubleLayer2(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Layer2")
self.lower = self.addSubModel(DoubleLayer1())
self.inport1 = self.addInPort("inport1")
self.inport2 = self.addInPort("inport2")
self.connectPorts(self.inport1, self.lower.inport)
self.connectPorts(self.inport2, self.lower.inport)
class DoubleLayerRoot(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Root")
self.lower = self.addSubModel(DoubleLayer2())
self.generator = self.addSubModel(Generator("Generator", 1))
self.connectPorts(self.generator.outport, self.lower.inport1)
self.connectPorts(self.generator.outport, self.lower.inport2)
class DSDEVSRoot(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Root")
self.submodel = self.addSubModel(GeneratorDS())
self.submodel2 = self.addSubModel(Processor())
self.submodel3 = self.addSubModel(Processor())
self.connectPorts(self.submodel.outport, self.submodel2.inport)
self.connectPorts(self.submodel2.outport, self.submodel3.inport)
self.connectPorts(self.submodel.outport, self.submodel3.inport)
def modelTransition(self, state):
self.removeSubModel(self.submodel2)
self.submodel2 = self.addSubModel(Processor())
self.connectPorts(self.submodel2.outport, self.submodel3.inport)
self.submodel4 = self.addSubModel(CoupledProcessor(0.2, 3))
self.connectPorts(self.submodel3.outport, self.submodel4.inport)
self.submodelX = self.addSubModel(ElapsedNothing())
return False
class ElapsedNothing(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "ElapsedNothing")
self.elapsed = 0.3
self.state = 1
def intTransition(self):
return 0
def timeAdvance(self):
return self.state if self.state > 0 else float('inf')
class GeneratorDS(Generator):
def __init__(self):
Generator.__init__(self, "GEN")
self.elapsed = 0.5
def outputFnc(self):
if self.state.generated < 1:
return Generator.outputFnc(self)
else:
return {}
def modelTransition(self, state):
if self.state.generated == 1:
self.removePort(self.outport)
del self.outport
return self.state.generated == 1
class ClassicGenerator(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "Generator")
self.state = None
self.outport = self.addOutPort("outport")
def intTransition(self):
return None
def outputFnc(self):
return {self.outport: 1}
def timeAdvance(self):
return 1
class ClassicProcessor(AtomicDEVS):
def __init__(self, name):
AtomicDEVS.__init__(self, "Processor_%s" % name)
self.state = None
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
def intTransition(self):
return None
def outputFnc(self):
return {self.outport: self.state}
def extTransition(self, inputs):
self.state = inputs[self.inport]
return self.state
def timeAdvance(self):
return (1.0 if self.state is not None else INFINITY)
class ClassicCoupledProcessor(CoupledDEVS):
def __init__(self, it, namecounter):
CoupledDEVS.__init__(self, "CoupledProcessor_%s_%s" % (it, namecounter))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
if it != 0:
self.subproc = self.addSubModel(ClassicCoupledProcessor(it-1, 0))
else:
self.subproc = self.addSubModel(ClassicProcessor(0))
self.subproc2 = self.addSubModel(ClassicProcessor(1))
if it != 0:
self.subproc3 = self.addSubModel(ClassicCoupledProcessor(it-1, 2))
else:
self.subproc3 = self.addSubModel(ClassicProcessor(2))
self.connectPorts(self.inport, self.subproc.inport)
self.connectPorts(self.subproc.outport, self.subproc2.inport)
self.connectPorts(self.subproc2.outport, self.subproc3.inport)
self.connectPorts(self.subproc3.outport, self.outport)
def select(self, immChildren):
if self.subproc3 in immChildren:
return self.subproc3
elif self.subproc2 in immChildren:
return self.subproc2
elif self.subproc in immChildren:
return self.subproc
else:
return immChildren[0]
class ClassicCoupled(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Coupled")
self.generator = self.addSubModel(ClassicGenerator())
self.processor = self.addSubModel(ClassicCoupledProcessor(3, 0))
self.connectPorts(self.generator.outport, self.processor.inport)
def select(self, immChildren):
if self.processor in immChildren:
return self.processor
else:
return immChildren[0]
class RandomProcessorState(object):
def __init__(self, seed):
from pypdevs.randomGenerator import RandomGenerator
self.randomGenerator = RandomGenerator(seed)
self.queue = []
self.proctime = self.randomGenerator.uniform(0.3, 3.0)
def __str__(self):
return "Random Processor State -- " + str(self.proctime)
class RandomProcessor(AtomicDEVS):
def __init__(self, seed):
AtomicDEVS.__init__(self, "RandomProcessor_" + str(seed))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.state = RandomProcessorState(seed)
def intTransition(self):
self.state.queue = self.state.queue[1:]
self.state.proctime = self.state.randomGenerator.uniform(0.3, 3.0)
return self.state
def extTransition(self, inputs):
if self.state.queue:
self.state.proctime -= self.elapsed
self.state.queue.extend(inputs[self.inport])
return self.state
def outputFnc(self):
return {self.outport: [self.state.queue[0]]}
def timeAdvance(self):
if self.state.queue:
return self.state.proctime
else:
return INFINITY
class RandomCoupled(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Coupled")
self.generator = self.addSubModel(Generator())
self.processor1 = self.addSubModel(RandomProcessor(1), 1)
self.processor2 = self.addSubModel(RandomProcessor(2), 2)
self.processor3 = self.addSubModel(RandomProcessor(3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
self.connectPorts(self.processor2.outport, self.processor3.inport)
class RandomCoupled_local(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Coupled")
self.generator = self.addSubModel(Generator())
self.processor1 = self.addSubModel(RandomProcessor(1))
self.processor2 = self.addSubModel(RandomProcessor(2))
self.processor3 = self.addSubModel(RandomProcessor(3))
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
self.connectPorts(self.processor2.outport, self.processor3.inport)
class Chain_bad(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Chain")
self.generator = self.addSubModel(CoupledGenerator(1.0), 0)
self.processor1 = self.addSubModel(CoupledProcessor(0.66, 2), 1)
self.processor2 = self.addSubModel(CoupledProcessor(0.66, 3), 2)
self.processor3 = self.addSubModel(CoupledProcessor(0.66, 4), 1)
self.processor4 = self.addSubModel(CoupledProcessor(0.66, 5), 0)
self.processor5 = self.addSubModel(CoupledProcessor(0.30, 6), 2)
self.connectPorts(self.generator.outport, self.processor1.inport)
self.connectPorts(self.processor1.outport, self.processor2.inport)
self.connectPorts(self.processor2.outport, self.processor3.inport)
self.connectPorts(self.processor3.outport, self.processor4.inport)
self.connectPorts(self.processor4.outport, self.processor5.inport)
class GeneratorClassic(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "Gen")
self.outport = self.addOutPort("outport")
self.state = True
def intTransition(self):
return False
def outputFnc(self):
return {self.outport: 3}
def timeAdvance(self):
return 1.0 if self.state else INFINITY
class ProcessorClassic1(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "P1")
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.state = None
def intTransition(self):
return None
def extTransition(self, inputs):
return inputs[self.inport]
def outputFnc(self):
return {self.outport: self.state}
def timeAdvance(self):
return 1.0 if self.state is not None else INFINITY
class ProcessorClassic2(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "P2")
self.inport1 = self.addInPort("inport1")
self.inport2 = self.addInPort("inport2")
self.outport = self.addOutPort("outport")
self.state = (None, None)
def intTransition(self):
return (None, None)
def extTransition(self, inputs):
inp1 = inputs.get(self.inport1, None)
inp2 = inputs.get(self.inport2, None)
return (inp1, inp2)
def outputFnc(self):
return {self.outport: self.state}
def timeAdvance(self):
return 1.0 if self.state[0] is not None or self.state[1] is not None else INFINITY
class ProcessorClassicO2(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "PO2")
self.inport = self.addInPort("inport")
self.outport1 = self.addOutPort("outport1")
self.outport2 = self.addOutPort("outport2")
self.state = None
def intTransition(self):
return None
def extTransition(self, inputs):
return inputs[self.inport]
def outputFnc(self):
return {self.outport1: self.state, self.outport2: self.state}
def timeAdvance(self):
return 1.0 if self.state is not None else INFINITY
class ProcessorCoupledClassic(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Coupled")
self.inport1 = self.addInPort("inport1")
self.inport2 = self.addInPort("inport2")
self.outport = self.addOutPort("outport")
self.proc1 = self.addSubModel(ProcessorClassic1())
self.proc2 = self.addSubModel(ProcessorClassic1())
self.connectPorts(self.inport1, self.proc1.inport)
self.connectPorts(self.inport2, self.proc2.inport)
self.connectPorts(self.proc1.outport, self.outport)
self.connectPorts(self.proc2.outport, self.outport)
class AllConnectClassic(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "Root")
self.model1 = self.addSubModel(GeneratorClassic())
self.model2 = self.addSubModel(ProcessorCoupledClassic())
self.model3 = self.addSubModel(ProcessorClassic2())
self.model4 = self.addSubModel(ProcessorClassic1())
self.model5 = self.addSubModel(ProcessorClassicO2())
self.connectPorts(self.model1.outport, self.model2.inport1)
self.connectPorts(self.model1.outport, self.model2.inport2)
self.connectPorts(self.model2.outport, self.model3.inport1)
self.connectPorts(self.model2.outport, self.model3.inport2)
self.connectPorts(self.model3.outport, self.model5.inport)
self.connectPorts(self.model2.outport, self.model4.inport)
self.connectPorts(self.model4.outport, self.model5.inport)
def trans1(inp):
inp.eventSize += 1
return inp
def trans2(inp):
inp.eventSize = 0
return inp
class ZCoupledProcessor(CoupledDEVS):
def __init__(self, num):
CoupledDEVS.__init__(self, "CoupledProcessor_" + str(num))
self.inport = self.addInPort("inport")
self.outport = self.addOutPort("outport")
self.coupled = []
levels = 4
for i in range(levels):
self.coupled.append(self.addSubModel(Processor("Processor" + str(i), 1.0)))
for i in range(levels-1):
self.connectPorts(self.coupled[i].outport, self.coupled[i+1].inport, trans1)
self.connectPorts(self.inport, self.coupled[0].inport)
self.connectPorts(self.coupled[-1].outport, self.outport)
class ZChain_local(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "ROOT")
self.gen = self.addSubModel(Generator())
self.proc1 = self.addSubModel(ZCoupledProcessor(1))
self.proc2 = self.addSubModel(ZCoupledProcessor(2))
self.connectPorts(self.gen.outport, self.proc1.inport)
self.connectPorts(self.gen.outport, self.proc2.inport, trans2)
class ZChain(CoupledDEVS):
def __init__(self):
CoupledDEVS.__init__(self, "ROOT")
self.gen = self.addSubModel(Generator())
self.proc1 = self.addSubModel(ZCoupledProcessor(1), 1)
self.proc2 = self.addSubModel(ZCoupledProcessor(2), 2)
self.connectPorts(self.gen.outport, self.proc1.inport)
self.connectPorts(self.gen.outport, self.proc2.inport, trans2)
|
|
import turtle
import random
turtle.reset()
# Set up the window
turtle.setup(600, 460)
turtle.bgpic("candy_crush_bg.gif")
# Turn off the turtle animations
turtle.tracer(False)
# Variables storing the total numbers of candies in a row/column
# Candies are at coordinate (X, Y) where X is an integer in the range 0...6 and Y is an integer in the range 0...6
num_of_candies_in_row = 7
num_of_candies_in_col = 7
# Variable storing the size of the candies
candy_size = 60
# Variable storing the speed of the candy turtles
candy_speed = 7
# A list storing the file names of the candy images
candy_image = ["candy0.gif", "candy1.gif", "candy2.gif",
"candy3.gif", "candy4.gif", "candy5.gif"]
# Variable storing the total number of types of candies
num_of_candy_type = len(candy_image)
# Variable storing the special candy type value (-1), representing no candy at that location
nothing_in_cell = -1
# A 2D list storing the candy locations on the screen
candy_pos = [[(-110, -180), (-50, -180), (10, -180), (70, -180), (130, -180), (190, -180), (250, -180)],
[(-110, -120), (-50, -120), (10, -120), (70, -120), (130, -120), (190, -120), (250, -120)],
[(-110, -60), (-50, -60), (10, -60), (70, -60), (130, -60), (190, -60), (250, -60)],
[(-110, 0), (-50, 0), (10, 0), (70, 0), (130, 0), (190, 0), (250, 0)],
[(-110, 60), (-50, 60), (10, 60), (70, 60), (130, 60), (190, 60), (250, 60)],
[(-110, 120), (-50, 120), (10, 120), (70, 120), (130, 120), (190, 120), (250, 120)],
[(-110, 180), (-50, 180), (10, 180), (70, 180), (130, 180), (190, 180), (250, 180)]]
# A 2D list storing the candy types at the corresponding locations in the play area
candy_map = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
# A 2D list storing the "candies" turtles
# It will be initialized later and will have the same structure as candy_map
candies = []
# Variables storing the row number and column number of the firstly-clicked candy (for candy swapping)
firstly_clicked_candy_row = -1
firstly_clicked_candy_col = -1
# Variable indicating whether the first candy for swapping has been clicked
# If it is False, that means the user has yet to click on the first candy for swapping;
# If it is True, that means the user has already clicked on the first candy for swapping,
# so the next candy to be clicked is the second candy for swapping
first_candy_clicked = False
# Variable storing the minimum number of adjacent candies needed to form a matched group
minimum_num_of_candies_required_for_matching = 3
# Variable storing the matched candy locations detected by the find_any_matched_group function
matched_group = []
# Variable indicating whether the game is over
end = False
# Variable used by turtle.ontimer() for some delay between two executions of the handle_matched_candies function
delay = 250
# Variable storing the player's score
score = 0
# Turtle showing the player's score
score_turtle = turtle.Turtle()
score_turtle.hideturtle()
score_turtle.up()
score_turtle.goto(-220, -170)
score_turtle.pencolor("gray20")
# Variable storing the time left (in seconds)
time_left = 60
# Turtle showing the time left
time_turtle = turtle.Turtle()
time_turtle.hideturtle()
time_turtle.up()
time_turtle.pencolor("gray20")
time_turtle.goto(-220, -65)
# Add shapes from the gif image files
for i in range(num_of_candy_type):
turtle.addshape(candy_image[i])
def generate_candies_turtles():
# This function uses a for-loop to generate a list of lists of turtles
# with the same structure as candy_map to represent all the candies
global candies
for row in range(num_of_candies_in_row):
# Create an empty list for storing the candy turtles
candy_list = []
for col in range(num_of_candies_in_col):
# Create one turtle for one candy
one_candy = turtle.Turtle()
            # Make the turtle move at a different (non-default) speed
one_candy.speed(candy_speed)
# Move the turtle to the appropriate location and make it face down
# (instead of the default east direction)
one_candy.up()
one_candy.right(90)
one_candy.goto(candy_pos[row][col])
# Append the turtle to the candy_list
candy_list.append(one_candy)
# Add candy_list, which is a list of 7 candy turtles, to another list
# called candies to produce a list of lists of turtles (i.e. a 2D list)
candies.append(candy_list)
def generate_candy_map():
# This function generates the candy map by filling random numbers
# (representing random candy types) into the candy_map 2D list
global candy_map
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
# Randomly generate an integer to represent a random candy type
candy_map[row][col] = random.randrange(num_of_candy_type)
# Regenerate the number until there is no matched group of candies
# as the game should start with no matched group of candies
while len(find_any_matched_group()) != 0:
candy_map[row][col] = random.randrange(num_of_candy_type)
def countdown():
    # This function updates the "time left" display and ends the game when the time is up
global time_left
# Clear the previous "time left" display
time_turtle.clear()
time_turtle.write(str(time_left), align="center", font=("Comic Sans MS", 16, "bold"))
# If there is still some time left, schedule another execution of this function
if time_left > 0:
time_left -= 1
turtle.ontimer(countdown, 1000)
# If the time is up, the game should be over, schedule the execution of game_over
else:
turtle.ontimer(game_over, 1000)
def candy(row, col):
    # This function returns the candy type at the location (row, col)
    # If (row, col) is out of range, nothing_in_cell is returned
if row < 0 or row >= num_of_candies_in_row or col < 0 or col >= num_of_candies_in_col:
return nothing_in_cell
# If not, return the requested value
else:
return candy_map[row][col]
def swap_candies(first_candy_row, first_candy_col, second_candy_row, second_candy_col):
    # This function handles the candy-swapping animation between neighbouring candies
# Save the locations
first_candy_pos = candies[first_candy_row][first_candy_col].pos()
second_candy_pos = candies[second_candy_row][second_candy_col].pos()
turtle.tracer(True)
# Move the first turtle
candies[first_candy_row][first_candy_col].goto(second_candy_pos)
# Move the second turtle
candies[second_candy_row][second_candy_col].goto(first_candy_pos)
turtle.tracer(False)
# Move the turtles back to their original positions
candies[first_candy_row][first_candy_col].goto(first_candy_pos)
candies[second_candy_row][second_candy_col].goto(second_candy_pos)
def highlight_candy(row, col):
# This function highlights the candy at (row, col) using a box
global candies
# Save the current position and heading of the candy
pos = candies[row][col].pos()
heading = candies[row][col].heading()
# Draw a box around it
candies[row][col].setpos(pos[0] - candy_size / 2, pos[1] + candy_size / 2)
candies[row][col].down()
for _ in range(4):
candies[row][col].forward(candy_size)
candies[row][col].left(90)
candies[row][col].up()
# Restore the position and heading of the candy
candies[row][col].setpos(pos)
candies[row][col].setheading(heading)
def is_neighbor(first_candy_row, first_candy_col, second_candy_row, second_candy_col):
# This function checks whether two candies are next to each other
# Input parameters, in this order, are:
# - row of firstly-clicked candy
# - column of firstly-clicked candy
# - row of secondly-clicked candy
# - column of secondly-clicked candy
# It returns True (candies are next to each other) or False (candies are not next to each other)
# If the clicked candies are in the same row, check whether their column numbers are adjacent
if first_candy_row == second_candy_row and \
(first_candy_col == second_candy_col - 1 or first_candy_col == second_candy_col + 1):
return True
# If the clicked candies are in the same column, check whether their row numbers are adjacent
if first_candy_col == second_candy_col and \
(first_candy_row == second_candy_row - 1 or first_candy_row == second_candy_row + 1):
return True
# If the clicked candies are not next to each other, the function returns False
return False
def select_candies(x, y):
    # This function is run when any candy turtle is clicked. It handles the swapping of candies
# Input parameters: x, y
# - the x, y coordinates of the clicked location
global firstly_clicked_candy_row, firstly_clicked_candy_col, candy_map, first_candy_clicked
# Get the row and column numbers of the clicked candy
currently_clicked_candy_row = round((y - candy_pos[0][0][1]) / candy_size)
currently_clicked_candy_col = round((x - candy_pos[0][0][0]) / candy_size)
# If the player just clicked on the second candy (i.e. first_candy_clicked is already set to True)
if first_candy_clicked:
# If the clicked candies are next to each other
if is_neighbor(firstly_clicked_candy_row, firstly_clicked_candy_col, currently_clicked_candy_row, currently_clicked_candy_col):
# Swap the values of the two clicked candies in candy_map
candy_map[firstly_clicked_candy_row][firstly_clicked_candy_col], candy_map[currently_clicked_candy_row][currently_clicked_candy_col] = \
candy_map[currently_clicked_candy_row][currently_clicked_candy_col], candy_map[firstly_clicked_candy_row][firstly_clicked_candy_col]
# If there is any matched group of candies handle the swapping and matching
if len(find_any_matched_group()) > 0:
swap_candies(firstly_clicked_candy_row, firstly_clicked_candy_col, currently_clicked_candy_row, currently_clicked_candy_col)
display_candies()
disable_clicking()
turtle.ontimer(handle_matched_candies, delay)
# If not, undo the swap
else:
candy_map[firstly_clicked_candy_row][firstly_clicked_candy_col], candy_map[currently_clicked_candy_row][currently_clicked_candy_col] = \
candy_map[currently_clicked_candy_row][currently_clicked_candy_col], candy_map[firstly_clicked_candy_row][firstly_clicked_candy_col]
# Clear the candy highlight
candies[firstly_clicked_candy_row][firstly_clicked_candy_col].clear()
# Restore the variables to their original values, so that the player can select another pair of candies for swapping
first_candy_clicked = False
firstly_clicked_candy_row = -1
firstly_clicked_candy_col = -1
# If the player just clicked on the first candy (i.e. first_candy_clicked is False before clicking)
else:
# Store True in first_candy_clicked to indicate that the first candy is clicked
first_candy_clicked = True
# Store coordinates of the firstly-clicked candy in the corresponding variables
firstly_clicked_candy_row = currently_clicked_candy_row
firstly_clicked_candy_col = currently_clicked_candy_col
# Highlight the candy
highlight_candy(firstly_clicked_candy_row, firstly_clicked_candy_col)
def find_any_matched_group():
    # This function finds every candy that belongs to a matched group (i.e.
    # "minimum_num_of_candies_required_for_matching" or more identical
    # adjacent candies in a row or a column) and returns their (row, col)
    # coordinates as a list
matched_group = []
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
# No need to check if there is no candy here
if candy(row, col) == nothing_in_cell:
continue
# Find the row below and above which matches the candy in a column
matching_row_below = row
while candy(row, col) == candy(matching_row_below, col):
matching_row_below -= 1
matching_row_above = row
while candy(row, col) == candy(matching_row_above, col):
matching_row_above += 1
# If the candy is within a matching group, add it to the list
if matching_row_above - matching_row_below - 1 >= minimum_num_of_candies_required_for_matching:
matched_group.append((row, col))
continue
# Find the column on the left and right which matches the candy in a row
matching_col_left = col
while candy(row, col) == candy(row, matching_col_left):
matching_col_left -= 1
matching_col_right = col
while candy(row, col) == candy(row, matching_col_right):
matching_col_right += 1
# If the candy is within a matching group, add it to the list
if matching_col_right - matching_col_left - 1 >= minimum_num_of_candies_required_for_matching:
matched_group.append((row, col))
return matched_group
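# Worked example of the counting above: for a vertical run of exactly three
# identical candies at rows r, r+1 and r+2, scanning from the cell at row r+1
# leaves matching_row_below == r-1 and matching_row_above == r+3, so the run
# length is matching_row_above - matching_row_below - 1 == 3.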
def eliminate_matched_candies():
# This function eliminates the matched candies in candy_map
global candy_map
# Change the values in candy_map of the matched candies to nothing_in_cell
for row, col in matched_group:
candy_map[row][col] = nothing_in_cell
def update_score(num_of_matched_candies):
# This function calculates the score and updates the score display
global score
# Calculate the new score
score += num_of_matched_candies * 100
# Clear the previous score and write the new one
score_turtle.clear()
score_turtle.goto(-220, -135)
score_turtle.write("Score:", align="center", font=("Comic Sans MS", 16, "bold"))
score_turtle.goto(-220, -175)
score_turtle.write(str(score), align="center", font=("Comic Sans MS", 16, "bold"))
def move_candies_down_to_fill_gap():
# This function moves candies down from the top to fill the gap ('empty' space)
# produced by the matched candies which have just been eliminated
global candy_map
# Replace the 'empty' space with candies from above in a column
for col in range(num_of_candies_in_col):
# Find the first empty row from below
start_empty_row = 0
while candy(start_empty_row, col) != nothing_in_cell:
start_empty_row += 1
# Find the first empty row from above
end_empty_row = num_of_candies_in_row - 1
while candy(end_empty_row, col) != nothing_in_cell:
end_empty_row -= 1
        # If there is an empty gap in the column
if end_empty_row >= start_empty_row:
rows_to_fill = end_empty_row - start_empty_row + 1
for row in range(start_empty_row, num_of_candies_in_row):
# Replace the space with candy from above
candy_map[row][col] = candy(row + rows_to_fill, col)
# Move the candy to prepare for the falling down animation
candies[row][col].backward(candy_size * rows_to_fill)
update_candy_type()
display_candies()
# Move the involved candies down with tracer set to True for the "new candies falling down from the top" animation
turtle.tracer(True)
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
pos = candies[row][col].pos()
# If the candy is not in the correct position, move it
            if pos != candy_pos[row][col]:
candies[row][col].goto(candy_pos[row][col])
turtle.tracer(False)
def update_candy_type():
# This function generates new candies for the 'empty' space in the candy_map
global candy_map
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
# Check if the candy is 'empty', and hence new candy generation is required
if candy_map[row][col] == nothing_in_cell:
# We generate a random number so that
# 1) there is a 50% chance that we simply fill the empty space with any type of candy without any consideration (when rand is 0)
# 2) there is a 50% chance that we generate a candy type with some consideration (refer to the following part) (when rand is 1)
rand = random.randrange(2)
                # If rand equals 1 (True)
if rand:
                    # The following code checks whether there is a suitable candy type for candy_map[row][col] by
                    # inspecting the candy types of the candies around it. If a particular candy type placed here
                    # could be swapped to an adjacent location to form a matched group, we assign that
                    # candy type to candy_map[row][col] to increase the number of possible matched groups
if (candy(row-1, col-1) == candy(row-1, col+1)) and (candy(row-1, col-1) != -1):
candy_map[row][col] = candy(row-1, col-1)
elif (candy(row+1, col-1) == candy(row+1, col+1)) and (candy(row+1, col-1) != -1):
candy_map[row][col] = candy(row+1, col-1)
elif (candy(row-2, col) == candy(row-3, col)) and (candy(row-2, col) != -1):
candy_map[row][col] = candy(row-2, col)
elif (candy(row+2, col) == candy(row+3, col)) and (candy(row+2, col) != -1):
candy_map[row][col] = candy(row+2, col)
elif (candy(row, col-2) == candy(row, col-3)) and (candy(row, col-2) != -1):
candy_map[row][col] = candy(row, col-2)
elif (candy(row, col+2) == candy(row, col+3)) and (candy(row, col+2) != -1):
candy_map[row][col] = candy(row, col+2)
elif (candy(row-1, col-1) == candy(row-1, col-2)) and (candy(row-1, col-1) != -1):
candy_map[row][col] = candy(row-1, col-1)
elif (candy(row-1, col+1) == candy(row-1, col+2)) and (candy(row-1, col+1) != -1):
candy_map[row][col] = candy(row-1, col+1)
elif (candy(row+1, col-1) == candy(row+1, col-2)) and (candy(row+1, col-1) != -1):
candy_map[row][col] = candy(row+1, col-1)
elif (candy(row+1, col+1) == candy(row+1, col+2)) and (candy(row+1, col+1) != -1):
candy_map[row][col] = candy(row+1, col+1)
elif (candy(row-1, col-1) == candy(row-2, col-1)) and (candy(row-1, col-1) != -1):
candy_map[row][col] = candy(row-1, col-1)
elif (candy(row+1, col-1) == candy(row+2, col-1)) and (candy(row+1, col-1) != -1):
candy_map[row][col] = candy(row+1, col-1)
elif (candy(row-1, col+1) == candy(row-2, col+1)) and (candy(row-1, col+1) != -1):
candy_map[row][col] = candy(row-1, col+1)
elif (candy(row+1, col+1) == candy(row+2, col+1)) and (candy(row+1, col+1) != -1):
candy_map[row][col] = candy(row+1, col+1)
else:
# Just fill the space with any type of candy
candy_map[row][col] = random.randrange(num_of_candy_type)
                # If rand equals 0 (False)
else:
# Just fill the space with any type of candy
candy_map[row][col] = random.randrange(num_of_candy_type)
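# Illustrative case for the first branch of the heuristic above: if the cells
# at (row-1, col-1) and (row-1, col+1) hold the same candy type, filling
# (row, col) with that type lets the player later swap it down to (row-1, col),
# completing a horizontal triple on row-1 (assuming the usual match-3 minimum).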
def display_candies():
# This function updates the visual display of the candies
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
# Change the shape of the turtle according to the candy type
candies[row][col].shape(candy_image[candy(row, col)])
turtle.update()
def enable_clicking():
# This function enables the onclick event handlers of the 'candies' turtles
global candies
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
candies[row][col].onclick(select_candies)
def disable_clicking():
# This function disables the onclick event handlers of the 'candies' turtles
global candies
for row in range(num_of_candies_in_row):
for col in range(num_of_candies_in_col):
candies[row][col].onclick(None)
def handle_matched_candies():
# This function calls other functions to make an effect of
# clearing the matched candies, moving down the candies to fill the
# gap ('empty' space) created, generating new candies at the top
# and then displaying the new candies on the screen
global matched_group
if not end:
matched_group = find_any_matched_group()
num_of_matched_candies = len(matched_group)
update_score(num_of_matched_candies)
eliminate_matched_candies()
move_candies_down_to_fill_gap()
# Check if there is any more matched group
# If yes, call this function again for further processing
# of those matched groups
if len(find_any_matched_group()) > 0:
turtle.ontimer(handle_matched_candies, delay)
else:
enable_clicking()
def game_over():
# This function disables the click events of the 'candies' turtles and shows a message when the game is over
global end
end = True
message = "Your final score was " + str(score) + "!"
turtle.up()
turtle.goto(1, -2)
turtle.pencolor("black")
turtle.write(message, align="center", font=("Arial", 30, "bold"))
turtle.goto(0, 0)
turtle.pencolor("white")
turtle.write(message, align="center", font=("Arial", 30, "bold"))
disable_clicking()
def start_game(x, y):
# This function is called when the player clicks on the start button to start the game
# It calls other functions for preparing and starting the game
# Clear the instruction screen first
instruction_turtle.clear()
instruction_turtle.pencolor("gray20")
instruction_turtle.goto(-220, -25)
instruction_turtle.write("Time:", align="center", font=("Comic Sans MS", 16, "bold"))
turtle.onscreenclick(None)
# Then, call other functions to start the game
generate_candies_turtles()
generate_candy_map()
display_candies()
enable_clicking()
countdown()
update_score(0)
# The following is for drawing the instruction screen with a start button
# Hide the default turtle
turtle.hideturtle()
# Write the game instructions with shadow
def write_instruction(offsetx, offsety, color):
instructions = [["Remove candies by matching three or",
"more candies of the same colour in",
"either a row or a column"],
["Swap any two adjacent candies by",
"clicking on them"],
["The larger number of matched candies",
"the higher mark you get"]]
instruction_turtle.goto(-120 + offsetx, 160 + offsety)
instruction_turtle.color(color)
for instruction in instructions:
for line in instruction:
instruction_turtle.write(line, font=("Comic Sans MS", 15, "bold"))
instruction_turtle.sety(instruction_turtle.ycor() - 30)
instruction_turtle.sety(instruction_turtle.ycor() - 20)
instruction_turtle.goto(70, instruction_turtle.ycor() - 30)
instruction_turtle.write("Click anywhere on", align="center", font=("Comic Sans MS", 20, "bold"))
instruction_turtle.sety(instruction_turtle.ycor() - 40)
instruction_turtle.write("the screen to start!", align="center", font=("Comic Sans MS", 20, "bold"))
instruction_turtle = turtle.Turtle()
instruction_turtle.up()
instruction_turtle.hideturtle()
write_instruction(1, -2, "black")
write_instruction(0, 0, "yellow")
# Set up the event handler for the game start
turtle.onscreenclick(start_game)
# Update the screen to show everything we instructed the turtle to draw so far
turtle.update()
# The player can now interact with the game interface via the event handlers to play the game
turtle.done()
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from collections import OrderedDict
from urllib.parse import unquote
HEAD_PROPERTIES = { # Convert response headers to properties.
'Last-Modified': 'lastModified',
'ocp-creation-time': 'creationTime',
'ocp-batch-file-isdirectory': 'isDirectory',
'ocp-batch-file-url': 'url',
'ocp-batch-file-mode': 'fileMode',
'Content-Length': 'contentLength',
'Content-Type': 'contentType'
}
def _file_list_table_format(result):
"""Format file list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Name'] = item['name']
table_row['URL'] = item['url']
table_row['Is Directory'] = str(item['isDirectory'])
table_row['Content Length'] = str(item['properties']['contentLength']) \
if item['properties'] else ""
table_row['Creation Time'] = item['properties']['creationTime'] \
if item['properties'] else ""
table_output.append(table_row)
return table_output
def _account_key_table_format(result):
"""Format account keys as a table."""
table_output = []
table_row = OrderedDict()
table_row['Number'] = 'Primary'
table_row['Key'] = result['primary']
table_output.append(table_row)
table_row = OrderedDict()
table_row['Number'] = 'Secondary'
table_row['Key'] = result['secondary']
table_output.append(table_row)
return table_output
def transform_response_headers(result):
"""Extract and format file property headers from ClientRawResponse object"""
properties = {HEAD_PROPERTIES[k]: v for k, v in result.headers.items()
if k in HEAD_PROPERTIES}
if properties.get('url'):
properties['url'] = unquote(properties['url'])
return properties
def task_file_list_table_format(result):
"""Format task file list as a table."""
return _file_list_table_format(result)
def node_file_list_table_format(result):
"""Format node file list as a table."""
return _file_list_table_format(result)
def application_list_table_format(result):
"""Format application list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Id'] = item['id']
table_row['Default Version'] = item['defaultVersion']
table_row['Allow Updates'] = item['allowUpdates']
table_row['Version Count'] = str(len(item['packages'])) if item['packages'] else '0'
table_output.append(table_row)
return table_output
def application_summary_list_table_format(result):
"""Format application summary list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Application Id'] = item['id']
table_row['Display Name'] = item['displayName']
table_row['Versions'] = json.dumps(item['versions'])
table_output.append(table_row)
return table_output
def account_list_table_format(result):
"""Format account list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Name'] = item['name']
table_row['Location'] = item['location']
table_row['Resource Group'] = item['resourceGroup']
table_output.append(table_row)
return table_output
def account_keys_list_table_format(result):
"""Format account keys list as a table."""
return _account_key_table_format(result)
def account_keys_renew_table_format(result):
"""Format account keys renew as a table."""
return _account_key_table_format(result)
def certificate_list_table_format(result):
"""Format certificate list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Thumbprint'] = item['thumbprint']
table_row['State'] = item['state']
table_row['Previous State'] = item['previousState']
table_row['Deletion Error'] = 'True' if item['deleteCertificateError'] else 'False'
table_output.append(table_row)
return table_output
def job_list_table_format(result):
"""Format job list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Job Id'] = item['id']
table_row['State'] = item['state']
table_row['Previous State'] = item['previousState']
table_row['Execution Pool'] = item['executionInfo']['poolId'] \
if item['executionInfo'] else ""
table_output.append(table_row)
return table_output
def job_prep_release_status_list_table_format(result):
"""Format job prep-release-status list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Pool Id'] = item['poolId']
table_row['Node Id'] = item['nodeId']
table_row['Job Prep State'] = item['jobPreparationTaskExecutionInfo']['state'] \
if item['jobPreparationTaskExecutionInfo'] else ""
table_row['Job Release State'] = item['jobReleaseTaskExecutionInfo']['state'] \
if item['jobReleaseTaskExecutionInfo'] else ""
table_output.append(table_row)
return table_output
def job_schedule_list_table_format(result):
"""Format job-schedule list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Job Schedule Id'] = item['id']
table_row['State'] = item['state']
table_row['Previous State'] = item['previousState']
table_output.append(table_row)
return table_output
def node_list_table_format(result):
"""Format node list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Node Id'] = item['id']
table_row['State'] = item['state']
table_row['VM Size'] = item['vmSize']
table_row['IP Address'] = item['ipAddress']
table_output.append(table_row)
return table_output
def pool_node_agent_skus_list_table_format(result):
"""Format pool node-agent-skus list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Agent Id'] = item['id']
table_row['Publisher'] = item['publisher']
table_row['Offer'] = item['offer']
table_row['Sku'] = item['sku']
table_output.append(table_row)
return table_output
def pool_list_table_format(result):
"""Format pool list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Pool Id'] = item['id']
table_row['State'] = item['state']
table_row['Allocation State'] = item['allocationState']
table_row['VM Size'] = item['vmSize']
table_row['Dedicated VM Count'] = item['currentDedicatedNodes']
table_row['Low Priority VM Count'] = item['currentLowPriorityNodes']
table_row['Type'] = 'IaaS' if item['virtualMachineConfiguration'] else 'PaaS'
table_output.append(table_row)
return table_output
def pool_usage_metrics_list_table_format(result):
"""Format pool usage-metrics list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Pool Id'] = item['poolId']
table_row['Start Time'] = item['startTime'] if item['startTime'] else ""
table_row['End Time'] = item['endTime'] if item['endTime'] else ""
table_row['VM Size'] = item['vmSize']
table_row['Total Core Hours'] = str(item['totalCoreHours'])
table_output.append(table_row)
return table_output
def task_list_table_format(result):
"""Format task list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Task Id'] = item['id']
table_row['State'] = item['state']
table_row['Exit Code'] = str(item['executionInfo']['exitCode']) \
if item['executionInfo'] else ""
table_row['Node Id'] = item['nodeInfo']['nodeId'] if item['nodeInfo'] else ""
table_row['Command Line'] = item['commandLine']
table_output.append(table_row)
return table_output
def task_create_table_format(result):
"""Format task create as a table."""
table_output = []
if not isinstance(result, list):
table_row = OrderedDict()
table_row['Task Id'] = result['id']
table_row['Submission Status'] = "success"
table_output.append(table_row)
else:
for item in result:
table_row = OrderedDict()
table_row['Task Id'] = item['taskId']
table_row['Submission Status'] = item['status']
table_row['Error'] = item['error']['code'] if item['error'] else ""
table_output.append(table_row)
return table_output
def list_pool_node_counts_table_format(result):
"""Format account list pool node counts result as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Pool Id'] = item['poolId']
table_row['Dedicated Starting'] = str(item['dedicated']['starting'])
table_row['Dedicated Idle'] = str(item['dedicated']['idle'])
table_row['Dedicated Running'] = str(item['dedicated']['running'])
table_row['Dedicated Total'] = str(item['dedicated']['total'])
table_row['LowPri Starting'] = str(item['lowPriority']['starting'])
table_row['LowPri Idle'] = str(item['lowPriority']['idle'])
table_row['LowPri Running'] = str(item['lowPriority']['running'])
table_row['LowPri Total'] = str(item['lowPriority']['total'])
table_output.append(table_row)
return table_output
def list_supported_images_table_format(result):
"""Format account list node agent skus result as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['OS Type'] = item['osType']
table_row['Node Agent Sku'] = item['nodeAgentSkuId']
table_row['Publisher'] = item['imageReference']['publisher']
table_row['Offer'] = item['imageReference']['offer']
table_row['Sku'] = item['imageReference']['sku']
table_row['Version'] = item['imageReference']['version']
table_row['VerificationType'] = item['verificationType']
table_output.append(table_row)
return table_output
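# --- Illustrative usage (an addition for clarity; not part of the original
# CLI module). "FakeResponse" below is a hypothetical stand-in for the raw
# response object whose .headers dict transform_response_headers reads. ---
if __name__ == '__main__':
    class FakeResponse(object):
        headers = {
            'Content-Length': '1024',
            'ocp-batch-file-url': 'https://example.com/my%20file.txt',
            'X-Unrelated': 'dropped',  # not in HEAD_PROPERTIES, so ignored
        }

    # Only recognised headers survive, renamed and URL-decoded:
    # {'contentLength': '1024', 'url': 'https://example.com/my file.txt'}
    print(transform_response_headers(FakeResponse()))

    # The table formatters expect already-deserialized JSON payloads:
    sample_files = [{'name': 'stdout.txt',
                     'url': 'https://example.com/stdout.txt',
                     'isDirectory': False,
                     'properties': {'contentLength': 42,
                                    'creationTime': '2017-01-01T00:00:00Z'}}]
    print(task_file_list_table_format(sample_files))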
|
|
from utils import (mkdir_p, print_fancy, Logger, strip_separators_in_the_end)
from utils.pipeline_aux import (read_public_images, check_img_type, check_path_and_landmarks, load_images,
check_initial_path, im_read_greyscale)
from utils.path_and_folder_definition import * # import paths for databases, folders and libraries
from utils.clip import Clip
from joblib import Parallel, delayed
from shutil import copy2
from menpo.io import export_pickle, import_landmark_file, export_landmark_file
from menpo.transform import PiecewiseAffine
from menpo.feature import fast_dsift
# imports for GN-DPM builder/fitter:
from menpofit.aam import PatchAAM
from menpofit.aam import LucasKanadeAAMFitter
features = fast_dsift
patch_shape = (18, 18)
crop = 0.2 # crop when loading images from databases.
pix_thres = 250
fitter = []
def main_for_ps_aam(path_clips, in_ln_fol, out_ln_fol, out_model_fol, loop=False, mi=110, d_aam=130,
in_ln_fit_fol=None, max_helen=220, max_cl_e=60, n_shape=None, n_appearance=None,
out_ln_svm=None, patch_s_svm=(14, 14), pix_th_svm=170):
"""
Main function for the person specific (part-based) AAM.
Processes a batch of clips in the same folder. Creates the dictionary with the paths, the SVM params,
loads the images from public datasets and then calls the processing per clip.
    * SVM * : The SVM part is not required; it is additional functionality
    provided for pruning landmarks that are not well localised. In other words,
    the SVM acts as a failure checker: a pre-trained classifier on images with landmarks.
:param path_clips: str: Base path that contains the frames/lns folders.
:param in_ln_fol: str: Folder name for importing landmarks.
:param out_ln_fol: str: Folder name for exporting landmarks after AAM fit.
:param out_model_fol: str: Folder name for exporting the AAM (pickled file).
:param loop: bool: (optional) Declares whether this is a 2nd fit for AAM (loop).
:param mi: int: (optional) Max images of the clip loaded for the pbaam.
:param d_aam: int: (optional) Diagonal of AAM (param in building it).
:param in_ln_fit_fol: str: (optional) Folder name for importing during fitting (loop case).
:param max_helen: int: (optional) Max images of the helen dataset to be loaded.
:param max_cl_e: int: (optional) Max images of the 'close eyes' dataset to be loaded.
:param n_shape: int/list/None: (optional) Number of shapes for AAM (as expected in menpofit).
:param n_appearance: int/list/None: (optional) Number of appearances for AAM (as expected in menpofit).
:param out_ln_svm: str: (optional) Folder name for exporting landmarks after SVM (if applicable).
:param patch_s_svm: tuple: (optional) Patch size for SVM (if applicable).
:param pix_th_svm: int: (optional) Pixel threshold for resizing images in SVM classification.
:return:
"""
# loop: whether this is the 1st or the 2nd fit (loop).
# define a dictionary for the paths
assert(isinstance(n_shape, (int, float, type(None), list))) # allowed values in menpofit.
assert(isinstance(n_appearance, (int, float, type(None), list)))
paths = {}
paths['clips'] = path_clips
paths['in_lns'] = path_clips + in_ln_fol # existing bbox of detection
paths['out_lns'] = path_clips + out_ln_fol
    paths['out_model'] = mkdir_p(path_clips + out_model_fol) # path where trained models will be saved.
paths['in_fit_lns'] = (path_clips + in_ln_fit_fol) if in_ln_fit_fol else paths['in_lns']
paths['out_svm'] = (path_clips + out_ln_svm) if out_ln_svm else None
# save the svm params in a dict in case they are required.
# See the doc in the beginning of the function for SVM.
svm_params = {}
svm_params['apply'] = True if out_ln_svm else False # True only if user provided path for output.
# load pickled files for classifier and reference frame.
if svm_params['apply']:
print('Option to classify (non-)faces with SVM is activated.')
svm_params['feat'] = features
svm_params['patch_s'] = patch_s_svm
name_p = features.__name__ + '_' + str(patch_s_svm[0]) + '_' + str(crop) + '_' + \
str(pix_th_svm) + '_' + 'helen_' + 'ibug_' + 'lfpw'
path_pickle_svm = path_pickles + 'general_svm' + sep
if not os.path.isdir(path_pickle_svm):
raise RuntimeError('This path ({}) should contain the pickled file '
'for the SVM and the reference frame.'.format(path_pickle_svm))
from sklearn.externals import joblib
svm_params['clf'] = joblib.load(path_pickle_svm + name_p + '.pkl')
svm_params['refFrame'] = joblib.load(path_pickle_svm + name_p + '_refFrame.pkl')
# Log file output.
log = mkdir_p(path_clips + 'logs' + sep) + datetime.now().strftime("%Y.%m.%d.%H.%M.%S") + \
'_' + basename(__file__) + '.log'
sys.stdout = Logger(log)
print_fancy('Building GN-DPMs for the clips')
# read the training images from the public databases (ibug, helen)
tr_images = _aux_read_public_images(path_to_ibug, 130, [])
tr_images = _aux_read_public_images(path_to_helen, max_helen, tr_images)
tr_images = _aux_read_public_images(path_closed_eyes, max_cl_e, tr_images)
list_clips = sorted(os.listdir(path_clips + frames))
# assumption that all clips have the same extension, otherwise run in the loop for each clip separately:
img_type = check_img_type(list_clips, path_clips + frames)
t = [process_clip(clip_name, paths, tr_images, img_type, loop, svm_params, mi=mi, d_aam=d_aam,
n_s=n_shape, n_a=n_appearance) for clip_name in list_clips
if not(clip_name in list_done) and os.path.isdir(path_clips + frames + clip_name)]
def process_frame(frame_name, clip, img_type, svm_p, loop=False):
"""
Applies the AAM fitter (global var) in a frame. Additionally, it might apply an
SVM to verify it's a face if required.
:param frame_name: str: Name of the frame along with extension, e.g. '000001.png'.
:param clip: str: Name of the clip.
:param img_type: str: Suffix (extension) of the frames, e.g. '.png'.
:param svm_p: dict: Required params for SVM classification.
:param loop: bool: (optional) Declares whether this is a 2nd fit for AAM (loop).
:return:
"""
global fitter
name = frame_name[:frame_name.rfind('.')]
p0 = clip.path_read_ln[0] + name + '_0.pts'
# find if this is 2nd fit or 1st.
if loop: # if 2nd fit, then if landmark is 'approved', return. Otherwise proceed.
try:
ln = import_landmark_file(p0)
copy2(p0, clip.path_write_ln[0] + name + '_0.pts')
return # if the landmark already exists, return (for performance improvement)
except ValueError:
pass
try:
ln = import_landmark_file(clip.path_read_ln[1] + name + '_0.pts')
except ValueError: # either not found or no suitable importer
return
else:
try:
ln = import_landmark_file(p0)
except ValueError: # either not found or no suitable importer
return
im = im_read_greyscale(frame_name, clip.path_frames, img_type)
if not im:
return
im.landmarks['PTS2'] = ln
# fitting can be faster if the image is cropped in advance, though
# you should save the transform to get back to the original shape,
# hence here we just leave the original image.
fr = fitter.fit_from_shape(im, im.landmarks['PTS2'].lms)
p_wr = clip.path_write_ln[0] + im.path.stem + '_0.pts'
im.landmarks['ps_pbaam'] = fr.final_shape
export_landmark_file(im.landmarks['ps_pbaam'], p_wr, overwrite=True)
# apply SVM classifier by extracting patches (is face or not).
if not svm_p['apply']:
return
im_cp = im.crop_to_landmarks_proportion(0.2, group='ps_pbaam')
im_cp = svm_p['feat'](im_cp)
im2 = warp_image_to_reference_shape(im_cp, svm_p['refFrame'], 'ps_pbaam')
_p_nd = im2.extract_patches_around_landmarks(group='source', as_single_array=True,
patch_shape=svm_p['patch_s']).flatten()
if svm_p['clf'].decision_function(_p_nd) > 0:
copy2(p_wr, clip.path_write_ln[1] + im.path.stem + '_0.pts')
def process_clip(clip_name, paths, training_images, img_type, loop, svm_params,
mi=110, d_aam=130, n_s=None, n_a=None):
"""
Processes a clip. Accepts a clip (along with its params and paths), trains a person-specific
part based AAM (pbaam) and then fits it to all the frames.
:param clip_name: str: Name of the clip.
:param paths: dict: Required paths for training/fitting/exporting data.
:param training_images: list: List of menpo images (generic images) appended to the person specific ones.
:param img_type: str: Suffix (extension) of the frames, e.g. '.png'.
:param loop: bool: Declares whether this is a 2nd fit for AAM (loop).
:param svm_params: dict: Required params for SVM classification. If 'apply' is False,
the rest are not used. Otherwise, requires reference frame and classifier loaded.
:param mi: int: (optional) Max images of the clip loaded for the pbaam.
:param d_aam: int: (optional) Diagonal of AAM (param in building it).
:param n_s: int/list/None: (optional) Number of shapes for AAM (as expected in menpofit).
:param n_a: int/list/None: (optional) Number of appearances for AAM (as expected in menpofit).
:return:
"""
global fitter
# paths and list of frames
frames_path = paths['clips'] + frames + clip_name + sep
if not check_path_and_landmarks(frames_path, clip_name, paths['in_lns'] + clip_name):
return False
list_frames = sorted(os.listdir(frames_path))
pts_p = mkdir_p(paths['out_lns'] + clip_name + sep)
    svm_p = mkdir_p(paths['out_svm'] + clip_name + sep) if paths['out_svm'] else None  # svm path (only if SVM output requested)
# loading images from the clip
training_detector = load_images(list(list_frames), frames_path, paths['in_lns'], clip_name,
training_images=list(training_images), max_images=mi)
print('\nBuilding Part based AAM for the clip {}.'.format(clip_name))
aam = PatchAAM(training_detector, verbose=True, holistic_features=features, patch_shape=patch_shape,
diagonal=d_aam, scales=(.5, 1))
del training_detector
fitter = LucasKanadeAAMFitter(aam, n_shape=n_s, n_appearance=n_a)
# save the AAM model (requires plenty of disk space for each model).
aam.features = None
export_pickle(aam, paths['out_model'] + clip_name + '.pkl', overwrite=True)
del aam
clip = Clip(clip_name, paths['clips'], frames, [paths['in_lns'], paths['in_fit_lns']], [pts_p, svm_p])
# [process_frame(frame_name, clip, img_type, svm_params,loop) for frame_name in list_frames];
Parallel(n_jobs=-1, verbose=4)(delayed(process_frame)(frame_name, clip, img_type, svm_params,
loop) for frame_name in list_frames)
fitter = [] # reset fitter
return True
def warp_image_to_reference_shape(i, reference_frame, group):
    """Warps the image to the reference frame's mask with a piecewise affine
    transform between the reference landmarks and the image's 'group' landmarks."""
    transform = PiecewiseAffine(reference_frame.landmarks['source'][None], i.landmarks[group][None])
    im2 = i.warp_to_mask(reference_frame.mask, transform)
    im2.landmarks['source'] = reference_frame.landmarks['source']
    return im2
def _aux_read_public_images(path, max_images, training_images, crop=crop, pix_thres=pix_thres):
return read_public_images(path, max_images=max_images, training_images=training_images,
crop_reading=crop, pix_thres=pix_thres)
if __name__ == '__main__':
args = len(sys.argv)
path_0_m = check_initial_path(args, sys.argv)
if args > 3:
in_landmarks_fol_m = str(sys.argv[2]) + sep
out_landmarks_fol_m = str(sys.argv[3]) + sep
ms = 'ln_in : {} ln_out : {}.'
print(ms.format(in_landmarks_fol_m, out_landmarks_fol_m))
else:
in_landmarks_fol_m, out_landmarks_fol_m = '3_ffld_ln' + sep, '4_pbaam' + sep
out_model_fol_m = strip_separators_in_the_end(out_landmarks_fol_m) + '_models' + sep
main_for_ps_aam(path_0_m, in_landmarks_fol_m, out_landmarks_fol_m, out_model_fol_m,
n_shape=[3, 12], n_appearance=[50, 100])
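# Example invocation (illustrative; the base path below is hypothetical, the
# folder names are the script's defaults):
#   python <this_script>.py /data/clips/ 3_ffld_ln 4_pbaam
# reads landmarks from /data/clips/3_ffld_ln/, exports AAM fits to
# /data/clips/4_pbaam/ and the pickled per-clip models to
# /data/clips/4_pbaam_models/.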
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-08-25 17:38
from __future__ import unicode_literals
import django.db.models.deletion
import django.db.models.manager
import jsonfield.fields
import mptt.fields
from django.db import migrations
from django.db import models
import kolibri.core.content.models
import kolibri.core.fields
class Migration(migrations.Migration):
dependencies = [("content", "0003_auto_20170607_1212")]
operations = [
migrations.CreateModel(
name="AssessmentMetaData",
fields=[
(
"id",
kolibri.core.content.models.UUIDField(
primary_key=True, serialize=False
),
),
("assessment_item_ids", jsonfield.fields.JSONField(default=[])),
("number_of_assessments", models.IntegerField()),
("mastery_model", jsonfield.fields.JSONField(default={})),
("randomize", models.BooleanField(default=False)),
("is_manipulable", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name="ChannelMetadata",
fields=[
(
"id",
kolibri.core.content.models.UUIDField(
primary_key=True, serialize=False
),
),
("name", models.CharField(max_length=200)),
("description", models.CharField(blank=True, max_length=400)),
("author", models.CharField(blank=True, max_length=400)),
("version", models.IntegerField(default=0)),
("thumbnail", models.TextField(blank=True)),
("last_updated", kolibri.core.fields.DateTimeTzField(null=True)),
("min_schema_version", models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name="ContentNode",
fields=[
(
"id",
kolibri.core.content.models.UUIDField(
primary_key=True, serialize=False
),
),
("title", models.CharField(max_length=200)),
("content_id", kolibri.core.content.models.UUIDField(db_index=True)),
("channel_id", kolibri.core.content.models.UUIDField(db_index=True)),
(
"description",
models.CharField(blank=True, max_length=400, null=True),
),
("sort_order", models.FloatField(blank=True, null=True)),
("license_owner", models.CharField(blank=True, max_length=200)),
("author", models.CharField(blank=True, max_length=200)),
(
"kind",
models.CharField(
blank=True,
choices=[
("topic", "Topic"),
("video", "Video"),
("audio", "Audio"),
("exercise", "Exercise"),
("document", "Document"),
("html5", "HTML5 App"),
],
max_length=200,
),
),
("available", models.BooleanField(default=False)),
("stemmed_metaphone", models.CharField(blank=True, max_length=1800)),
("lft", models.PositiveIntegerField(db_index=True, editable=False)),
("rght", models.PositiveIntegerField(db_index=True, editable=False)),
("tree_id", models.PositiveIntegerField(db_index=True, editable=False)),
("level", models.PositiveIntegerField(db_index=True, editable=False)),
(
"has_prerequisite",
models.ManyToManyField(
blank=True,
related_name="prerequisite_for",
to="content.ContentNode",
),
),
],
options={"ordering": ("lft",)},
# Removed because django-mptt 0.8.7 patched up an error in
# Django 1.9 (fixed since 1.10).
# Ref: https://code.djangoproject.com/ticket/26643
# https://github.com/learningequality/kolibri/pull/3180
# managers=[
# ('_default_manager', django.db.models.manager.Manager()),
# ],
# Removed with the same reasoning
# managers=[
# ('objects', django.db.models.manager.Manager()),
# ],
),
migrations.CreateModel(
name="ContentTag",
fields=[
(
"id",
kolibri.core.content.models.UUIDField(
primary_key=True, serialize=False
),
),
("tag_name", models.CharField(blank=True, max_length=30)),
],
),
migrations.CreateModel(
name="File",
fields=[
(
"id",
kolibri.core.content.models.UUIDField(
primary_key=True, serialize=False
),
),
("available", models.BooleanField(default=False)),
(
"preset",
models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("vector_video", "Vectorized"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_thumbnail", "HTML5 Thumbnail"),
],
max_length=150,
),
),
("supplementary", models.BooleanField(default=False)),
("thumbnail", models.BooleanField(default=False)),
("priority", models.IntegerField(blank=True, db_index=True, null=True)),
(
"contentnode",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="files",
to="content.ContentNode",
),
),
],
options={"ordering": ["priority"]},
),
migrations.CreateModel(
name="Language",
fields=[
(
"id",
models.CharField(max_length=14, primary_key=True, serialize=False),
),
("lang_code", models.CharField(db_index=True, max_length=3)),
(
"lang_subcode",
models.CharField(
blank=True, db_index=True, max_length=10, null=True
),
),
("lang_name", models.CharField(blank=True, max_length=100, null=True)),
(
"lang_direction",
models.CharField(
choices=[("ltr", "Left to Right"), ("rtl", "Right to Left")],
default="ltr",
max_length=3,
),
),
],
),
migrations.CreateModel(
name="License",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("license_name", models.CharField(max_length=50)),
(
"license_description",
models.CharField(blank=True, max_length=400, null=True),
),
],
),
migrations.CreateModel(
name="LocalFile",
fields=[
(
"id",
models.CharField(max_length=32, primary_key=True, serialize=False),
),
(
"extension",
models.CharField(
blank=True,
choices=[
("mp4", "MP4 Video"),
("vtt", "VTT Subtitle"),
("srt", "SRT Subtitle"),
("mp3", "MP3 Audio"),
("pdf", "PDF Document"),
("jpg", "JPG Image"),
("jpeg", "JPEG Image"),
("png", "PNG Image"),
("gif", "GIF Image"),
("json", "JSON"),
("svg", "SVG Image"),
("perseus", "Perseus Exercise"),
("zip", "HTML5 Zip"),
],
max_length=40,
),
),
("available", models.BooleanField(default=False)),
("file_size", models.IntegerField(blank=True, null=True)),
],
),
migrations.DeleteModel(name="ChannelMetadataCache"),
migrations.AddField(
model_name="file",
name="lang",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="content.Language",
),
),
migrations.AddField(
model_name="file",
name="local_file",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="files",
to="content.LocalFile",
),
),
migrations.AddField(
model_name="contentnode",
name="lang",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="content.Language",
),
),
migrations.AddField(
model_name="contentnode",
name="license",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="content.License",
),
),
migrations.AddField(
model_name="contentnode",
name="parent",
field=mptt.fields.TreeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="content.ContentNode",
),
),
migrations.AddField(
model_name="contentnode",
name="related",
field=models.ManyToManyField(
blank=True,
related_name="_contentnode_related_+",
to="content.ContentNode",
),
),
migrations.AddField(
model_name="contentnode",
name="tags",
field=models.ManyToManyField(
blank=True, related_name="tagged_content", to="content.ContentTag"
),
),
migrations.AddField(
model_name="channelmetadata",
name="root",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="content.ContentNode"
),
),
migrations.AddField(
model_name="assessmentmetadata",
name="contentnode",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="assessmentmetadata",
to="content.ContentNode",
),
),
]
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
import logging
import threading
import time
import traceback
import urllib
from collections import defaultdict
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms.display import DisplayData
from apache_beam.typehints import typehints
from apache_beam.utils.plugin import BeamPlugin
__all__ = ['DataflowRunner']
class DataflowRunner(PipelineRunner):
"""A runner that creates job graphs and submits them for remote execution.
Every execution of the run() method will submit an independent job for
remote execution that consists of the nodes reachable from the passed in
node argument or entire graph if node is None. The run() method returns
after the service created the job and will not wait for the job to finish
if blocking is set to False.
"""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DataflowRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
# Imported here to avoid circular dependencies.
# TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
_PTRANSFORM_OVERRIDES = [
CreatePTransformOverride(),
]
def __init__(self, cache=None):
# Cache of CloudWorkflowStep protos generated while the runner
# "executes" a pipeline.
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
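  # e.g. successive calls yield 's1', 's2', 's3', ...; these become the step
  # names in the job graph submitted to the service.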
@staticmethod
def poll_for_job_completion(runner, result, duration):
"""Polls for the specified job to finish running (successfully or not).
Updates the result with the new job information before returning.
Args:
runner: DataflowRunner instance to use for polling job state.
result: DataflowPipelineResult instance used for job information.
duration (int): The time to wait (in milliseconds) for job to finish.
If it is set to :data:`None`, it will wait indefinitely until the job
is finished.
"""
last_message_time = None
last_message_hash = None
last_error_rank = float('-inf')
last_error_msg = None
last_job_state = None
# How long to wait after pipeline failure for the error
# message to show up giving the reason for the failure.
# It typically takes about 30 seconds.
final_countdown_timer_secs = 50.0
sleep_secs = 5.0
# Try to prioritize the user-level traceback, if any.
def rank_error(msg):
if 'work item was attempted' in msg:
return -1
elif 'Traceback' in msg:
return 1
return 0
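    # Illustrative ranking: a message containing a user-code 'Traceback'
    # ranks 1 (most useful), a generic 'work item was attempted' message
    # ranks -1 (least useful), and anything else ranks 0; the loop below
    # keeps the highest-ranked (on ties, the most recent) error message.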
if duration:
start_secs = time.time()
duration_secs = duration / 1000
job_id = result.job_id()
while True:
response = runner.dataflow_client.get_job(job_id)
# If get() is called very soon after Create() the response may not contain
# an initialized 'currentState' field.
if response.currentState is not None:
if response.currentState != last_job_state:
logging.info('Job %s is in state %s', job_id, response.currentState)
last_job_state = response.currentState
if str(response.currentState) != 'JOB_STATE_RUNNING':
# Stop checking for new messages on timeout, explanatory
# message received, success, or a terminal job state caused
# by the user that therefore doesn't require explanation.
if (final_countdown_timer_secs <= 0.0
or last_error_msg is not None
or str(response.currentState) == 'JOB_STATE_DONE'
or str(response.currentState) == 'JOB_STATE_CANCELLED'
or str(response.currentState) == 'JOB_STATE_UPDATED'
or str(response.currentState) == 'JOB_STATE_DRAINED'):
break
# The job has failed; ensure we see any final error messages.
sleep_secs = 1.0 # poll faster during the final countdown
final_countdown_timer_secs -= sleep_secs
time.sleep(sleep_secs)
# Get all messages since beginning of the job run or since last message.
page_token = None
while True:
messages, page_token = runner.dataflow_client.list_messages(
job_id, page_token=page_token, start_time=last_message_time)
for m in messages:
message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
m_hash = hash(message)
if last_message_hash is not None and m_hash == last_message_hash:
# Skip the first message if it is the last message we got in the
# previous round. This can happen because we use the
# last_message_time as a parameter of the query for new messages.
continue
last_message_time = m.time
last_message_hash = m_hash
# Skip empty messages.
if m.messageImportance is None:
continue
logging.info(message)
if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
if rank_error(m.messageText) >= last_error_rank:
last_error_rank = rank_error(m.messageText)
last_error_msg = m.messageText
if not page_token:
break
if duration:
passed_secs = time.time() - start_secs
if passed_secs > duration_secs:
logging.warning('Timing out on waiting for job %s after %d seconds',
job_id, passed_secs)
break
result._job = response
runner.last_error_msg = last_error_msg
@staticmethod
def group_by_key_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class GroupByKeyInputVisitor(PipelineVisitor):
"""A visitor that replaces `Any` element type for input `PCollection` of
a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type.
      TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
we could directly replace the coder instead of mutating the element type.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
pcoll = transform_node.inputs[0]
input_type = pcoll.element_type
# If input_type is not specified, then treat it as `Any`.
if not input_type:
input_type = typehints.Any
def coerce_to_kv_type(element_type):
if isinstance(element_type, typehints.TupleHint.TupleConstraint):
if len(element_type.tuple_types) == 2:
return element_type
else:
              raise ValueError(
                  "Tuple input to GroupByKey must have two components. "
                  "Found %s for %s" % (element_type, pcoll))
          elif isinstance(element_type, typehints.AnyTypeConstraint):
# `Any` type needs to be replaced with a KV[Any, Any] to
# force a KV coder as the main output coder for the pcollection
# preceding a GroupByKey.
return typehints.KV[typehints.Any, typehints.Any]
elif isinstance(element_type, typehints.UnionConstraint):
union_types = [
coerce_to_kv_type(t) for t in element_type.union_types]
return typehints.KV[
typehints.Union[tuple(t.tuple_types[0] for t in union_types)],
typehints.Union[tuple(t.tuple_types[1] for t in union_types)]]
else:
# TODO: Possibly handle other valid types.
raise ValueError(
"Input to GroupByKey must be of Tuple or Any type. "
"Found %s for %s" % (element_type, pcoll))
pcoll.element_type = coerce_to_kv_type(input_type)
return GroupByKeyInputVisitor()
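  # Illustrative effect of the visitor above (types written informally):
  #   Tuple[str, int] -> unchanged (already a two-component KV type)
  #   Any             -> KV[Any, Any] (forces a KV coder before the GBK)
  #   Union[Tuple[str, int], Tuple[str, float]] -> KV[str, Union[int, float]]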
@staticmethod
def flatten_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class FlattenInputVisitor(PipelineVisitor):
"""A visitor that replaces the element type for input ``PCollections``s of
a ``Flatten`` transform with that of the output ``PCollection``.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import Flatten
if isinstance(transform_node.transform, Flatten):
output_pcoll = transform_node.outputs[None]
for input_pcoll in transform_node.inputs:
input_pcoll.element_type = output_pcoll.element_type
return FlattenInputVisitor()
def run_pipeline(self, pipeline):
"""Remotely executes entire pipeline or parts reachable from node."""
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
# Snapshot the pipeline in a portable proto before mutating it
proto_pipeline = pipeline.to_runner_api()
# Performing configured PTransform overrides.
pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)
# Add setup_options for all the BeamPlugin imports
setup_options = pipeline._options.view_as(SetupOptions)
plugins = BeamPlugin.get_all_plugin_paths()
if setup_options.beam_plugins is not None:
plugins = list(set(plugins + setup_options.beam_plugins))
setup_options.beam_plugins = plugins
self.job = apiclient.Job(pipeline._options, proto_pipeline)
# Dataflow runner requires a KV type for GBK inputs, hence we enforce that
# here.
pipeline.visit(self.group_by_key_input_visitor())
# Dataflow runner requires output type of the Flatten to be the same as the
# inputs, hence we enforce that here.
pipeline.visit(self.flatten_input_visitor())
# The superclass's run will trigger a traversal of all reachable nodes.
super(DataflowRunner, self).run_pipeline(pipeline)
test_options = pipeline._options.view_as(TestOptions)
# If it is a dry run, return without submitting the job.
if test_options.dry_run:
return None
# Get a Dataflow API client and set its options
self.dataflow_client = apiclient.DataflowApplicationClient(
pipeline._options)
# Create the job description and send a request to the service. The result
# can be None if there is no need to send a request to the service (e.g.
# template creation). If a request was sent and failed then the call will
# raise an exception.
result = DataflowPipelineResult(
self.dataflow_client.create_job(self.job), self)
self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
result.metric_results = self._metrics
return result
def _get_typehint_based_encoding(self, typehint, window_coder):
"""Returns an encoding based on a typehint object."""
return self._get_cloud_encoding(self._get_coder(typehint,
window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
"""Returns a coder based on a typehint object."""
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint),
window_coder=window_coder)
return coders.registry.get_coder(typehint)
def _get_cloud_encoding(self, coder):
"""Returns an encoding based on a coder object."""
if not isinstance(coder, coders.Coder):
raise TypeError('Coder object must inherit from coders.Coder: %s.' %
str(coder))
return coder.as_cloud_object()
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': input_encoding['@type'],
'component_encodings': [input_encoding]
}
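  # e.g. an input encoding {'@type': T, ...} becomes
  # {'@type': T, 'component_encodings': [<the full input encoding>]}: the
  # outer type is preserved so the service's input/output type check passes.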
def _get_encoded_output_coder(self, transform_node, window_value=True):
"""Returns the cloud encoding of the coder for the output of a transform."""
if (len(transform_node.outputs) == 1
and transform_node.outputs[None].element_type is not None):
# TODO(robertwb): Handle type hints for multi-output transforms.
element_type = transform_node.outputs[None].element_type
else:
# TODO(silviuc): Remove this branch (and assert) when typehints are
# propagated everywhere. Returning an 'Any' as type hint will trigger
# usage of the fallback coder (i.e., cPickler).
element_type = typehints.Any
if window_value:
window_coder = (
transform_node.outputs[None].windowing.windowfn.get_window_coder())
else:
window_coder = None
return self._get_typehint_based_encoding(
element_type, window_coder=window_coder)
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
"""Creates a Step object and adds it to the cache."""
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(step_kind, self._get_unique_step_name())
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, step_label)
# Cache the node/step association for the main output of the transform node.
self._cache.cache_output(transform_node, None, step)
# If side_tags is not () then this is a multi-output transform node and we
# need to cache the (node, tag, step) for each of the tags used to access
# the outputs. This is essential because the keys used to search in the
# cache always contain the tag.
for tag in side_tags:
self._cache.cache_output(transform_node, tag, step)
# Finally, we add the display data items to the pipeline step.
# If the transform contains no display data then an empty list is added.
step.add_property(
PropertyNames.DISPLAY_DATA,
[item.get_dict() for item in
DisplayData.create_from(transform_node.transform).items])
return step
def _add_singleton_step(self, label, full_label, tag, input_step):
"""Creates a CollectionToSingleton step used to handle ParDo side inputs."""
# Import here to avoid adding the dependency for local running scenarios.
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, full_label)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(tag)})
step.encoding = self._get_side_input_encoding(input_step.encoding)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (full_label, PropertyNames.OUTPUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
return step
def run_Impulse(self, transform_node):
standard_options = (
transform_node.outputs[None].pipeline._options.view_as(StandardOptions))
if standard_options.streaming:
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
step.add_property(PropertyNames.FORMAT, 'pubsub')
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (
transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
else:
      raise ValueError('Impulse source for batch pipelines has not been defined.')
def run_Flatten(self, transform_node):
step = self._add_step(TransformNames.FLATTEN,
transform_node.full_label, transform_node)
inputs = []
for one_input in transform_node.inputs:
input_step = self._cache.get_pvalue(one_input)
inputs.append(
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)})
step.add_property(PropertyNames.INPUTS, inputs)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def apply_WriteToBigQuery(self, transform, pcoll):
# Make sure this is the WriteToBigQuery class that we expected
if not isinstance(transform, beam.io.WriteToBigQuery):
return self.apply_PTransform(transform, pcoll)
standard_options = pcoll.pipeline._options.view_as(StandardOptions)
if standard_options.streaming:
if (transform.write_disposition ==
beam.io.BigQueryDisposition.WRITE_TRUNCATE):
raise RuntimeError('Can not use write truncation mode in streaming')
return self.apply_PTransform(transform, pcoll)
else:
return pcoll | 'WriteToBigQuery' >> beam.io.Write(
beam.io.BigQuerySink(
transform.table_reference.tableId,
transform.table_reference.datasetId,
transform.table_reference.projectId,
transform.schema,
transform.create_disposition,
transform.write_disposition))
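# Illustrative summary (not part of the original runner) of the override
# above: streaming pipelines keep the WriteToBigQuery transform and go
# through apply_PTransform, while batch pipelines are rewritten to the
# native sink, roughly
#   pcoll | 'WriteToBigQuery' >> beam.io.Write(beam.io.BigQuerySink(...))
# with the table reference, schema and dispositions copied from the
# transform.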
def apply_GroupByKey(self, transform, pcoll):
# Infer coder of parent.
#
# TODO(ccy): make Coder inference and checking less specialized and more
# comprehensive.
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder() # pylint: disable=protected-access
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError(('Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label,
coder))
# TODO(robertwb): Update the coder itself if it changed.
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
return pvalue.PCollection(pcoll.pipeline)
def run_GroupByKey(self, transform_node):
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.GROUP, transform_node.full_label, transform_node)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
windowing = transform_node.transform.get_windowing(
transform_node.inputs)
step.add_property(
PropertyNames.SERIALIZED_FN,
self.serialize_windowing_strategy(windowing))
def run_ParDo(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
# Attach side inputs.
si_dict = {}
# We must call self._cache.get_pvalue exactly once due to refcounting.
si_labels = {}
full_label_counts = defaultdict(int)
lookup_label = lambda side_pval: si_labels[side_pval]
for side_pval in transform_node.side_inputs:
assert isinstance(side_pval, AsSideInput)
step_number = self._get_unique_step_name()
si_label = 'SideInput-' + step_number
pcollection_label = '%s.%s' % (
side_pval.pvalue.producer.full_label.split('/')[-1],
side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
si_full_label = '%s/%s(%s.%s)' % (transform_node.full_label,
side_pval.__class__.__name__,
pcollection_label,
full_label_counts[pcollection_label])
# Count the number of times the same PCollection is a side input
# to the same ParDo.
full_label_counts[pcollection_label] += 1
self._add_singleton_step(
si_label, si_full_label, side_pval.pvalue.tag,
self._cache.get_pvalue(side_pval.pvalue))
si_dict[si_label] = {
'@type': 'OutputReference',
PropertyNames.STEP_NAME: si_label,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}
si_labels[side_pval] = si_label
# Now create the step for the ParDo transform being handled.
transform_name = transform_node.full_label.rsplit('/', 1)[-1]
step = self._add_step(
TransformNames.DO,
transform_node.full_label + (
'/{}'.format(transform_name)
if transform_node.side_inputs else ''),
transform_node,
transform_node.transform.output_tags)
fn_data = self._pardo_fn_data(transform_node, lookup_label)
step.add_property(PropertyNames.SERIALIZED_FN, pickler.dumps(fn_data))
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Add side inputs if any.
step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)
# Generate description for the outputs. The output names
# will be 'out' for main output and 'out_<tag>' for a tagged output.
# Using 'out' as a tag will not clash with the name for main since it will
# be transformed into 'out_out' internally.
outputs = []
step.encoding = self._get_encoded_output_coder(transform_node)
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
for side_tag in transform.output_tags:
# The assumption here is that all outputs will have the same typehint
# and coder as the main output. This is certainly the case right now
# but conceivably it could change in the future.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, side_tag)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: (
'%s_%s' % (PropertyNames.OUT, side_tag))})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
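# Illustrative (assuming PropertyNames.OUT is the literal 'out'): a ParDo
# declaring output_tags ['errors'] produces OUTPUT_INFO entries with output
# names 'out' for the main output and 'out_errors' for the tagged output.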
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (transform.fn, transform.args, transform.kwargs, si_tags_and_types,
transform_node.inputs[0].windowing)
def apply_CombineValues(self, transform, pcoll):
return pvalue.PCollection(pcoll.pipeline)
def run_CombineValues(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.COMBINE, transform_node.full_label, transform_node)
# Combiner functions do not take deferred side-inputs (i.e. PValues) and
# therefore the code to handle extra args/kwargs is simpler than for the
# DoFns of the ParDo transform. The last, empty argument is where side
# input information would go.
fn_data = (transform.fn, transform.args, transform.kwargs, ())
step.add_property(PropertyNames.SERIALIZED_FN,
pickler.dumps(fn_data))
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Note that the accumulator must not have a WindowedValue encoding, while
# the output of this step does in fact have a WindowedValue encoding.
accumulator_encoding = self._get_cloud_encoding(
transform_node.transform.fn.get_accumulator_coder())
output_encoding = self._get_encoded_output_coder(transform_node)
step.encoding = output_encoding
step.add_property(PropertyNames.ENCODING, accumulator_encoding)
# Generate description for main output 'out.'
outputs = []
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def run_Read(self, transform_node):
transform = transform_node.transform
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the source specific properties.
if not hasattr(transform.source, 'format'):
# If a format is not set, we assume the source to be a custom source.
source_dict = {}
source_dict['spec'] = {
'@type': names.SOURCE_TYPE,
names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
}
try:
source_dict['metadata'] = {
'estimated_size_bytes': json_value.get_typed_value_descriptor(
transform.source.estimate_size())
}
except error.RuntimeValueProviderError:
# Size estimation is best effort; this error comes from a runtime value
# provider.
logging.info(
'Could not estimate size of source %r due to '
'RuntimeValueProviderError', transform.source)
except Exception: # pylint: disable=broad-except
# Size estimation is best effort. So we log the error and continue.
logging.info(
'Could not estimate size of source %r due to an exception: %s',
transform.source, traceback.format_exc())
step.add_property(PropertyNames.SOURCE_STEP_INPUT,
source_dict)
elif transform.source.format == 'text':
step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
elif transform.source.format == 'bigquery':
step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
# TODO(silviuc): Add table validation if transform.source.validate.
if transform.source.table_reference is not None:
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.source.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.source.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.source.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.source.table_reference.projectId)
elif transform.source.query is not None:
step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
step.add_property(PropertyNames.BIGQUERY_USE_LEGACY_SQL,
transform.source.use_legacy_sql)
step.add_property(PropertyNames.BIGQUERY_FLATTEN_RESULTS,
transform.source.flatten_results)
else:
raise ValueError(
'BigQuery source %r must specify either a table or a query' %
transform.source)
elif transform.source.format == 'pubsub':
standard_options = (
transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
if not standard_options.streaming:
raise ValueError('PubSubPayloadSource is currently available for use '
'only in streaming pipelines.')
# Only one of topic or subscription should be set.
if transform.source.full_subscription:
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION,
transform.source.full_subscription)
elif transform.source.full_topic:
step.add_property(PropertyNames.PUBSUB_TOPIC,
transform.source.full_topic)
if transform.source.id_label:
step.add_property(PropertyNames.PUBSUB_ID_LABEL,
transform.source.id_label)
else:
raise ValueError(
'Source %r has unexpected format %s.' % (
transform.source, transform.source.format))
if not hasattr(transform.source, 'format'):
step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
else:
step.add_property(PropertyNames.FORMAT, transform.source.format)
# Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
# step should be the type of value outputted by each step. Read steps
# automatically wrap output values in a WindowedValue wrapper, if necessary.
# This is also necessary for proper encoding for size estimation.
# Using a GlobalWindowCoder as a placeholder instead of the default
# PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(transform._infer_output_coder(),
coders.coders.GlobalWindowCoder()) # pylint: disable=protected-access
step.encoding = self._get_cloud_encoding(coder)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def run__NativeWrite(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.WRITE, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the sink specific properties.
if transform.sink.format == 'text':
# Note that it is important to use typed properties (@type/value dicts)
# for non-string properties and also for empty strings. For example,
# in the code below the num_shards must have type and also
# file_name_suffix and shard_name_template (could be empty strings).
step.add_property(
PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix,
with_type=True)
step.add_property(
PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix,
with_type=True)
step.add_property(
PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template,
with_type=True)
if transform.sink.num_shards > 0:
step.add_property(
PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
# TODO(silviuc): Implement sink validation.
step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
elif transform.sink.format == 'bigquery':
# TODO(silviuc): Add table validation if transform.sink.validate.
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.sink.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.sink.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.sink.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.sink.table_reference.projectId)
step.add_property(PropertyNames.BIGQUERY_CREATE_DISPOSITION,
transform.sink.create_disposition)
step.add_property(PropertyNames.BIGQUERY_WRITE_DISPOSITION,
transform.sink.write_disposition)
if transform.sink.table_schema is not None:
step.add_property(
PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
elif transform.sink.format == 'pubsub':
standard_options = (
transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
if not standard_options.streaming:
raise ValueError('PubSubPayloadSink is currently available for use '
'only in streaming pipelines.')
step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
else:
raise ValueError(
'Sink %r has unexpected format %s.' % (
transform.sink, transform.sink.format))
step.add_property(PropertyNames.FORMAT, transform.sink.format)
# Wrap coder in WindowedValueCoder: this is necessary for proper encoding
# for size estimation. Using a GlobalWindowCoder as a placeholder instead
# of the default PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(transform.sink.coder,
coders.coders.GlobalWindowCoder())
step.encoding = self._get_cloud_encoding(coder)
step.add_property(PropertyNames.ENCODING, step.encoding)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
@classmethod
def serialize_windowing_strategy(cls, windowing):
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
context = pipeline_context.PipelineContext()
windowing_proto = windowing.to_runner_api(context)
return cls.byte_array_to_json_string(
beam_runner_api_pb2.MessageWithComponents(
components=context.to_runner_api(),
windowing_strategy=windowing_proto).SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms.core import Windowing
proto = beam_runner_api_pb2.MessageWithComponents()
proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
return Windowing.from_runner_api(
proto.windowing_strategy,
pipeline_context.PipelineContext(proto.components))
@staticmethod
def byte_array_to_json_string(raw_bytes):
"""Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
return urllib.quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
"""Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
return urllib.unquote(encoded_string)
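# Illustrative round trip (assumes Python 2 semantics of urllib.quote /
# urllib.unquote, as imported by this module):
#   byte_array_to_json_string(b'\x00ab') == '%00ab'
#   json_string_to_byte_array('%00ab') == b'\x00ab'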
class DataflowPipelineResult(PipelineResult):
"""Represents the state of a pipeline run on the Dataflow service."""
def __init__(self, job, runner):
"""Initialize a new DataflowPipelineResult instance.
Args:
job: Job message from the Dataflow API. Could be :data:`None` if a job
request was not sent to Dataflow service (e.g. template jobs).
runner: DataflowRunner instance.
"""
self._job = job
self._runner = runner
self.metric_results = None
def _update_job(self):
# We need the job id to be able to update job information. There is no need
# to update the job if we are in a known terminal state.
if self.has_job and not self._is_in_terminal_state():
self._job = self._runner.dataflow_client.get_job(self.job_id())
def job_id(self):
return self._job.id
def metrics(self):
return self.metric_results
@property
def has_job(self):
return self._job is not None
@property
def state(self):
"""Return the current state of the remote job.
Returns:
A PipelineState object.
"""
if not self.has_job:
return PipelineState.UNKNOWN
self._update_job()
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
# TODO: Move this table to another location.
# Ordered by the enum values.
api_jobstate_map = {
values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
values_enum.JOB_STATE_DONE: PipelineState.DONE,
values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
}
return (api_jobstate_map[self._job.currentState] if self._job.currentState
else PipelineState.UNKNOWN)
def _is_in_terminal_state(self):
if not self.has_job:
return True
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
return self._job.currentState in [
values_enum.JOB_STATE_STOPPED, values_enum.JOB_STATE_DONE,
values_enum.JOB_STATE_FAILED, values_enum.JOB_STATE_CANCELLED,
values_enum.JOB_STATE_UPDATED, values_enum.JOB_STATE_DRAINED]
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
thread = threading.Thread(
target=DataflowRunner.poll_for_job_completion,
args=(self._runner, self, duration))
# Mark the thread as a daemon thread so a keyboard interrupt on the main
# thread will terminate everything. This is also the reason we will not
# use thread.join() to wait for the polling thread.
thread.daemon = True
thread.start()
while thread.isAlive():
time.sleep(5.0)
# TODO: Merge the termination code in poll_for_job_completion and
# _is_in_terminal_state.
terminated = self._is_in_terminal_state()
assert duration or terminated, (
'Job did not reach a terminal state after waiting indefinitely.')
if terminated and self.state != PipelineState.DONE:
# TODO(BEAM-1290): Consider converting this to an error log based on
# the resolution of the issue.
raise DataflowRuntimeException(
'Dataflow pipeline failed. State: %s, Error:\n%s' %
(self.state, getattr(self._runner, 'last_error_msg', None)), self)
return self.state
def cancel(self):
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
self._update_job()
if self._is_in_terminal_state():
logging.warning(
'Cancel failed because job %s is already terminated in state %s.',
self.job_id(), self.state)
else:
if not self._runner.dataflow_client.modify_job_state(
self.job_id(), 'JOB_STATE_CANCELLED'):
cancel_failed_message = (
'Failed to cancel job %s, please go to the Developers Console to '
'cancel it manually.') % self.job_id()
logging.error(cancel_failed_message)
raise DataflowRuntimeException(cancel_failed_message, self)
return self.state
def __str__(self):
return '<%s %s %s>' % (
self.__class__.__name__,
self.job_id(),
self.state)
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
"""Indicates an error has occurred in running this pipeline."""
def __init__(self, msg, result):
super(DataflowRuntimeException, self).__init__(msg)
self.result = result
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generator versions of transforms.
"""
import types
import logbook
from numbers import Integral
from datetime import datetime
from collections import deque
from abc import ABCMeta, abstractmethod
from alephnull.protocol import DATASOURCE_TYPE
from alephnull.gens.utils import assert_sort_unframe_protocol, hash_args
from alephnull.finance import trading
log = logbook.Logger('Transform')
class UnsupportedEventWindowFlagValue(Exception):
"""
Error state when an EventWindow option is attempted to be set
to a value that is no longer supported by the library.
This is to help enforce deprecation of the market_aware and delta flags,
without completely removing them and breaking existing algorithms.
"""
pass
class InvalidWindowLength(Exception):
"""
Error raised when the window length is unusable.
"""
pass
def check_window_length(window_length):
"""
Ensure the window length provided to a transform is valid.
"""
if window_length is None:
raise InvalidWindowLength("window_length must be provided")
if not isinstance(window_length, Integral):
raise InvalidWindowLength(
"window_length must be an integer-like number")
if window_length == 0:
raise InvalidWindowLength("window_length must be non-zero")
if window_length < 0:
raise InvalidWindowLength("window_length must be positive")
class TransformMeta(type):
"""
Metaclass that automatically packages a class inside of
StatefulTransform on initialization. Specifically, if Foo is a
class with its __metaclass__ attribute set to TransformMeta, then
calling Foo(*args, **kwargs) will return StatefulTransform(Foo,
*args, **kwargs) instead of an instance of Foo. (Note that you can
still recover an instance of a "raw" Foo by introspecting the
resulting StatefulTransform's 'state' field.)
"""
def __call__(cls, *args, **kwargs):
return StatefulTransform(cls, *args, **kwargs)
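# Illustrative sketch (assumed usage, not part of the original module):
#
#   class MovingStat(object):
#       __metaclass__ = TransformMeta
#       def __init__(self, window_length=None):
#           self.window_length = window_length
#       def update(self, event):
#           return event
#
#   tnfm = MovingStat(window_length=3)  # actually a StatefulTransform
#   raw_state = tnfm.state              # the underlying MovingStat instance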
class StatefulTransform(object):
"""
Generic transform generator that takes each message from an
in-stream and passes it to a state object. For each call to
update, the state class must produce a message to be fed
downstream. Any transform class with the FORWARDER class variable
set to true will forward all fields in the original message.
Otherwise only dt, tnfm_id, and tnfm_value are forwarded.
"""
def __init__(self, tnfm_class, *args, **kwargs):
assert isinstance(tnfm_class, (types.ObjectType, types.ClassType)), \
"Stateful transform requires a class."
assert hasattr(tnfm_class, 'update'), \
"Stateful transform requires the class to have an update method"
# Create an instance of our transform class.
if isinstance(tnfm_class, TransformMeta):
# Classes derived TransformMeta have their __call__
# attribute overridden. Since this is what is usually
# used to create an instance, we have to delegate the
# responsibility of creating an instance to
# TransformMeta's parent class, which is 'type'. This is
# what is implicitly done behind the scenes by the python
# interpreter for most classes anyway, but here we have to
# be explicit because we've overridden the method that
# usually resolves to our super call.
self.state = super(TransformMeta, tnfm_class).__call__(
*args, **kwargs)
# Normal object instantiation.
else:
self.state = tnfm_class(*args, **kwargs)
# save the window_length of the state for external access.
self.window_length = self.state.window_length
# Create the string associated with this generator's output.
self.namestring = tnfm_class.__name__ + hash_args(*args, **kwargs)
def get_hash(self):
return self.namestring
def transform(self, stream_in):
return self._gen(stream_in)
def _gen(self, stream_in):
# IMPORTANT: Messages may contain pointers that are shared with
# other streams. Transforms that modify their input
# messages should only manipulate copies.
for message in stream_in:
# We only handle TRADE and CUSTOM events; everything else passes through.
if (hasattr(message, 'type')
and message.type not in (
DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CUSTOM)):
yield message
continue
# allow upstream generators to yield None to avoid
# blocking.
if message is None:
continue
assert_sort_unframe_protocol(message)
tnfm_value = self.state.update(message)
out_message = message
out_message[self.namestring] = tnfm_value
yield out_message
class EventWindow(object):
"""
Abstract base class for transform classes that calculate iterative
metrics on events within a given timedelta. Maintains a list of
events that are within a certain timedelta of the most recent
tick. Calls self.handle_add(event) for each event added to the
window. Calls self.handle_remove(event) for each event removed
from the window. Subclass these methods along with init(*args,
**kwargs) to calculate metrics over the window.
If the market_aware flag is True, the EventWindow drops old events
based on the number of elapsed trading days between newest and oldest.
Otherwise old events are dropped based on a raw timedelta.
See zipline/transforms/mavg.py and zipline/transforms/vwap.py for example
implementations of moving average and volume-weighted average
price.
"""
# Mark this as an abstract base class.
__metaclass__ = ABCMeta
def __init__(self, market_aware=True, window_length=None, delta=None):
check_window_length(window_length)
self.window_length = window_length
self.ticks = deque()
# Only Market-aware mode is now supported.
if not market_aware:
raise UnsupportedEventWindowFlagValue(
"Non-'market aware' mode is no longer supported."
)
if delta:
raise UnsupportedEventWindowFlagValue(
"delta values are no longer supported."
)
# Set the behavior for dropping events from the back of the
# event window.
self.drop_condition = self.out_of_market_window
@abstractmethod
def handle_add(self, event):
raise NotImplementedError()
@abstractmethod
def handle_remove(self, event):
raise NotImplementedError()
def __len__(self):
return len(self.ticks)
def update(self, event):
if (hasattr(event, 'type')
and event.type not in (
DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CUSTOM)):
return
self.assert_well_formed(event)
# Add new event and increment totals.
self.ticks.append(event)
# Subclasses should override handle_add to define behavior for
# adding new ticks.
self.handle_add(event)
# Clear out any expired events.
#
# oldest newest
# | |
# V V
while self.drop_condition(self.ticks[0].dt, self.ticks[-1].dt):
# popleft removes and returns the oldest tick in self.ticks
popped = self.ticks.popleft()
# Subclasses should override handle_remove to define
# behavior for removing ticks.
self.handle_remove(popped)
def out_of_market_window(self, oldest, newest):
oldest_index = \
trading.environment.trading_days.searchsorted(oldest)
newest_index = \
trading.environment.trading_days.searchsorted(newest)
trading_days_between = newest_index - oldest_index
# "Put back" a day if oldest is earlier in its day than newest,
# reflecting the fact that we haven't yet completed the last
# day in the window.
if oldest.time() > newest.time():
trading_days_between -= 1
return trading_days_between >= self.window_length
# All event windows expect to receive events with datetime fields
# that arrive in sorted order.
def assert_well_formed(self, event):
assert isinstance(event.dt, datetime), \
"Bad dt in EventWindow:%s" % event
if len(self.ticks) > 0:
# Something is wrong if new event is older than previous.
assert event.dt >= self.ticks[-1].dt, \
"Events arrived out of order in EventWindow: %s -> %s" % \
(event, self.ticks[-1])
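# Illustrative sketch (not part of the original module) of the subclassing
# pattern described in the EventWindow docstring. `event.price` is a
# hypothetical field used purely for demonstration.
class ExampleRollingSum(EventWindow):
    def __init__(self, market_aware=True, window_length=None, delta=None):
        EventWindow.__init__(self, market_aware, window_length, delta)
        self.total = 0.0

    def handle_add(self, event):
        # Called for every tick that enters the window.
        self.total += event.price

    def handle_remove(self, event):
        # Called for every tick that falls out of the window.
        self.total -= event.price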
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_set_ops
_VALID_DTYPES = set([
dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.string])
def set_size(a, validate_indices=True):
"""Compute number of unique elements along last dimension of `a`.
Args:
a: `SparseTensor`, with indices sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a`.
Returns:
`int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
number of unique elements in the corresponding `[0...n-1]` dimension of `a`.
Raises:
TypeError: If `a` is an invalid type.
"""
a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
if not isinstance(a, sparse_tensor.SparseTensor):
raise TypeError("Expected `SparseTensor`, got %s." % a)
if a.values.dtype.base_dtype not in _VALID_DTYPES:
raise TypeError("Invalid dtype %s." % a.values.dtype)
# pylint: disable=protected-access
return gen_set_ops.set_size(
a.indices, a.values, a.dense_shape, validate_indices)
ops.NotDifferentiable("SetSize")
ops.NotDifferentiable("DenseToDenseSetOperation")
ops.NotDifferentiable("DenseToSparseSetOperation")
ops.NotDifferentiable("SparseToSparseSetOperation")
def _convert_to_tensors_or_sparse_tensors(a, b):
"""Convert to tensor types, and flip order if necessary.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`.
b: `Tensor` or `SparseTensor` of the same type as `a`.
Returns:
Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to
`Tensor` or `SparseTensor`, and `flipped` indicates whether the order has
been flipped to make it dense,sparse instead of sparse,dense (since the set
ops do not support the latter).
"""
a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
if a.dtype.base_dtype not in _VALID_DTYPES:
raise TypeError("'a' invalid dtype %s." % a.dtype)
b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
if b.dtype.base_dtype != a.dtype.base_dtype:
raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
if (isinstance(a, sparse_tensor.SparseTensor) and
not isinstance(b, sparse_tensor.SparseTensor)):
return b, a, True
return a, b, False
def _set_operation(a, b, set_operation, validate_indices=True):
"""Compute set operation of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
set_operation: String indicating set operation. See
SetOperationOp::SetOperationFromContext for valid values.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` with the same rank as `a` and `b`, and all but the last
dimension the same. Elements along the last dimension contain the results
of the set operation.
Raises:
TypeError: If inputs are invalid types.
ValueError: If `a` is sparse and `b` is dense.
"""
if isinstance(a, sparse_tensor.SparseTensor):
if isinstance(b, sparse_tensor.SparseTensor):
indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(
a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape,
set_operation, validate_indices)
else:
raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. "
"Please flip the order of your inputs.")
elif isinstance(b, sparse_tensor.SparseTensor):
indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(
a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)
else:
indices, values, shape = gen_set_ops.dense_to_dense_set_operation(
a, b, set_operation, validate_indices)
return sparse_tensor.SparseTensor(indices, values, shape)
def set_intersection(a, b, validate_indices=True):
"""Compute set intersection of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Example:
```python
import tensorflow as tf
import collections
# Represent the following array of sets as a sparse tensor:
# a = np.array([[{1, 2}, {3}], [{4}, {5, 6}]])
a = collections.OrderedDict([
((0, 0, 0), 1),
((0, 0, 1), 2),
((0, 1, 0), 3),
((1, 0, 0), 4),
((1, 1, 0), 5),
((1, 1, 1), 6),
])
a = tf.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2,2,2])
# b = np.array([[{1}, {}], [{4}, {5, 6, 7, 8}]])
b = collections.OrderedDict([
((0, 0, 0), 1),
((1, 0, 0), 4),
((1, 1, 0), 5),
((1, 1, 1), 6),
((1, 1, 2), 7),
((1, 1, 3), 8),
])
b = tf.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4])
# `tf.sets.set_intersection` is applied to each aligned pair of sets.
tf.sets.set_intersection(a, b)
# The result will be equivalent to either of:
#
# np.array([[{1}, {}], [{4}, {5, 6}]])
#
# collections.OrderedDict([
# ((0, 0, 0), 1),
# ((1, 0, 0), 4),
# ((1, 1, 0), 5),
# ((1, 1, 1), 6),
# ])
```
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
must be sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
the last dimension the same. Elements along the last dimension contain the
intersections.
"""
a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b)
return _set_operation(a, b, "intersection", validate_indices)
def set_difference(a, b, aminusb=True, validate_indices=True):
"""Compute set difference of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Example:
```python
import tensorflow as tf
import collections
# Represent the following array of sets as a sparse tensor:
# a = np.array([[{1, 2}, {3}], [{4}, {5, 6}]])
a = collections.OrderedDict([
((0, 0, 0), 1),
((0, 0, 1), 2),
((0, 1, 0), 3),
((1, 0, 0), 4),
((1, 1, 0), 5),
((1, 1, 1), 6),
])
a = tf.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2, 2, 2])
# np.array([[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]])
b = collections.OrderedDict([
((0, 0, 0), 1),
((0, 0, 1), 3),
((0, 1, 0), 2),
((1, 0, 0), 4),
((1, 0, 1), 5),
((1, 1, 0), 5),
((1, 1, 1), 6),
((1, 1, 2), 7),
((1, 1, 3), 8),
])
b = tf.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4])
# `set_difference` is applied to each aligned pair of sets.
tf.sets.set_difference(a, b)
# The result will be equivalent to either of:
#
# np.array([[{2}, {3}], [{}, {}]])
#
# collections.OrderedDict([
# ((0, 0, 0), 2),
# ((0, 0, 1), 3),
# ])
```
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
must be sorted in row-major order.
aminusb: Whether to subtract `b` from `a`, vs vice versa.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
the last dimension the same. Elements along the last dimension contain the
differences.
"""
a, b, flipped = _convert_to_tensors_or_sparse_tensors(a, b)
if flipped:
aminusb = not aminusb
return _set_operation(a, b, "a-b" if aminusb else "b-a", validate_indices)
def set_union(a, b, validate_indices=True):
"""Compute set union of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Example:
```python
import tensorflow as tf
import collections
# [[{1, 2}, {3}], [{4}, {5, 6}]]
a = collections.OrderedDict([
((0, 0, 0), 1),
((0, 0, 1), 2),
((0, 1, 0), 3),
((1, 0, 0), 4),
((1, 1, 0), 5),
((1, 1, 1), 6),
])
a = tf.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2, 2, 2])
# [[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]]
b = collections.OrderedDict([
((0, 0, 0), 1),
((0, 0, 1), 3),
((0, 1, 0), 2),
((1, 0, 0), 4),
((1, 0, 1), 5),
((1, 1, 0), 5),
((1, 1, 1), 6),
((1, 1, 2), 7),
((1, 1, 3), 8),
])
b = tf.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4])
# `set_union` is applied to each aligned pair of sets.
tf.sets.set_union(a, b)
# The result will be equivalent to either of:
#
# np.array([[{1, 2, 3}, {2, 3}], [{4, 5}, {5, 6, 7, 8}]])
#
# collections.OrderedDict([
# ((0, 0, 0), 1),
# ((0, 0, 1), 2),
# ((0, 0, 2), 3),
# ((0, 1, 0), 2),
# ((0, 1, 1), 3),
# ((1, 0, 0), 4),
# ((1, 0, 1), 5),
# ((1, 1, 0), 5),
# ((1, 1, 1), 6),
# ((1, 1, 2), 7),
# ((1, 1, 3), 8),
# ])
```
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
must be sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
the last dimension the same. Elements along the last dimension contain the
unions.
"""
a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b)
return _set_operation(a, b, "union", validate_indices)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
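# Illustrative: build_address(b'www.example.com') returns
# b'\x03www\x07example\x03com\x00', the RFC 1035 label encoding; any label
# longer than 63 bytes makes the function return None.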
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
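# Illustrative: build_request(b'www.example.com', QTYPE_A) yields a random
# 2-byte query id followed by flags 0x0100 (recursion desired), QDCOUNT=1,
# zeroed AN/NS/AR counts, the encoded name above, and QTYPE/QCLASS
# b'\x00\x01\x00\x01'.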
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
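# Illustrative: parse_name(b'\x03www\x07example\x03com\x00', 0) returns
# (17, b'www.example.com') -- 17 bytes consumed, including the terminating
# zero label. Compressed names (two high bits set) recurse via the pointer.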
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
if hostname.endswith(b'.'):
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
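# Illustrative: is_valid_hostname(b'www.example.com') is True, while
# b'bad_host' (underscore) and b'-leading.example' (label starting with a
# hyphen) both fail VALID_HOSTNAME and return False.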
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_FIRST = 0
STATUS_SECOND = 1
class DNSResolver(object):
def __init__(self, server_list=None, prefer_ipv6=False):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
if prefer_ipv6:
self._QTYPES = [QTYPE_AAAA, QTYPE_A]
else:
self._QTYPES = [QTYPE_A, QTYPE_AAAA]
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if not (line and line.startswith(b'nameserver')):
continue
parts = line.split()
if len(parts) < 2:
continue
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) < 2:
continue
ip = parts[0]
if not common.is_ip(ip):
continue
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_SECOND) \
== STATUS_FIRST:
self._hostname_status[hostname] = STATUS_SECOND
self._send_req(hostname, self._QTYPES[1])
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) \
== STATUS_SECOND:
for question in response.questions:
if question[1] == self._QTYPES[1]:
self._call_callback(hostname, None)
break
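# Illustrative flow (derived from _handle_data above): resolve() first sends
# self._QTYPES[0] (e.g. A); a response without a usable answer moves the
# hostname to STATUS_SECOND and retries with self._QTYPES[1] (e.g. AAAA),
# and only a failed second attempt reports None to the callback.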
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
logging.warn('received a packet from an unexpected dns server')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_FIRST
self._send_req(hostname, self._QTYPES[0])
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, self._QTYPES[0])
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
|
|
# type: ignore[attr-defined]
# pylint: disable=no-member
import pytest
from Tests.Marketplace.zip_packs import get_latest_pack_zip_from_pack_files, zip_packs,\
remove_test_playbooks_if_exist, remove_test_playbooks_from_signatures, get_zipped_packs_names,\
copy_zipped_packs_to_artifacts
class TestZipPacks:
BLOB_NAMES = [
'content/packs/Slack/1.0.0/Slack.zip',
'content/packs/Slack/1.0.2/Slack.zip',
'content/packs/Slack/1.0.1/Slack.zip',
'content/packs/SlackSheker/2.0.0/SlackSheker.zip',
'content/packs/Slack/Slack.png',
'content/packs/SlackSheker/SlackSheker.png'
]
BLOB_NAMES_NO_ZIP = [
'content/packs/SlackSheker/2.0.0/SlackSheker.zip',
'content/packs/Slack/Slack.png',
'content/packs/SlackSheker/SlackSheker.png'
]
def test_get_latest_pack_zip_from_blob(self):
"""
Given:
List of blobs
When:
Getting the pack to download
Then:
Return the correct pack zip blob
"""
blob_name = get_latest_pack_zip_from_pack_files('Slack', TestZipPacks.BLOB_NAMES)
assert blob_name == 'content/packs/Slack/1.0.2/Slack.zip'
def test_get_zipped_packs_name(self, mocker):
"""
Given:
Some general path information of the packs and the build
When:
There is a valid pack which should be stored in the created dictionary
Then:
Create a dict with a single entry mapping the found pack to its zip path
"""
from Tests.Marketplace import zip_packs
list_dir_result = ['Slack', 'ApiModules', 'python_file.py']
pack_files = TestZipPacks.BLOB_NAMES
mocker.patch.object(zip_packs, 'get_files_in_dir', return_value=pack_files)
mocker.patch('os.listdir', return_value=list_dir_result)
mocker.patch('os.path.isdir', return_value=True)
zipped_packs = get_zipped_packs_names('content')
assert zipped_packs == {'Slack': 'content/packs/Slack/1.0.2/Slack.zip'}
def test_get_zipped_packs_name_no_zipped_packs(self, mocker):
"""
Given:
Some general path information of the packs and the build
When:
There are no valid packs in the packs directory
Then:
exit since no packs were found
"""
with pytest.raises(Exception):
from Tests.Marketplace import zip_packs
list_dir_result = ['ApiModules', 'python_file.py']
pack_files = TestZipPacks.BLOB_NAMES
mocker.patch.object(zip_packs, 'get_files_in_dir', return_value=pack_files)
mocker.patch('os.listdir', return_value=list_dir_result)
mocker.patch('os.path.isdir', return_value=True)
get_zipped_packs_names('content')
def test_get_zipped_packs_name_no_latest_zip(self, mocker):
"""
Given:
Some general path information of the packs and the build
When:
There is one valid pack, but it has no valid zip files
Then:
exit since no zipped packs were found
"""
with pytest.raises(Exception):
from Tests.Marketplace import zip_packs
list_dir_result = ['Slack', 'ApiModules', 'python_file.py']
pack_files = TestZipPacks.BLOB_NAMES_NO_ZIP
mocker.patch.object(zip_packs, 'get_files_in_dir', return_value=pack_files)
mocker.patch('os.listdir', return_value=list_dir_result)
mocker.patch('os.path.isdir', return_value=True)
get_zipped_packs_names('content')
def test_copy_zipped_packs_to_artifacts(self, mocker):
"""
Given:
A dict containing information about a single pack
When:
The information is valid
Then:
make a single call to the copy function
"""
import shutil
zipped_packs = {'Slack': 'content/packs/Slack/1.0.1/Slack.zip'}
artifacts_path = 'dummy_path'
mocker.patch.object(shutil, 'copy', side_effect=None)
mocker.patch('os.path.exists', return_value=True)
copy_zipped_packs_to_artifacts(zipped_packs, artifacts_path)
assert shutil.copy.call_count == 1
def test_copy_zipped_packs_to_artifacts_no_zipped_packs(self, mocker):
"""
Given:
A dict containing no information about packs
When:
There are no packs to copy
Then:
make no calls to the copy function
"""
import shutil
zipped_packs = {}
artifacts_path = 'dummy_path'
mocker.patch.object(shutil, 'copy', side_effect=None)
mocker.patch('os.path.exists', return_value=True)
copy_zipped_packs_to_artifacts(zipped_packs, artifacts_path)
assert shutil.copy.call_count == 0
def test_zip_packs(self, mocker):
"""
Given:
Packs zips in the zip folder
When:
Zipping into zip of zips
Then:
Zip the packs correctly
"""
from zipfile import ZipFile
mocker.patch.object(ZipFile, '__init__', return_value=None)
mocker.patch.object(ZipFile, 'write')
mocker.patch.object(ZipFile, 'close')
packs = {'Slack': 'path/Slack.zip'}
zip_packs(packs, 'oklol')
assert ZipFile.write.call_args[0][0] == 'path/Slack.zip'
assert ZipFile.write.call_args[0][1] == 'Slack.zip'
def test_remove_test_playbooks_if_exist(self, mocker):
from zipfile import ZipFile
import shutil
"""
Given:
Removing test playbooks from packs
When:
Zipping packs
Then:
The zip should be without TestPlaybooks
"""
files = ['README.md', 'changelog.json', 'metadata.json', 'ReleaseNotes/1_0_1.md',
'Playbooks/playbook-oylo.yml', 'TestPlaybooks/playbook-oylo.yml',
'Scripts/script-TaniumAskQuestion.yml', 'Integrations/integration-shtak.yml']
mocker.patch.object(ZipFile, '__init__', return_value=None)
mocker.patch.object(ZipFile, 'write')
mocker.patch.object(ZipFile, 'close')
mocker.patch.object(ZipFile, 'namelist', return_value=files)
mocker.patch.object(ZipFile, 'extractall')
mocker.patch('os.remove')
mocker.patch('shutil.make_archive')
mocker.patch('os.mkdir')
remove_test_playbooks_if_exist('dest', [{'name': 'path'}])
extract_args = ZipFile.extractall.call_args[1]['members']
archive_args = shutil.make_archive.call_args[0]
assert list(extract_args) == [file_ for file_ in files if 'TestPlaybooks' not in file_]
assert archive_args[0] == 'dest/name'
def test_remove_test_playbooks_if_exist_no_test_playbooks(self, mocker):
from zipfile import ZipFile
"""
Given:
Removing test playbooks from packs
When:
Zipping packs, the pack doesn't have TestPlaybooks
Then:
TestPlaybooks should not be removed
"""
files = ['README.md', 'changelog.json', 'metadata.json', 'ReleaseNotes/1_0_1.md',
'Playbooks/playbook-oylo.yml', 'Scripts/script-TaniumAskQuestion.yml',
'Integrations/integration-shtak.yml']
mocker.patch.object(ZipFile, '__init__', return_value=None)
mocker.patch.object(ZipFile, 'namelist', return_value=files)
mocker.patch.object(ZipFile, 'extractall')
remove_test_playbooks_if_exist('dest', [{'name': 'path'}])
assert ZipFile.extractall.call_count == 0
def test_remove_test_playbooks_from_signatures(self, mocker):
"""
Given:
Removing test playbooks from packs
When:
Zipping packs
Then:
Signatures should be updated to have no test playbooks
"""
import json
from unittest.mock import mock_open
files = ['Integrations/integration-VirusTotal_5.5.yml', 'changelog.json', 'metadata.json',
'ReleaseNotes/1_0_1.md', 'TestPlaybooks/playbook-VirusTotal_detonate_file.yml', 'README.md',
'Scripts/script-TaniumAskQuestion.yml', 'Playbooks/playbook-Detonate_File-VirusTotal.yml',
'Integrations/integration-shtak.yml', 'TestPlaybooks/playbook-VirusTotal_preferred_vendors_test.yml',
"TestPlaybooks/playbook-virusTotal-test.yml"]
sigs = json.dumps({
"Integrations/integration-VirusTotal_5.5.yml": "a123",
"Playbooks/playbook-Detonate_File-VirusTotal.yml": "b123",
"README.md": "c123",
"TestPlaybooks/playbook-VirusTotal_detonate_file.yml": "d123",
"TestPlaybooks/playbook-VirusTotal_preferred_vendors_test.yml": "e123",
"TestPlaybooks/playbook-virusTotal-test.yml": "f123",
"changelog.json": "g123",
"metadata.json": "h123"
})
mocker.patch('os.path.isfile', return_value=True)
mocker.patch('builtins.open', mock_open(read_data=sigs))
mocker.patch.object(json, 'dump')
remove_test_playbooks_from_signatures('path', files)
dump_args = json.dump.call_args[0][0]
assert dump_args == {
"Integrations/integration-VirusTotal_5.5.yml": "a123",
"Playbooks/playbook-Detonate_File-VirusTotal.yml": "b123",
"README.md": "c123",
"changelog.json": "g123",
"metadata.json": "h123"
}
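    # A hedged sketch of a happy-path case (not from the original suite; the
    # blob names and the expected return shape are assumptions):
    def test_get_zipped_packs_names_happy_path(self, mocker):
        """
        Given:
            A pack directory containing a single versioned pack zip
        When:
            The zip file is valid
        Then:
            get_zipped_packs_names should pick up the pack with its zip path
        """
        from Tests.Marketplace import zip_packs
        pack_files = ['content/packs/Slack/1.0.1/Slack.zip']
        mocker.patch.object(zip_packs, 'get_files_in_dir', return_value=pack_files)
        mocker.patch('os.listdir', return_value=['Slack'])
        mocker.patch('os.path.isdir', return_value=True)
        result = get_zipped_packs_names('content')
        assert 'Slack' in result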
|
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release
#
# 1. Update the Version.java to remove the snapshot bit
# 2. Remove the -SNAPSHOT suffix in all pom.xml files
#
# USAGE:
#
# python3 ./dev-tools/prepare-release.py
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
import fnmatch
import argparse
from prepare_release_update_documentation import update_reference_docs
import subprocess
import tempfile
import re
import os
import shutil
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
MAIL_TEMPLATE = """
Hi all
The new release candidate for %(version)s based on this commit[1] is now available, including the x-plugins, and RPM/deb repos:
- ZIP [2]
- tar.gz [3]
- RPM [4]
- deb [5]
Plugins can be installed as follows,
bin/plugin -Des.plugins.staging=true install cloud-aws
The same goes for the x-plugins:
bin/plugin -Des.plugins.staging=true install license
bin/plugin -Des.plugins.staging=true install shield
bin/plugin -Des.plugins.staging=true install watcher
To install the deb from an APT repo:
APT line sources.list line:
deb http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/debian/ stable main
To install the RPM, create a YUM file like:
/etc/yum.repos.d/elasticsearch.repo
containing:
[elasticsearch-2.0]
name=Elasticsearch repository for packages
baseurl=http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
To smoke-test the release please run:
python3 -B ./dev-tools/smoke_tests_rc.py --version %(version)s --hash %(hash)s --plugins license,shield,watcher
NOTE: this script requires JAVA_HOME to point to a Java 7 Runtime
[1] https://github.com/elastic/elasticsearch/commit/%(hash)s
[2] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip
[3] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz
[4] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm
[5] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb
"""
VERBOSE=True
def run(command, env_vars=None, verbose=VERBOSE):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
if not verbose:
command = '%s >> /dev/null 2>&1' % (command)
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8')
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
if 'dev-tools/__pycache__/' in s:
print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***')
raise RuntimeError('git status shows untracked files got:\n%s' % s)
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch got:\n%s' % (s))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
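# A minimal usage sketch of process_file (not part of the release flow; the
# file name and version strings are placeholders):
#
#   def drop_snapshot(line):
#       return line.replace('<version>2.1.0-SNAPSHOT</version>',
#                           '<version>2.1.0</version>')
#   changed = process_file('pom.xml', drop_snapshot)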
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
release = release.replace('-', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
processed = process_file(version_file, callback)
if not processed:
raise RuntimeError('failed to remove snapshot version for %s' % (release))
def rename_local_meta_files(path):
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, 'maven-metadata-local.xml*'):
full_path = os.path.join(root, file_name)
os.rename(full_path, os.path.join(root, file_name.replace('-local', '')))
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already a release version, we fail.
# Returns the next version string, i.e. 0.90.7
def find_release_version():
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch release')
parser.add_argument('--deploy', '-d', dest='deploy', action='store_true',
help='Installs and deploys the release to a Sonatype staging repository.')
parser.add_argument('--skipDocCheck', '-c', dest='skip_doc_check', action='store_true',
help='Skips any checks for pending documentation changes')
parser.add_argument('--push-s3', '-p', dest='push', action='store_true',
help='Pushes artifacts to the S3 staging area')
parser.add_argument('--install_only', '-i', dest='install_only', action='store_true',
help='Only runs a maven install to skip the remote deployment step')
parser.add_argument('--gpg-key', '-k', dest='gpg_key', default="D88E42B4",
help='Allows you to specify a different gpg_key to be used instead of the default release key')
parser.add_argument('--verbose', '-b', dest='verbose', action='store_true',
help='Runs the script in verbose mode')
parser.set_defaults(deploy=False)
parser.set_defaults(skip_doc_check=False)
parser.set_defaults(push=False)
parser.set_defaults(install_only=False)
parser.set_defaults(verbose=False)
args = parser.parse_args()
install_and_deploy = args.deploy
skip_doc_check = args.skip_doc_check
push = args.push
gpg_key = args.gpg_key
install_only = args.install_only
VERBOSE = args.verbose
ensure_checkout_is_clean()
release_version = find_release_version()
if not re.match(r'(\d+\.\d+)\.*', release_version):
raise RuntimeError('illegal release version format: %s' % (release_version))
major_minor_version = re.match(r'(\d+\.\d+)\.*', release_version).group(1)
print('*** Preparing release version: [%s]' % release_version)
if not skip_doc_check:
print('*** Check for pending documentation changes')
pending_files = update_reference_docs(release_version)
if pending_files:
raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files))
run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
remove_version_snapshot(VERSION_FILE, release_version)
print('*** Done removing snapshot version. DO NOT COMMIT THIS WHEN CREATING A RELEASE CANDIDATE.')
shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
localRepo = '/tmp/elasticsearch-%s-%s' % (release_version, shortHash)
localRepoElasticsearch = localRepo + '/org/elasticsearch'
if os.path.exists(localRepoElasticsearch):
print('clean local repository %s' % localRepoElasticsearch)
shutil.rmtree(localRepoElasticsearch)
if install_only:
mvn_target = 'install'
else:
mvn_target = 'deploy'
install_command = 'mvn clean %s -Prelease -Dskip.integ.tests=true -Dgpg.keyname="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, gpg_key, localRepo)
clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch)
rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
s3_sync_command = 's3cmd sync %s s3://download.elasticsearch.org/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, release_version, shortHash)
s3_bucket_sync_to = 'download.elasticsearch.org/elasticsearch/staging/%s-%s/repos' % (release_version, shortHash)
build_repo_command = 'dev-tools/build_repositories.sh %s' % (major_minor_version)
if install_and_deploy:
for cmd in [install_command, clean_repo_command]:
run(cmd)
rename_local_meta_files(localRepoElasticsearch)
else:
print('')
print('*** To create a release candidate run: ')
print(' %s' % (install_command))
print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command))
print(' 2. Rename all maven metadata files: %s' % (rename_metadata_files_command))
if push:
run(s3_sync_command)
env_vars = {'S3_BUCKET_SYNC_TO': s3_bucket_sync_to}
run(build_repo_command, env_vars)
else:
print('')
print('*** To push a release candidate to s3 run: ')
print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch))
print (' %s' % (s3_sync_command))
print(' 2. Create repositories: ')
print (' export S3_BUCKET_SYNC_TO="%s"' % (s3_bucket_sync_to))
print(' %s' % (build_repo_command))
print('')
print('NOTE: the above mvn command will prompt you several times for the GPG passphrase of the key you specified; you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
print(' since RPM signing doesn\'t support gpg-agent, the recommended way to set the password is to add a release profile to your settings.xml:')
print("""
<profiles>
<profile>
<id>release</id>
<properties>
<gpg.passphrase>YourPasswordGoesHere</gpg.passphrase>
</properties>
</profile>
</profiles>
""")
print('NOTE: Running s3cmd might require you to create a config file with your credentials, since s3cmd does not support supplying them via the command line!')
print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:')
print(MAIL_TEMPLATE % ({'version' : release_version, 'hash': shortHash, 'major_minor_version' : major_minor_version}))
|
|
r"""
===============
Decoding (MVPA)
===============
.. include:: ../../links.inc
Design philosophy
=================
Decoding (a.k.a. MVPA) in MNE largely follows the machine
learning API of the scikit-learn package.
Each estimator implements ``fit``, ``transform``, ``fit_transform``, and
(optionally) ``inverse_transform`` methods. For more details on this design,
visit scikit-learn_. For additional theoretical insights into the decoding
framework in MNE, see :footcite:`KingEtAl2018`.
For ease of comprehension, we will denote instantiations of the class using
the same name as the class but in lowercase instead of CamelCase.
Let's start by loading data for a simple two-class problem:
"""
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax = -0.200, 0.500
event_id = {'Auditory/Left': 1, 'Visual/Left': 3} # just use two
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here we low-pass at 20 Hz so we can more heavily decimate, and allow
# the example to run faster. The 2 Hz high-pass helps improve CSP.
raw.filter(2, 20)
events = mne.find_events(raw, 'STI 014')
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0.), preload=True,
reject=dict(grad=4000e-13, eog=150e-6), decim=10)
epochs.pick_types(meg=True, exclude='bads') # remove stim and EOG
del raw
X = epochs.get_data() # MEG signals: n_epochs, n_meg_channels, n_times
y = epochs.events[:, 2] # target: auditory left vs visual left
###############################################################################
# Transformation classes
# ======================
#
# Scaler
# ^^^^^^
# The :class:`mne.decoding.Scaler` will standardize the data based on channel
# scales. In the simplest modes ``scalings=None`` or ``scalings=dict(...)``,
# each data channel type (e.g., mag, grad, eeg) is treated separately and
# scaled by a constant. This is the approach used by e.g.,
# :func:`mne.compute_covariance` to standardize channel scales.
#
# If ``scalings='mean'`` or ``scalings='median'``, each channel is scaled using
# empirical measures. Each channel is scaled independently by the mean and
# standard deviation, or median and interquartile range, respectively, across
# all epochs and time points during :class:`~mne.decoding.Scaler.fit`
# (during training). The :meth:`~mne.decoding.Scaler.transform` method is
# called to transform data (training or test set) by scaling all time points
# and epochs on a channel-by-channel basis. To perform both the ``fit`` and
# ``transform`` operations in a single call, the
# :meth:`~mne.decoding.Scaler.fit_transform` method may be used. To invert the
# transform, :meth:`~mne.decoding.Scaler.inverse_transform` can be used. For
# ``scalings='median'``, scikit-learn_ version 0.17+ is required.
#
# .. note:: Using this class is different from directly applying
# :class:`sklearn.preprocessing.StandardScaler` or
# :class:`sklearn.preprocessing.RobustScaler` offered by
# scikit-learn_. These scale each *classification feature*, e.g.
# each time point for each channel, with mean and standard
# deviation computed across epochs, whereas
# :class:`mne.decoding.Scaler` scales each *channel* using mean and
# standard deviation computed across all of its time points
# and epochs.
#
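###############################################################################
# A brief illustrative sketch of that distinction (not part of the original
# tutorial; the point is the shapes each scaler operates on):
X_scaled_3d = Scaler(epochs.info).fit_transform(X)  # per-channel, stays 3D
X_2d = Vectorizer().fit_transform(X)                # (n_epochs, n_features)
X_scaled_2d = StandardScaler().fit_transform(X_2d)  # per-feature scaling
###############################################################################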
# Vectorizer
# ^^^^^^^^^^
# The scikit-learn API provides functionality to chain transformers and
# estimators by using :class:`sklearn.pipeline.Pipeline`. We can construct
# decoding pipelines and perform cross-validation and grid-search. However,
# scikit-learn transformers and estimators generally expect 2D data
# (n_samples * n_features), whereas MNE transformers typically output data
# with a higher dimensionality
# (e.g. n_samples * n_channels * n_frequencies * n_times). A Vectorizer
# therefore needs to be applied between the MNE and the scikit-learn steps
# like:
# Uses all MEG sensors and time points as separate classification
# features, so the resulting filters used are spatio-temporal
clf = make_pipeline(Scaler(epochs.info),
Vectorizer(),
LogisticRegression(solver='lbfgs'))
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
score = np.mean(scores, axis=0)
print('Spatio-temporal: %0.1f%%' % (100 * score,))
###############################################################################
# PSDEstimator
# ^^^^^^^^^^^^
# The :class:`mne.decoding.PSDEstimator`
# computes the power spectral density (PSD) using the multitaper
# method. It takes a 3D array as input, converts it into 2D and computes the
# PSD.
#
# FilterEstimator
# ^^^^^^^^^^^^^^^
# The :class:`mne.decoding.FilterEstimator` filters the 3D epochs data.
#
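###############################################################################
# As a hedged sketch (not part of the original tutorial), PSD features can
# feed a classifier directly; the 2-20 Hz band below is an assumption chosen
# to match the filtering applied earlier:
from mne.decoding import PSDEstimator
psd_clf = make_pipeline(
    PSDEstimator(sfreq=epochs.info['sfreq'], fmin=2., fmax=20.),
    Vectorizer(),
    LogisticRegression(solver='lbfgs'))
psd_scores = cross_val_multiscore(psd_clf, X, y, cv=5, n_jobs=1)
print('PSD features: %0.1f%%' % (100 * np.mean(psd_scores),))
###############################################################################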
# Spatial filters
# ===============
#
# Just like temporal filters, spatial filters provide weights to modify the
# data along the sensor dimension. They are popular in the BCI community
# because of their simplicity and ability to distinguish spatially-separated
# neural activity.
#
# Common spatial pattern
# ^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`mne.decoding.CSP` is a technique to analyze multichannel data based
# on recordings from two classes :footcite:`Koles1991` (see also
# https://en.wikipedia.org/wiki/Common_spatial_pattern).
#
# Let :math:`X \in R^{C\times T}` be a segment of data with
# :math:`C` channels and :math:`T` time points. The data at a single time point
# is denoted by :math:`x(t)` such that :math:`X=[x(t), x(t+1), ..., x(t+T-1)]`.
# Common spatial pattern (CSP) finds a decomposition that projects the signal
# in the original sensor space to CSP space using the following transformation:
#
# .. math:: x_{CSP}(t) = W^{T}x(t)
# :label: csp
#
# where each column of :math:`W \in R^{C\times C}` is a spatial filter and each
# row of :math:`x_{CSP}` is a CSP component. The matrix :math:`W` is also
# called the de-mixing matrix in other contexts. Let
# :math:`\Sigma^{+} \in R^{C\times C}` and :math:`\Sigma^{-} \in R^{C\times C}`
# be the estimates of the covariance matrices of the two conditions.
# CSP analysis is given by the simultaneous diagonalization of the two
# covariance matrices
#
# .. math:: W^{T}\Sigma^{+}W = \lambda^{+}
# :label: diagonalize_p
# .. math:: W^{T}\Sigma^{-}W = \lambda^{-}
# :label: diagonalize_n
#
# where :math:`\lambda^{C}` is a diagonal matrix whose entries are the
# eigenvalues of the following generalized eigenvalue problem
#
# .. math:: \Sigma^{+}w = \lambda \Sigma^{-}w
# :label: eigen_problem
#
# Large entries in the diagonal matrix correspond to a spatial filter which
# gives high variance in one class but low variance in the other. Thus, the
# filter facilitates discrimination between the two classes.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_eeg.py`
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_timefreq.py`
#
# .. note::
#
# The winning entry of the Grasp-and-lift EEG competition in Kaggle used
# the :class:`~mne.decoding.CSP` implementation in MNE and was featured as
# a `script of the week <sotw_>`_.
#
# .. _sotw: http://blog.kaggle.com/2015/08/12/july-2015-scripts-of-the-week/
#
# We can use CSP with these data with:
csp = CSP(n_components=3, norm_trace=False)
clf_csp = make_pipeline(csp, LinearModel(LogisticRegression(solver='lbfgs')))
scores = cross_val_multiscore(clf_csp, X, y, cv=5, n_jobs=1)
print('CSP: %0.1f%%' % (100 * scores.mean(),))
###############################################################################
# Source power comodulation (SPoC)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Source Power Comodulation (:class:`mne.decoding.SPoC`)
# :footcite:`DahneEtAl2014` identifies the composition of
# orthogonal spatial filters that maximally correlate with a continuous target.
#
# SPoC can be seen as an extension of the CSP where the target is driven by a
# continuous variable rather than a discrete variable. Typical applications
# include extraction of motor patterns using EMG power or audio patterns using
# sound envelope.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_spoc_CMC.py`
#
# xDAWN
# ^^^^^
# :class:`mne.preprocessing.Xdawn` is a spatial filtering method designed to
# improve the signal to signal + noise ratio (SSNR) of the ERP responses
# :footcite:`RivetEtAl2009`. Xdawn was originally
# designed for P300 evoked potential by enhancing the target response with
# respect to the non-target response. The implementation in MNE-Python is a
# generalization to any type of ERP.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_preprocessing_plot_xdawn_denoising.py`
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_xdawn_eeg.py`
#
# Effect-matched spatial filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The result of :class:`mne.decoding.EMS` is a spatial filter at each time
# point and a corresponding time course :footcite:`SchurgerEtAl2013`.
# Intuitively, the result gives the similarity between the filter at
# each time point and the data vector (sensors) at that time point.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_ems_filtering.py`
#
# Patterns vs. filters
# ^^^^^^^^^^^^^^^^^^^^
#
# When interpreting the components of the CSP (or spatial filters in general),
# it is often more intuitive to think about how :math:`x(t)` is composed of
# the different CSP components :math:`x_{CSP}(t)`. In other words, we can
# rewrite Equation :eq:`csp` as follows:
#
# .. math:: x(t) = (W^{-1})^{T}x_{CSP}(t)
# :label: patterns
#
# The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns.
# This is also called the mixing matrix. The example
# :ref:`sphx_glr_auto_examples_decoding_plot_linear_model_patterns.py`
# discusses the difference between patterns and filters.
#
# These can be plotted with:
# Fit CSP on full data and plot
csp.fit(X, y)
csp.plot_patterns(epochs.info)
csp.plot_filters(epochs.info, scalings=1e-9)
###############################################################################
# Decoding over time
# ==================
#
# This strategy consists in fitting a multivariate predictive model on each
# time instant and evaluating its performance at the same instant on new
# epochs. The :class:`mne.decoding.SlidingEstimator` will take as input a
# pair of features :math:`X` and targets :math:`y`, where :math:`X` has
# more than 2 dimensions. For decoding over time the data :math:`X`
# is the epochs data of shape n_epochs x n_channels x n_times. As the
# last dimension of :math:`X` is the time, an estimator will be fit
# on every time instant.
#
# This approach is analogous to the searchlight-based approaches in fMRI,
# where here we are interested in when one can discriminate experimental
# conditions and therefore figure out when the effect of interest happens.
#
# When working with linear models as estimators, this approach boils
# down to estimating a discriminative spatial filter for each time instant.
#
# Temporal decoding
# ^^^^^^^^^^^^^^^^^
#
# We'll use logistic regression as the machine learning model for binary
# classification.
# We will train the classifier on all left visual vs auditory trials on MEG
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC') # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
###############################################################################
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(),
LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)
coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked_time_gen = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
evoked_time_gen.plot_joint(times=np.arange(0., .500, .100), title='patterns',
**joint_kwargs)
###############################################################################
# Temporal generalization
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Temporal generalization is an extension of the decoding over time approach.
# It consists in evaluating whether the model estimated at a particular
# time instant accurately predicts any other time instant. It is analogous to
# transferring a trained model to a distinct learning problem, where the
# problems correspond to decoding the patterns of brain activity recorded at
# distinct time instants.
#
# The object for temporal generalization is
# :class:`mne.decoding.GeneralizingEstimator`. It expects as input :math:`X`
# and :math:`y` (similarly to :class:`~mne.decoding.SlidingEstimator`) but
# generates predictions from each model for all time instants. The class
# :class:`~mne.decoding.GeneralizingEstimator` is generic and will treat the
# last dimension as the one to be used for generalization testing. For
# convenience, here, we refer to it as different tasks. If :math:`X`
# corresponds to epochs data then the last dimension is time.
#
# This runs the analysis used in :footcite:`KingEtAl2014` and further detailed
# in :footcite:`KingDehaene2014`:
# define the Temporal generalization object
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc',
verbose=True)
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the diagonal (it's exactly the same as the time-by-time decoding above)
fig, ax = plt.subplots()
ax.plot(epochs.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
###############################################################################
# Plot the full (generalization) matrix:
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',
extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
###############################################################################
# Projecting sensor-space patterns to source space
# ================================================
# If you use a linear classifier (or regressor) for your data, you can also
# project these to source space. For example, using our ``evoked_time_gen``
# from before:
cov = mne.compute_covariance(epochs, tmax=0.)
del epochs
fwd = mne.read_forward_solution(
data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif')
inv = mne.minimum_norm.make_inverse_operator(
evoked_time_gen.info, fwd, cov, loose=0.)
stc = mne.minimum_norm.apply_inverse(evoked_time_gen, inv, 1. / 9., 'dSPM')
del fwd, inv
###############################################################################
# And this can be visualized using :meth:`stc.plot <mne.SourceEstimate.plot>`:
brain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1,
subjects_dir=subjects_dir)
###############################################################################
# Source-space decoding
# =====================
#
# Source space decoding is also possible, but because the number of features
# can be much larger than in the sensor space, univariate feature selection
# using ANOVA f-test (or some other metric) can be done to reduce the feature
# dimension. Interpreting decoding results might be easier in source space as
# compared to sensor space.
#
# .. topic:: Examples
#
# * :ref:`tut_dec_st_source`
#
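###############################################################################
# As an illustrative sketch (not from the original tutorial), univariate
# feature selection with an ANOVA F-test can be chained into a decoding
# pipeline via scikit-learn; it is demonstrated here on the sensor-space data
# ``X`` for brevity, and ``k=500`` is an arbitrary assumption:
from sklearn.feature_selection import SelectKBest, f_classif
anova_clf = make_pipeline(
    Vectorizer(),
    SelectKBest(f_classif, k=500),
    StandardScaler(),
    LogisticRegression(solver='lbfgs'))
anova_scores = cross_val_multiscore(anova_clf, X, y, cv=5, n_jobs=1)
print('ANOVA-selected features: %0.1f%%' % (100 * np.mean(anova_scores),))
###############################################################################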
# Exercise
# ========
#
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# References
# ==========
# .. footbibliography::
|
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
"""
SAMI XJoin
This script simply joins the four existing extensions inside a FITS file
created during observations with SAMI (SAM Imager). During the process,
it also fits a 2nd-degree polynomial to the OVERSCAN region, which is then
subtracted from the corresponding image.
The user may also want to add flags in order to process the images
according to the following options (in order):
- BIAS subtraction;
- DARK subtraction;
- Remove hot pixels and cosmic rays;
- Remove overglow using a long exposure DARK image;
- Divide by the FLAT;
- Divide by the exposure time;
The documentation for each process is shown in the corresponding function.
Bruno Quint (bquint at ctio.noao.edu)
May 2016
Thanks to Andrei Tokovinin and Claudia M. de Oliveira for the ideas that
were implemented here.
"""
from __future__ import division, print_function
import astropy.io.fits as pyfits
import argparse
import logging as log
import numpy as np
import os
from scipy import ndimage
from scipy import signal
try:
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable
xrange
except NameError:
# noinspection PyShadowingBuiltins
xrange = range
# Piece of code from cosmics.py
# We define the laplacian kernel to be used
laplkernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])
# Other kernels :
growkernel = np.ones((3, 3))
# dilation structure for some morphological operations
dilstruct = np.ones((5, 5))
dilstruct[0, 0] = 0
dilstruct[0, 4] = 0
dilstruct[4, 0] = 0
dilstruct[4, 4] = 0
# noinspection PyPep8Naming
class SAMI_XJoin:
def __init__(self, list_of_files, bias_file=None, clean=False,
cosmic_rays=False, dark_file=None, debug=False,
flat_file=None, glow_file=None, time=False, verbose=False):
self.set_verbose(verbose)
self.set_debug(debug)
self.main(list_of_files, bias_file=bias_file, clean=clean,
cosmic_rays=cosmic_rays, dark_file=dark_file,
flat_file=flat_file, glow_file=glow_file, time=time)
return
@staticmethod
def clean_column(_data, x0, y0, yf, n=5):
# Replace the bad column at x0 (rows y0:yf) with the median of the
# n columns on each side of it.
t1 = _data[y0:yf, x0 - n:x0]
t2 = _data[y0:yf, x0 + 1:x0 + n]
t = np.hstack((t1, t2))
_data[y0:yf, x0] = np.median(t, axis=1)
return _data
def clean_columns(self, _data):
bad_columns = [
[167, 0, 513],
[476, 0, 513],
[602, 0, 513],
[671, 0, 513],
[673, 475, 513],
[810, 0, 513],
[213, 513, 1024]
]
for column in bad_columns:
x0 = column[0]
y0 = column[1]
yf = column[2]
_data = self.clean_column(_data, x0, y0, yf)
return _data
@staticmethod
def clean_line(_data, x0, xf, y, n=5):
# Replace the bad line segment at row y (columns x0:xf) with the median
# of the n rows above and below it.
t1 = _data[y - n:y, x0:xf]
t2 = _data[y + 1:y + n, x0:xf]
t = np.vstack((t1, t2))
_data[y, x0:xf] = np.median(t, axis=0)
return _data
def clean_hot_columns_and_lines(self, data, header, prefix, clean):
if clean is True:
data = self.clean_columns(data)
data = self.clean_lines(data)
header.add_history('Cleaned bad columns and lines.')
prefix = 'c' + prefix
return data, header, prefix
def clean_lines(self, _data):
bad_lines = [
[214, 239, 688],
[477, 516, 490],
[387, 429, 455],
[574, 603, 494],
[574, 603, 493],
[640, 672, 388],
[604, 671, 388]
]
for line in bad_lines:
x0 = line[0]
xf = line[1]
y = line[2]
_data = self.clean_line(_data, x0, xf, y)
return _data
@staticmethod
def dark_subtraction(data, header, prefix, dark_file):
if dark_file is not None:
dark = pyfits.getdata(dark_file)
data -= dark
header['DARKFILE'] = dark_file
prefix = 'd' + prefix
header.add_history('Dark subtracted')
return data, header, prefix
@staticmethod
def divide_by_flat(data, header, prefix, flat_file):
if flat_file is not None:
flat = pyfits.getdata(flat_file)
data /= flat
header['FLATFILE'] = flat_file
header.add_history('Flat normalized')
prefix = 'f' + prefix
return data, header, prefix
@staticmethod
def divide_by_exposuretime(data, header, prefix, time):
if time is True:
try:
exptime = float(header['EXPTIME'])
data /= exptime
header['UNITS'] = 'COUNTS/s'
header.add_history('Divided by exposure time.')
prefix = 't' + prefix
except KeyError:
pass
return data, header, prefix
@staticmethod
def get_header(filename):
fits_file = pyfits.open(filename)
h0 = fits_file[0].header
# noinspection PyUnusedLocal
h1 = fits_file[1].header
# TODO - Multiple header inheritance.
# If there's any card that should be passed from the extensions
# header to the main header, uncomment below.
# for key in h1:
# if key not in h0:
# h0.set(key, value=h1[key], comment=h1.comments[key])
h0.append('UNITS')
h0.set('UNITS', value='COUNTS', comment='Pixel intensity units.')
return h0
@staticmethod
def get_joined_data(filename):
fits_file = pyfits.open(filename)
w, h = str2pixels(fits_file[1].header['DETSIZE'])
log.info(' > %s' % filename)
# Correct for binning
bin_size = np.array(fits_file[1].header['CCDSUM'].split(' '),
dtype=int)
bw, bh = w[1] // bin_size[0], h[1] // bin_size[1]
# Create empty full frame
new_data = np.empty((bh, bw), dtype=float)
# Process each extension
for i in range(1, 5):
tx, ty = str2pixels(fits_file[i].header['TRIMSEC'])
bx, by = str2pixels(fits_file[i].header['BIASSEC'])
data = fits_file[i].data
trim = data[ty[0] - 1:ty[1], tx[0] - 1:tx[1]]
bias = data[by[0] - 1:by[1], bx[0] - 1:bx[1]]
# Collapse the bias columns to a single column.
bias = np.median(bias, axis=1)
# Fit and remove OVERSCAN
x = np.arange(bias.size) + 1
bias_fit_pars = np.polyfit(x, bias, 2) # Last par = inf
bias_fit = np.polyval(bias_fit_pars, x)
bias_fit = bias_fit.reshape((bias_fit.size, 1))
bias_fit = np.repeat(bias_fit, trim.shape[1], axis=1)
trim = trim - bias_fit
dx, dy = str2pixels(fits_file[i].header['DETSEC'])
dx, dy = dx // bin_size[0], dy // bin_size[1]
new_data[dy[0]:dy[1], dx[0]:dx[1]] = trim
return new_data
def main(self, list_of_files, bias_file=None, clean=False,
cosmic_rays=False, dark_file=None, flat_file=None,
glow_file=None, time=False):
self.print_header()
log.info('Processing data')
list_of_files = sorted(list_of_files)
for filename in list_of_files:
prefix = "xj"
# Get joined data
data = self.get_joined_data(filename)
# Build header
header = self.get_header(filename)
# Removing the central bad columns
data = self.remove_central_bad_columns(data)
# BIAS subtraction
data, header, prefix = bias_subtraction(
data, header, prefix, bias_file
)
# DARK subtraction
data, header, prefix = self.dark_subtraction(
data, header, prefix, dark_file
)
# Remove cosmic rays and hot pixels
data, header, prefix = self.remove_cosmic_rays(
data, header, prefix, cosmic_rays
)
# Remove lateral glows
data, header, prefix = self.remove_glows(
data, header, prefix, glow_file
)
# FLAT division
data, header, prefix = self.divide_by_flat(
data, header, prefix, flat_file
)
# Normalize by the EXPOSURE TIME
data, header, prefix = self.divide_by_exposuretime(
data, header, prefix, time
)
# Clean known bad columns and lines
data, header, prefix = self.clean_hot_columns_and_lines(
data, header, prefix, clean
)
# Writing file
header.add_history('Extensions joined using "sami_xjoin"')
path, filename = os.path.split(filename)
pyfits.writeto(os.path.join(path, prefix + filename), data,
header, clobber=True)
log.info("\n All done!")
@staticmethod
def print_header():
msg = (
"\n SAMI - Join Extensions"
" by Bruno Quint (bquint@astro.iag.usp.br)"
" Mar 2015 - Version 0.4"
"\n Starting program.")
log.info(msg)
@staticmethod
def remove_cosmic_rays(data, header, prefix, cosmic_rays):
if cosmic_rays:
c = CosmicsImage(data, gain=2.1, readnoise=10.0, sigclip=3.0,
sigfrac=0.3, objlim=5.0)
c.run(maxiter=4)
data = c.cleanarray
header.add_history(
'Cosmic rays and hot pixels removed using LACosmic')
prefix = 'r' + prefix
return data, header, prefix
def remove_glows(self, data, header, prefix, glow_file):
if glow_file is not None:
# Create four different regions.
regions = [
[np.median(data[539:589, 6:56]), # Top Left
np.median(data[539:589, 975:1019])], # Top Right
[np.median(data[449:506, 6:56]), # Bottom Left
np.median(data[449:506, 975:1019])] # Bottom Right
]
min_std_region = np.argmin(regions) % 2
# The upper reg has background lower or equal to the lower reg
midpt1 = regions[0][min_std_region]
midpt2 = regions[1][min_std_region]
diff = midpt2 - midpt1
dark = pyfits.getdata(glow_file)
dark = self.clean_columns(dark)
dark = self.clean_lines(dark)
dark_regions = [
[np.median(dark[539:589, 6:56]), # Top Left
np.median(dark[539:589, 975:1019])], # Top Right
[np.median(dark[449:506, 6:56]), # Bottom Left
np.median(dark[449:506, 975:1019])] # Bottom Right
]
dark_midpt1 = dark_regions[0][min_std_region]
dark_midpt2 = dark_regions[1][min_std_region]
dark_diff = dark_midpt2 - dark_midpt1
dark -= dark_midpt1
k = diff / dark_diff
temp_dark = dark * k
data -= midpt1
data -= temp_dark
# print(k)
header.add_history('Lateral glow removed using %s file' % glow_file)
prefix = 'g' + prefix
return data, header, prefix
@staticmethod
def set_debug(debug):
if debug:
log.basicConfig(level=log.DEBUG, format='%(message)s')
@staticmethod
def set_verbose(verbose):
if verbose:
log.basicConfig(level=log.INFO, format='%(message)s')
else:
log.basicConfig(level=log.WARNING, format='%(message)s')
@staticmethod
def remove_central_bad_columns(data):
n_rows, n_columns = data.shape
# Copy the central bad columns to a temp array
temp_column = data[:, n_columns // 2 - 1:n_columns // 2 + 1]
# Shift the whole image by two columns
data[:, n_columns // 2 - 1:-2] = data[:, n_columns // 2 + 1:]
# Copy the bad array in the end (right) of the image).
data[:, -2:] = temp_column
return data
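# Example: a minimal usage sketch of SAMI_XJoin (file names below are
# placeholders; the constructor runs the whole pipeline):
#
#   SAMI_XJoin(['img001.fits', 'img002.fits'], bias_file='bias.fits',
#              flat_file='flat.fits', clean=True, cosmic_rays=True,
#              time=True, verbose=True)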
# noinspection PyPep8
class CosmicsImage:
def __init__(self, rawarray, pssl=0.0, gain=2.2, readnoise=10.0,
sigclip=5.0, sigfrac=0.3, objlim=5.0, satlevel=50000.0,
verbose=True):
"""
sigclip: increase this if you detect cosmics where there are none.
Default is 5.0, a good value for earth-bound images.
objlim : increase this if normal stars are detected as cosmics.
Default is 5.0, a good value for earth-bound images.
Constructor of the cosmic class, takes a 2D numpy array of your image as
main argument.
sigclip : laplacian-to-noise limit for cosmic ray detection
objlim : minimum contrast between laplacian image and fine
structure image. Use 5.0 if your image is undersampled, HST, ...
satlevel : if we find agglomerations of pixels above this level, we
consider it to be a saturated star and do not try to correct
and pixels around it. A negative satlevel skips this
feature.
pssl is the previously subtracted sky level !
real gain = 1.8 # gain (electrons/ADU) (0=unknown)
real readn = 6.5 # read noise (electrons) (0=unknown)
real skyval = 0. # sky level that has been subtracted (ADU)
real sigclip = 3.0 # detection limit for cosmic rays (sigma)
real sigfrac = 0.5 # fractional detection limit for
# neighbouring pixels
real objlim = 3.0 # contrast limit between CR and underlying
# object
int niter = 1 # maximum number of iterations
"""
# internally, we will always work "with sky".
self.rawarray = np.array(rawarray + pssl)
# In lacosmiciteration() we work on this guy
self.cleanarray = self.rawarray.copy()
# All False, no cosmics yet
self.mask = np.cast['bool'](np.zeros(self.rawarray.shape))
self.gain = gain
self.readnoise = readnoise
self.sigclip = sigclip
self.objlim = objlim
self.sigcliplow = sigclip * sigfrac
self.satlevel = satlevel
self.verbose = verbose
self.pssl = pssl
# only calculated and used if required.
self.backgroundlevel = None
# a mask of the saturated stars, only calculated if required
self.satstars = None
def __str__(self):
"""
Gives a summary of the current state, including the number of cosmic
pixels in the mask etc.
"""
stringlist = [
"Input array : (%i, %i), %s" % (
self.rawarray.shape[0], self.rawarray.shape[1],
self.rawarray.dtype.name),
"Current cosmic ray mask : %i pixels" % np.sum(self.mask)
]
if self.pssl != 0.0:
stringlist.append(
"Using a previously subtracted sky level of %f" % self.pssl)
if self.satstars is not None:
stringlist.append(
"Saturated star mask : %i pixels" % np.sum(self.satstars))
return "\n".join(stringlist)
def labelmask(self, verbose=None):
"""
Finds and labels the cosmic "islands" and returns a list of dicts
containing their positions. This is made on purpose for visualizations a
la f2n.drawstarslist, but could be useful anyway.
"""
if verbose is None:
verbose = self.verbose
if verbose:
print("Labeling mask pixels ...")
# We morphologicaly dilate the mask to generously connect "sparse"
# cosmics:
# dilstruct = np.ones((5,5))
dilmask = ndimage.morphology.binary_dilation(self.mask,
structure=dilstruct,
iterations=1, mask=None,
output=None,
border_value=0, origin=0,
brute_force=False)
# origin = 0 means center
(labels, n) = ndimage.measurements.label(dilmask)
# print "Number of cosmic ray hits : %i" % n
# tofits(labels, "labels.fits", verbose = False)
slicecouplelist = ndimage.measurements.find_objects(labels)
# Now we have a huge list of couples of numpy slice objects giving a
# frame around each object
# For plotting purposes, we want to transform this into the center of
# each object.
if len(slicecouplelist) != n:
# This never happened, but you never know ...
raise RuntimeError("Mega error in labelmask!")
centers = [[(tup[0].start + tup[0].stop) / 2.0,
(tup[1].start + tup[1].stop) / 2.0] for tup in
slicecouplelist]
# We also want to know how many pixels were affected by each cosmic
# ray. Why ? Dunno... it's fun and available in scipy :-)
sizes = ndimage.measurements.sum(self.mask.ravel(), labels.ravel(),
np.arange(1, n + 1, 1))
retdictlist = [{"name": "%i" % size, "x": center[0], "y": center[1]} for
(size, center) in zip(sizes, centers)]
if verbose:
print("Labeling done")
return retdictlist
def getdilatedmask(self, size=3):
"""
Returns a morphologically dilated copy of the current mask.
size = 3 or 5 decides how to dilate.
"""
if size == 3:
dilmask = ndimage.morphology.binary_dilation(self.mask,
structure=growkernel,
iterations=1,
mask=None, output=None,
border_value=0,
origin=0,
brute_force=False)
elif size == 5:
dilmask = ndimage.morphology.binary_dilation(self.mask,
structure=dilstruct,
iterations=1,
mask=None, output=None,
border_value=0,
origin=0,
brute_force=False)
else:
dilmask = self.mask.copy()
return dilmask
def clean(self, mask=None, verbose=None):
"""
Given the mask, we replace the actual problematic pixels with the masked
5x5 median value. This mimics what is done in L.A.Cosmic, but it's a bit
harder to do in python, as there is no readymade masked median. So for
now we do a loop...
Saturated stars, if calculated, are also masked : they are not
"cleaned", but their pixels are not used for the interpolation.
We will directly change self.cleanimage. Instead of using the self.mask,
you can supply your own mask as argument. This might be useful to apply
this cleaning function iteratively.
But for the true L.A.Cosmic, we don't use this, i.e. we use the full
mask at each iteration.
"""
if verbose is None:
verbose = self.verbose
if mask is None:
mask = self.mask
if verbose:
print("Cleaning cosmic affected pixels ...")
# So... mask is a 2D array containing False and True, where True means
# "here is a cosmic"
# We want to loop through these cosmics one by one.
cosmicindices = np.argwhere(mask)
# This is a list of the indices of cosmic affected pixels.
# print cosmicindices
# We put cosmic ray pixels to np.Inf to flag them :
self.cleanarray[mask] = np.Inf
# Now we want to have a 2 pixel frame of Inf padding around our image.
w = self.cleanarray.shape[0]
h = self.cleanarray.shape[1]
padarray = np.zeros((w + 4, h + 4)) + np.Inf
# that copy is important, we need 2 independent arrays
padarray[2:w + 2, 2:h + 2] = self.cleanarray.copy()
# The medians will be evaluated in this padarray, skipping the np.Inf.
# Now in this copy called padarray, we also put the saturated stars to
# np.Inf, if available :
if self.satstars is not None:
padarray[2:w + 2, 2:h + 2][self.satstars] = np.Inf
# Viva python, I tested this one, it works...
# A loop through every cosmic pixel :
for cosmicpos in cosmicindices:
x = cosmicpos[0]
y = cosmicpos[1]
cutout = padarray[x:x + 5,
y:y + 5].ravel() # remember the shift due to the padding !
# print cutout
# Now we have our 25 pixels, some of them are np.Inf, and we want
# to take the median
goodcutout = cutout[cutout != np.Inf]
# print np.alen(goodcutout)
if np.alen(goodcutout) >= 25:
# This never happened, but you never know ...
raise RuntimeError("Mega error in clean!")
elif np.alen(goodcutout) > 0:
replacementvalue = np.median(goodcutout)
else:
# i.e. no good pixels : Shit, a huge cosmic, we will have to
# improvise ...
print("OH NO, I HAVE A HUUUUUUUGE COSMIC !!!!!")
replacementvalue = self.guessbackgroundlevel()
# We update the cleanarray,
# but measure the medians in the padarray, so to not mix things up.
self.cleanarray[x, y] = replacementvalue
# That's it.
if verbose:
print("Cleaning done")
# FYI, that's how the LACosmic cleaning looks in iraf :
"""
imarith(outmask,"+",finalsel,outmask)
imreplace(outmask,1,lower=1,upper=INDEF) # ok so outmask = 1 are the cosmics
imcalc(outmask,inputmask,"(1.-10000.*im1)",verb-)
imarith(oldoutput,"*",inputmask,inputmask)
median(inputmask,med5,5,5,zloreject=-9999,zhi=INDEF,verb-)
imarith(outmask,"*",med5,med5)
if (i>1) imdel(output)
imcalc(oldoutput//","//outmask//","//med5,output,"(1.-im2)*im1+im3",verb-)
# =
merging to full mask
inputmask = 1.0 - 10000.0 * finalsel # So this is 1.0, but cosmics are very negative
inputmask = oldoutput * inputmask # orig image, with very negative cosmics
med5 = median of inputmask, but rejecting these negative cosmics
# i dunno how to do this in python -> had to do the loop
med5 = finalsel * med5 # we keep only the cosmics of this median
# actual replacement :
output = (1.0 - outmask)*oldoutput + med5 # ok
"""
def findsatstars(self, verbose=None):
"""
Uses the satlevel to find saturated stars (not cosmics !), and puts the result as a mask in self.satstars.
This can then be used to avoid these regions in cosmic detection and cleaning procedures.
Slow ...
"""
if verbose is None:
verbose = self.verbose
if verbose:
print("Detecting saturated stars ...")
# DETECTION
satpixels = self.rawarray > self.satlevel # the candidate pixels
# We build a smoothed version of the image to look for large stars and their support :
m5 = ndimage.filters.median_filter(self.rawarray, size=5, mode='mirror')
# We look where this is above half the satlevel
largestruct = m5 > (self.satlevel / 2.0)
# The rough locations of saturated stars are now :
satstarscenters = np.logical_and(largestruct, satpixels)
if verbose:
print("Building mask of saturated stars ...")
# BUILDING THE MASK
# The subtlety is that we want to include all saturated pixels connected to these saturated stars...
# I haven't found a better solution than the double loop
# We dilate the satpixels alone, to ensure connectivity in glitchy regions and to add a safety margin around them.
# dilstruct = np.array([[0,1,0], [1,1,1], [0,1,0]])
dilsatpixels = ndimage.morphology.binary_dilation(satpixels,
structure=dilstruct,
iterations=2,
mask=None,
output=None,
border_value=0,
origin=0,
brute_force=False)
# It turns out it's better to think large and do 2 iterations...
# We label these :
(dilsatlabels, nsat) = ndimage.measurements.label(dilsatpixels)
# tofits(dilsatlabels, "test.fits")
if verbose:
print("We have %i saturated stars." % nsat)
# The output, False for now :
outmask = np.zeros(self.rawarray.shape)
for i in range(1,
nsat + 1): # we go through the islands of saturated pixels
thisisland = dilsatlabels == i # gives us a boolean array
# Does this intersect with satstarscenters ?
overlap = np.logical_and(thisisland, satstarscenters)
if np.sum(overlap) > 0:
outmask = np.logical_or(outmask,
thisisland) # we add thisisland to the mask
self.satstars = np.cast['bool'](outmask)
if verbose:
print("Mask of saturated stars done")
def getsatstars(self, verbose=None):
"""
Returns the mask of saturated stars after finding them if not yet done.
Intended mainly for external use.
"""
if verbose is None:
verbose = self.verbose
if not self.satlevel > 0:
raise RuntimeError(
"Cannot determine satstars : you gave satlevel <= 0 !")
if self.satstars is None:
self.findsatstars(verbose=verbose)
return self.satstars
def getmask(self):
return self.mask
def getrawarray(self):
"""
For external use only, as it returns the rawarray minus pssl !
"""
return self.rawarray - self.pssl
def getcleanarray(self):
"""
For external use only, as it returns the cleanarray minus pssl !
"""
return self.cleanarray - self.pssl
def guessbackgroundlevel(self):
"""
Estimates the background level. This could be used to fill pixels in
large cosmics.
"""
if self.backgroundlevel is None:
self.backgroundlevel = np.median(self.rawarray.ravel())
return self.backgroundlevel
def lacosmiciteration(self, verbose=None):
"""
Performs one iteration of the L.A.Cosmic algorithm.
It operates on self.cleanarray, and afterwards updates self.mask by
adding the newly detected cosmics to the existing self.mask. Cleaning is
not made automatically ! You have to call clean() after each iteration.
This way you can run it several times in a row to do L.A.Cosmic
"iterations".
See function lacosmic, that mimics the full iterative L.A.Cosmic
algorithm.
Returns a dict containing
- niter : the number of cosmic pixels detected in this iteration
- nnew : among these, how many were not yet in the mask
- itermask : the mask of pixels detected in this iteration
- newmask : the pixels detected that were not yet in the mask
If findsatstars() was called, we exclude these regions from the search.
"""
if verbose is None:
verbose = self.verbose
if verbose:
print("Convolving image with Laplacian kernel ...")
# We subsample, convolve, clip negative values, and rebin to
# original size
subsam = subsample(self.cleanarray)
conved = signal.convolve2d(subsam, laplkernel, mode="same",
boundary="symm")
cliped = conved.clip(min=0.0)
# cliped = np.abs(conved) # unfortunately this does not work to find
# holes as well ...
lplus = rebin2x2(cliped)
if verbose:
print("Creating noise model ...")
# We build a custom noise map, so to compare the laplacian to
m5 = ndimage.filters.median_filter(self.cleanarray, size=5,
mode='mirror')
# We keep this m5, as I will use it later for the interpolation.
m5clipped = m5.clip(min=0.00001) # As we will take the sqrt
noise = (1.0 / self.gain) * np.sqrt(
self.gain * m5clipped + self.readnoise * self.readnoise)
if verbose:
print("Calculating Laplacian signal to noise ratio ...")
# Laplacian signal to noise ratio :
s = lplus / (2.0 * noise) # the 2.0 is from the 2x2 subsampling
# This s is called sigmap in the original lacosmic.cl
# We remove the large structures (s prime) :
sp = s - ndimage.filters.median_filter(s, size=5, mode='mirror')
if verbose:
print("Selecting candidate cosmic rays ...")
# Candidate cosmic rays (this will include stars + HII regions)
candidates = sp > self.sigclip
nbcandidates = np.sum(candidates)
if verbose:
print(" %5i candidate pixels" % nbcandidates)
# At this stage we use the saturated stars to mask the candidates,
# if available :
if self.satstars is not None:
if verbose:
print("Masking saturated stars ...")
candidates = np.logical_and(np.logical_not(self.satstars),
candidates)
nbcandidates = np.sum(candidates)
if verbose:
print(" %5i candidate pixels not part of saturated stars" %
nbcandidates)
if verbose:
print("Building fine structure image ...")
# We build the fine structure image :
m3 = ndimage.filters.median_filter(self.cleanarray, size=3,
mode='mirror')
m37 = ndimage.filters.median_filter(m3, size=7, mode='mirror')
f = m3 - m37
# In the article that's it, but in lacosmic.cl f is divided by the
# noise...
# Ok I understand why, it depends on if you use sp/f or L+/f as
# criterion.
# There are some differences between the article and the iraf
# implementation.
# So I will stick to the iraf implementation.
f /= noise
f = f.clip(
min=0.01) # as we will divide by f. like in the iraf version.
if verbose:
print("Removing suspected compact bright objects ...")
# Now we have our better selection of cosmics :
cosmics = np.logical_and(candidates, sp / f > self.objlim)
# Note the sp/f and not lplus/f ... due to the f = f/noise above.
nbcosmics = np.sum(cosmics)
if verbose:
print(" %5i remaining candidate pixels" % nbcosmics)
# What follows is a special treatment for neighbors, with more
# relaxed constraints.
if verbose:
print("Finding neighboring pixels affected by cosmic rays ...")
# We grow these cosmics a first time to determine the immediate
# neighborhood:
growcosmics = np.cast['bool'](
signal.convolve2d(np.cast['float32'](cosmics), growkernel,
mode="same", boundary="symm"))
# From this grown set, we keep those that have sp > sigmalim
# so obviously not requiring sp/f > objlim, otherwise it would be
# pointless
growcosmics = np.logical_and(sp > self.sigclip, growcosmics)
# Now we repeat this procedure, but lower the detection limit to
# sigmalimlow :
finalsel = np.cast['bool'](
signal.convolve2d(np.cast['float32'](growcosmics), growkernel,
mode="same", boundary="symm"))
finalsel = np.logical_and(sp > self.sigcliplow, finalsel)
# Again, we have to kick out pixels on saturated stars :
if self.satstars is not None:
if verbose:
print("Masking saturated stars ...")
finalsel = np.logical_and(np.logical_not(self.satstars), finalsel)
nbfinal = np.sum(finalsel)
if verbose:
print(" %5i pixels detected as cosmics" % nbfinal)
# Now the replacement of the cosmics...
# we outsource this to the function clean(), as for some purposes the
# cleaning might not even be needed.
# Easy way without masking would be :
# self.cleanarray[finalsel] = m5[finalsel]
# We find how many cosmics are not yet known :
newmask = np.logical_and(np.logical_not(self.mask), finalsel)
nbnew = np.sum(newmask)
# We update the mask with the cosmics we have found :
self.mask = np.logical_or(self.mask, finalsel)
# We return
# (used by function lacosmic)
return {"niter": nbfinal, "nnew": nbnew, "itermask": finalsel,
"newmask": newmask}
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def findholes(self, verbose=True):
"""
Detects "negative cosmics" in the cleanarray and adds them to the mask.
This is not working yet.
"""
pass
"""
if verbose == None:
verbose = self.verbose
if verbose :
print "Finding holes ..."
m3 = ndimage.filters.median_filter(self.cleanarray, size=3, mode='mirror')
h = (m3 - self.cleanarray).clip(min=0.0)
tofits("h.fits", h)
sys.exit()
# The holes are the peaks in this image that are not stars
#holes = h > 300
"""
"""
subsam = subsample(self.cleanarray)
conved = -signal.convolve2d(subsam, laplkernel, mode="same", boundary="symm")
cliped = conved.clip(min=0.0)
lplus = rebin2x2(conved)
tofits("lplus.fits", lplus)
m5 = ndimage.filters.median_filter(self.cleanarray, size=5, mode='mirror')
m5clipped = m5.clip(min=0.00001)
noise = (1.0/self.gain) * np.sqrt(self.gain*m5clipped + self.readnoise*self.readnoise)
s = lplus / (2.0 * noise) # the 2.0 is from the 2x2 subsampling
# This s is called sigmap in the original lacosmic.cl
# We remove the large structures (s prime) :
sp = s - ndimage.filters.median_filter(s, size=5, mode='mirror')
holes = sp > self.sigclip
"""
"""
# We have to kick out pixels on saturated stars :
if self.satstars is not None:
if verbose:
print "Masking saturated stars ..."
holes = np.logical_and(np.logical_not(self.satstars), holes)
if verbose:
print "%i hole pixels found" % np.sum(holes)
# We update the mask with the holes we have found :
self.mask = np.logical_or(self.mask, holes)
"""
def run(self, maxiter=4, verbose=False):
"""
Full artillery :-)
- Find saturated stars
- Run maxiter L.A.Cosmic iterations (stops if no more cosmics are found)
Stops if no cosmics are found or if maxiter is reached.
"""
if self.satlevel > 0 and self.satstars is None:
self.findsatstars(verbose=True)
print("Starting %i L.A.Cosmic iterations ..." % maxiter)
for i in range(1, maxiter + 1):
print("Iteration %i" % i)
iterres = self.lacosmiciteration(verbose=verbose)
print("%i cosmic pixels (%i new)" %
(iterres["niter"], iterres["nnew"]))
# self.clean(mask = iterres["mask"]) # No, we want clean to operate
# on really clean pixels only!
# Thus we always apply it on the full mask, as lacosmic does :
self.clean(verbose=verbose)
            # But note that for huge cosmics, one might want to revise this.
            # That's why I added a feature to skip saturated stars!
if iterres["niter"] == 0:
break
def bias_subtraction(data, header, prefix, bias_file):
"""
Subtract bias from data.
:param data: 2D numpy.ndarray containing data.
:param header: astropy.io.fits.Header instance.
    :param prefix: string containing the filename prefix.
:param bias_file: string containing the filename that holds the BIAS image.
:return: data - bias subtracted.
:return: header - updated header.
:return: prefix - updated prefix.
"""
if bias_file is not None:
bias = pyfits.getdata(os.path.abspath(bias_file))
data -= bias
header['BIASFILE'] = bias_file
header.add_history('Bias subtracted')
prefix = 'b' + prefix
return data, header, prefix
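# Minimal usage sketch (assuming `pyfits` and `os` are imported at module
# level, and 'bias.fits' is a hypothetical bias frame matching the shape of
# `data`):
#
#     >>> data, header, prefix = bias_subtraction(data, header, '',
#     ...                                         'bias.fits')
#     >>> prefix
#     'b'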
def subsample(a):  # this is more a generic function than a method ...
"""
Returns a 2x2-subsampled version of array a (no interpolation, just cutting
pixels in 4).
The version below is directly from the scipy cookbook on rebinning :
U{http://www.scipy.org/Cookbook/Rebinning}
There is ndimage.zoom(cutout.array, 2, order=0, prefilter=False), but it
makes funny borders.
"""
"""
# Ouuwww this is slow ...
outarray = np.zeros((a.shape[0]*2, a.shape[1]*2), dtype=np.float64)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
outarray[2*i,2*j] = a[i,j]
outarray[2*i+1,2*j] = a[i,j]
outarray[2*i,2*j+1] = a[i,j]
outarray[2*i+1,2*j+1] = a[i,j]
return outarray
"""
# much better :
newshape = (2 * a.shape[0], 2 * a.shape[1])
slices = [slice(0, old, float(old) / new) for old, new in
zip(a.shape, newshape)]
coordinates = np.mgrid[slices]
indices = coordinates.astype(
'i') # choose the biggest smaller integer index
return a[tuple(indices)]
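# A quick illustration (a sketch): each input pixel becomes a 2x2 block of
# identical values, e.g.
#
#     >>> subsample(np.array([[1, 2], [3, 4]]))
#     array([[1, 1, 2, 2],
#            [1, 1, 2, 2],
#            [3, 3, 4, 4],
#            [3, 3, 4, 4]])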
# noinspection PyUnusedLocal
def rebin(a, newshape):
"""
Auxiliary function to rebin an ndarray a.
U{http://www.scipy.org/Cookbook/Rebinning}
> a=rand(6,4); b=rebin(a,(3,2))
"""
shape = a.shape
len_shape = len(shape)
    factor = np.asarray(shape) // np.asarray(newshape)
    # Builds and evaluates, e.g. for 2-d input,
    #   a.reshape(n0, f0, n1, f1).sum(1).sum(2) / f0 / f1   (block averaging)
    ev_list = ['a.reshape('] + \
              ['newshape[%d],factor[%d],' % (i, i) for i in range(len_shape)] +\
              [')'] + ['.sum(%d)' % (i + 1) for i in range(len_shape)] + \
              ['/factor[%d]' % i for i in range(len_shape)]
    return eval(''.join(ev_list))
def rebin2x2(a):
"""
Wrapper around rebin that actually rebins 2 by 2
"""
inshape = np.array(a.shape)
    if not (inshape % 2 == np.zeros(2)).all():
        # Modulo check to see if size is even
        raise RuntimeError("I want even image shapes !")
    return rebin(a, inshape // 2)
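# A quick sketch of the expected behaviour: rebin2x2 block-averages 2x2
# pixel groups, e.g.
#
#     >>> rebin2x2(np.arange(16).reshape(4, 4))
#     array([[  2.5,   4.5],
#            [ 10.5,  12.5]])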
def str2pixels(my_string):
my_string = my_string.replace('[', '')
my_string = my_string.replace(']', '')
x, y = my_string.split(',')
x = x.split(':')
y = y.split(':')
# "-1" fix from IDL to Python
x = np.array(x, dtype=int)
y = np.array(y, dtype=int)
return x, y
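# Example (a sketch): parsing an IRAF/IDL-style section string,
#
#     >>> str2pixels('[25:500,34:600]')
#     (array([ 25, 500]), array([ 34, 600]))
#
# Note that the "-1" IDL-to-Python index shift mentioned above is not
# applied here; callers are expected to handle it.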
if __name__ == '__main__':
# Parsing Arguments ---
parser = argparse.ArgumentParser(
description="Join extensions existent in a single FITS file."
)
parser.add_argument('-b', '--bias', type=str, default=None,
help="Consider BIAS file for subtraction.")
parser.add_argument('-c', '--clean', action='store_true',
help="Clean known bad columns and lines by taking the "
"median value of their neighbours.")
parser.add_argument('-d', '--dark', type=str, default=None,
help="Consider DARK file for subtraction.")
parser.add_argument('-D', '--debug', action='store_true',
help="Turn on DEBUG mode (overwrite quiet mode).")
parser.add_argument('-f', '--flat', type=str, default=None,
help="Consider FLAT file for division.")
parser.add_argument('-g', '--glow', type=str, default=None,
help="Consider DARK file to correct lateral glows.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Run quietly.")
parser.add_argument('-r', '--rays', action='store_true',
help='Use LACosmic.py to remove cosmic rays and hot '
'pixels.')
parser.add_argument('-t', '--exptime', action='store_true',
help="Divide by exposure time.")
parser.add_argument('files', metavar='files', type=str, nargs='+',
help="input filenames.")
pargs = parser.parse_args()
SAMI_XJoin(pargs.files, bias_file=pargs.bias, clean=pargs.clean,
cosmic_rays=pargs.rays, dark_file=pargs.dark, debug=pargs.debug,
flat_file=pargs.flat, glow_file=pargs.glow, time=pargs.exptime,
verbose=not pargs.quiet)
|
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Skia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import csv
import fnmatch
import os
import re
import subprocess
import sys
import traceback
REVERT_CL_SUBJECT_PREFIX = 'Revert '
SKIA_TREE_STATUS_URL = 'http://skia-tree-status.appspot.com'
CQ_KEYWORDS_THAT_NEED_APPENDING = ('CQ_INCLUDE_TRYBOTS', 'CQ_EXTRA_TRYBOTS',
'CQ_EXCLUDE_TRYBOTS', 'CQ_TRYBOTS')
# Please add the complete email address here (and not just 'xyz@' or 'xyz').
PUBLIC_API_OWNERS = (
'reed@chromium.org',
'reed@google.com',
'bsalomon@chromium.org',
'bsalomon@google.com',
'djsollen@chromium.org',
'djsollen@google.com',
)
AUTHORS_FILE_NAME = 'AUTHORS'
DOCS_PREVIEW_URL = 'https://skia.org/?cl='
def _CheckChangeHasEol(input_api, output_api, source_file_filter=None):
"""Checks that files end with atleast one \n (LF)."""
eof_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
contents = input_api.ReadFile(f, 'rb')
    # Check that the file ends in at least one newline character.
if len(contents) > 1 and contents[-1:] != '\n':
eof_files.append(f.LocalPath())
if eof_files:
return [output_api.PresubmitPromptWarning(
'These files should end in a newline character:',
items=eof_files)]
return []
def _PythonChecks(input_api, output_api):
"""Run checks on any modified Python files."""
pylint_disabled_warnings = (
'F0401', # Unable to import.
'E0611', # No name in module.
'W0232', # Class has no __init__ method.
'E1002', # Use of super on an old style class.
'W0403', # Relative import used.
'R0201', # Method could be a function.
'E1003', # Using class name in super.
'W0613', # Unused argument.
)
# Run Pylint on only the modified python files. Unfortunately it still runs
# Pylint on the whole file instead of just the modified lines.
affected_python_files = []
for affected_file in input_api.AffectedSourceFiles(None):
affected_file_path = affected_file.LocalPath()
if affected_file_path.endswith('.py'):
affected_python_files.append(affected_file_path)
return input_api.canned_checks.RunPylint(
input_api, output_api,
disabled_warnings=pylint_disabled_warnings,
white_list=affected_python_files)
def _IfDefChecks(input_api, output_api):
"""Ensures if/ifdef are not before includes. See skbug/3362 for details."""
  comment_block_start_pattern = re.compile(r'^\s*/\*.*$')
  comment_block_middle_pattern = re.compile(r'^\s+\*.*')
  comment_block_end_pattern = re.compile(r'^\s+\*/.*$')
  single_line_comment_pattern = re.compile(r'^\s*//.*$')
def is_comment(line):
return (comment_block_start_pattern.match(line) or
comment_block_middle_pattern.match(line) or
comment_block_end_pattern.match(line) or
single_line_comment_pattern.match(line))
  empty_line_pattern = re.compile(r'^\s*$')
def is_empty_line(line):
return empty_line_pattern.match(line)
failing_files = []
for affected_file in input_api.AffectedSourceFiles(None):
affected_file_path = affected_file.LocalPath()
if affected_file_path.endswith('.cpp') or affected_file_path.endswith('.h'):
      f = open(affected_file_path)
      for line in f:
if is_comment(line) or is_empty_line(line):
continue
# The below will be the first real line after comments and newlines.
if line.startswith('#if 0 '):
pass
elif line.startswith('#if ') or line.startswith('#ifdef '):
failing_files.append(affected_file_path)
break
results = []
if failing_files:
results.append(
output_api.PresubmitError(
'The following files have #if or #ifdef before includes:\n%s\n\n'
'See skbug.com/3362 for why this should be fixed.' %
'\n'.join(failing_files)))
return results
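# For example (a sketch): a .cpp file whose first non-comment, non-blank
# line is '#ifdef SK_DEBUG' (before any #include) would be flagged, while a
# file starting with '#if 0 ...' is deliberately ignored.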
def _CopyrightChecks(input_api, output_api, source_file_filter=None):
results = []
year_pattern = r'\d{4}'
year_range_pattern = r'%s(-%s)?' % (year_pattern, year_pattern)
years_pattern = r'%s(,%s)*,?' % (year_range_pattern, year_range_pattern)
copyright_pattern = (
r'Copyright (\([cC]\) )?%s \w+' % years_pattern)
for affected_file in input_api.AffectedSourceFiles(source_file_filter):
if 'third_party' in affected_file.LocalPath():
continue
contents = input_api.ReadFile(affected_file, 'rb')
if not re.search(copyright_pattern, contents):
results.append(output_api.PresubmitError(
'%s is missing a correct copyright header.' % affected_file))
return results
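# The copyright pattern above accepts single years, year ranges, and
# comma-separated lists, e.g. (a sketch):
#   "Copyright (c) 2015 Google"
#   "Copyright 2012-2014,2015 Skia"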
def _ToolFlags(input_api, output_api):
"""Make sure `{dm,nanobench}_flags.py test` passes if modified."""
results = []
sources = lambda x: ('dm_flags.py' in x.LocalPath() or
'nanobench_flags.py' in x.LocalPath())
for f in input_api.AffectedSourceFiles(sources):
if 0 != subprocess.call(['python', f.LocalPath(), 'test']):
results.append(output_api.PresubmitError('`python %s test` failed' % f))
return results
def _CommonChecks(input_api, output_api):
"""Presubmit checks common to upload and commit."""
results = []
sources = lambda x: (x.LocalPath().endswith('.h') or
x.LocalPath().endswith('.gypi') or
x.LocalPath().endswith('.gyp') or
x.LocalPath().endswith('.py') or
x.LocalPath().endswith('.sh') or
x.LocalPath().endswith('.m') or
x.LocalPath().endswith('.mm') or
x.LocalPath().endswith('.go') or
x.LocalPath().endswith('.c') or
x.LocalPath().endswith('.cc') or
x.LocalPath().endswith('.cpp'))
results.extend(
_CheckChangeHasEol(
input_api, output_api, source_file_filter=sources))
results.extend(_PythonChecks(input_api, output_api))
results.extend(_IfDefChecks(input_api, output_api))
results.extend(_CopyrightChecks(input_api, output_api,
source_file_filter=sources))
results.extend(_ToolFlags(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
"""Presubmit checks for the change on upload.
The following are the presubmit checks:
* Check change has one and only one EOL.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def _CheckTreeStatus(input_api, output_api, json_url):
"""Check whether to allow commit.
Args:
input_api: input related apis.
output_api: output related apis.
json_url: url to download json style status.
"""
tree_status_results = input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api, json_url=json_url)
if not tree_status_results:
# Check for caution state only if tree is not closed.
connection = input_api.urllib2.urlopen(json_url)
status = input_api.json.loads(connection.read())
connection.close()
if ('caution' in status['message'].lower() and
os.isatty(sys.stdout.fileno())):
# Display a prompt only if we are in an interactive shell. Without this
# check the commit queue behaves incorrectly because it considers
# prompts to be failures.
short_text = 'Tree state is: ' + status['general_state']
long_text = status['message'] + '\n' + json_url
tree_status_results.append(
output_api.PresubmitPromptWarning(
message=short_text, long_text=long_text))
else:
# Tree status is closed. Put in message about contacting sheriff.
connection = input_api.urllib2.urlopen(
SKIA_TREE_STATUS_URL + '/current-sheriff')
sheriff_details = input_api.json.loads(connection.read())
if sheriff_details:
tree_status_results[0]._message += (
'\n\nPlease contact the current Skia sheriff (%s) if you are trying '
'to submit a build fix\nand do not know how to submit because the '
'tree is closed') % sheriff_details['username']
return tree_status_results
def _CheckOwnerIsInAuthorsFile(input_api, output_api):
results = []
issue = input_api.change.issue
if issue and input_api.rietveld:
issue_properties = input_api.rietveld.get_issue_properties(
issue=int(issue), messages=False)
owner_email = issue_properties['owner_email']
try:
authors_content = ''
for line in open(AUTHORS_FILE_NAME):
if not line.startswith('#'):
authors_content += line
email_fnmatches = re.findall('<(.*)>', authors_content)
for email_fnmatch in email_fnmatches:
if fnmatch.fnmatch(owner_email, email_fnmatch):
          # Found a match; the user is in the AUTHORS file. Break out of
          # the loop.
break
else:
# TODO(rmistry): Remove the below CLA messaging once a CLA checker has
# been added to the CQ.
results.append(
output_api.PresubmitError(
'The email %s is not in Skia\'s AUTHORS file.\n'
'Issue owner, this CL must include an addition to the Skia AUTHORS '
'file.\n'
'Googler reviewers, please check that the AUTHORS entry '
'corresponds to an email address in http://goto/cla-signers. If it '
'does not then ask the issue owner to sign the CLA at '
'https://developers.google.com/open-source/cla/individual '
'(individual) or '
'https://developers.google.com/open-source/cla/corporate '
'(corporate).'
% owner_email))
except IOError:
# Do not fail if authors file cannot be found.
traceback.print_exc()
input_api.logging.error('AUTHORS file not found!')
return results
def _CheckLGTMsForPublicAPI(input_api, output_api):
"""Check LGTMs for public API changes.
For public API files make sure there is an LGTM from the list of owners in
PUBLIC_API_OWNERS.
"""
results = []
requires_owner_check = False
for affected_file in input_api.AffectedFiles():
affected_file_path = affected_file.LocalPath()
file_path, file_ext = os.path.splitext(affected_file_path)
# We only care about files that end in .h and are under the top-level
# include dir.
if file_ext == '.h' and 'include' == file_path.split(os.path.sep)[0]:
requires_owner_check = True
if not requires_owner_check:
return results
lgtm_from_owner = False
issue = input_api.change.issue
if issue and input_api.rietveld:
issue_properties = input_api.rietveld.get_issue_properties(
issue=int(issue), messages=True)
if re.match(REVERT_CL_SUBJECT_PREFIX, issue_properties['subject'], re.I):
# It is a revert CL, ignore the public api owners check.
return results
# TODO(rmistry): Stop checking for COMMIT=false once crbug/470609 is
# resolved.
if issue_properties['cq_dry_run'] or re.search(
r'^COMMIT=false$', issue_properties['description'], re.M):
# Ignore public api owners check for dry run CLs since they are not
# going to be committed.
return results
match = re.search(r'^TBR=(.*)$', issue_properties['description'], re.M)
if match:
tbr_entries = match.group(1).strip().split(',')
for owner in PUBLIC_API_OWNERS:
if owner in tbr_entries or owner.split('@')[0] in tbr_entries:
# If an owner is specified in the TBR= line then ignore the public
# api owners check.
return results
if issue_properties['owner_email'] in PUBLIC_API_OWNERS:
# An owner created the CL that is an automatic LGTM.
lgtm_from_owner = True
messages = issue_properties.get('messages')
if messages:
for message in messages:
if (message['sender'] in PUBLIC_API_OWNERS and
'lgtm' in message['text'].lower()):
# Found an lgtm in a message from an owner.
lgtm_from_owner = True
break
if not lgtm_from_owner:
results.append(
output_api.PresubmitError(
'Since the CL is editing public API, you must have an LGTM from '
'one of: %s' % str(PUBLIC_API_OWNERS)))
return results
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook does the following:
* Adds a link to preview docs changes if there are any docs changes in the CL.
* Adds 'NOTRY=true' if the CL contains only docs changes.
* Adds 'NOTREECHECKS=true' for non master branch changes since they do not
need to be gated on the master branch's tree.
* Adds 'NOTRY=true' for non master branch changes since trybots do not yet
work on them.
* Adds 'NOPRESUBMIT=true' for non master branch changes since those don't
run the presubmit checks.
"""
results = []
atleast_one_docs_change = False
all_docs_changes = True
for affected_file in change.AffectedFiles():
affected_file_path = affected_file.LocalPath()
file_path, _ = os.path.splitext(affected_file_path)
if 'site' == file_path.split(os.path.sep)[0]:
atleast_one_docs_change = True
else:
all_docs_changes = False
if atleast_one_docs_change and not all_docs_changes:
break
issue = cl.issue
rietveld_obj = cl.RpcServer()
if issue and rietveld_obj:
original_description = rietveld_obj.get_description(issue)
new_description = original_description
# If the change includes only doc changes then add NOTRY=true in the
# CL's description if it does not exist yet.
if all_docs_changes and not re.search(
r'^NOTRY=true$', new_description, re.M | re.I):
new_description += '\nNOTRY=true'
results.append(
output_api.PresubmitNotifyResult(
'This change has only doc changes. Automatically added '
'\'NOTRY=true\' to the CL\'s description'))
    # If there is at least one docs change then add a preview link to the
    # CL's description if it does not already exist there.
if atleast_one_docs_change and not re.search(
r'^DOCS_PREVIEW=.*', new_description, re.M | re.I):
# Automatically add a link to where the docs can be previewed.
new_description += '\nDOCS_PREVIEW= %s%s' % (DOCS_PREVIEW_URL, issue)
results.append(
output_api.PresubmitNotifyResult(
'Automatically added a link to preview the docs changes to the '
'CL\'s description'))
# If the target ref is not master then add NOTREECHECKS=true and NOTRY=true
# to the CL's description if it does not already exist there.
target_ref = rietveld_obj.get_issue_properties(issue, False).get(
'target_ref', '')
if target_ref != 'refs/heads/master':
if not re.search(
r'^NOTREECHECKS=true$', new_description, re.M | re.I):
new_description += "\nNOTREECHECKS=true"
results.append(
output_api.PresubmitNotifyResult(
'Branch changes do not need to rely on the master branch\'s '
'tree status. Automatically added \'NOTREECHECKS=true\' to the '
'CL\'s description'))
if not re.search(
r'^NOTRY=true$', new_description, re.M | re.I):
new_description += "\nNOTRY=true"
results.append(
output_api.PresubmitNotifyResult(
'Trybots do not yet work for non-master branches. '
'Automatically added \'NOTRY=true\' to the CL\'s description'))
if not re.search(
r'^NOPRESUBMIT=true$', new_description, re.M | re.I):
new_description += "\nNOPRESUBMIT=true"
results.append(
output_api.PresubmitNotifyResult(
'Branch changes do not run the presubmit checks.'))
# Read and process the HASHTAGS file.
hashtags_fullpath = os.path.join(change._local_root, 'HASHTAGS')
with open(hashtags_fullpath, 'rb') as hashtags_csv:
hashtags_reader = csv.reader(hashtags_csv, delimiter=',')
for row in hashtags_reader:
if not row or row[0].startswith('#'):
# Ignore empty lines and comments
continue
hashtag = row[0]
# Search for the hashtag in the description.
if re.search('#%s' % hashtag, new_description, re.M | re.I):
for mapped_text in row[1:]:
# Special case handling for CQ_KEYWORDS_THAT_NEED_APPENDING.
appended_description = _HandleAppendingCQKeywords(
hashtag, mapped_text, new_description, results, output_api)
if appended_description:
new_description = appended_description
continue
# Add the mapped text if it does not already exist in the
# CL's description.
if not re.search(
r'^%s$' % mapped_text, new_description, re.M | re.I):
new_description += '\n%s' % mapped_text
results.append(
output_api.PresubmitNotifyResult(
'Found \'#%s\', automatically added \'%s\' to the CL\'s '
'description' % (hashtag, mapped_text)))
# If the description has changed update it.
if new_description != original_description:
rietveld_obj.update_description(issue, new_description)
return results
def _HandleAppendingCQKeywords(hashtag, keyword_and_value, description,
results, output_api):
"""Handles the CQ keywords that need appending if specified in hashtags."""
keyword = keyword_and_value.split('=')[0]
if keyword in CQ_KEYWORDS_THAT_NEED_APPENDING:
# If the keyword is already in the description then append to it.
match = re.search(
r'^%s=(.*)$' % keyword, description, re.M | re.I)
if match:
old_values = match.group(1).split(';')
new_value = keyword_and_value.split('=')[1]
if new_value in old_values:
# Do not need to do anything here.
return description
# Update the description with the new values.
new_description = description.replace(
match.group(0), "%s;%s" % (match.group(0), new_value))
results.append(
output_api.PresubmitNotifyResult(
'Found \'#%s\', automatically appended \'%s\' to %s in '
'the CL\'s description' % (hashtag, new_value, keyword)))
return new_description
return None
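# For example (a sketch): with a HASHTAGS row mapping '#perf' to
# 'CQ_INCLUDE_TRYBOTS=Build-Perf', a description that already contains
# 'CQ_INCLUDE_TRYBOTS=Build-Dbg' is rewritten to
# 'CQ_INCLUDE_TRYBOTS=Build-Dbg;Build-Perf'.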
def CheckChangeOnCommit(input_api, output_api):
"""Presubmit checks for the change on commit.
The following are the presubmit checks:
* Check change has one and only one EOL.
* Ensures that the Skia tree is open in
http://skia-tree-status.appspot.com/. Shows a warning if it is in 'Caution'
state and an error if it is in 'Closed' state.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(
_CheckTreeStatus(input_api, output_api, json_url=(
SKIA_TREE_STATUS_URL + '/banner-status?format=json')))
results.extend(_CheckLGTMsForPublicAPI(input_api, output_api))
results.extend(_CheckOwnerIsInAuthorsFile(input_api, output_api))
return results
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide tools for executing Selenium tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
# Bokeh imports
from bokeh.models import Button
from bokeh.util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'alt_click',
'ButtonWrapper',
'copy_table_rows',
'COUNT',
'element_to_finish_resizing',
'element_to_start_resizing',
'enter_text_in_cell',
'enter_text_in_cell_with_click_enter',
'enter_text_in_element',
'get_page_element',
'get_table_cell',
'get_table_column_cells',
'get_table_row',
'get_table_selected_rows',
'INIT',
'paste_values',
'RECORD',
'RESULTS',
'SCROLL',
'shift_click',
'sort_table_column',
'wait_for_canvas_resize',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def COUNT(key):
return 'Bokeh._testing.count(%r);' % key
INIT = 'Bokeh._testing.init();'
def RECORD(key, value):
return 'Bokeh._testing.record(%r, %s);' % (key, value)
RESULTS = 'return Bokeh._testing.results'
def SCROLL(amt):
return """
var elt = document.getElementsByClassName("bk-canvas-events")[0];
var event = new WheelEvent('wheel', { deltaY: %f, clientX: 100, clientY: 100} );
elt.dispatchEvent(event);
""" % amt
def alt_click(driver, element):
actions = ActionChains(driver)
actions.key_down(Keys.META)
actions.click(element)
actions.key_up(Keys.META)
actions.perform()
class ButtonWrapper(object):
def __init__(self, label, callback):
self.ref = "button-" + make_id()
self.obj = Button(label=label, css_classes=[self.ref])
self.obj.js_on_event('button_click', callback)
def click(self, driver):
button = driver.find_element_by_css_selector(".%s .bk-btn" % self.ref)
button.click()
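# Minimal usage sketch (assuming a CustomJS callback; CustomJS is not
# imported in this module):
#
#     button = ButtonWrapper("click me",
#                            CustomJS(code=RECORD("clicked", "true")))
#     # ... add button.obj to the document served to the test page ...
#     button.click(driver)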
class element_to_start_resizing(object):
''' An expectation for checking if an element has started resizing
'''
def __init__(self, element):
self.element = element
self.previous_width = self.element.size['width']
def __call__(self, driver):
current_width = self.element.size['width']
if self.previous_width != current_width:
return True
else:
self.previous_width = current_width
return False
class element_to_finish_resizing(object):
''' An expectation for checking if an element has finished resizing
'''
def __init__(self, element):
self.element = element
self.previous_width = self.element.size['width']
def __call__(self, driver):
current_width = self.element.size['width']
if self.previous_width == current_width:
return True
else:
self.previous_width = current_width
return False
def enter_text_in_element(driver, element, text, click=1, enter=True):
actions = ActionChains(driver)
actions.move_to_element(element)
if click == 1: actions.click()
elif click == 2: actions.double_click()
if enter:
text += Keys.ENTER
actions.send_keys(text)
actions.perform()
def enter_text_in_cell(driver, cell, text):
actions = ActionChains(driver)
actions.move_to_element(cell)
actions.double_click()
actions.send_keys(text + Keys.ENTER)
actions.perform()
def enter_text_in_cell_with_click_enter(driver, cell, text):
actions = ActionChains(driver)
actions.move_to_element(cell)
actions.click()
actions.send_keys(Keys.ENTER + text + Keys.ENTER)
actions.perform()
def copy_table_rows(driver, rows):
actions = ActionChains(driver)
row = get_table_row(driver, rows[0])
actions.move_to_element(row)
actions.click()
actions.key_down(Keys.SHIFT)
for r in rows[1:]:
row = get_table_row(driver, r)
actions.move_to_element(row)
actions.click()
actions.key_up(Keys.SHIFT)
actions.key_down(Keys.CONTROL)
actions.send_keys(Keys.INSERT)
actions.key_up(Keys.CONTROL)
#actions.send_keys(Keys.CONTROL, 'c')
actions.perform()
def paste_values(driver, el=None):
actions = ActionChains(driver)
if el:
actions.move_to_element(el)
actions.key_down(Keys.SHIFT)
actions.send_keys(Keys.INSERT)
actions.key_up(Keys.SHIFT)
#actions.send_keys(Keys.CONTROL, 'v')
actions.perform()
def get_table_column_cells(driver, col):
result = []
grid = driver.find_element_by_css_selector('.grid-canvas')
rows = grid.find_elements_by_css_selector(".slick-row")
for i, row in enumerate(rows):
elt = row.find_element_by_css_selector('.slick-cell.l%d.r%d' % (col, col))
result.append(elt.text)
return result
def get_table_row(driver, row):
return driver.find_element_by_css_selector('.grid-canvas .slick-row:nth-child(%d)' % row)
def get_table_selected_rows(driver):
result = set()
grid = driver.find_element_by_css_selector('.grid-canvas')
rows = grid.find_elements_by_css_selector(".slick-row")
for i, row in enumerate(rows):
elt = row.find_element_by_css_selector('.slick-cell.l1.r1')
if 'selected' in elt.get_attribute('class'):
result.add(i)
return result
def get_table_cell(driver, row, col):
return driver.find_element_by_css_selector('.grid-canvas .slick-row:nth-child(%d) .r%d' % (row, col))
def get_page_element(driver, element_selector):
return driver.find_element_by_css_selector(element_selector)
def shift_click(driver, element):
actions = ActionChains(driver)
actions.key_down(Keys.SHIFT)
actions.click(element)
actions.key_up(Keys.SHIFT)
actions.perform()
def sort_table_column(driver, col, double=False):
elt = driver.find_element_by_css_selector('.slick-header-columns .slick-header-column:nth-child(%d)' % col)
elt.click()
if double: elt.click()
def wait_for_canvas_resize(canvas, test_driver):
    ''' Wait for a canvas element to start resizing and then to finish
    resizing, with a short timeout.
    '''
try:
wait = WebDriverWait(test_driver, 1)
wait.until(element_to_start_resizing(canvas))
wait.until(element_to_finish_resizing(canvas))
except TimeoutException:
        # The resize may happen (nearly) instantaneously, or not at all, so
        # the waits above only give it some time; on timeout we let the
        # test proceed anyway.
pass
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
from sqlalchemy import Column
from sqlalchemy import exc as sa_exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy.orm import backref
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import fixtures
class CompileTest(fixtures.MappedTest):
"""test various mapper compilation scenarios"""
def teardown_test(self):
clear_mappers()
def test_with_polymorphic(self):
metadata = MetaData()
order = Table(
"orders",
metadata,
Column("id", Integer, primary_key=True),
Column(
"employee_id",
Integer,
ForeignKey("employees.id"),
nullable=False,
),
Column("type", Unicode(16)),
)
employee = Table(
"employees",
metadata,
Column("id", Integer, primary_key=True),
Column("name", Unicode(16), unique=True, nullable=False),
)
product = Table(
"products", metadata, Column("id", Integer, primary_key=True)
)
orderproduct = Table(
"orderproducts",
metadata,
Column("id", Integer, primary_key=True),
Column(
"order_id", Integer, ForeignKey("orders.id"), nullable=False
),
Column(
"product_id",
Integer,
ForeignKey("products.id"),
nullable=False,
),
)
class Order:
pass
class Employee:
pass
class Product:
pass
class OrderProduct:
pass
order_join = order.select().alias("pjoin")
self.mapper_registry.map_imperatively(
Order,
order,
with_polymorphic=("*", order_join),
polymorphic_on=order_join.c.type,
polymorphic_identity="order",
properties={
"orderproducts": relationship(
OrderProduct, lazy="select", backref="order"
)
},
)
self.mapper_registry.map_imperatively(
Product,
product,
properties={
"orderproducts": relationship(
OrderProduct, lazy="select", backref="product"
)
},
)
self.mapper_registry.map_imperatively(
Employee,
employee,
properties={
"orders": relationship(
Order, lazy="select", backref="employee"
)
},
)
self.mapper_registry.map_imperatively(OrderProduct, orderproduct)
# this requires that the compilation of order_mapper's "surrogate
# mapper" occur after the initial setup of MapperProperty objects on
# the mapper.
configure_mappers()
def test_conflicting_backref_one(self):
"""test that conflicting backrefs raises an exception"""
metadata = MetaData()
order = Table(
"orders",
metadata,
Column("id", Integer, primary_key=True),
Column("type", Unicode(16)),
)
product = Table(
"products", metadata, Column("id", Integer, primary_key=True)
)
orderproduct = Table(
"orderproducts",
metadata,
Column("id", Integer, primary_key=True),
Column(
"order_id", Integer, ForeignKey("orders.id"), nullable=False
),
Column(
"product_id",
Integer,
ForeignKey("products.id"),
nullable=False,
),
)
class Order:
pass
class Product:
pass
class OrderProduct:
pass
order_join = order.select().alias("pjoin")
self.mapper_registry.map_imperatively(
Order,
order,
with_polymorphic=("*", order_join),
polymorphic_on=order_join.c.type,
polymorphic_identity="order",
properties={
"orderproducts": relationship(
OrderProduct, lazy="select", backref="product"
)
},
)
self.mapper_registry.map_imperatively(
Product,
product,
properties={
"orderproducts": relationship(
OrderProduct, lazy="select", backref="product"
)
},
)
self.mapper_registry.map_imperatively(OrderProduct, orderproduct)
assert_raises_message(
sa_exc.ArgumentError, "Error creating backref", configure_mappers
)
def test_misc_one(self, connection, metadata):
node_table = Table(
"node",
metadata,
Column("node_id", Integer, primary_key=True),
Column("name_index", Integer, nullable=True),
)
node_name_table = Table(
"node_name",
metadata,
Column("node_name_id", Integer, primary_key=True),
Column("node_id", Integer, ForeignKey("node.node_id")),
Column("host_id", Integer, ForeignKey("host.host_id")),
Column("name", String(64), nullable=False),
)
host_table = Table(
"host",
metadata,
Column("host_id", Integer, primary_key=True),
Column("hostname", String(64), nullable=False, unique=True),
)
metadata.create_all(connection)
        connection.execute(node_table.insert(), dict(node_id=1, name_index=5))
class Node:
pass
class NodeName:
pass
class Host:
pass
self.mapper_registry.map_imperatively(Node, node_table)
self.mapper_registry.map_imperatively(Host, host_table)
self.mapper_registry.map_imperatively(
NodeName,
node_name_table,
properties={
"node": relationship(Node, backref=backref("names")),
"host": relationship(Host),
},
)
sess = Session(connection)
assert sess.get(Node, 1).names == []
def test_conflicting_backref_two(self):
meta = MetaData()
a = Table("a", meta, Column("id", Integer, primary_key=True))
b = Table(
"b",
meta,
Column("id", Integer, primary_key=True),
Column("a_id", Integer, ForeignKey("a.id")),
)
class A:
pass
class B:
pass
self.mapper_registry.map_imperatively(
A, a, properties={"b": relationship(B, backref="a")}
)
self.mapper_registry.map_imperatively(
B, b, properties={"a": relationship(A, backref="b")}
)
assert_raises_message(
sa_exc.ArgumentError, "Error creating backref", configure_mappers
)
def test_conflicting_backref_subclass(self):
meta = MetaData()
a = Table("a", meta, Column("id", Integer, primary_key=True))
b = Table(
"b",
meta,
Column("id", Integer, primary_key=True),
Column("a_id", Integer, ForeignKey("a.id")),
)
class A:
pass
class B:
pass
class C(B):
pass
self.mapper_registry.map_imperatively(
A,
a,
properties={
"b": relationship(B, backref="a"),
"c": relationship(C, backref="a"),
},
)
self.mapper_registry.map_imperatively(B, b)
self.mapper_registry.map_imperatively(C, None, inherits=B)
assert_raises_message(
sa_exc.ArgumentError, "Error creating backref", configure_mappers
)
|
|
"""
Tools to generate synthetic spectra given a table of line strengths
"""
import numpy as np
from astropy.modeling import models
from astropy import units as u
from astropy import constants as c
class SyntheticSpectrum(object):
"""
Synthetic Spectrum class - neato!
"""
def __init__(self, wcs, species, linewidth):
self.wcs = wcs
self.species = species
self.linewidth = linewidth
@classmethod
def from_table(cls, wcs, table, species,
linewidth=1.0*u.km/u.s,
profile_function=models.Gaussian1D):
"""
Create a synthetic spectrum from a RADEX (or DESPOTIC, eventually)
output
Parameters
----------
wcs: SpectralWCS
A spectral world coordinate system. You can generate one with
FrequencyArray or specutils.wcs.Spectrum1DLookupWCS
table: astropy.Table
Result of the RADEX query (from R.get_table())
linewidth: u.Quantity (km/s)
The width of the line to plot
profile_function: astropy.modeling.model
The model function to use. Must accept, in order:
* flux (peak)
* frequency center (Hz)
* frequency width (Hz)
Examples
--------
>>> from pyradex import Radex,synthspec
>>> R = Radex(species='ch3cn', column=1e14, density=1e5, collider_densities=None)
>>> R.run_radex()
>>> wcs = synthspec.FrequencyArray(91.95*u.GHz, 92*u.GHz, npts=1000)
>>> S = synthspec.SyntheticSpectrum.from_table(wcs, R.get_table(),
... species='ch3cn')
>>> S.plot()
"""
self = cls(wcs, species, linewidth)
self.profile_function = profile_function
if hasattr(wcs,'minfreq'):
self.minfreq,self.maxfreq = wcs.minfreq,wcs.maxfreq
else:
self.minfreq,self.maxfreq = wcs.min(),wcs.max()
        linefreqs = u.Quantity(table['frequency'],
                               unit=u.Unit(table['frequency'].unit))
        linefreq_mask = (linefreqs > self.minfreq) & (linefreqs < self.maxfreq)
        self.table = table[linefreq_mask]
        # Keep only the in-band line frequencies so they stay aligned with
        # self.table / self.T_B when building the profile.
        self.linefreqs = linefreqs[linefreq_mask]
self.width_frequency = (linewidth/c.c *
u.Quantity(self.table['frequency'],
unit=u.Unit(self.table['frequency'].unit)))
self.T_B = self.table['T_B']
self.data = self.get_profile()
#super(Spectrum,self).__init__(data=data, wcs=self.wcs,
# unit=u.Unit(table['T_B'].unit))
return self
@classmethod
def from_RADEX(cls, wcs, rad,
linewidth=1.0*u.km/u.s,
profile_function=models.Gaussian1D):
"""
Create a synthetic spectrum from a RADEX class
Parameters
----------
wcs: SpectralWCS
A spectral world coordinate system. You can generate one with
FrequencyArray or specutils.wcs.Spectrum1DLookupWCS
rad: pyradex.Radex instance
Result of the RADEX query
linewidth: u.Quantity (km/s)
The width of the line to plot
profile_function: astropy.modeling.model
The model function to use. Must accept, in order:
* flux (peak)
* frequency center (Hz)
* frequency width (Hz)
Examples
--------
>>> from pyradex import Radex,synthspec
>>> R = Radex(species='ch3cn')
>>> R.run_radex()
>>> wcs = synthspec.FrequencyArray(91.95*u.GHz, 92*u.GHz, npts=1000)
>>> S = synthspec.SyntheticSpectrum.from_RADEX(wcs, R)
>>> S.plot()
"""
self = cls(wcs, rad.species, linewidth)
self.profile_function = profile_function
self.wcs = wcs
if hasattr(wcs,'minfreq'):
self.minfreq,self.maxfreq = wcs.minfreq,wcs.maxfreq
else:
self.minfreq,self.maxfreq = wcs.min(),wcs.max()
self.rad = rad
linefreqs = rad.frequency
linefreq_mask = (linefreqs>self.minfreq) & (linefreqs<self.maxfreq)
included_frequencies_mask = linefreq_mask[rad.inds_frequencies_included]
self.linefreqs = linefreqs[linefreq_mask]
self.T_B = rad.T_B[included_frequencies_mask]
self.width_frequency = (linewidth/c.c * self.linefreqs)
self.data = self.get_profile()
self.table = rad.get_table()
#super(Spectrum,self).__init__(data=data, wcs=self.wcs,
# unit=u.Unit(rad.T_B.unit))
return self
def get_profile(self, velocity_offset=0*u.km/u.s):
def model(xpts):
if isinstance(xpts,u.Quantity):
xpts = xpts.to(u.Hz).value
M = np.zeros_like(xpts)
freqs = self.linefreqs + (self.linefreqs*velocity_offset/c.c)
for freq,flux,width in zip(freqs,
self.T_B,
self.width_frequency):
fv = flux.value if hasattr(flux,'value') else flux
M += self.profile_function(fv, freq.to(u.Hz).value,
width.to(u.Hz).value)(xpts)
return M
        try:
            X = self.wcs(np.arange(self.wcs.npts))
        except (TypeError, AttributeError):
            # The wcs may simply be an array of frequencies (see
            # FrequencyArray below) rather than a callable WCS.
            X = self.wcs
return model(X)
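    # A note on the contract assumed above: profile_function is called
    # positionally and the resulting model is evaluated on the grid. With
    # the default models.Gaussian1D, for example,
    #     models.Gaussian1D(1.0, 92.0e9, 1.0e5)(xpts)
    # samples a Gaussian of amplitude 1.0, centred at 92 GHz, with a
    # 0.1 MHz stddev, at the frequencies in xpts (all in Hz).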
def plot(self, update_data=False, *args, **kwargs):
import pylab as pl
if update_data:
self.data = self.get_profile()
        try:
            dispersion = self.wcs(np.arange(self.wcs.npts))
        except (TypeError, AttributeError):
            # The wcs may simply be an array of frequencies.
            dispersion = self.wcs
pl.gca().set_xlabel(dispersion.unit.to_string())
if hasattr(self.data,'unit'):
pl.gca().set_ylabel(self.data.unit.to_string())
data = self.data.value
else:
data = self.data
return pl.plot(dispersion.value, data, *args, **kwargs)
def __call__(self, linewidth=None, velocity_offset=0*u.km/u.s, **kwargs):
"""
Return a synthetic spectrum created by calling RADEX. Parameters
are passed to pyradex.Radex (except linewidth)
Parameters
----------
linewidth: u.Quantity (km/s)
The width of the line to plot
Examples
--------
>>> from pyradex import Radex,synthspec
>>> radex_pars = dict(temperature=20, column=1e13,
... abundance=10**-8.5,
... collider_densities={'H2':1e4})
>>> R = Radex(species='oh2co-h2', **radex_pars)
>>> R.run_radex()
>>> wcs = synthspec.FrequencyArray(4.828*u.GHz, 4.830*u.GHz, npts=1000)
>>> S = synthspec.SyntheticSpectrum.from_RADEX(wcs, R)
>>> S.plot()
>>> radex_pars['temperature'] = 50
>>> S2 = S(velocity_offset=2*u.km/u.s, **radex_pars)
>>> S2.plot()
"""
from .core import Radex
rad = Radex(species=self.species, **kwargs)
if linewidth is None:
linewidth = self.linewidth
else:
self.linewidth = linewidth
self.rad = rad
linefreqs = rad.frequency
linefreq_mask = (linefreqs>self.minfreq) & (linefreqs<self.maxfreq)
self.linefreqs = linefreqs[linefreq_mask]
self.T_B = rad.T_B[linefreq_mask]
self.width_frequency = (linewidth/c.c * self.linefreqs)
self.data = self.get_profile()
return self
# I think this was intended to be a more flexible FrequencyArray class
# compatible with specutils, but I gave up on it. Instead, it's just a proxy
# for np.linspace
def FrequencyArray(minfreq, maxfreq, npoints=1000):
return np.linspace(minfreq, maxfreq, npoints)
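# Minimal usage sketch (assuming a NumPy/astropy combination in which
# np.linspace propagates Quantity units):
#
#     wcs = FrequencyArray(91.95*u.GHz, 92*u.GHz, npoints=1000)
#
# gives a length-1000 frequency array suitable as the `wcs` argument of
# SyntheticSpectrum.from_table / from_RADEX above.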
|
|
""" Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty, diagonal
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
    Equivalent to ``A[:,::-1]``. Requires the array to be at least
    two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0,1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0,1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
    tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k >= 0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, dtype=m.dtype)), m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
    Generate a Vandermonde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the `i`-th output column is the input vector
raised element-wise to the power of ``N - i - 1``. Such a matrix with
a geometric progression in each row is named for Alexandre-Theophile
Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
        Vandermonde matrix of order `N`. The first column is ``x^(N-1)``,
        the second ``x^(N-2)`` and so forth.
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None:
N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N - 1):
X[:,i] = x**(N - i - 1)
return X
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
    We can now use Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent, interpolation='nearest')
<matplotlib.image.AxesImage object at ...>
>>> plt.colorbar()
<matplotlib.colorbar.Colorbar instance at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
    An offset can also be passed to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
The row dimension of the square arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
    See Also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0], k)
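# Illustrative usage sketch for `tril_indices_from` (not part of the original
# source; mirrors the `tril_indices` example above):
#
#     >>> a = np.arange(16).reshape(4, 4)
#     >>> a[np.tril_indices_from(a)]
#     array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])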
def triu_indices(n, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
    See Also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
"""
    Return the indices for the upper-triangle of an (N, N) array.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
    return triu_indices(arr.shape[0], k)
|
|
# -*- coding: utf-8 -*-
#!/usr/bin/python2
import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.contrib.rnn import RNNCell
from params import Params
from zoneout import ZoneoutWrapper
'''
attention weights from https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf
W_u^Q.shape: (2 * attn_size, attn_size)
W_u^P.shape: (2 * attn_size, attn_size)
W_v^P.shape: (attn_size, attn_size)
W_g.shape: (4 * attn_size, 4 * attn_size)
W_h^P.shape: (2 * attn_size, attn_size)
W_v^Phat.shape: (2 * attn_size, attn_size)
W_h^a.shape: (2 * attn_size, attn_size)
W_v^Q.shape: (attn_size, attn_size)
'''
def get_attn_params(attn_size,initializer = tf.truncated_normal_initializer):
'''
Args:
attn_size: the size of attention specified in https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf
        initializer: the authors of the original paper used Gaussian initialization; however, Xavier initialization was found to converge faster
Returns:
params: A collection of parameters used throughout the layers
'''
with tf.variable_scope("attention_weights"):
params = {"W_u_Q":tf.get_variable("W_u_Q",dtype = tf.float32, shape = (2 * attn_size, attn_size), initializer = initializer()),
#"W_ru_Q":tf.get_variable("W_ru_Q",dtype = tf.float32, shape = (2 * attn_size, 2 * attn_size), initializer = initializer()),
"W_u_P":tf.get_variable("W_u_P",dtype = tf.float32, shape = (2 * attn_size, attn_size), initializer = initializer()),
"W_v_P":tf.get_variable("W_v_P",dtype = tf.float32, shape = (attn_size, attn_size), initializer = initializer()),
"W_v_P_2":tf.get_variable("W_v_P_2",dtype = tf.float32, shape = (2 * attn_size, attn_size), initializer = initializer()),
"W_g":tf.get_variable("W_g",dtype = tf.float32, shape = (4 * attn_size, 4 * attn_size), initializer = initializer()),
"W_h_P":tf.get_variable("W_h_P",dtype = tf.float32, shape = (2 * attn_size, attn_size), initializer = initializer()),
"W_v_Phat":tf.get_variable("W_v_Phat",dtype = tf.float32, shape = (2 * attn_size, attn_size), initializer = initializer()),
"W_h_a":tf.get_variable("W_h_a",dtype = tf.float32, shape = (2 * attn_size, attn_size), initializer = initializer()),
"W_v_Q":tf.get_variable("W_v_Q",dtype = tf.float32, shape = (attn_size, attn_size), initializer = initializer()),
"v":tf.get_variable("v",dtype = tf.float32, shape = (attn_size), initializer =initializer())}
return params
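# A minimal usage sketch (illustrative; not part of the original module). The
# returned dict is keyed by the weight names listed in the module docstring
# above; 75 is an assumed attention size.
#
#     params = get_attn_params(75, initializer=tf.contrib.layers.xavier_initializer)
#     W_u_Q = params["W_u_Q"]  # shape (150, 75), i.e. (2 * attn_size, attn_size)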
def encoding(word, char, word_embeddings, char_embeddings, scope = "embedding"):
with tf.variable_scope(scope):
word_encoding = tf.nn.embedding_lookup(word_embeddings, word)
char_encoding = tf.nn.embedding_lookup(char_embeddings, char)
return word_encoding, char_encoding
def apply_dropout(inputs, size = None, is_training = True):
    '''
    Applies Zoneout (https://arxiv.org/pdf/1606.01305.pdf) when Params.zoneout
    is set, or standard dropout via DropoutWrapper otherwise.
    '''
if Params.dropout is None and Params.zoneout is None:
return inputs
if Params.zoneout is not None:
return ZoneoutWrapper(inputs, state_zoneout_prob= Params.zoneout, is_training = is_training)
elif is_training:
return tf.contrib.rnn.DropoutWrapper(inputs,
output_keep_prob = 1 - Params.dropout,
# variational_recurrent = True,
# input_size = size,
dtype = tf.float32)
else:
return inputs
def bidirectional_GRU(inputs, inputs_len, cell = None, cell_fn = tf.contrib.rnn.GRUCell, units = Params.attn_size, layers = 1, scope = "Bidirectional_GRU", output = 0, is_training = True, reuse = None):
'''
Bidirectional recurrent neural network with GRU cells.
Args:
inputs: rnn input of shape (batch_size, timestep, dim)
inputs_len: rnn input_len of shape (batch_size, )
cell: rnn cell of type RNN_Cell.
output: if 0, output returns rnn output for every timestep,
if 1, output returns concatenated state of backward and
forward rnn.
'''
with tf.variable_scope(scope, reuse = reuse):
if cell is not None:
(cell_fw, cell_bw) = cell
else:
shapes = inputs.get_shape().as_list()
if len(shapes) > 3:
inputs = tf.reshape(inputs,(shapes[0]*shapes[1],shapes[2],-1))
inputs_len = tf.reshape(inputs_len,(shapes[0]*shapes[1],))
# if no cells are provided, use standard GRU cell implementation
if layers > 1:
cell_fw = MultiRNNCell([apply_dropout(cell_fn(units), size = inputs.shape[-1] if i == 0 else units, is_training = is_training) for i in range(layers)])
cell_bw = MultiRNNCell([apply_dropout(cell_fn(units), size = inputs.shape[-1] if i == 0 else units, is_training = is_training) for i in range(layers)])
else:
cell_fw, cell_bw = [apply_dropout(cell_fn(units), size = inputs.shape[-1], is_training = is_training) for _ in range(2)]
outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs,
sequence_length = inputs_len,
dtype=tf.float32)
if output == 0:
return tf.concat(outputs, 2)
elif output == 1:
return tf.reshape(tf.concat(states,1),(Params.batch_size, shapes[1], 2*units))
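# Illustrative call sketch (assumed shapes; not from the original source). With
# output=0 the result is the forward and backward outputs concatenated per
# timestep, shape (batch_size, timestep, 2 * units); with output=1 it is the
# concatenated final states reshaped as above.
#
#     # inputs: (batch_size, timestep, dim), inputs_len: (batch_size,)
#     encoded = bidirectional_GRU(inputs, inputs_len, units=Params.attn_size,
#                                 layers=3, scope="passage_encoding")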
def pointer_net(passage, passage_len, question, question_len, cell, params, scope = "pointer_network"):
'''
Answer pointer network as proposed in https://arxiv.org/pdf/1506.03134.pdf.
Args:
passage: RNN passage output from the bidirectional readout layer (batch_size, timestep, dim)
passage_len: variable lengths for passage length
question: RNN question output of shape (batch_size, timestep, dim) for question pooling
question_len: Variable lengths for question length
cell: rnn cell of type RNN_Cell.
params: Appropriate weight matrices for attention pooling computation
Returns:
softmax logits for the answer pointer of the beginning and the end of the answer span
'''
with tf.variable_scope(scope):
weights_q, weights_p = params
shapes = passage.get_shape().as_list()
initial_state = question_pooling(question, units = Params.attn_size, weights = weights_q, memory_len = question_len, scope = "question_pooling")
inputs = [passage, initial_state]
p1_logits = attention(inputs, Params.attn_size, weights_p, memory_len = passage_len, scope = "attention")
scores = tf.expand_dims(p1_logits, -1)
attention_pool = tf.reduce_sum(scores * passage,1)
_, state = cell(attention_pool, initial_state)
inputs = [passage, state]
p2_logits = attention(inputs, Params.attn_size, weights_p, memory_len = passage_len, scope = "attention", reuse = True)
return tf.stack((p1_logits,p2_logits),1)
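# Consumption sketch (assumed usage, consistent with the docstring above):
#
#     logits = pointer_net(passage, passage_len, question, question_len,
#                          cell, (weights_q, weights_p))
#     # logits: (batch_size, 2, passage_timestep); logits[:, 0] is the
#     # softmaxed distribution over answer-start positions, logits[:, 1]
#     # the distribution over answer-end positions.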
def attention_rnn(inputs, inputs_len, units, attn_cell, bidirection = True, scope = "gated_attention_rnn", is_training = True):
with tf.variable_scope(scope):
if bidirection:
outputs = bidirectional_GRU(inputs,
inputs_len,
cell = attn_cell,
scope = scope + "_bidirectional",
output = 0,
is_training = is_training)
else:
outputs, _ = tf.nn.dynamic_rnn(attn_cell, inputs,
sequence_length = inputs_len,
dtype=tf.float32)
return outputs
def question_pooling(memory, units, weights, memory_len = None, scope = "question_pooling"):
with tf.variable_scope(scope):
shapes = memory.get_shape().as_list()
V_r = tf.get_variable("question_param", shape = (Params.max_q_len, units), initializer = tf.contrib.layers.xavier_initializer(), dtype = tf.float32)
inputs_ = [memory, V_r]
attn = attention(inputs_, units, weights, memory_len = memory_len, scope = "question_attention_pooling")
attn = tf.expand_dims(attn, -1)
return tf.reduce_sum(attn * memory, 1)
def gated_attention(memory, inputs, states, units, params, self_matching = False, memory_len = None, scope="gated_attention"):
with tf.variable_scope(scope):
weights, W_g = params
inputs_ = [memory, inputs]
states = tf.reshape(states,(Params.batch_size,Params.attn_size))
if not self_matching:
inputs_.append(states)
scores = attention(inputs_, units, weights, memory_len = memory_len)
scores = tf.expand_dims(scores,-1)
attention_pool = tf.reduce_sum(scores * memory, 1)
inputs = tf.concat((inputs,attention_pool),axis = 1)
g_t = tf.sigmoid(tf.matmul(inputs,W_g))
return g_t * inputs
def mask_attn_score(score, memory_sequence_length, score_mask_value = -1e8):
score_mask = tf.sequence_mask(
memory_sequence_length, maxlen=score.shape[1])
score_mask_values = score_mask_value * tf.ones_like(score)
return tf.where(score_mask, score, score_mask_values)
def attention(inputs, units, weights, scope = "attention", memory_len = None, reuse = None):
with tf.variable_scope(scope, reuse = reuse):
outputs_ = []
weights, v = weights
for i, (inp,w) in enumerate(zip(inputs,weights)):
shapes = inp.shape.as_list()
inp = tf.reshape(inp, (-1, shapes[-1]))
if w is None:
w = tf.get_variable("w_%d"%i, dtype = tf.float32, shape = [shapes[-1],Params.attn_size], initializer = tf.contrib.layers.xavier_initializer())
outputs = tf.matmul(inp, w)
            # Hardcoded attention output reshaping. Equations (4), (8), (9) and (11) in the original paper.
if len(shapes) > 2:
outputs = tf.reshape(outputs, (shapes[0], shapes[1], -1))
            elif len(shapes) == 2 and shapes[0] == Params.batch_size:
outputs = tf.reshape(outputs, (shapes[0],1,-1))
else:
outputs = tf.reshape(outputs, (1, shapes[0],-1))
outputs_.append(outputs)
outputs = sum(outputs_)
if Params.bias:
b = tf.get_variable("b", shape = outputs.shape[-1], dtype = tf.float32, initializer = tf.contrib.layers.xavier_initializer())
outputs += b
scores = tf.reduce_sum(tf.tanh(outputs) * v, [-1])
if memory_len is not None:
scores = mask_attn_score(scores, memory_len)
return tf.nn.softmax(scores) # all attention output is softmaxed now
def cross_entropy(output, target):
cross_entropy = target * tf.log(output + 1e-8)
cross_entropy = -tf.reduce_sum(cross_entropy, 2) # sum across passage timestep
cross_entropy = tf.reduce_mean(cross_entropy, 1) # average across pointer networks output
return tf.reduce_mean(cross_entropy) # average across batch size
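# Worked sketch of the loss above (illustrative numbers): for one example whose
# true start index is 1 and whose predicted start distribution is
# [0.1, 0.7, 0.2], the start term contributes -log(0.7 + 1e-8) ~= 0.357; the
# end term is computed the same way, the two are averaged, and the result is
# averaged again over the batch.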
def total_params():
total_parameters = 0
for variable in tf.trainable_variables():
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
print("Total number of trainable parameters: {}".format(total_parameters))
|
|
# model.py BY Ben Burton for ParkMT at MTHacks
# This program determines the attraction value for a parking lot on
# MTSU campus based on its location, the population of nearby buildings,
# and the number of spots in the parking lot.
from math import exp
# the "no-no" variables (module-level globals)
parking_lots = []
buildings = []
populations={}
def main():
# Open lot and building position data files and convert to arrays
global parking_lots
global buildings
global populations
    parking_lots = get_array("../../data/parkinglots.dat")
    buildings = get_array("../../data/buildings.dat")
    populations = get_pop_dict("RyansDesires.txt")
day, time, destination = get_user_input()
# day, time, destination = 1, 12, 19
# get attractiveness of each lot based on input
attractions = {}
for lot in parking_lots:
building_name = lot[0]
attractions[building_name] = attractiveness(lot, buildings[destination], time, day)
# sort by attraction
sorted_attractions = sorted(attractions.items(), key=lambda kv: kv[1], reverse=True)
    # normalize attractions to the range [0, 1]
    min_attract = sorted_attractions[-1][1]
    max_attract = sorted_attractions[0][1] - min_attract
    final_attract = []
    for name, attract in sorted_attractions:
        final_attract.append([name, (attract - min_attract) / max_attract])
    print(final_attract)
    # output lots ranked by attraction
weekday = "MTWRF"
print("On",weekday[day],"at",time,"going to",buildings[destination][0],"these lots have these attractions:")
for i in range(len(final_attract)):
print(format(i+1,'2'),format(final_attract[i][0],'15'),format(final_attract[i][1],'>8.2f'))
# This function takes the input files and turns them into arrays.
# Each element corresponds to one building or parking lot.
# building[i] = ["name", x, y]
# parking_lots[i] = ["name", x, y, capacity]
def get_array(filename):
data_array = []
in_file = open(filename,"r")
    num_fields = len(in_file.readline().split())  # sneakily read past heading line
    for line in in_file:
        data_array.append(line.split())
        for i in range(num_fields):
try:
data_array[-1][i] = int(data_array[-1][i])
            except (ValueError, IndexError):  # keep non-numeric fields as strings
pass
in_file.close()
return data_array
# Makes a dictionary from Ryan's magical one-dimensional text file.
# Access values like so:
# dict[building_name][day][hour]
def get_pop_dict(filename):
dict1 = {}
in_file = open(filename,"r")
building = in_file.readline().strip()
while building != '':
days = [[],[],[],[],[]]
for i in range(24):
for j in range(5):
popval = in_file.readline()
days[j].append(int(popval))
dict1[building] = days
building = in_file.readline().strip()
in_file.close()
return dict1
# This function will get the distance between two spots of the map.
# distance = sqrt([x1-x2]^2+[y1-y2]^2)
# Inputs should be building[i] and/or parking_lot[i]
def get_distance(place1, place2):
x1 = place1[1]
x2 = place2[1]
y1 = place1[2]
y2 = place2[2]
distance = ((x1-x2)**2 + (y1-y2)**2)**0.5
return distance
# Ask user for day, time, and building. building should be number.
def get_user_input():
# initialize with sentinel values
day, time, destination= -1, -1, -1
# Get day. M=0, T=1, ... , F=4
while day == -1:
day = input("Enter day of week (M T W R F): ")
day = "MTWRF".find(day)
if day ==-1:
print("Please input M, T, W, R, or F!")
# Get time
while (time < 6) or (time > 18):
        time = input("What time will you arrive on campus? (24hr, enter integer from 6 through 18): ")
        try:  # hope they at least entered a number
            time = int(time)
        except ValueError:  # non-numeric input
time = -1
if (time < 6) or (time > 18):
print("Please input a whole number between 6 and 18!")
    # Output building list:
for i in range(0,len(buildings)//2):
print(format(i+1,'2'),". ",format(buildings[i][0],'6'),sep='',end='')
print(format(len(buildings)//2+i+1,'2'),". ",buildings[len(buildings)//2+i][0],sep='')
    # Get building number. Note that the displayed list is one-indexed, not zero-indexed!
while (destination<1) or (destination>len(buildings)):
destination = input("Please select the number corresponding to your destination: ")
        try:  # hope they at least entered a number
            destination = int(destination)
        except ValueError:  # non-numeric input
destination = -1
if (destination<1) or (destination>len(buildings)):
print("Please input a number from the list")
return day, time, destination - 1
# This function will return the rating of a parking lot given a building destination and time.
# lot = parking_lots[i]
# destination = buildings[i]
def attractiveness(lot,destination,time,day):
    capacity = lot[3]  # capacity is the 4th field (see the get_array format above)
attraction = distance_preference(lot,destination) * available_parking(lot, time, day) #/ capacity
return attraction
# This function is 100% cheese (a hand-tuned heuristic).
# It gives a value that decreases as distance increases.
# Its output range is 0 < y <= 1.
def distance_preference(lot, destination):
A = 16.0 # This is a parameter we can play with
distance = get_distance(lot, destination)
preference = 1/(A*distance/5280.0 + 1.0)
#B = .75
#walkfar = 5280/2
#preference = 1/(1 + exp(B/528*(distance-walkfar))+1)
return preference
#return 1
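# Worked example of the curve above (illustrative numbers): with A = 16.0 and a
# half-mile walk (2640 ft), preference = 1/(16 * 2640/5280 + 1) = 1/9, roughly
# 0.11; at distance 0 the preference is exactly 1.0.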
# This function returns a calculated estimate of the number of spots available.
# The value returned can be negative; we might change it to return 0 if negative.
def available_parking(lot, time, day):
    capacity = lot[3]  # capacity is the 4th field (see the get_array format above)
# sum up all that may be parked
total_parked = 0
for building in buildings:
total_parked += parked(lot, building, time, day)
available = capacity - total_parked
return available
# This function will predict how many people from a building will be parked in a particular lot
def parked(lot, building, time, day):
distance = get_distance(lot, building)
pop = population(building, time, day)
    # Sum up total distance of all paths from every parking lot to a building.
    # Use a distinct loop variable so the `lot` parameter isn't shadowed.
    total_distance = 0
    for other_lot in parking_lots:
        total_distance += get_distance(other_lot, building)
parkers = pop * (1 - distance / total_distance)
return parkers
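# Worked example (illustrative numbers): with two lots 300 ft and 700 ft from a
# building holding 100 people, total_distance = 1000 ft, so the near lot is
# predicted to hold 100 * (1 - 300/1000) = 70 parkers and the far lot
# 100 * (1 - 700/1000) = 30.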
# This function will return the theoretical population for a given time.
# This function is probably going to cause the most error in our results.
# Access population of building on a day at a time as such:
# populations[building_name][day][hour]
def population(building, time, day):
pop_total = attempt_pop_read(building, time, day)
# for i in range(1,4):
# pop_total += attempt_pop_read(building, time+i, day)/(3**i)
# pop_total += attempt_pop_read(building, time-i, day)/(3**i)
    a_rate = 0.8  # attendance
c_rate = 0.8 # commuters
return pop_total * a_rate * c_rate
def attempt_pop_read(building, time, day):
try: # See if we have data for building
pop = populations[building[0]][day][time]
    except (KeyError, IndexError):  # no data for this building/time
return 0
return pop
main()
|
|
from wallaby import *
import utils as u
import motorsPlusPlus as x
import constants as c
from logger import log as display
def test_ramp():
x.drive_speed(100, 100)
u.DEBUG()
enable_servos()
u.move_servo(c.DEPLOYABLE_WHEELS, c.WHEELS_DEPLOYED)
u.move_bin(c.ARM_SWING)
x.drive_speed(12, 100)
start_time = seconds()
x.drive_speed(5, 100)
while gyro_y() < 100 or seconds() < start_time + 2:
if u.on_black_front():
x.drive_forever(70, 100)
else:
x.drive_forever(100, 70)
msleep(10)
x.drive_speed(4, 100)
x.pivot_left_condition(30, u.on_black_front, False)
def alt_init():
u.move_servo(c.SERVO_JOINT, c.JOINT_MID, 2047)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_TUCKED, 2047)
enable_servos()
u.wait_for_button()
# u.move_bin(c.ARM_SWING)
# u.move_bin(c.ARM_ALL_UP)
x.drive_speed(70, 100)
# x.rotate(180, 50)
# u.wait_for_button()
# go_up_ramp()
def select():
end = seconds() + 3
exit_loop = True
state = 0
changed = False
begin = False
for setting in range(0, 8):
if digital(setting):
begin = True
if begin:
display("Starting selection")
while not exit_loop or seconds() < end:
for setting in range(0, 8):
if digital(setting) and setting != state:
state = setting
changed = True
while digital(setting):
pass
if state != 0:
exit_loop = False
if changed:
display("SELECTION: {}".format(state))
changed = False
if right_button():
while right_button():
pass
display("Running table setting {}".format(state))
msleep(300)
exit_loop = True
end = 0
display("Ended selection")
def select2():
selection = 0
if digital(0):
display("Started selection\n")
display("Set to: {}".format(selection))
while digital(0):
pass
while not right_button():
if digital(0):
while digital(0):
pass
selection += 1
display("Set to: {}".format(selection))
display("\n Ended selection\n")
def init():
display("\nFunction: init\n")
if c.IS_CLONE:
display("I AM CLONE")
else:
display("I AM PRIME")
# enable_servos()
# msleep(2500)
# x.linefollow_distance(23.46)
# u.DEBUG_WITH_WAIT()
def self_test():
display("\nFunction: self_test\n")
    display("Press the left button to use the botguy hitter; otherwise press the right button")
while not right_button() and not left_button():
pass
if right_button():
c.HIT_BOTGUY = False
        display("won't hit botguy")
elif left_button():
c.HIT_BOTGUY = True
display("will hit botguy")
display("DONE SETTING")
if u.on_black_front() or u.on_black_back():
display("Something is wrong with the tophats!")
display("LTOPHAT: {}\tRTOPHAT: {}".format(u.on_black_front(), u.on_black_back()))
exit(1)
while not u.found_bump():
pass
display("Good gyro")
u.wait_for_button()
enable_servos()
x.drive_forever(80, 80)
x.drive_condition(80, 80, u.on_black_front, False)
msleep(500)
x.drive_condition(80, 80, u.on_black_back, False)
x.freeze_motors()
u.move_servo(c.SERVO_JOINT, c.JOINT_MID)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_SPINNER_TEST)
x.wait_for_someone_to_rotate()
u.wait_for_button()
x.rotate_until_stalled(20)
msleep(500)
x.rotate_spinner(.06, -30)
msleep(500)
x.set_spinner_safe()
u.move_servo(c.SERVO_JOINT, c.JOINT_TUCKED)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_TUCKED)
u.move_servo(c.SERVO_BOT_GUY_HITTER, c.HITTER_OUT)
u.move_servo(c.SERVO_BOT_GUY_HITTER, c.HITTER_IN)
msleep(500)
x.rotate(15,60)
msleep(1000)
x.rotate(-15,60)
display("DONE")
def start():
display("\nFunction: start\n")
u.wait_4_light(ignore=False)
if c.IS_CLONE:
msleep(2500)
else:
msleep(2000)
shut_down_in(119.75)
c.startTime = seconds()
display("NOTE: {}\t{}".format(seconds(), c.startTime))
u.move_servo(c.SERVO_JOINT, c.JOINT_TUCKED)
enable_servo(c.SERVO_JOINT)
def leave_startbox():
display("\nFunction: leave_startbox\n")
u.move_servo(c.SERVO_BIN_ARM, c.ARM_TUCKED)
x.drive_condition(80, 80, u.on_black_front, False)
x.drive_speed(-4, 60)
if c.IS_CLONE:
x.rotate(-92, 70)
else:
x.rotate(-96, 70)
x.drive_speed(-34, 100)
x.drive_condition(80, 80, u.on_black_front, False)
x.drive_speed(1, 80)
x.rotate(92, 60)
x.drive_speed(-7, 85)
def drive_till_bump():
display("\nFunction: drive_till_bump\n")
if c.IS_CLONE:
x.drive_speed(41, 100, True)
else:
x.drive_speed(42, 100, True)
def get_bin():
display("\nFunction: get_bin\n")
u.move_servo(c.SERVO_JOINT, c.JOINT_TUCKED, 100)
if c.IS_CLONE:
x.rotate(-86, 50)
else:
x.rotate(-86, 50)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_APPROACH)
u.move_servo(c.SERVO_JOINT, c.JOINT_SWING)
msleep(250)
if c.IS_CLONE:
x.drive_speed(12, 70)
else:
x.drive_speed(10, 70)
u.move_servo(c.SERVO_JOINT, c.JOINT_SWING)
u.move_bin(c.ARM_SWING, 5)
u.move_servo(c.SERVO_JOINT, c.JOINT_PARALLEL, 5)
u.move_bin(c.ARM_APPROACH, 5)
u.move_servo(c.SERVO_JOINT, c.JOINT_ROTATE, 5)
# x.drive_speed(-20, 100)
x.drive_speed(-16, 100)
x.drive_speed(-4, 50)
if c.HIT_BOTGUY:
u.move_servo(c.SERVO_BOT_GUY_HITTER, c.HITTER_OUT, 100)
x.pivot_right(30,75)
x.pivot_right(-30, 75)
u.move_servo(c.SERVO_BOT_GUY_HITTER, c.HITTER_IN, 100)
def go_to_spinner():
display("\nFunction: go_to_spinner\n")
u.move_servo(c.SERVO_BIN_ARM, c.ARM_TUCKED, 5)
if c.IS_CLONE:
x.drive_speed(8, 100)
else:
x.drive_speed(11, 100)
if c.IS_CLONE:
x.pivot_left(-90, 70)
else:
x.pivot_left(-88, 70)
x.drive_speed(22, -100, True)
x.pivot_left(-32, 50)
x.drive_speed(-11, 80)
x.pivot_right(-32, 50)
x.drive_speed(-3, 70)
x.drive_condition(50, 50, u.on_black_front, False)
if c.IS_CLONE:
x.rotate(90, 35)
else:
x.rotate(98, 35)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_TUCKED)
u.move_servo(c.SERVO_JOINT, c.JOINT_PARALLEL)
x.drive_condition(80, 80, u.on_black_front, False)
x.drive_condition(50, 50, u.on_black_front, True)
x.rotate_spinner(.25, 80)
x.drive_speed(5, 60)
u.move_servo(c.SERVO_JOINT, c.JOINT_GROUND)
x.rotate_spinner(4, -70)
x.rotate_to_safe(50)
def go_to_ramp():
display("\nFunction: go_to_ramp\n")
u.move_servo(c.SERVO_JOINT, c.JOINT_RAMP_ON)
u.move_servo(c.SERVO_JOINT, c.JOINT_ARM_TILT)
if c.IS_CLONE:
x.rotate(-5, 50)
else:
x.rotate(-5, 50)
x.drive_forever(-50, -50)
u.move_bin(c.ARM_TILT, 5)
x.drive_speed(-10, 100)
x.rotate(5, 65)
u.move_servo(c.SERVO_JOINT, c.JOINT_HOLD, 5)
u.move_bin(c.ARM_TUCKED, 5)
msleep(100)
x.drive_speed(-7, 100)
x.drive_speed(-6, 75)
x.drive_speed(2, 75)
x.pivot_right(-90, 60)
u.move_servo(c.SERVO_JOINT, c.JOINT_MID)
def go_up_ramp():
display("\nFunction: go_up_ramp\n")
u.move_bin(c.ARM_SWING)
x.drive_speed(12, 100)
start_time = seconds()
x.drive_speed(5, 100)
while gyro_y() < 100 or seconds() < start_time + 2:
if u.on_black_front():
x.drive_forever(50, 100)
else:
x.drive_forever(100, 50)
msleep(10)
x.drive_speed(8, 100)
u.move_servo(c.SERVO_JOINT, c.JOINT_GROUND)
# u.wait_for_button()
print("1")
x.pivot_left_condition(30, u.on_black_front, False)
# u.wait_for_button()
print("2")
# if u.on_black_back():
x.pivot_right_condition(30, u.on_black_back)
# x.pivot_right(35, 30)
# u.wait_for_button()
print("3")
x.pivot_right_condition(30, u.on_black_back, False)
# u.wait_for_button()
print("4")
x.pivot_left_condition(30, u.on_black_front, False)
# u.wait_for_button()
print("5")
u.move_bin(c.ARM_ALL_UP)
msleep(500)
def go_up_ramp2():
display("\nFunction: go_up_ramp\n")
u.move_bin(c.ARM_SWING)
x.drive_speed(12, 100)
start_time = seconds()
x.drive_speed(5, 100)
while gyro_y() < 100 or seconds() < start_time + 2:
if u.on_black_front():
x.drive_forever(70, 100)
else:
x.drive_forever(100, 70)
msleep(10)
x.freeze_motors()
u.move_servo(c.SERVO_JOINT, c.JOINT_GROUND)
if c.IS_CLONE:
x.drive_speed(5, 100)
u.move_servo(c.SERVO_BOT_GUY_HITTER, c.HITTER_ET)
x.pivot_right_condition(-30, u.lost_ramp, False)
x.pivot_left_condition(-30, u.on_black_back, False)
x.linefollow_distance(9)
if not c.IS_CLONE:
x.drive_speed(8, 100)
# u.wait_for_button()
print("1")
x.pivot_left_condition(30, u.on_black_front, False)
# u.wait_for_button()
print("2")
# if u.on_black_back():
x.pivot_right_condition(30, u.on_black_back)
# x.pivot_right(35, 30)
# u.wait_for_button()
print("3")
x.pivot_right_condition(30, u.on_black_back, False)
# u.wait_for_button()
print("4")
x.pivot_left_condition(30, u.on_black_front, False)
# u.wait_for_button()
print("5")
u.move_bin(c.ARM_ALL_UP)
msleep(500)
def go_and_score_the_bin():
display("\nFunction: go_and_score_the_bin\n")
u.move_servo(c.SERVO_JOINT, c.JOINT_DELIVER,4)
msleep(500)
u.move_servo(c.SERVO_BOT_GUY_HITTER, c.HITTER_OUT, 100)
# x.linefollow_distance(28, 50, 70)
x.linefollow_distance(21, 50, 70, 5)
x.pivot_right(-32.5, 50)
# x.drive_speed(-2, 50)
# x.rotate(-10, 50)
# x.drive_speed(2.5, 50)
# x.pivot_right(40, 50)
# x.drive_speed(2.5, 50)
# u.wait_for_button()
if not c.IS_CLONE:
x.drive_speed(1, 50)
disable_servo(c.SERVO_JOINT)
msleep(500)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_MAX)
msleep(500)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_HIT, 20)
msleep(300)
u.move_servo(c.SERVO_BIN_ARM, c.ARM_MAX, 20)
x.drive_speed(1, 50)
x.pivot_right(30, 50)
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
#
"""This is the interface for managing hunts."""
import collections as py_collections
import operator
import StringIO
import urllib
import logging
from grr.gui import plot_lib
from grr.gui import renderers
from grr.gui.plugins import crash_view
from grr.gui.plugins import fileview
from grr.gui.plugins import foreman
from grr.gui.plugins import forms
from grr.gui.plugins import searchclient
from grr.gui.plugins import semantic
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import hunts
from grr.lib import rdfvalue
from grr.lib import utils
class ManageHunts(renderers.Splitter2Way):
"""Manages Hunts GUI Screen."""
description = "Hunt Manager"
behaviours = frozenset(["General"])
top_renderer = "HuntTable"
bottom_renderer = "HuntViewTabs"
context_help_url = "user_manual.html#_creating_a_hunt"
layout_template = (renderers.Splitter2Way.layout_template +
renderers.TemplateRenderer.help_template)
def Layout(self, request, response):
response = super(ManageHunts, self).Layout(request, response)
return self.CallJavascript(response, "ManageHunts.Layout")
class HuntStateIcon(semantic.RDFValueRenderer):
"""Render the hunt state by using an icon.
This class is similar to FlowStateIcon, but it also adds STATE_STOPPED
state for hunts that were created but not yet started (because of lack of
approval, for example).
"""
layout_template = renderers.Template("""
<div class="centered hunt-state-icon" state="{{this.state_str|escape}}">
<img class='grr-icon grr-flow-icon'
src='/static/images/{{this.icon|escape}}' />
</div>
""")
# Maps the flow states to icons we can show
state_map = {"STOPPED": "stock_yes.png",
"STARTED": "clock.png",
"PAUSED": "pause.png"}
def Layout(self, request, response):
self.state_str = str(self.proxy)
self.icon = self.state_map.get(self.proxy, "question-red.png")
return super(HuntStateIcon, self).Layout(request, response)
class RunHuntConfirmationDialog(renderers.ConfirmationDialogRenderer):
"""Dialog that asks confirmation to run a hunt and actually runs it."""
post_parameters = ["hunt_id"]
header = "Run a hunt?"
content_template = renderers.Template("""
<p>Are you sure you want to <strong>run</strong> this hunt?</p>
""")
ajax_template = renderers.Template("""
<p class="text-info">Hunt started successfully!</p>
""")
def Layout(self, request, response):
self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
return super(RunHuntConfirmationDialog, self).Layout(request, response)
def RenderAjax(self, request, response):
flow.GRRFlow.StartFlow(flow_name="StartHuntFlow", token=request.token,
hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
return self.RenderFromTemplate(self.ajax_template, response,
unique=self.unique)
class PauseHuntConfirmationDialog(renderers.ConfirmationDialogRenderer):
  """Dialog that asks confirmation to pause a hunt and actually pauses it."""
post_parameters = ["hunt_id"]
header = "Pause a hunt?"
content_template = renderers.Template("""
<p>Are you sure you want to <strong>pause</strong> this hunt?</p>
""")
ajax_template = renderers.Template("""
<p class="text-info">Hunt paused successfully!</p>
""")
def Layout(self, request, response):
self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
return super(PauseHuntConfirmationDialog, self).Layout(request, response)
def RenderAjax(self, request, response):
flow.GRRFlow.StartFlow(flow_name="PauseHuntFlow", token=request.token,
hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
return self.RenderFromTemplate(self.ajax_template, response,
unique=self.unique)
class ModifyHuntDialog(renderers.ConfirmationDialogRenderer):
"""Dialog that allows user to modify certain hunt parameters."""
post_parameters = ["hunt_id"]
header = "Modify a hunt"
proceed_button_title = "Modify!"
expiry_time_dividers = ((60*60*24, "d"), (60*60, "h"), (60, "m"), (1, "s"))
content_template = renderers.Template("""
{{this.hunt_params_form|safe}}
""")
ajax_template = renderers.Template("""
<p class="text-info">Hunt modified successfully!</p>
""")
def Layout(self, request, response):
"""Layout handler."""
hunt_urn = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
with aff4.FACTORY.Open(hunt_urn, aff4_type="GRRHunt",
token=request.token) as hunt:
runner = hunt.GetRunner()
hunt_args = rdfvalue.ModifyHuntFlowArgs(
client_limit=runner.args.client_limit,
expiry_time=runner.context.expires,
)
self.hunt_params_form = forms.SemanticProtoFormRenderer(
hunt_args, supressions=["hunt_urn"]).RawHTML(request)
self.check_access_subject = hunt_urn
return super(ModifyHuntDialog, self).Layout(request, response)
def RenderAjax(self, request, response):
"""Starts ModifyHuntFlow that actually modifies a hunt."""
hunt_urn = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
args = forms.SemanticProtoFormRenderer(
rdfvalue.ModifyHuntFlowArgs()).ParseArgs(request)
flow.GRRFlow.StartFlow(flow_name="ModifyHuntFlow", token=request.token,
hunt_urn=hunt_urn, args=args)
return self.RenderFromTemplate(self.ajax_template, response,
unique=self.unique)
class DeleteHuntDialog(renderers.ConfirmationDialogRenderer):
"""Dialog that confirms deletion of a hunt."""
post_parameters = ["hunt_id"]
header = "Delete a hunt"
proceed_button_title = "Delete!"
content_template = renderers.Template("""
<p>Are you sure you want to <strong>delete</strong> this hunt? Note that
hunts can only be deleted if there are no results. </p>
""")
ajax_template = renderers.Template("""
<p class="text-info">Hunt Deleted!</p>
""")
def Layout(self, request, response):
"""Layout handler."""
# TODO(user) Switch from requiring approval to requiring ownership.
self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
return super(DeleteHuntDialog, self).Layout(request, response)
  def RenderAjax(self, request, response):
    """Starts DeleteHuntFlow that actually deletes a hunt."""
flow.GRRFlow.StartFlow(flow_name="DeleteHuntFlow", token=request.token,
hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
return self.RenderFromTemplate(self.ajax_template, response,
unique=self.unique)
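# The dialogs above share one pattern: Layout() records the hunt URN as
# check_access_subject (so access-control checks are enforced) and
# RenderAjax() starts a server-side flow. A sketch of wiring up another hunt
# action follows; "StopHuntFlow" is a hypothetical flow name used only to
# illustrate the pattern.
class StopHuntConfirmationDialog(renderers.ConfirmationDialogRenderer):
  """Illustrative sketch: dialog asking confirmation to stop a hunt."""
  post_parameters = ["hunt_id"]
  header = "Stop a hunt?"
  content_template = renderers.Template("""
<p>Are you sure you want to <strong>stop</strong> this hunt?</p>
""")
  ajax_template = renderers.Template("""
<p class="text-info">Hunt stopped successfully!</p>
""")
  def Layout(self, request, response):
    self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
    return super(StopHuntConfirmationDialog, self).Layout(request, response)
  def RenderAjax(self, request, response):
    # "StopHuntFlow" is hypothetical; substitute a real flow name here.
    flow.GRRFlow.StartFlow(flow_name="StopHuntFlow", token=request.token,
                           hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
    return self.RenderFromTemplate(self.ajax_template, response,
                                   unique=self.unique)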
class HuntTable(fileview.AbstractFileTable):
"""Show all hunts."""
selection_publish_queue = "hunt_select"
custom_class = "HuntTable"
layout_template = """
<div id="new_hunt_dialog_{{unique|escape}}"
class="modal wide-modal high-modal" update_on_show="true"
tabindex="-1" role="dialog" aria-hidden="true">
</div>
<div id="run_hunt_dialog_{{unique|escape}}"
class="modal" tabindex="-1" role="dialog" aria-hidden="true">
</div>
<div id="pause_hunt_dialog_{{unique|escape}}"
class="modal" tabindex="-1" role="dialog" aria-hidden="true">
</div>
<div id="modify_hunt_dialog_{{unique|escape}}"
class="modal" tabindex="-1" role="dialog" aria-hidden="true">
</div>
<div id="delete_hunt_dialog_{{unique|escape}}"
class="modal" tabindex="-1" role="dialog" aria-hidden="true">
</div>
<ul class="breadcrumb">
<li>
<button id='new_hunt_{{unique|escape}}' title='New Hunt'
class="btn btn-default" name="NewHunt" data-toggle="modal"
data-target="#new_hunt_dialog_{{unique|escape}}">
<img src='/static/images/new.png' class='toolbar_icon'>
</button>
<div class="btn-group">
<button id='run_hunt_{{unique|escape}}' title='Run Hunt'
class="btn btn-default" disabled="yes" name="RunHunt" data-toggle="modal"
data-target="#run_hunt_dialog_{{unique|escape}}">
<img src='/static/images/play_button.png' class='toolbar_icon'>
</button>
<button id='pause_hunt_{{unique|escape}}' title='Pause Hunt'
class="btn btn-default" disabled="yes" name="PauseHunt" data-toggle="modal"
data-target="#pause_hunt_dialog_{{unique|escape}}">
<img src='/static/images/pause_button.png' class='toolbar_icon'>
</button>
<button id='modify_hunt_{{unique|escape}}' title='Modify Hunt'
class="btn btn-default" disabled="yes" name="ModifyHunt" data-toggle="modal"
data-target="#modify_hunt_dialog_{{unique|escape}}">
<img src='/static/images/modify.png' class='toolbar_icon'>
</button>
<button id='toggle_robot_hunt_display_{{unique|escape}}'
title='Show/Hide Automated hunts'
class="btn btn-default" name="ToggleRobotHuntDisplay">
<img src='/static/images/robot.png' class='toolbar_icon'>
</button>
<button id='delete_hunt_{{unique|escape}}' title='Delete Hunt'
class="btn btn-default" disabled="yes" name="DeleteHunt" data-toggle="modal"
data-target="#delete_hunt_dialog_{{unique|escape}}">
<img src='/static/images/editdelete.png' class='toolbar_icon'>
</button>
</div>
<div class="new_hunt_dialog" id="new_hunt_dialog_{{unique|escape}}"
class="hide" />
</li>
</ul>
""" + fileview.AbstractFileTable.layout_template
root_path = "aff4:/hunts"
def __init__(self, **kwargs):
super(HuntTable, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn(
"Status", renderer=HuntStateIcon, width="40px"))
# The hunt id is the AFF4 URN for the hunt object.
self.AddColumn(semantic.RDFValueColumn(
"Hunt ID", renderer=semantic.SubjectRenderer))
self.AddColumn(semantic.RDFValueColumn("Name"))
self.AddColumn(semantic.RDFValueColumn("Start Time", width="16em"))
self.AddColumn(semantic.RDFValueColumn("Expires", width="16em"))
self.AddColumn(semantic.RDFValueColumn("Client Limit"))
self.AddColumn(semantic.RDFValueColumn("Creator"))
self.AddColumn(semantic.RDFValueColumn("Description", width="100%"))
def Layout(self, request, response):
response = super(HuntTable, self).Layout(request, response)
return self.CallJavascript(response, "HuntTable.Layout")
def BuildTable(self, start_row, end_row, request):
fd = aff4.FACTORY.Open("aff4:/hunts", mode="r", token=request.token)
    nr_hunts = 0
    try:
children = list(fd.ListChildren())
nr_hunts = len(children)
children.sort(key=operator.attrgetter("age"), reverse=True)
children = children[start_row:end_row]
hunt_list = []
for hunt in fd.OpenChildren(children=children):
# Skip hunts that could not be unpickled.
if not isinstance(hunt, hunts.GRRHunt) or not hunt.state:
continue
hunt.create_time = hunt.GetRunner().context.create_time
hunt_list.append(hunt)
hunt_list.sort(key=lambda x: x.create_time, reverse=True)
could_not_display = []
row_index = start_row
for hunt_obj in hunt_list:
if not isinstance(hunt_obj, hunts.GRRHunt):
could_not_display.append((hunt_obj, "Object is not a valid hunt."))
continue
if hunt_obj.state.Empty():
logging.error("Hunt without a valid state found: %s", hunt_obj)
could_not_display.append((hunt_obj,
"Hunt doesn't have a valid state."))
continue
runner = hunt_obj.GetRunner()
description = (runner.args.description or
hunt_obj.__class__.__doc__.split("\n", 1)[0])
self.AddRow({"Hunt ID": hunt_obj.urn,
"Name": hunt_obj.__class__.__name__,
"Status": hunt_obj.Get(hunt_obj.Schema.STATE),
"Start Time": runner.context.start_time,
"Expires": runner.context.expires,
"Client Limit": runner.args.client_limit,
"Creator": runner.context.creator,
"Description": description},
row_index=row_index)
# Hide automated hunts by default
if runner.context.creator == "GRRWorker":
self.SetRowClass(row_index, "robot-hunt hide")
row_index += 1
for hunt_obj, reason in could_not_display:
self.AddRow({"Hunt ID": hunt_obj.urn,
"Description": reason},
row_index=row_index)
row_index += 1
except IOError as e:
logging.error("Bad hunt %s", e)
return nr_hunts >= end_row
class HuntViewTabs(renderers.TabLayout):
"""Show a tabset to inspect the selected hunt.
Listening Javascript Events:
- file_select(aff4_path, age) - A selection event on the hunt table
informing us of a new hunt to show. We redraw the entire bottom right
side using a new renderer.
"""
names = ["Overview", "Log", "Errors", "Graph", "Results", "Stats",
"Crashes", "Outstanding", "Context Detail"]
delegated_renderers = ["HuntOverviewRenderer", "HuntLogRenderer",
"HuntErrorRenderer",
"HuntClientGraphRenderer", "HuntResultsRenderer",
"HuntStatsRenderer", "HuntCrashesRenderer",
"HuntOutstandingRenderer", "HuntContextView"]
empty_template = renderers.Template("""
<div class="padded" id="{{unique|escape}}">
<p>Please select a hunt to see its details here.</p>
</div>
""")
post_parameters = ["hunt_id"]
def Layout(self, request, response):
hunt_id = request.REQ.get("hunt_id")
if hunt_id:
response = super(HuntViewTabs, self).Layout(request, response)
else:
response = super(HuntViewTabs, self).Layout(
request, response, apply_template=self.empty_template)
return self.CallJavascript(response, "HuntViewTabs.Layout")
class ManageHuntsClientView(renderers.Splitter2Way):
"""Manages the clients involved in a hunt."""
description = "Hunt Client View"
top_renderer = "HuntClientTableRenderer"
bottom_renderer = "HuntClientViewTabs"
class ResourceRenderer(semantic.RDFValueRenderer):
"""Renders resource usage as meters."""
cls = "vertical_aligned"
layout_template = renderers.Template(
"<div>"
"<meter value=\"{{this.proxy|escape}}\"></meter>"
"</div>")
class FloatRenderer(semantic.RDFValueRenderer):
layout_template = renderers.Template("{{this.value|escape}}")
def Layout(self, request, response):
if self.proxy is None:
self.value = "0.0"
else:
self.value = "%.2f" % self.proxy
super(FloatRenderer, self).Layout(request, response)
class HuntClientTableRenderer(fileview.AbstractFileTable):
"""Displays the clients."""
selection_publish_queue = "hunt_client_select"
layout_template = """
{{this.title|escape}}
<a id="backlink_{{unique|escape}}" href='#{{this.hash|escape}}'>
back to hunt view</a>
<span class='pull-right'> Filter by State
<select id='{{unique|escape}}_select'>
<option>ALL</option>
<option>OUTSTANDING</option>
<option>COMPLETED</option>
<option>BAD</option>
</select>
</span>
""" + fileview.AbstractFileTable.layout_template
post_parameters = ["hunt_id"]
def __init__(self, **kwargs):
super(HuntClientTableRenderer, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn(
"Client ID", width="20%", renderer=semantic.SubjectRenderer))
self.AddColumn(semantic.RDFValueColumn("Hostname", width="10%"))
self.AddColumn(semantic.RDFValueColumn("Status", width="10%"))
self.AddColumn(semantic.RDFValueColumn("User CPU seconds", width="10%",
renderer=FloatRenderer))
self.AddColumn(semantic.RDFValueColumn("System CPU seconds", width="10%",
renderer=FloatRenderer))
self.AddColumn(semantic.RDFValueColumn("CPU",
renderer=ResourceRenderer,
width="10%"))
self.AddColumn(semantic.RDFValueColumn("Network bytes sent", width="10%"))
self.AddColumn(semantic.RDFValueColumn("Network",
renderer=ResourceRenderer,
width="10%"))
self.AddColumn(semantic.RDFValueColumn("Last Checkin", width="10%"))
def Layout(self, request, response):
"""Ensure our hunt is in our state for HTML layout."""
hunt_id = request.REQ.get("hunt_id")
self.title = "Viewing Hunt %s" % hunt_id
h = dict(main="ManageHunts", hunt_id=hunt_id)
self.hunt_hash = urllib.urlencode(sorted(h.items()))
response = super(HuntClientTableRenderer, self).Layout(request, response)
return self.CallJavascript(response, "HuntClientTableRenderer.Layout",
hunt_hash=self.hunt_hash)
def BuildTable(self, start_row, end_row, request):
"""Called to fill in the data in the table."""
hunt_id = request.REQ.get("hunt_id")
completion_status_filter = request.REQ.get("completion_status", "ALL")
if hunt_id is None:
return
try:
self.hunt = aff4.FACTORY.Open(hunt_id, token=request.token,
aff4_type="GRRHunt")
except IOError:
logging.error("Invalid hunt %s", hunt_id)
return
# TODO(user): enable per-client resource usage display.
resource_usage = {}
resource_max = [0, 0, 0]
for resource in resource_usage.values():
for i in range(3):
if resource_max[i] < resource[i]:
resource_max[i] = resource[i]
results = {}
for status, client_list in self.hunt.GetClientsByStatus().items():
if (completion_status_filter == "ALL" or
status == completion_status_filter):
for client in client_list:
results[client] = status
# Get the list of clients and sort so that we can page accurately.
client_list = results.keys()
client_list.sort()
client_list = client_list[start_row:end_row]
row_index = start_row
for c_urn, cdict in self.hunt.GetClientStates(client_list):
row = {"Client ID": c_urn,
"Hostname": cdict.get("hostname"),
"Status": results[c_urn],
"Last Checkin": searchclient.FormatLastSeenTime(
cdict.get("age") or 0),
}
client_id = c_urn.Basename()
if client_id in resource_usage:
usage = resource_usage[client_id]
row["User CPU seconds"] = usage[0]
row["System CPU seconds"] = usage[1]
row["Network bytes sent"] = usage[2]
usage_percent = []
for i in range(3):
if resource_max[i]:
usage_percent.append(round(usage[i], 2) / resource_max[i])
else:
usage_percent.append(0.0)
row["CPU"] = usage_percent[0]
row["Network"] = usage_percent[2]
else:
row["User CPU seconds"] = 0
row["System CPU seconds"] = 0
row["Network bytes sent"] = 0
row["CPU"] = 0
row["Network"] = 0
self.AddRow(row, row_index)
row_index += 1
self.size = len(results)
class AbstractLogRenderer(renderers.TemplateRenderer):
  """Render a page for viewing a log file.
  Implements a very simple view that will be extended with filtering
capabilities.
Implementations should implement the GetLog function.
"""
show_total_count = False
layout_template = renderers.Template("""
<table class="proto_table">
{% if this.log|length > 0 %}
{% if this.show_total_count %}
<h5>{{this.log|length}} Entries</h5>
{% endif %}
{% endif %}
{% for line in this.log %}
<tr>
{% for val in line %}
<td class="proto_key">{{ val|safe }}</td>
{% endfor %}
</tr>
{% empty %}
    <tr><td>No entries</td></tr>
  {% endfor %}
</table>
""")
def GetLog(self, request):
"""Take a request and return a list of tuples for a log."""
_ = request
return []
def Layout(self, request, response):
"""Fill in the form with the specific fields for the flow requested."""
self.log = []
for row in self.GetLog(request):
rendered_row = []
for item in row:
item_renderer = semantic.FindRendererForObject(item)
rendered_row.append(item_renderer.RawHTML(request))
self.log.append(rendered_row)
return super(AbstractLogRenderer, self).Layout(request, response)
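# Minimal concrete sketch (illustrative only): a subclass just overrides
# GetLog() to return rows of renderable items; Layout() above then finds a
# semantic renderer for each cell automatically.
class StaticLogRenderer(AbstractLogRenderer):
  """Illustrative sketch rendering a fixed, hard-coded log."""
  def GetLog(self, request):
    _ = request
    return [("2014-01-01 00:00", "hunt started"),
            ("2014-01-01 00:05", "first client checked in")]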
class HuntOverviewRenderer(AbstractLogRenderer):
"""Renders the overview tab."""
# Will be retrieved from request.REQ if not set.
hunt_id = None
layout_template = renderers.Template("""
<a id="ViewHuntDetails_{{unique}}" href='#{{this.hash|escape}}'
onclick='grr.loadFromHash("{{this.hash|escape}}");'
class="btn btn-info">
View hunt details
</a>
<br/>
<dl class="dl-horizontal dl-hunt">
<dt>Name</dt><dd>{{ this.hunt_name|escape }}</dd>
<dt>Hunt ID</dt>
<dd>{{ this.hunt.urn.Basename|escape }}</dd>
<dt>Hunt URN</dt>
<dd>{{ this.hunt.urn|escape }}</dd>
<dt>Creator</dt>
<dd>{{ this.hunt_creator|escape }}</dd>
<dt>Client Limit</dt>
{% if this.client_limit == 0 %}
<dd>None</dd>
{% else %}
<dd>{{ this.client_limit|escape }}</dd>
{% endif %}
<dt>Client Rate (clients/min)</dt>
{% if this.client_rate == 0.0 %}
<dd>No rate limit</dd>
{% else %}
<dd>{{ this.client_rate|escape }}</dd>
{% endif %}
<dt>Clients Scheduled</dt>
<dd>{{ this.all_clients_count|escape }}</dd>
<dt>Outstanding</dt>
<dd>{{ this.outstanding_clients_count|escape }}</dd>
<dt>Completed</dt>
<dd>{{ this.completed_clients_count|escape }}</dd>
<dt>Total CPU seconds used</dt>
<dd>{{ this.cpu_sum|escape }}</dd>
<dt>Total network traffic</dt>
<dd>{{ this.net_sum|filesizeformat }}</dd>
<dt>Regex Rules</dt>
<dd>{{ this.regex_rules|safe }}</dd>
<dt>Integer Rules</dt>
<dd>{{ this.integer_rules|safe }}</dd>
<dt>Arguments</dt><dd>{{ this.args_str|safe }}</dd>
{% for key, val in this.data.items %}
<dt>{{ key|escape }}</dt><dd>{{ val|escape }}</dd>
{% endfor %}
</dl>
""")
error_template = renderers.Template(
"No information available for this Hunt.")
ajax_template = renderers.Template("""
<div id="RunHuntResult_{{unique|escape}}"></div>
""")
def RenderAjax(self, request, response):
self.hunt_id = request.REQ.get("hunt_id")
self.subject = rdfvalue.RDFURN(self.hunt_id)
response = renderers.TemplateRenderer.Layout(
self, request, response, apply_template=self.ajax_template)
return self.CallJavascript(response, "HuntOverviewRenderer.RenderAjax",
subject=self.subject, hunt_id=self.hunt_id)
def Layout(self, request, response):
"""Display the overview."""
if not self.hunt_id:
self.hunt_id = request.REQ.get("hunt_id")
h = dict(main="ManageHuntsClientView", hunt_id=self.hunt_id)
self.hash = urllib.urlencode(sorted(h.items()))
self.data = {}
self.args_str = ""
if self.hunt_id:
try:
self.hunt = aff4.FACTORY.Open(self.hunt_id, aff4_type="GRRHunt",
token=request.token)
if self.hunt.state.Empty():
raise IOError("No valid state could be found.")
hunt_stats = self.hunt.state.context.usage_stats
self.cpu_sum = "%.2f" % hunt_stats.user_cpu_stats.sum
self.net_sum = hunt_stats.network_bytes_sent_stats.sum
(self.all_clients_count,
self.completed_clients_count, _) = self.hunt.GetClientsCounts()
self.outstanding_clients_count = (self.all_clients_count -
self.completed_clients_count)
runner = self.hunt.GetRunner()
self.hunt_name = runner.args.hunt_name
self.hunt_creator = runner.context.creator
self.data = py_collections.OrderedDict()
self.data["Start Time"] = runner.context.start_time
self.data["Expiry Time"] = runner.context.expires
self.data["Status"] = self.hunt.Get(self.hunt.Schema.STATE)
self.client_limit = runner.args.client_limit
self.client_rate = runner.args.client_rate
self.args_str = renderers.DictRenderer(
self.hunt.state, filter_keys=["context"]).RawHTML(request)
if runner.args.regex_rules:
self.regex_rules = foreman.RegexRuleArray(
runner.args.regex_rules).RawHTML(request)
else:
self.regex_rules = "None"
if runner.args.integer_rules:
self.integer_rules = foreman.IntegerRuleArray(
runner.args.integer_rules).RawHTML(request)
else:
self.integer_rules = "None"
except IOError:
self.layout_template = self.error_template
return super(AbstractLogRenderer, self).Layout(request, response)
class HuntContextView(renderers.TemplateRenderer):
  """Render the hunt context."""
layout_template = renderers.Template("""
{{this.args_str|safe}}
""")
def Layout(self, request, response):
"""Display hunt's context presented as dict."""
if not hasattr(self, "hunt_id"):
self.hunt_id = request.REQ.get("hunt_id")
self.hunt = aff4.FACTORY.Open(self.hunt_id, aff4_type="GRRHunt",
token=request.token)
if self.hunt.state.Empty():
raise IOError("No valid state could be found.")
self.args_str = renderers.DictRenderer(
self.hunt.state.context).RawHTML(request)
return super(HuntContextView, self).Layout(request, response)
class HuntLogRenderer(renderers.AngularDirectiveRenderer):
directive = "grr-hunt-log"
def Layout(self, request, response):
self.directive_args = {}
self.directive_args["hunt-urn"] = request.REQ.get("hunt_id")
return super(HuntLogRenderer, self).Layout(request, response)
class HuntErrorRenderer(renderers.AngularDirectiveRenderer):
directive = "grr-hunt-errors"
def Layout(self, request, response):
self.directive_args = {}
self.directive_args["hunt-urn"] = request.REQ.get("hunt_id")
return super(HuntErrorRenderer, self).Layout(request, response)
class HuntClientViewTabs(renderers.TabLayout):
"""Show a tabset to inspect the selected client of the selected hunt."""
names = ["Status", "Hunt Log", "Hunt Errors", "Client Detail"]
delegated_renderers = ["HuntClientOverviewRenderer", "HuntLogRenderer",
"HuntErrorRenderer", "HuntHostInformationRenderer"]
post_parameters = ["hunt_id", "hunt_client"]
def Layout(self, request, response):
response = super(HuntClientViewTabs, self).Layout(request, response)
return self.CallJavascript(response, "HuntClientViewTabs.Layout",
hunt_id=self.state["hunt_id"])
class HuntClientOverviewRenderer(renderers.TemplateRenderer):
"""Renders the Client Hunt Overview tab."""
layout_template = renderers.Template("""
<a href='#{{this.hash|escape}}' onclick='grr.loadFromHash(
  "{{this.hash|escape}}");'>
Go to client {{ this.client.urn|escape }}
</a>
<table class="proto_table">
<tr><td class="proto_key">Last Checkin</td>
<td>{{ this.last_checkin|escape }}</td>
</table>
""")
def Layout(self, request, response):
"""Display the overview."""
hunt_id = request.REQ.get("hunt_id")
hunt_client = request.REQ.get("hunt_client")
if hunt_id is not None and hunt_client is not None:
try:
self.client = aff4.FACTORY.Open(hunt_client, token=request.token,
aff4_type="VFSGRRClient")
self.last_checkin = rdfvalue.RDFDatetime(
self.client.Get(self.client.Schema.PING))
h = dict(main="HostInformation", c=self.client.client_id)
self.hash = urllib.urlencode(sorted(h.items()))
except IOError as e:
logging.error("Attempt to open client %s. Err %s", hunt_client, e)
return super(HuntClientOverviewRenderer, self).Layout(request, response)
class HuntClientGraphRenderer(renderers.TemplateRenderer):
"""Renders the button to download a hunt graph."""
layout_template = renderers.Template("""
{% if this.clients %}
<button id="{{ unique|escape }}">
Generate
</button>
{% else %}
No data to graph yet.
{% endif %}
""")
def Layout(self, request, response):
self.hunt_id = request.REQ.get("hunt_id")
hunt = aff4.FACTORY.Open(self.hunt_id, token=request.token)
all_count, _, _ = hunt.GetClientsCounts()
self.clients = bool(all_count)
response = super(HuntClientGraphRenderer, self).Layout(request, response)
return self.CallJavascript(response, "HuntClientGraphRenderer.Layout",
hunt_id=self.hunt_id)
class HuntClientCompletionGraphRenderer(renderers.ImageDownloadRenderer):
def Content(self, request, _):
"""Generates the actual image to display."""
hunt_id = request.REQ.get("hunt_id")
hunt = aff4.FACTORY.Open(hunt_id, aff4_type="GRRHunt", token=request.token)
clients_by_status = hunt.GetClientsByStatus()
cl = clients_by_status["STARTED"]
fi = clients_by_status["COMPLETED"]
cdict = {}
for c in cl:
cdict.setdefault(c, []).append(c.age)
fdict = {}
for c in fi:
fdict.setdefault(c, []).append(c.age)
cl_age = [int(min(x)/1e6) for x in cdict.values()]
fi_age = [int(min(x)/1e6) for x in fdict.values()]
cl_hist = {}
fi_hist = {}
for age in cl_age:
cl_hist.setdefault(age, 0)
cl_hist[age] += 1
for age in fi_age:
fi_hist.setdefault(age, 0)
fi_hist[age] += 1
t0 = min(cl_age) - 1
times = [t0]
cl = [0]
fi = [0]
all_times = set(cl_age) | set(fi_age)
cl_count = 0
fi_count = 0
for time in sorted(all_times):
# Check if there is a datapoint one second earlier, add one if not.
if times[-1] != time-1:
times.append(time)
cl.append(cl_count)
fi.append(fi_count)
cl_count += cl_hist.get(time, 0)
fi_count += fi_hist.get(time, 0)
times.append(time)
cl.append(cl_count)
fi.append(fi_count)
# Convert to hours, starting from 0.
times = [(t-t0)/3600.0 for t in times]
params = {"backend": "png"}
plot_lib.plt.rcParams.update(params)
plot_lib.plt.figure(1)
plot_lib.plt.clf()
plot_lib.plt.plot(times, cl, label="Agents issued.")
plot_lib.plt.plot(times, fi, label="Agents completed.")
plot_lib.plt.title("Agent Coverage")
plot_lib.plt.xlabel("Time (h)")
plot_lib.plt.ylabel(r"Agents")
plot_lib.plt.grid(True)
plot_lib.plt.legend(loc=4)
buf = StringIO.StringIO()
plot_lib.plt.savefig(buf)
buf.seek(0)
return buf.read()
class HuntHostInformationRenderer(fileview.AFF4Stats):
"""Modified HostInformation that reads from hunt_client variable."""
description = "Hunt Client Host Information"
css_class = "TableBody"
attributes_to_show = ["USERNAMES", "HOSTNAME", "MAC_ADDRESS", "INSTALL_DATE",
"SYSTEM", "CLOCK", "CLIENT_INFO"]
def Layout(self, request, response):
"""Produce a summary of the client information."""
client_id = request.REQ.get("hunt_client")
if client_id:
super(HuntHostInformationRenderer, self).Layout(
request, response, client_id=client_id,
aff4_path=rdfvalue.ClientURN(client_id),
age=aff4.ALL_TIMES)
class OutputPluginNoteRenderer(renderers.TemplateRenderer):
"""Baseclass for renderers who render output-plugin-specific notes."""
# Name of the output plugin class that this class should deal with.
for_output_plugin = None
def __init__(self, plugin_def=None, plugin_state=None, **kwargs):
super(OutputPluginNoteRenderer, self).__init__(**kwargs)
if plugin_def is None:
raise ValueError("plugin_def can't be None")
if plugin_state is None:
raise ValueError("plugin_state can't be None")
self.plugin_def = plugin_def
self.plugin_state = plugin_state
class CSVOutputPluginNoteRenderer(OutputPluginNoteRenderer):
"""Note renderer for CSV output plugin."""
for_output_plugin = "CSVOutputPlugin"
layout_template = renderers.Template("""
{% if this.output_urns %}
<div id="{{unique|escape}}" class="well well-small csv-output-note">
<p>CSV output plugin writes to the following files
(last update on {{this.plugin_state.last_updated|escape}}):<br/>
{% for output_urn in this.output_urns %}
<a href="#" aff4_path="{{output_urn}}">{{output_urn|escape}}</a><br/>
{% endfor %}
</p>
</div>
{% endif %}
""")
def Layout(self, request, response):
self.output_urns = []
for output_file in self.plugin_state.files_by_type.values():
self.output_urns.append(output_file.urn)
response = super(CSVOutputPluginNoteRenderer, self).Layout(request,
response)
return self.CallJavascript(response, "CSVOutputPluginNoteRenderer.Layout")
class HuntResultsRenderer(semantic.RDFValueCollectionRenderer):
"""Displays a collection of hunt's results."""
layout_template = renderers.Template("""
{% for output_plugin_note in this.output_plugins_notes %}
{{output_plugin_note|safe}}
{% endfor %}
{% if this.exportable_results %}
<div id="generate_archive_{{unique|escape}}" class="well well-small">
<div class="export_tar pull-left">
Results of this hunt can be downloaded as an archive:
<div class="btn-group">
<button name="generate_tar" class="btn btn-default DownloadButton">
Generate TAR.GZ
</button>
<button class="btn btn-default dropdown-toggle" data-toggle="dropdown">
<span class="caret"></span>
</button>
<ul class="dropdown-menu">
<li><a name="generate_zip" href="#">Generate ZIP</a></li>
</ul>
</div>
</div>
<div class="export_zip pull-left">
Results of this hunt can be downloaded as an archive:
<div class="btn-group">
<button class="btn btn-default DownloadButton" name="generate_zip">
Generate ZIP
</button>
<button class="btn btn-default dropdown-toggle" data-toggle="dropdown">
<span class="caret"></span>
</button>
<ul class="dropdown-menu">
<li><a name="generate_tar" href="#">Generate TAR.GZ</a></li>
</ul>
</div>
</div>
<div class="pull-right">
<em>NOTE: generated archive will contain <strong>symlinks</strong>.<br/>
Unsure whether your archive utility supports them?<br/>
Just unpack the archive before browsing its contents.</em>
</div>
<div class="clearfix"></div>
</div>
<div id='generate_action_{{unique|escape}}'></div>
{% endif %}
""") + semantic.RDFValueCollectionRenderer.layout_template
error_template = renderers.Template("""
<p>This hunt hasn't stored any results yet.</p>
""")
context_help_url = "user_manual.html#_exporting_a_collection"
def Layout(self, request, response):
"""Layout the hunt results."""
hunt_id = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
hunt = aff4.FACTORY.Open(hunt_id, token=request.token)
metadata_urn = hunt.urn.Add("ResultsMetadata")
metadata = aff4.FACTORY.Create(
metadata_urn, aff4_type="HuntResultsMetadata", mode="r",
token=request.token)
output_plugins = metadata.Get(metadata.Schema.OUTPUT_PLUGINS)
self.output_plugins_notes = []
for _, (plugin_def, plugin_state) in output_plugins.iteritems():
plugin_name = plugin_def.plugin_name
for renderer_class in renderers.Renderer.classes.values():
if getattr(renderer_class, "for_output_plugin", None) == plugin_name:
renderer = renderer_class(plugin_def=plugin_def,
plugin_state=plugin_state)
self.output_plugins_notes.append(renderer.RawHTML(request))
export_view = renderers.CollectionExportView
self.exportable_results = export_view.IsCollectionExportable(
hunt.state.context.results_collection_urn,
token=request.token)
# In this renderer we show hunt results stored in the results collection.
response = super(HuntResultsRenderer, self).Layout(
request, response,
aff4_path=hunt.GetRunner().context.results_collection_urn)
return self.CallJavascript(response, "HuntResultsRenderer.Layout",
exportable_results=self.exportable_results,
hunt_id=hunt_id)
class HuntGenerateResultsArchive(renderers.TemplateRenderer):
layout_template = renderers.Template("""
<div class="alert alert-success">
<em>Generation has started. An email will be sent upon completion.</em>
</div>
""")
def Layout(self, request, response):
"""Start the flow to generate zip file."""
hunt_id = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
archive_format = utils.SmartStr(request.REQ.get("format"))
if (archive_format not in
rdfvalue.ExportHuntResultsFilesAsArchiveArgs.ArchiveFormat.enum_dict):
raise ValueError("Invalid format: %s.", format)
urn = flow.GRRFlow.StartFlow(flow_name="ExportHuntResultFilesAsArchive",
hunt_urn=hunt_id, format=archive_format,
token=request.token)
logging.info("Generating %s results for %s with flow %s.", format,
hunt_id, urn)
return super(HuntGenerateResultsArchive, self).Layout(request, response)
class HuntStatsRenderer(renderers.TemplateRenderer):
"""Display hunt's resources usage stats."""
layout_template = renderers.Template("""
<h3>Total number of clients: {{this.stats.user_cpu_stats.num|escape}}</h3>
<h3>User CPU</h3>
<dl class="dl-horizontal">
<dt>User CPU mean</dt>
<dd>{{this.stats.user_cpu_stats.mean|floatformat}}</dd>
<dt>User CPU stdev</dt>
<dd>{{this.stats.user_cpu_stats.std|floatformat}}</dd>
<dt>Clients Histogram</dt>
<dd class="histogram">
<div id="user_cpu_{{unique|escape}}"></div>
</dd>
</dl>
<h3>System CPU</h3>
<dl class="dl-horizontal">
<dt>System CPU mean</dt>
<dd>{{this.stats.system_cpu_stats.mean|floatformat}}</dd>
<dt>System CPU stdev</dt>
<dd>{{this.stats.system_cpu_stats.std|floatformat}}</dd>
<dt>Clients Histogram</dt>
<dd class="histogram">
<div id="system_cpu_{{unique|escape}}"></div>
</dd>
</dl>
<h3>Network bytes sent</h3>
<dl class="dl-horizontal">
<dt>Network bytes sent mean</dt>
<dd>{{this.stats.network_bytes_sent_stats.mean|floatformat}}</dd>
<dt>Network bytes sent stdev</dt>
<dd>{{this.stats.network_bytes_sent_stats.std|floatformat}}</dd>
<dt>Clients Histogram</dt>
<dd class="histogram">
<div id="network_bytes_sent_{{unique|escape}}"></div>
</dd>
</dl>
<h3>Worst performers</h3>
<div class="row">
<div class="col-md-8">
<table id="performers_{{unique|escape}}"
class="table table-condensed table-striped table-bordered">
<thead>
<th>Client Id</th>
<th>User CPU</th>
<th>System CPU</th>
<th>Network bytes sent</th>
</thead>
<tbody>
{% for r in this.stats.worst_performers %}
<tr>
<td>{{r.client_html|safe}}</td>
<td>{{r.cpu_usage.user_cpu_time|floatformat}}</td>
<td>{{r.cpu_usage.system_cpu_time|floatformat}}</td>
<td>{{r.network_bytes_sent|escape}}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
""")
error_template = renderers.Template(
"No information available for this Hunt.")
def _HistogramToJSON(self, histogram):
hist_data = [(b.range_max_value, b.num) for b in histogram.bins]
return renderers.JsonDumpForScriptContext(hist_data)
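# For example (illustrative only): bins with (range_max_value, num) pairs of
# (1.0, 3) and (2.0, 5) serialize to the JSON array [[1.0, 3], [2.0, 5]].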
def Layout(self, request, response):
"""Layout the HuntStatsRenderer data."""
hunt_id = request.REQ.get("hunt_id")
if hunt_id:
try:
hunt = aff4.FACTORY.Open(hunt_id,
aff4_type="GRRHunt",
token=request.token)
if hunt.state.Empty():
raise IOError("No valid state could be found.")
self.stats = hunt.state.context.usage_stats
for item in self.stats.worst_performers:
renderer = semantic.FindRendererForObject(item.client_id)
item.client_html = renderer.RawHTML()
self.user_cpu_json_data = self._HistogramToJSON(
self.stats.user_cpu_stats.histogram)
self.system_cpu_json_data = self._HistogramToJSON(
    self.stats.system_cpu_stats.histogram)
self.network_bytes_sent_json_data = self._HistogramToJSON(
self.stats.network_bytes_sent_stats.histogram)
response = super(HuntStatsRenderer, self).Layout(request, response)
return self.CallJavascript(
response, "HuntStatsRenderer.Layout",
user_cpu_json_data=self.user_cpu_json_data,
system_cpu_json_data=self.system_cpu_json_data,
network_bytes_sent_json_data=self.network_bytes_sent_json_data)
except IOError:
self.layout_template = self.error_template
return super(HuntStatsRenderer, self).Layout(request, response)
class HuntCrashesRenderer(crash_view.ClientCrashCollectionRenderer):
"""View launched flows in a tree."""
def Layout(self, request, response):
hunt_id = request.REQ.get("hunt_id")
self.crashes_urn = rdfvalue.RDFURN(hunt_id).Add("crashes")
super(HuntCrashesRenderer, self).Layout(request, response)
class HuntOutstandingRenderer(renderers.TableRenderer):
"""A renderer that shows debug information for outstanding clients."""
post_parameters = ["hunt_id"]
def __init__(self, **kwargs):
super(HuntOutstandingRenderer, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn("Client"))
self.AddColumn(semantic.RDFValueColumn("Flow"))
self.AddColumn(semantic.RDFValueColumn("Incomplete Request #"))
self.AddColumn(semantic.RDFValueColumn("State"))
self.AddColumn(semantic.RDFValueColumn("Args Expected"))
self.AddColumn(semantic.RDFValueColumn("Available Responses"))
self.AddColumn(semantic.RDFValueColumn("Status"))
self.AddColumn(semantic.RDFValueColumn("Expected Responses"))
self.AddColumn(semantic.RDFValueColumn("Client Requests Pending"))
def GetClientRequests(self, client_urns, token):
"""Returns all client requests for the given client urns."""
task_urns = [urn.Add("tasks") for urn in client_urns]
client_requests_raw = data_store.DB.MultiResolveRegex(task_urns, "task:.*",
token=token)
client_requests = {}
for client_urn, requests in client_requests_raw:
client_id = str(client_urn)[6:6+18]
client_requests.setdefault(client_id, [])
for _, serialized, _ in requests:
client_requests[client_id].append(rdfvalue.GrrMessage(serialized))
return client_requests
def GetAllSubflows(self, hunt_urn, client_urns, token):
"""Lists all subflows for a given hunt for all clients in client_urns."""
client_ids = [urn.Split()[0] for urn in client_urns]
client_bases = [hunt_urn.Add(client_id) for client_id in client_ids]
all_flows = []
act_flows = client_bases
while act_flows:
next_flows = []
for _, children in aff4.FACTORY.MultiListChildren(act_flows, token=token):
for flow_urn in children:
next_flows.append(flow_urn)
all_flows.extend(next_flows)
act_flows = next_flows
return all_flows
def GetFlowRequests(self, flow_urns, token):
"""Returns all outstanding requests for the flows in flow_urns."""
flow_requests = {}
flow_request_urns = [flow_urn.Add("state") for flow_urn in flow_urns]
for flow_urn, values in data_store.DB.MultiResolveRegex(
flow_request_urns, "flow:.*", token=token):
for subject, serialized, _ in values:
try:
if "status" in subject:
msg = rdfvalue.GrrMessage(serialized)
else:
msg = rdfvalue.RequestState(serialized)
except Exception as e: # pylint: disable=broad-except
logging.warn("Error while parsing: %s", e)
continue
flow_requests.setdefault(flow_urn, []).append(msg)
return flow_requests
def BuildTable(self, start_row, end_row, request):
"""Renders the table."""
hunt_id = request.REQ.get("hunt_id")
token = request.token
if hunt_id is None:
return
hunt_id = rdfvalue.RDFURN(hunt_id)
hunt = aff4.FACTORY.Open(hunt_id, aff4_type="GRRHunt", age=aff4.ALL_TIMES,
token=token)
clients_by_status = hunt.GetClientsByStatus()
outstanding = clients_by_status["OUTSTANDING"]
self.size = len(outstanding)
outstanding = sorted(outstanding)[start_row:end_row]
all_flow_urns = self.GetAllSubflows(hunt_id, outstanding, token)
flow_requests = self.GetFlowRequests(all_flow_urns, token)
try:
client_requests = self.GetClientRequests(outstanding, token)
except access_control.UnauthorizedAccess:
client_requests = None
waitingfor = {}
status_by_request = {}
for flow_urn in flow_requests:
for obj in flow_requests[flow_urn]:
if isinstance(obj, rdfvalue.RequestState):
waitingfor.setdefault(flow_urn, obj)
if waitingfor[flow_urn].id > obj.id:
waitingfor[flow_urn] = obj
elif isinstance(obj, rdfvalue.GrrMessage):
status_by_request.setdefault(flow_urn, {})[obj.request_id] = obj
response_urns = []
for request_base_urn, request in waitingfor.iteritems():
response_urns.append(rdfvalue.RDFURN(request_base_urn).Add(
"request:%08X" % request.id))
response_dict = dict(data_store.DB.MultiResolveRegex(
response_urns, "flow:.*", token=token))
row_index = start_row
for flow_urn in sorted(all_flow_urns):
request_urn = flow_urn.Add("state")
client_id = flow_urn.Split()[2]
try:
request_obj = waitingfor[request_urn]
response_urn = rdfvalue.RDFURN(request_urn).Add(
"request:%08X" % request_obj.id)
responses_available = len(response_dict.setdefault(response_urn, []))
status_available = "No"
responses_expected = "Unknown"
if request_obj.id in status_by_request.setdefault(request_urn, {}):
status_available = "Yes"
status = status_by_request[request_urn][request_obj.id]
responses_expected = status.response_id
if client_requests is None:
client_requests_available = "Must use raw access."
else:
client_requests_available = 0
for client_req in client_requests.setdefault(client_id, []):
if request_obj.request.session_id == client_req.session_id:
client_requests_available += 1
row_data = {
"Client": client_id,
"Flow": flow_urn,
"Incomplete Request #": request_obj.id,
"State": request_obj.next_state,
"Args Expected": request_obj.request.args_rdf_name,
"Available Responses": responses_available,
"Status": status_available,
"Expected Responses": responses_expected,
"Client Requests Pending": client_requests_available}
except KeyError:
row_data = {
"Client": client_id,
"Flow": flow_urn,
"Incomplete Request #": "No request found"}
self.AddRow(row_data, row_index=row_index)
row_index += 1
|
|
# Hubs borrow heavily from django-organization
from django.conf import settings
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from django_extensions.db.models import TimeStampedModel
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class HubQueryset(models.query.QuerySet):
def enabled(self):
return self.filter(is_enabled=True)
def disabled(self):
return self.filter(is_enabled=False)
def private(self):
return self.filter(is_private=True)
def public(self):
return self.filter(is_enabled=True, is_private=False)
def get_for_user(self, user):
return self.filter(users=user)
class HubManager(models.Manager):
def get_query_set(self):
return HubQueryset(self.model, using=self._db)
def get_for_user(self, user):
return self.get_query_set().get_for_user(user)
class PublicHubManager(HubManager):
"""
A more useful extension of the default manager which returns querysets
including only enabled and public hubs
"""
def get_query_set(self):
return super(
PublicHubManager, self).get_query_set().public()
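# A minimal usage sketch (illustrative only) of the two managers attached to
# Hub below:
#   Hub.objects.get_for_user(user)  # every hub the user belongs to
#   Hub.public.all()                # only enabled, non-private hubs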
class Hub(TimeStampedModel):
"""
Handles relations to different users and their shared tools.
A hub can have multiple users, but only one can be designated
the owner.
"""
name = models.CharField(
max_length=200,
help_text=_("The name of the hub"))
slug = AutoSlugField(
max_length=200, blank=False, editable=True, populate_from='name',
unique=True)
users = models.ManyToManyField(USER_MODEL, through="HubUser")
is_enabled = models.BooleanField(default=True)
is_private = models.BooleanField(default=False)
objects = HubManager()
public = PublicHubManager()
class Meta:
ordering = ['name']
verbose_name = _("hub")
verbose_name_plural = _("hubs")
def __unicode__(self):
return self.name
# TODO: add more permalinks to be used throughout the templates
@permalink
def get_absolute_url(self):
return ('hubs:detail', (), {'hub_slug': self.slug})
@permalink
def get_edit_url(self):
return ('hubs:edit', (), {'hub_slug': self.slug})
def add_user(self, user, is_admin=False):
"""
Adds a new user and, if they are the first user, makes them an
admin and the owner. """
users_count = self.user_count
if users_count == 0:
is_admin = True
hub_user = HubUser.objects.create(
user=user, hub=self, is_admin=is_admin)
if users_count == 0:
HubOwner.objects.create(
hub=self, hub_user=hub_user)
return hub_user
def get_or_add_user(self, user, is_admin=False):
"""
Adds a new user to the hub, and if it's the first user makes
the user an admin and the owner. Uses the `get_or_create` method to
create or return the existing user.
`user` should be a user instance, e.g. `auth.User`.
Returns the same tuple as the `get_or_create` method, the
`HubUser` and a boolean value indicating whether the
HubUser was created or not.
"""
users_count = self.user_count
if users_count == 0:
is_admin = True
hub_user, created = HubUser.objects.get_or_create(
hub=self, user=user, defaults={'is_admin': is_admin})
if users_count == 0:
HubOwner.objects.create(
hub=self, hub_user=hub_user)
return hub_user, created
@property
def user_count(self):
return self.users.all().count()
def is_member(self, user):
return self.hub_users.filter(user=user).exists()
def is_admin(self, user):
return self.hub_users.filter(
    user=user, is_admin=True).exists()
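# A minimal sketch (illustrative only; assumes saved `hub` and `auth.User`
# instances): the first user added becomes the admin and the owner.
def _example_hub_membership(hub, alice, bob):
    owner_link = hub.add_user(alice)            # alice: admin, HubOwner created
    member, created = hub.get_or_add_user(bob)  # bob: regular member
    return owner_link, member, created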
class HubUser(TimeStampedModel):
"""
ManyToMany through field relating Users to Hub.
It is possible for a User to be a member of multiple hubs, so this
class relates the HubUser to the User model using a ForeignKey
relationship, rather than a OneToOne relationship.
Authentication and general user information is handled by the User class
and the contrib.auth application.
"""
user = models.ForeignKey(USER_MODEL, related_name="hub_users")
hub = models.ForeignKey(Hub, related_name="hub_users")
is_admin = models.BooleanField(default=False)
class Meta:
ordering = ['hub', 'user']
unique_together = ('user', 'hub')
verbose_name = _("hub user")
verbose_name_plural = _("hub users")
def __unicode__(self):
return u"%s (%s)" % (self.name if self.user.is_active else (
self.user.email), self.hub.name)
def delete(self, using=None):
"""
If the hub user is also the owner, this should not be deleted
unless it's part of a cascade from the Hub.
If there is no owner then the deletion should proceed.
"""
from .exceptions import OwnershipRequired
try:
if self.hub.owner.hub_user.id == self.id:
raise OwnershipRequired(_("Cannot delete hub owner before"
" hub or transferring ownership."))
except HubOwner.DoesNotExist:
pass
super(HubUser, self).delete(using=using)
@permalink
def get_absolute_url(self):
return ('hubs:user_detail', (),
{'hub_slug': self.hub.slug,
'user_username': self.user.username})
@property
def name(self):
if hasattr(self.user, 'get_full_name'):
name = self.user.get_full_name()
if name:
return name
return "%s" % self.user
class HubOwner(TimeStampedModel):
"""Each hub must have one and only one hub owner."""
hub = models.OneToOneField(Hub, related_name="owner")
hub_user = models.OneToOneField(
HubUser, related_name="owned_hub")
class Meta:
verbose_name = _("hub owner")
verbose_name_plural = _("hub owners")
def __unicode__(self):
return u"%s: %s" % (self.hub, self.hub_user)
def save(self, *args, **kwargs):
"""
Extends the default save method by verifying that the chosen
hub user is associated with the hub.
"""
from .exceptions import HubMismatch
if self.hub_user.hub != self.hub:
raise HubMismatch
else:
super(HubOwner, self).save(*args, **kwargs)
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from designate.central import rpcapi
from designate import context
from designate import plugin
from designate import rpc
from designate.worker import rpcapi as worker_rpcapi
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PeriodicTask(plugin.ExtensionPlugin):
"""Abstract Producer periodic task
"""
__plugin_ns__ = 'designate.producer_tasks'
__plugin_type__ = 'producer_task'
def __init__(self):
super(PeriodicTask, self).__init__()
self.my_partitions = None
@property
def central_api(self):
return rpcapi.CentralAPI.get_instance()
@property
def worker_api(self):
return worker_rpcapi.WorkerAPI.get_instance()
@property
def zone_api(self):
return self.worker_api
def on_partition_change(self, my_partitions, members, event):
"""Refresh partitions attribute
"""
self.my_partitions = my_partitions
def _my_range(self):
"""Returns first and last partitions
"""
return self.my_partitions[0], self.my_partitions[-1]
def _filter_between(self, col):
"""Generate BETWEEN filter based on _my_range
"""
return {col: "BETWEEN %s,%s" % self._my_range()}
def _iter(self, method, *args, **kwargs):
kwargs.setdefault("limit", CONF[self.name].per_page)
while True:
items = method(*args, **kwargs)
# Stop fetching if there are no more items
if len(items) == 0:
return
else:
kwargs["marker"] = items[-1].id
for i in items:
yield i
def _iter_zones(self, ctxt, criterion=None):
criterion = criterion or {}
criterion.update(self._filter_between('shard'))
return self._iter(self.central_api.find_zones, ctxt, criterion)
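# A minimal sketch (illustrative only) of how the helpers above combine: a
# task pages through every zone in its shard range via marker pagination.
def _example_iter_shard(task, ctxt):
    criterion = task._filter_between('shard')  # e.g. {'shard': 'BETWEEN 0,511'}
    for zone in task._iter(task.central_api.find_zones, ctxt, criterion):
        yield zone.id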
class DeletedZonePurgeTask(PeriodicTask):
"""Purge deleted zones that are exceeding the grace period time interval.
Deleted zones have values in the deleted_at column.
Purging means removing them from the database entirely.
"""
__plugin_name__ = 'zone_purge'
def __init__(self):
super(DeletedZonePurgeTask, self).__init__()
def __call__(self):
"""Call the Central API to perform a purge of deleted zones based on
expiration time and sharding range.
"""
pstart, pend = self._my_range()
LOG.info(
"Performing deleted zone purging for %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
delta = datetime.timedelta(seconds=CONF[self.name].time_threshold)
time_threshold = timeutils.utcnow() - delta
LOG.debug("Filtering deleted zones before %s", time_threshold)
criterion = self._filter_between('shard')
criterion['deleted'] = '!0'
criterion['deleted_at'] = "<=%s" % time_threshold
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
self.central_api.purge_zones(
ctxt,
criterion,
limit=CONF[self.name].batch_size,
)
class PeriodicExistsTask(PeriodicTask):
__plugin_name__ = 'periodic_exists'
def __init__(self):
super(PeriodicExistsTask, self).__init__()
self.notifier = rpc.get_notifier('producer')
@staticmethod
def _get_period(seconds):
interval = datetime.timedelta(seconds=seconds)
end = timeutils.utcnow()
return end - interval, end
def __call__(self):
pstart, pend = self._my_range()
LOG.info(
"Emitting zone exist events for shards %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
start, end = self._get_period(CONF[self.name].interval)
extra_data = {
"audit_period_beginning": start,
"audit_period_ending": end
}
counter = 0
for zone in self._iter_zones(ctxt):
counter += 1
zone_data = zone.to_dict()
zone_data.update(extra_data)
self.notifier.info(ctxt, 'dns.domain.exists', zone_data)
self.notifier.info(ctxt, 'dns.zone.exists', zone_data)
LOG.info(
"Finished emitting %(counter)d events for shards "
"%(start)s to %(end)s",
{
"start": pstart,
"end": pend,
"counter": counter
})
class PeriodicSecondaryRefreshTask(PeriodicTask):
__plugin_name__ = 'periodic_secondary_refresh'
def __call__(self):
pstart, pend = self._my_range()
LOG.info(
"Refreshing zones for shards %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
# each zone can have a different refresh / expire etc interval defined
# in the SOA at the source / master servers
criterion = {
"type": "SECONDARY"
}
for zone in self._iter_zones(ctxt, criterion):
# NOTE: If the zone isn't transferred yet, ignore it.
if zone.transferred_at is None:
continue
now = timeutils.utcnow(True)
transferred = timeutils.parse_isotime(zone.transferred_at)
seconds = timeutils.delta_seconds(transferred, now)
if seconds > zone.refresh:
msg = "Zone %(id)s has %(seconds)d seconds since last " \
"transfer, executing AXFR"
LOG.debug(msg, {"id": zone.id, "seconds": seconds})
self.central_api.xfr_zone(ctxt, zone.id)
class PeriodicGenerateDelayedNotifyTask(PeriodicTask):
"""Generate delayed NOTIFY transactions
Scan the database for zones with the delayed_notify flag set.
"""
__plugin_name__ = 'delayed_notify'
def __init__(self):
super(PeriodicGenerateDelayedNotifyTask, self).__init__()
def __call__(self):
"""Fetch a list of zones with the delayed_notify flag set up to
"batch_size"
Call Worker to emit NOTIFY transactions,
Reset the flag.
"""
pstart, pend = self._my_range()
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
# Select zones where "delayed_notify" is set and starting from the
# oldest "updated_at".
# There's an index on delayed_notify.
criterion = self._filter_between('shard')
criterion['delayed_notify'] = True
zones = self.central_api.find_zones(
ctxt,
criterion,
limit=CONF[self.name].batch_size,
sort_key='updated_at',
sort_dir='asc',
)
for zone in zones:
self.zone_api.update_zone(ctxt, zone)
zone.delayed_notify = False
self.central_api.update_zone(ctxt, zone)
LOG.debug(
'Performed delayed NOTIFY for %(id)s',
{
'id': zone.id,
}
)
class WorkerPeriodicRecovery(PeriodicTask):
__plugin_name__ = 'worker_periodic_recovery'
def __call__(self):
pstart, pend = self._my_range()
LOG.info(
"Recovering zones for shards %(start)s to %(end)s",
{
"start": pstart,
"end": pend
})
ctxt = context.DesignateContext.get_admin_context()
ctxt.all_tenants = True
self.worker_api.recover_shard(ctxt, pstart, pend)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command-line interface to inspect and execute a graph in a SavedModel.
If TensorFlow is installed on your system through pip, the 'saved_model_cli'
binary can be invoked directly from command line.
At a high level, SavedModel CLI allows users to both inspect and execute
computations on a MetaGraphDef in a SavedModel. These are done through `show`
and `run` commands. Following is the usage of the two commands. SavedModel
CLI will also display this information with the -h option.
'show' command usage: saved_model_cli show [-h] --dir DIR [--tag_set TAG_SET]
[--signature_def SIGNATURE_DEF_KEY]
Examples:
To show all available tag-sets in the SavedModel:
$saved_model_cli show --dir /tmp/saved_model
To show all available SignatureDef keys in a MetaGraphDef specified by its
tag-set:
$saved_model_cli show --dir /tmp/saved_model --tag_set serve
For a MetaGraphDef with multiple tags in the tag-set, all tags must be passed
in, separated by ',':
$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu
To show all inputs and outputs TensorInfo for a specific SignatureDef specified
by the SignatureDef key in a MetaGraphDef:
$saved_model_cli show --dir /tmp/saved_model --tag_set serve
--signature_def serving_default
Example output:
The given SavedModel SignatureDef contains the following input(s):
inputs['input0'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
inputs['input1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
The given SavedModel SignatureDef contains the following output(s):
outputs['output'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
Method name is: tensorflow/serving/regress
To show all available information in the SavedModel:
$saved_model_cli show --dir /tmp/saved_model --all
'run' command usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET
--signature_def SIGNATURE_DEF_KEY --inputs INPUTS
[--outdir OUTDIR] [--overwrite]
Examples:
To run input tensors from files through a MetaGraphDef and save the output
tensors to files:
$saved_model_cli run --dir /tmp/saved_model --tag_set serve
--signature_def serving_default --inputs x:0=/tmp/124.npz,x2=/tmp/123.npy
--outdir /tmp/out
To build this tool from source, run:
$bazel build tensorflow/python/tools:saved_model_cli
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import warnings
import numpy as np
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import app
from tensorflow.python.saved_model import loader
def _show_tag_sets(saved_model_dir):
"""Prints the tag-sets stored in SavedModel directory.
Prints all the tag-sets for MetaGraphs stored in SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
tag_sets = reader.get_saved_model_tag_sets(saved_model_dir)
print('The given SavedModel contains the following tag-sets:')
for tag_set in sorted(tag_sets):
print(', '.join(sorted(tag_set)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
"""Prints the keys for each SignatureDef in the SignatureDef map.
Prints the list of SignatureDef keys from the SignatureDef map specified by
the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,
in string format, separated by ','. If a tag-set contains multiple tags,
all tags must be passed in.
"""
signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
'following keys:')
for signature_def_key in sorted(signature_def_map.keys()):
print('SignatureDef key: \"%s\"' % signature_def_key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfo for all inputs of the SignatureDef.
Returns a dictionary that maps each input key to its TensorInfo for the given
signature_def_key in the meta_graph_def.
Args:
meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
look up SignatureDef key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps input tensor keys to TensorInfos.
"""
return signature_def_utils.get_signature_def_by_key(meta_graph_def,
signature_def_key).inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfos for all outputs of the SignatureDef.
Returns a dictionary that maps each output key to its TensorInfo for the given
signature_def_key in the meta_graph_def.
Args:
meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
look up signature_def_key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps output tensor keys to TensorInfos.
"""
return signature_def_utils.get_signature_def_by_key(meta_graph_def,
signature_def_key).outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key):
"""Prints input and output TensorInfos.
Prints the details of input and output TensorInfos for the SignatureDef mapped
by the given signature_def_key.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by
','. If a tag-set contains multiple tags, all tags must be passed in.
signature_def_key: A SignatureDef key string.
"""
meta_graph_def = get_meta_graph_def(saved_model_dir, tag_set)
inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
print('The given SavedModel SignatureDef contains the following input(s):')
for input_key, input_tensor in sorted(inputs_tensor_info.items()):
print('inputs[\'%s\'] tensor_info:' % input_key)
_print_tensor_info(input_tensor)
print('The given SavedModel SignatureDef contains the following output(s):')
for output_key, output_tensor in sorted(outputs_tensor_info.items()):
print('outputs[\'%s\'] tensor_info:' % output_key)
_print_tensor_info(output_tensor)
print('Method name is: %s' %
meta_graph_def.signature_def[signature_def_key].method_name)
def _print_tensor_info(tensor_info):
"""Prints details of the given tensor_info.
Args:
tensor_info: TensorInfo object to be printed.
"""
print(' dtype: ' + types_pb2.DataType.keys()[tensor_info.dtype])
# Display shape as tuple.
if tensor_info.tensor_shape.unknown_rank:
shape = 'unknown_rank'
else:
dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim]
shape = ', '.join(dims)
shape = '(' + shape + ')'
print(' shape: ' + shape)
def _show_all(saved_model_dir):
"""Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel.
Prints all tag-set, SignatureDef and Inputs/Outputs information stored in
SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
tag_sets = reader.get_saved_model_tag_sets(saved_model_dir)
for tag_set in sorted(tag_sets):
tag_set = ', '.join(tag_set)
print('\nMetaGraphDef with tag-set: \'' + tag_set +
'\' contains the following SignatureDefs:')
signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
for signature_def_key in sorted(signature_def_map.keys()):
print('\nsignature_def[\'' + signature_def_key + '\']:')
_show_inputs_outputs(saved_model_dir, tag_set, signature_def_key)
def get_meta_graph_def(saved_model_dir, tag_set):
"""Gets MetaGraphDef from SavedModel.
Returns the MetaGraphDef for the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect or execute.
tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
separated by ','. If a tag-set contains multiple tags, all tags must be
passed in.
Raises:
RuntimeError: An error when the given tag-set does not exist in the
SavedModel.
Returns:
A MetaGraphDef corresponding to the tag-set.
"""
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set(tag_set.split(','))
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise RuntimeError('MetaGraphDef associated with tag-set ' + tag_set +
' could not be found in SavedModel')
def get_signature_def_map(saved_model_dir, tag_set):
"""Gets SignatureDef map from a MetaGraphDef in a SavedModel.
Returns the SignatureDef map for the given tag-set in the SavedModel
directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect or execute.
tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
string format, separated by ','. If a tag-set contains multiple tags, all
tags must be passed in.
Returns:
A SignatureDef map that maps from string keys to SignatureDefs.
"""
meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
return meta_graph.signature_def
def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,
input_tensor_key_feed_dict, outdir,
overwrite_flag):
"""Runs SavedModel and fetch all outputs.
Runs the input dictionary through the MetaGraphDef within a SavedModel
specified by the given tag_set and SignatureDef. Also save the outputs to file
if outdir is not None.
Args:
saved_model_dir: Directory containing the SavedModel to execute.
tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
string format, separated by ','. If a tag-set contains multiple tags, all
tags must be passed in.
signature_def_key: A SignatureDef key string.
input_tensor_key_feed_dict: A dictionary that maps input keys to numpy ndarrays.
outdir: A directory to save the outputs to. If the directory doesn't exist,
it will be created.
overwrite_flag: A boolean flag to allow overwrite output file if file with
the same name exists.
Raises:
RuntimeError: An error when output file already exists and overwrite is not
enabled.
"""
# Get a list of output tensor names.
meta_graph_def = get_meta_graph_def(saved_model_dir, tag_set)
# Re-create feed_dict based on input tensor name instead of key as session.run
# uses tensor name.
inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
inputs_feed_dict = {
inputs_tensor_info[key].name: tensor
for key, tensor in input_tensor_key_feed_dict.items()
}
# Get outputs
outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
# Sort to preserve order because we need to go from value to key later.
output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
output_tensor_names_sorted = [
outputs_tensor_info[tensor_key].name
for tensor_key in output_tensor_keys_sorted
]
with session.Session(graph=ops_lib.Graph()) as sess:
loader.load(sess, tag_set.split(','), saved_model_dir)
outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)
for i, output in enumerate(outputs):
output_tensor_key = output_tensor_keys_sorted[i]
print('Result for output key %s:\n%s' % (output_tensor_key, output))
# Only save if outdir is specified.
if outdir:
# Create directory if outdir does not exist
if not os.path.isdir(outdir):
os.makedirs(outdir)
output_full_path = os.path.join(outdir, output_tensor_key + '.npy')
# If overwrite not enabled and file already exist, error out
if not overwrite_flag and os.path.exists(output_full_path):
raise RuntimeError(
'Output file %s already exists. Add \"--overwrite\" to overwrite'
' the existing output files.' % output_full_path)
np.save(output_full_path, output)
print('Output %s is saved to %s' % (output_tensor_key,
output_full_path))
def preprocess_input_arg_string(inputs_str):
"""Parses input arg into dictionary that maps input to file/variable tuple.
Parses an input string in the format of, for example,
"input1=filename1[variable_name1],input2=filename2" into a
dictionary that looks like
{'input_key1': (filename1, variable_name1),
'input_key2': (file2, None)}
, which maps input keys to a tuple of file name and variable name (None if
empty).
Args:
inputs_str: A string that specifies where to load inputs. Each input is
separated by comma.
* If the command line arg for inputs is quoted and contains
whitespace(s), all whitespaces will be ignored.
* For each input key:
'input=filename<[variable_name]>'
* The "[variable_name]" key is optional. Will be set to None if not
specified.
Returns:
A dictionary that maps input keys to a tuple of file name and variable name.
Raises:
RuntimeError: An error when the given input is in a bad format.
"""
input_dict = {}
inputs_raw = inputs_str.split(',')
for input_raw in filter(bool, inputs_raw): # skip empty strings
# Remove quotes and whitespaces
input_raw = input_raw.replace('"', '').replace('\'', '').replace(' ', '')
# Format of input=filename[variable_name]'
match = re.match(r'^([\w\-]+)=([\w\-.\/]+)\[([\w\-]+)\]$', input_raw)
if match:
input_dict[match.group(1)] = (match.group(2), match.group(3))
else:
# Format of input=filename'
match = re.match(r'^([\w\-]+)=([\w\-.\/]+)$', input_raw)
if match:
input_dict[match.group(1)] = (match.group(2), None)
else:
raise RuntimeError(
'Input \"%s\" format is incorrect. Please follow \"--inputs '
'input_key=file_name[variable_name]\" or input_key=file_name' %
input_raw)
return input_dict
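# A minimal sketch of the parsing above (paths are illustrative only):
#   preprocess_input_arg_string('x=/tmp/a.npz[arr_0],y=/tmp/b.npy')
#   => {'x': ('/tmp/a.npz', 'arr_0'), 'y': ('/tmp/b.npy', None)}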
def load_inputs_from_input_arg_string(inputs_str):
"""Parses input arg string and load inputs into a dictionary.
Parses an input string in the format of, for example,
"input1=filename1[variable_name1],input2=filename2" into a
dictionary that looks like
{'input1:0': ndarray_saved_as_variable_name1_in_filename1 ,
'input2:0': ndarray_saved_in_filename2}
, which maps input keys to a numpy ndarray loaded from file. See Args section
for more details on inputs format.
Args:
inputs_str: A string that specifies where to load inputs. Each input is
separated by comma.
* If the command line arg for inputs is quoted and contains
whitespace(s), all whitespaces will be ignored.
* For each input key:
'input=filename[variable_name]'
* File specified by 'filename' will be loaded using numpy.load. Inputs
can be loaded from only .npy, .npz or pickle files.
* The "[variable_name]" key is optional depending on the input file type
as descripted in more details below.
When loading from a npy file, which always contains a numpy ndarray, the
content will be directly assigned to the specified input tensor. If a
variable_name is specified, it will be ignored and a warning will be
issued.
When loading from a npz zip file, user can specify which variable within
the zip file to load for the input tensor inside the square brackets. If
nothing is specified, this function will check that only one file is
included in the zip and load it for the specified input tensor.
When loading from a pickle file, if no variable_name is specified in the
square brackets, whatever is inside the pickle file will be passed
to the specified input tensor, else SavedModel CLI will assume a
dictionary is stored in the pickle file and the value corresponding to
the variable_name will be used.
Returns:
A dictionary that maps input tensor keys to a numpy ndarray loaded from
file.
Raises:
RuntimeError: An error when a key is specified, but the input file contains
multiple numpy ndarrays, none of which matches the given key.
RuntimeError: An error when no key is specified, but the input file contains
more than one numpy ndarray.
"""
tensor_key_feed_dict = {}
for input_tensor_key, (
filename,
variable_name) in preprocess_input_arg_string(inputs_str).items():
# When a variable_name key is specified for the input file
if variable_name:
data = np.load(filename)
# if file contains a single ndarray, ignore the input name
if isinstance(data, np.ndarray):
warnings.warn(
'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
% (filename, variable_name))
tensor_key_feed_dict[input_tensor_key] = data
else:
if variable_name in data:
tensor_key_feed_dict[input_tensor_key] = data[variable_name]
else:
raise RuntimeError(
'Input file %s does not contain variable with name \"%s\".' %
(filename, variable_name))
# When no key is specified for the input file.
else:
data = np.load(filename)
# Check if npz file only contains a single numpy ndarray.
if isinstance(data, np.lib.npyio.NpzFile):
variable_name_list = data.files
if len(variable_name_list) != 1:
raise RuntimeError(
'Input file %s contains more than one ndarray. Please specify '
'the name of the ndarray to use.' % filename)
tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
else:
tensor_key_feed_dict[input_tensor_key] = data
return tensor_key_feed_dict
def show(args):
"""Function triggered by show command.
Args:
args: A namespace parsed from command line.
"""
# If the --all flag is set, display all information.
if args.all:
_show_all(args.dir)
else:
# If no tag is specified, display all tag-sets; if no signature_def key is
# specified, display all SignatureDef keys; else show input/output tensor
# information corresponding to the given SignatureDef key.
if args.tag_set is None:
_show_tag_sets(args.dir)
else:
if args.signature_def is None:
_show_signature_def_map_keys(args.dir, args.tag_set)
else:
_show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
"""Function triggered by run command.
Args:
args: A namespace parsed from command line.
"""
tensor_key_feed_dict = load_inputs_from_input_arg_string(args.inputs)
run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def,
tensor_key_feed_dict, args.outdir,
args.overwrite)
def create_parser():
"""Creates a parser that parse the command line arguments.
Returns:
A namespace parsed from command line arguments.
"""
parser = argparse.ArgumentParser(
description='saved_model_cli: Command-line interface for SavedModel')
parser.add_argument('-v', '--version', action='version', version='0.1.0')
subparsers = parser.add_subparsers(
title='commands', description='valid commands', help='additional help')
# show command
show_msg = (
'Usage examples:\n'
'To show all tag-sets in a SavedModel:\n'
'$saved_model_cli show --dir /tmp/saved_model\n'
'To show all available SignatureDef keys in a '
'MetaGraphDef specified by its tag-set:\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n'
'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
'passed in, separated by \',\':\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
'To show all inputs and outputs TensorInfo for a specific'
' SignatureDef specified by the SignatureDef key in a'
' MetaGraph.\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve '
'--signature_def serving_default\n\n'
'To show all available information in the SavedModel:\n'
'$saved_model_cli show --dir /tmp/saved_model --all')
parser_show = subparsers.add_parser(
'show',
description=show_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_show.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to inspect')
parser_show.add_argument(
'--all',
action='store_true',
help='if set, will output all information in the given SavedModel')
parser_show.add_argument(
'--tag_set',
type=str,
default=None,
help='tag-set of graph in SavedModel to show, separated by \',\'')
parser_show.add_argument(
'--signature_def',
type=str,
default=None,
metavar='SIGNATURE_DEF_KEY',
help='key of SignatureDef to display input(s) and output(s) for')
parser_show.set_defaults(func=show)
# run command
run_msg = ('Usage example:\n'
'To run input tensors from files through a MetaGraphDef and save'
' the output tensors to files:\n'
'$saved_model_cli run --dir /tmp/saved_model --tag_set serve '
'--signature_def serving_default '
'--inputs x1=/tmp/124.npz[x],x2=/tmp/123.npy '
'--outdir=/out\n\n'
'For more information about input file format, please see:\n')
parser_run = subparsers.add_parser(
'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
parser_run.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to execute')
parser_run.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to load, separated by \',\'')
parser_run.add_argument(
'--signature_def',
type=str,
required=True,
metavar='SIGNATURE_DEF_KEY',
help='key of SignatureDef to run')
msg = ('inputs in the format of \'input_key=filename[variable_name]\', '
'separated by \',\'. Inputs can only be loaded from .npy, .npz or '
'pickle files.')
parser_run.add_argument('--inputs', type=str, required=True, help=msg)
parser_run.add_argument(
'--outdir',
type=str,
default=None,
help='if specified, output tensor(s) will be saved to given directory')
parser_run.add_argument(
'--overwrite',
action='store_true',
help='if set, output file will be overwritten if it already exists.')
parser_run.set_defaults(func=run)
return parser
def main():
parser = create_parser()
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
sys.exit(main())
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Simple DOM for both SGML and XML documents.
"""
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import absolute_import
import sys
if sys.version_info[0] == 2:
STRING_TYPES = (basestring,)
else:
STRING_TYPES = (str,)
class Container:
def __init__(self):
self.children = []
def add(self, child):
child.parent = self
self.children.append(child)
def extend(self, children):
for child in children:
child.parent = self
self.children.append(child)
class Component:
def __init__(self):
self.parent = None
def index(self):
if self.parent:
return self.parent.children.index(self)
else:
return 0
def _line(self, file, line, column):
self.file = file
self.line = line
self.column = column
class DispatchError(Exception):
  def __init__(self, scope, f):
    msg = "no such attribute"
    Exception.__init__(self, msg)
class Dispatcher:
def is_type(self, type):
cls = self
while cls is not None:
if cls.type == type:
return True
cls = cls.base
return False
def dispatch(self, f, attrs=""):
cls = self
while cls is not None:
if hasattr(f, cls.type):
return getattr(f, cls.type)(self)
else:
cls = cls.base
cls = self
while cls is not None:
if attrs:
sep = ", "
if cls.base is None:
sep += "or "
else:
sep = ""
attrs += "%s'%s'" % (sep, cls.type)
cls = cls.base
raise AttributeError("'%s' object has no attribute %s" %
(f.__class__.__name__, attrs))
class Node(Container, Component, Dispatcher):
type = "node"
base = None
def __init__(self):
Container.__init__(self)
Component.__init__(self)
self.query = Query([self])
def __getitem__(self, name):
for nd in self.query[name]:
return nd
def text(self):
from . import transforms
return self.dispatch(transforms.Text())
def tag(self, name, *attrs, **kwargs):
t = Tag(name, *attrs, **kwargs)
self.add(t)
return t
def data(self, s):
d = Data(s)
self.add(d)
return d
def entity(self, s):
e = Entity(s)
self.add(e)
return e
class Tree(Node):
type = "tree"
base = Node
class Tag(Node):
type = "tag"
base = Node
def __init__(self, _name, *attrs, **kwargs):
Node.__init__(self)
self.name = _name
self.attrs = list(attrs)
self.attrs.extend(kwargs.items())
self.singleton = False
def get_attr(self, name):
for k, v in self.attrs:
if name == k:
return v
def _idx(self, attr):
idx = 0
for k, v in self.attrs:
if k == attr:
return idx
idx += 1
return None
def set_attr(self, name, value):
idx = self._idx(name)
if idx is None:
self.attrs.append((name, value))
else:
self.attrs[idx] = (name, value)
def dispatch(self, f):
try:
attr = "do_" + self.name
method = getattr(f, attr)
except AttributeError:
return Dispatcher.dispatch(self, f, "'%s'" % attr)
return method(self)
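# A minimal visitor sketch (illustrative only): Dispatcher.dispatch looks for
# a method named after the node's type, walking up the `base` chain, while
# Tag.dispatch first tries a tag-specific "do_<name>" method.
class _ExampleVisitor:
  def do_p(self, tag):   # invoked for <p> tags via Tag.dispatch
    return "paragraph"
  def tag(self, node):   # fallback for any other Tag (type == "tag")
    return "tag:" + node.name
  def data(self, leaf):  # invoked for Data leaves via Dispatcher.dispatch
    return leaf.data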
class Leaf(Component, Dispatcher):
type = "leaf"
base = None
def __init__(self, data):
assert isinstance(data, STRING_TYPES)
self.data = data
class Data(Leaf):
type = "data"
base = Leaf
class Entity(Leaf):
type = "entity"
base = Leaf
class Character(Leaf):
type = "character"
base = Leaf
class Comment(Leaf):
type = "comment"
base = Leaf
###################
## Query Classes ##
###################
class Adder:
def __add__(self, other):
return Sum(self, other)
class Sum(Adder):
def __init__(self, left, right):
self.left = left
self.right = right
def __iter__(self):
for x in self.left:
yield x
for x in self.right:
yield x
class View(Adder):
def __init__(self, source):
self.source = source
class Filter(View):
def __init__(self, predicate, source):
View.__init__(self, source)
self.predicate = predicate
def __iter__(self):
for nd in self.source:
if self.predicate(nd):
yield nd
class Flatten(View):
def __iter__(self):
sources = [iter(self.source)]
while sources:
try:
nd = next(sources[-1])
if isinstance(nd, Tree):
sources.append(iter(nd.children))
else:
yield nd
except StopIteration:
sources.pop()
class Children(View):
def __iter__(self):
for nd in self.source:
for child in nd.children:
yield child
class Attributes(View):
def __iter__(self):
for nd in self.source:
for a in nd.attrs:
yield a
class Values(View):
def __iter__(self):
for name, value in self.source:
yield value
def flatten_path(path):
if isinstance(path, STRING_TYPES):
for part in path.split("/"):
yield part
elif callable(path):
yield path
else:
for p in path:
for fp in flatten_path(p):
yield fp
class Query(View):
def __iter__(self):
for nd in self.source:
yield nd
def __getitem__(self, path):
query = self.source
for p in flatten_path(path):
if callable(p):
select = Query
pred = p
source = query
elif isinstance(p, STRING_TYPES):
if p[0] == "@":
select = Values
pred = lambda x, n=p[1:]: x[0] == n
source = Attributes(query)
elif p[0] == "#":
select = Query
pred = lambda x, t=p[1:]: x.is_type(t)
source = Children(query)
else:
select = Query
def pred(x, n=p): return isinstance(x, Tag) and x.name == n
source = Flatten(Children(query))
else:
raise ValueError(p)
query = select(Filter(pred, source))
return query
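# A minimal, runnable sketch (illustrative only) of the query path syntax:
# plain names match Tag names, "@name" selects attribute values, and "#type"
# matches node types.
def _example_query():
    tree = Tree()
    body = tree.tag("body")
    body.tag("p", ("class", "intro")).data("hello")
    body.tag("p").data("world")
    paras = list(tree.query["body/p"])            # both <p> tags
    classes = list(tree.query["body/p/@class"])   # ['intro']
    texts = [d.data for d in tree.query["body/p/#data"]]  # ['hello', 'world']
    return paras, classes, texts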
|
|
"""
QuestHandler handles a character's quests.
"""
from django.conf import settings
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from evennia.utils import logger
from muddery.utils.builder import build_object
from muddery.utils.quest_dependency_handler import QUEST_DEP_HANDLER
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils.localized_strings_handler import _
from muddery.utils.exception import MudderyError
from muddery.utils.object_key_handler import OBJECT_KEY_HANDLER
from muddery.utils.game_settings import GAME_SETTINGS
class QuestHandler(object):
"""
Handles a character's quests.
"""
def __init__(self, owner):
"""
Initialize handler
"""
self.owner = owner
self.current_quests = owner.db.current_quests
self.completed_quests = owner.db.completed_quests
def accept(self, quest_key):
"""
Accept a quest.
Args:
quest_key: (string) quest's key
Returns:
None
"""
if quest_key in self.current_quests:
return
# Create quest object.
new_quest = build_object(quest_key)
if not new_quest:
return
new_quest.set_owner(self.owner)
self.current_quests[quest_key] = new_quest
self.owner.msg({"msg": _("Accepted quest {c%s{n.") % new_quest.get_name()})
self.show_quests()
self.owner.show_location()
def give_up(self, quest_key):
"""
Give up a quest.
Args:
quest_key: (string) quest's key
Returns:
None
"""
if not GAME_SETTINGS.get("can_give_up_quests"):
logger.log_tracemsg("Can not give up quests.")
raise MudderyError(_("Can not give up this quest."))
if quest_key not in self.current_quests:
raise MudderyError(_("Can not find this quest."))
del(self.current_quests[quest_key])
if quest_key in self.completed_quests:
    self.completed_quests.remove(quest_key)
self.show_quests()
def complete(self, quest_key):
"""
Complete a quest.
Args:
quest_key: (string) quest's key
Returns:
None
"""
if quest_key not in self.current_quests:
return
if not self.current_quests[quest_key].is_accomplished():
return
# Get quest's name.
name = self.current_quests[quest_key].get_name()
# Call complete function in the quest.
self.current_quests[quest_key].complete()
# Delete the quest.
del (self.current_quests[quest_key])
self.completed_quests.add(quest_key)
self.owner.msg({"msg": _("Completed quest {c%s{n.") % name})
self.show_quests()
self.owner.show_location()
def get_accomplished_quests(self):
"""
Get all quests whose objectives are accomplished.
"""
quests = set()
for quest in self.current_quests:
if self.current_quests[quest].is_accomplished():
quests.add(quest)
return quests
def is_accomplished(self, quest_key):
"""
Whether the character accomplished this quest or not.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
if quest_key not in self.current_quests:
return False
return self.current_quests[quest_key].is_accomplished()
def is_not_accomplished(self, quest_key):
"""
Whether the character has not accomplished this quest.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
if quest_key not in self.current_quests:
return False
return not self.current_quests[quest_key].is_accomplished()
def is_completed(self, quest_key):
"""
Whether the character completed this quest or not.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
return quest_key in self.completed_quests
def is_in_progress(self, quest_key):
"""
Whether the character is currently doing this quest.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
return quest_key in self.current_quests
def can_provide(self, quest_key):
"""
Whether this quest can be provided to the owner.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
if self.is_completed(quest_key):
return False
if self.is_in_progress(quest_key):
return False
if not self.match_dependencies(quest_key):
return False
if not self.match_condition(quest_key):
return False
return True
def match_dependencies(self, quest_key):
"""
Check quest's dependencies
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
return QUEST_DEP_HANDLER.match_quest_dependencies(self.owner, quest_key)
def match_condition(self, quest_key):
"""
Check if the quest matches its condition.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
# Get quest's record.
model_names = OBJECT_KEY_HANDLER.get_models(quest_key)
if not model_names:
return False
for model_name in model_names:
model_quest = apps.get_model(settings.WORLD_DATA_APP, model_name)
try:
record = model_quest.objects.get(key=quest_key)
return STATEMENT_HANDLER.match_condition(record.condition, self.owner, None)
except ObjectDoesNotExist:
continue
except AttributeError:
continue
return True
def show_quests(self):
"""
Send quests to player.
"""
quests = self.return_quests()
self.owner.msg({"quests": quests})
def return_quests(self):
"""
Get quests' data.
"""
quests = []
for quest in self.current_quests.values():
info = {"dbref": quest.dbref,
"name": quest.name,
"desc": quest.db.desc,
"objectives": quest.return_objectives(),
"accomplished": quest.is_accomplished()}
quests.append(info)
return quests
def at_objective(self, object_type, object_key, number=1):
"""
Called when the owner may have completed some objectives.
Calls the related hooks.
Args:
object_type: (type) objective's type
object_key: (string) object's key
number: (int) objective's number
Returns:
None
"""
status_changed = False
for quest in self.current_quests.values():
if quest.at_objective(object_type, object_key, number):
status_changed = True
if quest.is_accomplished():
self.owner.msg({"msg":
_("Quest {c%s{n's goals are accomplished.") % quest.name})
if status_changed:
self.show_quests()
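# --- Illustrative usage sketch (not part of the original module) ---
# A hedged, self-contained example of the handler's pure bookkeeping, using
# hypothetical stub objects so no running game server is needed. In a real
# Muddery game the owner is a character typeclass and `db` is Evennia's
# persistent attribute store; only the QuestHandler calls are from above.
if __name__ == "__main__":
    class _StubQuestDB(object):
        desc = "demo quest"

    class _StubQuest(object):
        def __init__(self, name, accomplished):
            self.dbref = "#1"
            self.name = name
            self.db = _StubQuestDB()
            self._accomplished = accomplished

        def is_accomplished(self):
            return self._accomplished

        def return_objectives(self):
            return []

    class _StubStore(object):
        current_quests = {"q1": _StubQuest("Find the sword", True)}
        completed_quests = set()

    class _StubOwner(object):
        db = _StubStore()

        def msg(self, data):
            print(data)

        def show_location(self):
            pass

    handler = QuestHandler(_StubOwner())
    print(handler.get_accomplished_quests())    # {'q1'}
    print(handler.is_in_progress("q1"))         # True
    print(handler.return_quests()[0]["name"])   # Find the sword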
|
|
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
import base64
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import keymgr
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db_driver=None):
super(BackupMetadataAPI, self).__init__(db_driver)
self.context = context
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info(_LI("Value with type=%s is not serializable"),
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
"from backup"), key)
continue
# Copy the encryption key uuid for backup
if key == 'encryption_key_id' and value is not None:
value = keymgr.API().copy_key(self.context, value)
LOG.debug("Copying encryption key uuid for backup.")
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
"from backup"), entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info(_LI("Unable to serialize field '%s' - "
"excluding from backup"), entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields):
"""Returns set of metadata restricted to required fields.
If fields is an empty list, the full set is returned.
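Example (illustrative, assuming a plain dict as input):
_filter({'a': 1, 'b': 2}, ['a']) -> {'a': 1}
_filter({'a': 1, 'b': 2}, []) -> {'a': 1, 'b': 2}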
"""
if not fields:
return metadata
subset = {}
for field in fields:
if field in metadata:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
metadata = self._filter(metadata, fields)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type ids do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning(_LW("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed."))
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
version i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
for meta_type in factory:
func = factory[meta_type][0]
fields = factory[meta_type][1]
if meta_type in meta_container:
func(meta_container[meta_type], volume_id, fields)
else:
LOG.debug("No metadata of type '%s' to restore", meta_type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db_driver=None):
super(BackupDriver, self).__init__(db_driver)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db_driver)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume."""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup."""
return
@abc.abstractmethod
def delete(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export backup record.
Default backup driver implementation.
Serialize the backup record describing the backup into a string.
:param backup: backup entry to export
:returns backup_url - a string describing the backup record
"""
retval = jsonutils.dumps(backup)
if six.PY3:
retval = retval.encode('utf-8')
return base64.encodestring(retval)
def import_record(self, backup_url):
"""Import and verify backup record.
Default backup driver implementation.
De-serialize the backup record into a dictionary, so we can
update the database.
:param backup_url: driver specific backup record string
:returns dictionary object with database updates
"""
return jsonutils.loads(base64.decodestring(backup_url))
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises: InvalidBackup, NotImplementedError
"""
return
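# --- Illustrative sketch (not part of the original module) ---
# A hedged, minimal example of the surface a concrete driver must implement.
# The class below is hypothetical and discards all data; real drivers (e.g.
# the Swift or Ceph backends) also handle chunking, compression and the
# metadata helpers above.
class NullBackupDriver(BackupDriver):
    """Toy driver for illustration only: stores nothing."""

    def backup(self, backup, volume_file, backup_metadata=False):
        # Consume the volume stream without persisting it anywhere.
        while volume_file.read(65536):
            pass

    def restore(self, backup, volume_id, volume_file):
        # Nothing was stored, so there is nothing to write back.
        pass

    def delete(self, backup):
        # No backend objects to remove.
        pass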
|
|
# -*- coding: utf-8 -*-
# MIT License (see LICENSE.txt or https://opensource.org/licenses/MIT)
"""Implements generic widevine functions used across architectures"""
from __future__ import absolute_import, division, unicode_literals
import os
from time import time
from .. import config
from ..kodiutils import addon_profile, exists, get_setting_int, listdir, localize, log, mkdirs, ok_dialog, open_file, set_setting, translate_path, yesno_dialog
from ..utils import arch, cmd_exists, hardlink, http_download, http_get, http_head, parse_version, remove_tree, run_cmd, store, system_os
from ..unicodes import compat_path, to_unicode
def install_cdm_from_backup(version):
"""Copies files from specified backup version to cdm dir"""
filenames = listdir(os.path.join(backup_path(), version))
for filename in filenames:
backup_fpath = os.path.join(backup_path(), version, filename)
install_fpath = os.path.join(ia_cdm_path(), filename)
hardlink(backup_fpath, install_fpath)
log(0, 'Installed CDM version {version} from backup', version=version)
set_setting('last_modified', time())
remove_old_backups(backup_path())
def widevine_eula():
"""Displays the Widevine EULA and prompts user to accept it."""
if 'x86' in arch():
cdm_version = latest_available_widevine_from_repo().get('version')
cdm_os = config.WIDEVINE_OS_MAP[system_os()]
cdm_arch = config.WIDEVINE_ARCH_MAP_X86[arch()]
else: # grab the license from the x86 files
log(0, 'Acquiring Widevine EULA from x86 files.')
cdm_version = latest_widevine_version(eula=True)
cdm_os = 'mac'
cdm_arch = 'x64'
url = config.WIDEVINE_DOWNLOAD_URL.format(version=cdm_version, os=cdm_os, arch=cdm_arch)
downloaded = http_download(url, message=localize(30025), background=True) # Acquiring EULA
if not downloaded:
return False
from zipfile import ZipFile
with ZipFile(compat_path(store('download_path'))) as archive:
with archive.open(config.WIDEVINE_LICENSE_FILE) as file_obj:
eula = file_obj.read().decode().strip().replace('\n', ' ')
return yesno_dialog(localize(30026), eula, nolabel=localize(30028), yeslabel=localize(30027)) # Widevine CDM EULA
def backup_path():
"""Return the path to the cdm backups"""
path = os.path.join(addon_profile(), 'backup', '')
if not exists(path):
mkdirs(path)
return path
def widevine_config_path():
"""Return the full path to the widevine or recovery config file"""
iacdm = ia_cdm_path()
if iacdm is None:
return None
if 'x86' in arch():
return os.path.join(iacdm, config.WIDEVINE_CONFIG_NAME)
return os.path.join(iacdm, 'config.json')
def load_widevine_config():
"""Load the widevine or recovery config in JSON format"""
from json import loads
if exists(widevine_config_path()):
with open_file(widevine_config_path(), 'r') as config_file:
return loads(config_file.read())
return None
def widevinecdm_path():
"""Get full Widevine CDM path"""
widevinecdm_filename = config.WIDEVINE_CDM_FILENAME[system_os()]
if widevinecdm_filename is None:
return None
if ia_cdm_path() is None:
return None
return os.path.join(ia_cdm_path(), widevinecdm_filename)
def has_widevinecdm():
"""Whether a Widevine CDM is installed on the system"""
if system_os() == 'Android': # Widevine CDM is built into Android
return True
widevinecdm = widevinecdm_path()
if widevinecdm is None:
return False
if not exists(widevinecdm):
log(3, 'Widevine CDM is not installed.')
return False
log(0, 'Found Widevine CDM at {path}', path=widevinecdm)
return True
def ia_cdm_path():
"""Return the specified CDM path for inputstream.adaptive, usually ~/.kodi/cdm"""
from xbmcaddon import Addon
try:
addon = Addon('inputstream.adaptive')
except RuntimeError:
return None
cdm_path = translate_path(os.path.join(to_unicode(addon.getSetting('DECRYPTERPATH')), ''))
if not exists(cdm_path):
mkdirs(cdm_path)
return cdm_path
def missing_widevine_libs():
"""Parses ldd output of libwidevinecdm.so and displays dialog if any depending libraries are missing."""
if system_os() != 'Linux': # this should only be needed for linux
return None
if cmd_exists('ldd'):
widevinecdm = widevinecdm_path()
if not os.access(widevinecdm, os.X_OK):
log(0, 'Changing {path} permissions to 744.', path=widevinecdm)
os.chmod(widevinecdm, 0o744)
missing_libs = []
cmd = ['ldd', widevinecdm]
output = run_cmd(cmd, sudo=False)
if output['success']:
for line in output['output'].splitlines():
if '=>' not in str(line):
continue
lib_path = str(line).strip().split('=>')
lib = lib_path[0].strip()
path = lib_path[1].strip()
if path == 'not found':
missing_libs.append(lib)
if missing_libs:
log(4, 'Widevine is missing the following libraries: {libs}', libs=missing_libs)
return missing_libs
log(0, 'There are no missing Widevine libraries! :-)')
return None
log(4, 'Failed to check for missing Widevine libraries.')
return None
def latest_widevine_version(eula=False):
"""Returns the latest available version of Widevine CDM/Chrome OS."""
if eula or 'x86' in arch():
url = config.WIDEVINE_VERSIONS_URL
versions = http_get(url)
return versions.split()[-1]
from .arm import chromeos_config, select_best_chromeos_image
devices = chromeos_config()
arm_device = select_best_chromeos_image(devices)
if arm_device is None:
log(4, 'We could not find an ARM device in the Chrome OS recovery.json')
ok_dialog(localize(30004), localize(30005))
return ''
return arm_device.get('version')
def latest_available_widevine_from_repo():
"""Returns the latest available Widevine CDM version and url from Google's library CDM repository"""
cdm_versions = http_get(config.WIDEVINE_VERSIONS_URL).strip('\n').split('\n')
cdm_os = config.WIDEVINE_OS_MAP[system_os()]
cdm_arch = config.WIDEVINE_ARCH_MAP_X86[arch()]
available_cdms = []
for cdm_version in cdm_versions:
cdm_url = config.WIDEVINE_DOWNLOAD_URL.format(version=cdm_version, os=cdm_os, arch=cdm_arch)
http_status = http_head(cdm_url)
if http_status == 200:
available_cdms.append(dict(version=cdm_version, url=cdm_url))
return available_cdms[-1]
def remove_old_backups(bpath):
"""Removes old Widevine backups, if number of allowed backups is exceeded"""
max_backups = get_setting_int('backups', 4)
versions = sorted([parse_version(version) for version in listdir(bpath)])
if len(versions) < 2:
return
installed_version = load_widevine_config()['version']
while len(versions) > max_backups + 1:
remove_version = str(versions[1] if versions[0] == parse_version(installed_version) else versions[0])
log(0, 'Removing oldest backup which is not installed: {version}', version=remove_version)
remove_tree(os.path.join(bpath, remove_version))
versions = sorted([parse_version(version) for version in listdir(bpath)])
return
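# --- Illustrative sketch (not part of the original module) ---
# A hedged, dependency-free model of the pruning policy in
# remove_old_backups() above: keep at most max_backups + 1 versions, always
# discarding the oldest one that is not the currently installed version.
# The function name and the plain-integer versions are assumptions made for
# the example; the real code works on parsed version objects on disk.
def _prune_policy_example(versions, installed, max_backups):
    """Pure-function sketch; returns the versions left after pruning."""
    versions = sorted(versions)
    while len(versions) > max_backups + 1:
        victim = versions[1] if versions[0] == installed else versions[0]
        versions.remove(victim)
    return versions
# _prune_policy_example([1, 2, 3, 4, 5, 6], installed=1, max_backups=4)
# -> [1, 3, 4, 5, 6]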
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class P2SVpnServerConfigurationsOperations:
"""P2SVpnServerConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs: Any
) -> "_models.P2SVpnServerConfiguration":
"""Retrieves the details of a P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the P2SVpnServerConfiguration.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: P2SVpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.P2SVpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
p2_s_vpn_server_configuration_parameters: "_models.P2SVpnServerConfiguration",
**kwargs: Any
) -> "_models.P2SVpnServerConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(p2_s_vpn_server_configuration_parameters, 'P2SVpnServerConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
p2_s_vpn_server_configuration_parameters: "_models.P2SVpnServerConfiguration",
**kwargs: Any
) -> AsyncLROPoller["_models.P2SVpnServerConfiguration"]:
"""Creates a P2SVpnServerConfiguration to associate with a VirtualWan if it doesn't exist else
updates the existing P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:param p2_s_vpn_server_configuration_parameters: Parameters supplied to create or Update a
P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_parameters: ~azure.mgmt.network.v2018_12_01.models.P2SVpnServerConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either P2SVpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_12_01.models.P2SVpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
p2_s_vpn_server_configuration_name=p2_s_vpn_server_configuration_name,
p2_s_vpn_server_configuration_parameters=p2_s_vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the P2SVpnServerConfiguration.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
p2_s_vpn_server_configuration_name=p2_s_vpn_server_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
def list_by_virtual_wan(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListP2SVpnServerConfigurationsResult"]:
"""Retrieves all P2SVpnServerConfigurations for a particular VirtualWan.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListP2SVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.ListP2SVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListP2SVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_virtual_wan.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListP2SVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_virtual_wan.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations'} # type: ignore
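# --- Illustrative usage sketch (not part of the generated module) ---
# A hedged example of driving these operations from application code. `ops`
# is assumed to be a P2SVpnServerConfigurationsOperations instance obtained
# from an async NetworkManagementClient, and `params` an already-built
# P2SVpnServerConfiguration model; both names are placeholders.
async def _example_usage(ops, params):
    poller = await ops.begin_create_or_update(
        resource_group_name='rg', virtual_wan_name='wan1',
        p2_s_vpn_server_configuration_name='cfg1',
        p2_s_vpn_server_configuration_parameters=params)
    config = await poller.result()   # wait for the LRO to finish
    # Enumerate every configuration attached to the VirtualWan.
    async for item in ops.list_by_virtual_wan('rg', 'wan1'):
        print(item.name)
    return config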
|
|
from datetime import datetime
from hashlib import md5
from itertools import izip_longest
import json
import os
from urllib import urlencode
from StringIO import StringIO
from aludel.database import MetaData
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from twisted.web.client import Agent, FileBodyProducer, readBody
from twisted.web.http_headers import Headers
from twisted.web.server import Site
from unique_code_service.api import UniqueCodeServiceApp
from unique_code_service.models import UniqueCodePool
from .helpers import populate_pool, mk_audit_params, sorted_dicts
class ApiClient(object):
def __init__(self, base_url):
self._base_url = base_url
def _make_url(self, url_path):
return '%s/%s' % (self._base_url, url_path.lstrip('/'))
def _make_call(self, method, url_path, headers, body, expected_code):
agent = Agent(reactor)
url = self._make_url(url_path)
d = agent.request(method, url, headers, body)
return d.addCallback(self._get_response_body, expected_code)
def _get_response_body(self, response, expected_code):
assert response.code == expected_code
return readBody(response).addCallback(json.loads)
def get(self, url_path, params, expected_code):
url_path = '?'.join([url_path, urlencode(params)])
return self._make_call('GET', url_path, None, None, expected_code)
def put(self, url_path, headers, content, expected_code=200):
body = FileBodyProducer(StringIO(content))
return self._make_call('PUT', url_path, headers, body, expected_code)
def put_json(self, url_path, params, expected_code=200):
headers = Headers({'Content-Type': ['application/json']})
return self.put(
url_path, headers, json.dumps(params), expected_code)
def put_redeem(self, request_id, unique_code, expected_code=200):
params = mk_audit_params(request_id)
params.update({
'unique_code': unique_code,
})
params.pop('request_id')
url_path = 'testpool/redeem/%s' % (request_id,)
return self.put_json(url_path, params, expected_code)
def put_create(self, expected_code=201):
url_path = 'testpool'
return self.put(url_path, Headers({}), None, expected_code)
def put_import(self, request_id, content, content_md5=None,
expected_code=201):
url_path = 'testpool/import/%s' % (request_id,)
hdict = {
'Content-Type': ['text/csv'],
}
if content_md5 is None:
content_md5 = md5(content).hexdigest()
if content_md5:
hdict['Content-MD5'] = [content_md5]
return self.put(url_path, Headers(hdict), content, expected_code)
def get_audit_query(self, request_id, field, value, expected_code=200):
params = {'request_id': request_id, 'field': field, 'value': value}
return self.get('testpool/audit_query', params, expected_code)
def get_unique_code_counts(self, request_id, expected_code=200):
params = {'request_id': request_id}
return self.get('testpool/unique_code_counts', params, expected_code)
class TestUniqueCodeServiceApp(TestCase):
timeout = 5
@inlineCallbacks
def setUp(self):
# We need to make sure all our queries run in the same thread,
# otherwise sqlite gets very sad.
reactor.suggestThreadPoolSize(1)
connection_string = os.environ.get(
"ALUDEL_TEST_CONNECTION_STRING", "sqlite://")
self._using_mysql = connection_string.startswith('mysql')
self.asapp = UniqueCodeServiceApp(connection_string, reactor=reactor)
site = Site(self.asapp.app.resource())
self.listener = reactor.listenTCP(0, site, interface='localhost')
self.listener_port = self.listener.getHost().port
self._drop_tables()
self.conn = yield self.asapp.engine.connect()
self.pool = UniqueCodePool('testpool', self.conn)
self.client = ApiClient('http://localhost:%s' % (self.listener_port,))
@inlineCallbacks
def tearDown(self):
yield self.conn.close()
self._drop_tables()
yield self.listener.loseConnection()
def _drop_tables(self):
# NOTE: This is a blocking operation!
md = MetaData(bind=self.asapp.engine._engine)
md.reflect()
md.drop_all()
assert self.asapp.engine._engine.table_names() == []
@inlineCallbacks
def assert_unique_code_counts(self, expected_rows):
rows = yield self.pool.count_unique_codes()
assert sorted(tuple(r) for r in rows) == sorted(expected_rows)
@inlineCallbacks
def test_request_missing_params(self):
params = mk_audit_params('req-0')
params.pop('request_id')
rsp = yield self.client.put_json(
'testpool/redeem/req-0', params, expected_code=400)
assert rsp == {
'request_id': 'req-0',
'error': "Missing request parameters: 'unique_code'",
}
@inlineCallbacks
def test_request_missing_audit_params(self):
params = {'unique_code': 'vanilla0'}
rsp = yield self.client.put_json(
'testpool/redeem/req-0', params, expected_code=400)
assert rsp == {
'request_id': 'req-0',
'error': (
"Missing request parameters: 'transaction_id', 'user_id'"),
}
@inlineCallbacks
def test_request_extra_params(self):
params = mk_audit_params('req-0')
params.pop('request_id')
params.update({
'unique_code': 'vanilla0',
'foo': 'bar',
})
rsp = yield self.client.put_json(
'testpool/redeem/req-0', params, expected_code=400)
assert rsp == {
'request_id': 'req-0',
'error': "Unexpected request parameters: 'foo'",
}
@inlineCallbacks
def test_redeem_missing_pool(self):
rsp = yield self.client.put_redeem(
'req-0', 'vanilla0', expected_code=404)
assert rsp == {
'request_id': 'req-0',
'error': 'Unique code pool does not exist.',
}
@inlineCallbacks
def test_issue_response_contains_request_id(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['vanilla'], [0])
rsp0 = yield self.client.put_redeem('req-0', 'vanilla0')
assert rsp0['request_id'] == 'req-0'
@inlineCallbacks
def test_redeem(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['vanilla', 'chocolate'], [0, 1])
rsp0 = yield self.client.put_redeem('req-0', 'vanilla0')
assert rsp0 == {
'request_id': 'req-0',
'unique_code': 'vanilla0',
'flavour': 'vanilla',
}
rsp1 = yield self.client.put_redeem('req-1', 'chocolate1')
assert rsp1 == {
'request_id': 'req-1',
'unique_code': 'chocolate1',
'flavour': 'chocolate',
}
@inlineCallbacks
def test_redeem_idempotent(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['vanilla'], [0])
rsp0 = yield self.client.put_redeem('req-0', 'vanilla0')
assert rsp0 == {
'request_id': 'req-0',
'unique_code': 'vanilla0',
'flavour': 'vanilla',
}
rsp1 = yield self.client.put_redeem('req-0', 'vanilla0')
assert rsp1 == {
'request_id': 'req-0',
'unique_code': 'vanilla0',
'flavour': 'vanilla',
}
rsp2 = yield self.client.put_redeem('req-1', 'vanilla0')
assert rsp2 == {
'request_id': 'req-1',
'error': 'Cannot redeem unique code: used',
}
rsp3 = yield self.client.put_redeem(
'req-0', 'VaNiLlA0', expected_code=400)
assert rsp3 == {
'request_id': 'req-0',
'error': (
'This request has already been performed with different'
' parameters.'),
}
@inlineCallbacks
def test_redeem_invalid_unique_code(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['vanilla'], [0])
rsp = yield self.client.put_redeem('req-0', 'chocolate7')
assert rsp == {
'request_id': 'req-0',
'error': 'Cannot redeem unique code: invalid',
}
@inlineCallbacks
def test_redeem_used_unique_code(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['vanilla'], [0])
yield self.client.put_redeem('req-0', 'vanilla0')
rsp = yield self.client.put_redeem('req-1', 'vanilla0')
assert rsp == {
'request_id': 'req-1',
'error': 'Cannot redeem unique code: used',
}
def _assert_audit_entries(self, request_id, response, expected_entries):
def created_ats():
format_str = '%Y-%m-%dT%H:%M:%S.%f'
if self._using_mysql:
format_str = format_str.replace('.%f', '')
for result in response['results']:
yield datetime.strptime(
result['created_at'], format_str).isoformat()
expected_results = [{
'request_id': entry['audit_params']['request_id'],
'transaction_id': entry['audit_params']['transaction_id'],
'user_id': entry['audit_params']['user_id'],
'request_data': entry['request_data'],
'response_data': entry['response_data'],
'error': entry['error'],
'created_at': created_at,
} for entry, created_at in izip_longest(
expected_entries, created_ats())]
assert response == {
'request_id': request_id,
'results': expected_results,
}
@inlineCallbacks
def test_query_bad_field(self):
yield self.pool.create_tables()
rsp = yield self.client.get_audit_query(
'audit-0', 'foo', 'req-0', expected_code=400)
assert rsp == {
'request_id': 'audit-0',
'error': 'Invalid audit field.',
}
@inlineCallbacks
def test_query_by_request_id(self):
yield self.pool.create_tables()
audit_params = mk_audit_params('req-0')
rsp = yield self.client.get_audit_query(
'audit-0', 'request_id', 'req-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(
audit_params, 'req_data', 'resp_data', 'vanilla0')
rsp = yield self.client.get_audit_query(
'audit-1', 'request_id', 'req-0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params,
'request_data': u'req_data',
'response_data': u'resp_data',
'error': False,
}])
@inlineCallbacks
def test_query_by_transaction_id(self):
yield self.pool.create_tables()
audit_params_0 = mk_audit_params('req-0', 'transaction-0')
audit_params_1 = mk_audit_params('req-1', 'transaction-0')
rsp = yield self.client.get_audit_query(
'audit-0', 'transaction_id', 'transaction-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(
audit_params_0, 'req_data_0', 'resp_data_0', 'vanilla0')
yield self.pool._audit_request(
audit_params_1, 'req_data_1', 'resp_data_1', 'vanilla0')
rsp = yield self.client.get_audit_query(
'audit-1', 'transaction_id', 'transaction-0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params_0,
'request_data': u'req_data_0',
'response_data': u'resp_data_0',
'error': False,
}, {
'audit_params': audit_params_1,
'request_data': u'req_data_1',
'response_data': u'resp_data_1',
'error': False,
}])
@inlineCallbacks
def test_query_by_user_id(self):
yield self.pool.create_tables()
audit_params_0 = mk_audit_params('req-0', 'transaction-0', 'user-0')
audit_params_1 = mk_audit_params('req-1', 'transaction-1', 'user-0')
rsp = yield self.client.get_audit_query('audit-0', 'user_id', 'user-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(
audit_params_0, 'req_data_0', 'resp_data_0', 'vanilla0')
yield self.pool._audit_request(
audit_params_1, 'req_data_1', 'resp_data_1', 'vanilla0')
rsp = yield self.client.get_audit_query('audit-1', 'user_id', 'user-0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params_0,
'request_data': u'req_data_0',
'response_data': u'resp_data_0',
'error': False,
}, {
'audit_params': audit_params_1,
'request_data': u'req_data_1',
'response_data': u'resp_data_1',
'error': False,
}])
@inlineCallbacks
def test_query_by_unique_code(self):
yield self.pool.create_tables()
audit_params_0 = mk_audit_params('req-0', 'transaction-0', 'user-0')
audit_params_1 = mk_audit_params('req-1', 'transaction-1', 'user-0')
rsp = yield self.client.get_audit_query('audit-0', 'user_id', 'user-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(
audit_params_0, 'req_data_0', 'resp_data_0', 'vanilla0')
yield self.pool._audit_request(
audit_params_1, 'req_data_1', 'resp_data_1', 'vanilla0')
rsp = yield self.client.get_audit_query(
'audit-1', 'unique_code', 'vanilla0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params_0,
'request_data': u'req_data_0',
'response_data': u'resp_data_0',
'error': False,
}, {
'audit_params': audit_params_1,
'request_data': u'req_data_1',
'response_data': u'resp_data_1',
'error': False,
}])
@inlineCallbacks
def test_create(self):
resp = yield self.client.put_create()
assert resp == {
'request_id': None,
'created': True,
}
# Recreating a pool has a different response.
resp = yield self.client.put_create(expected_code=200)
assert resp == {
'request_id': None,
'created': False,
}
@inlineCallbacks
def test_import(self):
yield self.pool.create_tables()
yield self.assert_unique_code_counts([])
content = '\n'.join([
'unique_code,flavour',
'vanilla0,vanilla',
'vanilla1,vanilla',
'chocolate0,chocolate',
'chocolate1,chocolate',
])
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_unique_code_counts([
('vanilla', False, 2),
('chocolate', False, 2),
])
@inlineCallbacks
def test_import_missing_pool(self):
content = '\n'.join([
'unique_code,flavour',
'vanilla0,vanilla',
'vanilla1,vanilla',
'chocolate0,chocolate',
'chocolate1,chocolate',
])
rsp = yield self.client.put_import('req-0', content, expected_code=404)
assert rsp == {
'request_id': 'req-0',
'error': 'Unique code pool does not exist.',
}
@inlineCallbacks
def test_import_heading_case_mismatch(self):
yield self.pool.create_tables()
yield self.assert_unique_code_counts([])
content = '\n'.join([
'Unique_cOdE,fLavoUr',
'vanilla0,vanilla',
'vanilla1,vanilla',
'chocolate0,chocolate',
'chocolate1,chocolate',
])
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_unique_code_counts([
('vanilla', False, 2),
('chocolate', False, 2),
])
@inlineCallbacks
def test_import_no_content_md5(self):
yield self.pool.create_tables()
resp = yield self.client.put_import(
'req-0', 'content', '', expected_code=400)
assert resp == {
'request_id': 'req-0',
'error': 'Missing Content-MD5 header.',
}
@inlineCallbacks
def test_import_bad_content_md5(self):
yield self.pool.create_tables()
resp = yield self.client.put_import(
'req-0', 'content', 'badmd5', expected_code=400)
assert resp == {
'request_id': 'req-0',
'error': 'Content-MD5 header does not match content.',
}
@inlineCallbacks
def test_import_idempotent(self):
yield self.pool.create_tables()
yield self.assert_unique_code_counts([])
content = '\n'.join([
'unique_code,flavour',
'vanilla0,vanilla',
'vanilla1,vanilla',
'chocolate0,chocolate',
'chocolate1,chocolate',
])
expected_counts = [
('vanilla', False, 2),
('chocolate', False, 2),
]
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_unique_code_counts(expected_counts)
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_unique_code_counts(expected_counts)
content_2 = '\n'.join([
'unique_code,flavour',
'vanilla6,vanilla',
'vanilla7,vanilla',
'chocolate8,chocolate',
'chocolate9,chocolate',
])
resp = yield self.client.put_import(
'req-0', content_2, expected_code=400)
assert resp == {
'request_id': 'req-0',
'error': (
'This request has already been performed with different'
' parameters.'),
}
yield self.assert_unique_code_counts(expected_counts)
@inlineCallbacks
def test_unique_code_counts(self):
yield self.pool.create_tables()
rsp0 = yield self.client.get_unique_code_counts('req-0')
assert rsp0 == {
'request_id': 'req-0',
'unique_code_counts': [],
}
yield populate_pool(self.pool, ['vanilla'], [0, 1])
rsp1 = yield self.client.get_unique_code_counts('req-1')
assert rsp1 == {
'request_id': 'req-1',
'unique_code_counts': [
{
'flavour': 'vanilla',
'used': False,
'count': 2,
},
],
}
yield populate_pool(self.pool, ['chocolate'], [0, 1])
yield self.pool.redeem_unique_code(
'chocolate0', mk_audit_params('req-0'))
rsp2 = yield self.client.get_unique_code_counts('req-2')
assert sorted_dicts(rsp2['unique_code_counts']) == sorted_dicts([
{
'flavour': 'vanilla',
'used': False,
'count': 2,
},
{
'flavour': 'chocolate',
'used': False,
'count': 1,
},
{
'flavour': 'chocolate',
'used': True,
'count': 1,
},
])
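# --- Illustrative sketch (not part of the original tests) ---
# A hedged reminder of how the Content-MD5 header exercised above is derived:
# put_import() sends the hex digest of the raw CSV body, and the service
# rejects the upload when its own digest of the received bytes differs.
def _content_md5_example(body):
    from hashlib import md5
    return md5(body).hexdigest()
# _content_md5_example(b'unique_code,flavour\nvanilla0,vanilla\n')
# -> a 32-character hex string, as sent in the Content-MD5 header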
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import six
from neutron.agent import firewall
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.extensions import portsecurity as psec
from neutron.i18n import _LI
LOG = logging.getLogger(__name__)
SG_CHAIN = 'sg-chain'
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
SPOOF_FILTER = 'spoof-filter'
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
EGRESS_DIRECTION: 'o',
SPOOF_FILTER: 's'}
DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix',
'egress': 'dest_ip_prefix'}
IPSET_DIRECTION = {INGRESS_DIRECTION: 'src',
EGRESS_DIRECTION: 'dst'}
LINUX_DEV_LEN = 14
comment_rule = iptables_manager.comment_rule
class IptablesFirewallDriver(firewall.FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out',
EGRESS_DIRECTION: 'physdev-in'}
def __init__(self, namespace=None):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=ipv6_utils.is_enabled(),
namespace=namespace)
# TODO(majopela, shihanzhang): refactor out ipset to a separate
# driver composed over this one
self.ipset = ipset_manager.IpsetManager(namespace=namespace)
        # dicts of ports with and without security groups applied
self.filtered_ports = {}
self.unfiltered_ports = {}
self._add_fallback_chain_v4v6()
self._defer_apply = False
self._pre_defer_filtered_ports = None
self._pre_defer_unfiltered_ports = None
# List of security group rules for ports residing on this host
self.sg_rules = {}
self.pre_sg_rules = None
# List of security group member ips for ports residing on this host
self.sg_members = collections.defaultdict(
lambda: collections.defaultdict(list))
self.pre_sg_members = None
self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset
self._enabled_netfilter_for_bridges = False
def _enable_netfilter_for_bridges(self):
# we only need to set these values once, but it has to be when
# we create a bridge; before that the bridge module might not
# be loaded and the proc values aren't there.
if self._enabled_netfilter_for_bridges:
return
else:
self._enabled_netfilter_for_bridges = True
# These proc values ensure that netfilter is enabled on
        # bridges; essential for enforcing security group rules with
# OVS Hybrid. Distributions can differ on whether this is
# enabled by default or not (Ubuntu - yes, Redhat - no, for
# example).
LOG.debug("Enabling netfilter for bridges")
utils.execute(['sysctl', '-w',
'net.bridge.bridge-nf-call-arptables=1'],
run_as_root=True)
utils.execute(['sysctl', '-w',
'net.bridge.bridge-nf-call-ip6tables=1'],
run_as_root=True)
utils.execute(['sysctl', '-w',
'net.bridge.bridge-nf-call-iptables=1'],
run_as_root=True)
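        # For reference, the persistent equivalent of the sysctl calls
        # above (illustrative; the exact file location varies by distro):
        #   net.bridge.bridge-nf-call-arptables = 1
        #   net.bridge.bridge-nf-call-ip6tables = 1
        #   net.bridge.bridge-nf-call-iptables = 1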
@property
def ports(self):
return dict(self.filtered_ports, **self.unfiltered_ports)
def update_security_group_rules(self, sg_id, sg_rules):
LOG.debug("Update rules of security group (%s)", sg_id)
self.sg_rules[sg_id] = sg_rules
def update_security_group_members(self, sg_id, sg_members):
LOG.debug("Update members of security group (%s)", sg_id)
self.sg_members[sg_id] = collections.defaultdict(list, sg_members)
def _ps_enabled(self, port):
return port.get(psec.PORTSECURITY, True)
def _set_ports(self, port):
if not self._ps_enabled(port):
self.unfiltered_ports[port['device']] = port
self.filtered_ports.pop(port['device'], None)
else:
self.filtered_ports[port['device']] = port
self.unfiltered_ports.pop(port['device'], None)
def _unset_ports(self, port):
self.unfiltered_ports.pop(port['device'], None)
self.filtered_ports.pop(port['device'], None)
def prepare_port_filter(self, port):
LOG.debug("Preparing device (%s) filter", port['device'])
self._remove_chains()
self._set_ports(port)
self._enable_netfilter_for_bridges()
        # each security group has its own chains
self._setup_chains()
self.iptables.apply()
def update_port_filter(self, port):
LOG.debug("Updating device (%s) filter", port['device'])
if port['device'] not in self.ports:
LOG.info(_LI('Attempted to update port filter which is not '
'filtered %s'), port['device'])
return
self._remove_chains()
self._set_ports(port)
self._setup_chains()
self.iptables.apply()
def remove_port_filter(self, port):
LOG.debug("Removing device (%s) filter", port['device'])
if port['device'] not in self.ports:
LOG.info(_LI('Attempted to remove port filter which is not '
'filtered %r'), port)
return
self._remove_chains()
self._unset_ports(port)
self._setup_chains()
self.iptables.apply()
def _add_accept_rule_port_sec(self, port, direction):
self._update_port_sec_rules(port, direction, add=True)
def _remove_rule_port_sec(self, port, direction):
self._update_port_sec_rules(port, direction, add=False)
def _remove_rule_from_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].remove_rule(chain_name, rule)
for rule in ipv6_rules:
self.iptables.ipv6['filter'].remove_rule(chain_name, rule)
def _setup_chains(self):
"""Setup ingress and egress chain for a port."""
if not self._defer_apply:
self._setup_chains_apply(self.filtered_ports,
self.unfiltered_ports)
def _setup_chains_apply(self, ports, unfiltered_ports):
self._add_chain_by_name_v4v6(SG_CHAIN)
for port in ports.values():
self._setup_chain(port, INGRESS_DIRECTION)
self._setup_chain(port, EGRESS_DIRECTION)
self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
for port in unfiltered_ports.values():
self._add_accept_rule_port_sec(port, INGRESS_DIRECTION)
self._add_accept_rule_port_sec(port, EGRESS_DIRECTION)
def _remove_chains(self):
"""Remove ingress and egress chain for a port."""
if not self._defer_apply:
self._remove_chains_apply(self.filtered_ports,
self.unfiltered_ports)
def _remove_chains_apply(self, ports, unfiltered_ports):
for port in ports.values():
self._remove_chain(port, INGRESS_DIRECTION)
self._remove_chain(port, EGRESS_DIRECTION)
self._remove_chain(port, SPOOF_FILTER)
for port in unfiltered_ports.values():
self._remove_rule_port_sec(port, INGRESS_DIRECTION)
self._remove_rule_port_sec(port, EGRESS_DIRECTION)
self._remove_chain_by_name_v4v6(SG_CHAIN)
def _setup_chain(self, port, DIRECTION):
self._add_chain(port, DIRECTION)
self._add_rules_by_security_group(port, DIRECTION)
def _remove_chain(self, port, DIRECTION):
chain_name = self._port_chain_name(port, DIRECTION)
self._remove_chain_by_name_v4v6(chain_name)
def _add_fallback_chain_v4v6(self):
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP)
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP)
def _add_raw_chain(self, chain_name):
self.iptables.ipv4['raw'].add_chain(chain_name)
self.iptables.ipv6['raw'].add_chain(chain_name)
def _add_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv4['filter'].add_chain(chain_name)
self.iptables.ipv6['filter'].add_chain(chain_name)
def _remove_raw_chain(self, chain_name):
self.iptables.ipv4['raw'].remove_chain(chain_name)
self.iptables.ipv6['raw'].remove_chain(chain_name)
def _remove_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv4['filter'].remove_chain(chain_name)
self.iptables.ipv6['filter'].remove_chain(chain_name)
def _add_rules_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules,
comment=None):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule,
comment=comment)
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule,
comment=comment)
def _get_device_name(self, port):
return port['device']
def _update_port_sec_rules(self, port, direction, add=False):
# add/remove rules in FORWARD and INPUT chain
device = self._get_device_name(port)
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j ACCEPT' % (self.IPTABLES_DIRECTION[direction],
device)]
if add:
self._add_rules_to_chain_v4v6(
'FORWARD', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT)
else:
self._remove_rule_from_chain_v4v6('FORWARD', jump_rule, jump_rule)
if direction == EGRESS_DIRECTION:
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j ACCEPT' % (self.IPTABLES_DIRECTION[direction],
device)]
if add:
self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule,
comment=ic.PORT_SEC_ACCEPT)
else:
self._remove_rule_from_chain_v4v6(
'INPUT', jump_rule, jump_rule)
def _add_chain(self, port, direction):
chain_name = self._port_chain_name(port, direction)
self._add_chain_by_name_v4v6(chain_name)
# Note(nati) jump to the security group chain (SG_CHAIN)
        # This is needed because a packet may match rules for two ports
        # if both ports are on the same host.
# We accept the packet at the end of SG_CHAIN.
# jump to the security group chain
device = self._get_device_name(port)
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
SG_CHAIN)]
self._add_rules_to_chain_v4v6('FORWARD', jump_rule, jump_rule,
comment=ic.VM_INT_SG)
# jump to the chain based on the device
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
chain_name)]
self._add_rules_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule,
comment=ic.SG_TO_VM_SG)
if direction == EGRESS_DIRECTION:
self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule,
comment=ic.INPUT_TO_SG)
def _split_sgr_by_ethertype(self, security_group_rules):
ipv4_sg_rules = []
ipv6_sg_rules = []
for rule in security_group_rules:
if rule.get('ethertype') == constants.IPv4:
ipv4_sg_rules.append(rule)
elif rule.get('ethertype') == constants.IPv6:
if rule.get('protocol') == 'icmp':
rule['protocol'] = 'icmpv6'
ipv6_sg_rules.append(rule)
return ipv4_sg_rules, ipv6_sg_rules
def _select_sgr_by_direction(self, port, direction):
return [rule
for rule in port.get('security_group_rules', [])
if rule['direction'] == direction]
def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
if mac_ip_pairs:
chain_name = self._port_chain_name(port, SPOOF_FILTER)
table.add_chain(chain_name)
for mac, ip in mac_ip_pairs:
if ip is None:
# If fixed_ips is [] this rule will be added to the end
# of the list after the allowed_address_pair rules.
table.add_rule(chain_name,
'-m mac --mac-source %s -j RETURN'
% mac.upper(), comment=ic.PAIR_ALLOW)
else:
table.add_rule(chain_name,
'-s %s -m mac --mac-source %s -j RETURN'
% (ip, mac.upper()), comment=ic.PAIR_ALLOW)
table.add_rule(chain_name, '-j DROP', comment=ic.PAIR_DROP)
rules.append('-j $%s' % chain_name)
def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
mac_ipv6_pairs):
mac = str(netaddr.EUI(mac, dialect=netaddr.mac_unix))
if netaddr.IPNetwork(ip_address).version == 4:
mac_ipv4_pairs.append((mac, ip_address))
else:
mac_ipv6_pairs.append((mac, ip_address))
def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
# Allow dhcp client packets
ipv4_rules += [comment_rule('-p udp -m udp --sport 68 --dport 67 '
'-j RETURN', comment=ic.DHCP_CLIENT)]
# Drop Router Advts from the port.
ipv6_rules += [comment_rule('-p icmpv6 --icmpv6-type %s '
'-j DROP' % constants.ICMPV6_TYPE_RA,
comment=ic.IPV6_RA_DROP)]
ipv6_rules += [comment_rule('-p icmpv6 -j RETURN',
comment=ic.IPV6_ICMP_ALLOW)]
ipv6_rules += [comment_rule('-p udp -m udp --sport 546 --dport 547 '
'-j RETURN', comment=None)]
mac_ipv4_pairs = []
mac_ipv6_pairs = []
if isinstance(port.get('allowed_address_pairs'), list):
for address_pair in port['allowed_address_pairs']:
self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
address_pair['ip_address'],
mac_ipv4_pairs,
mac_ipv6_pairs)
for ip in port['fixed_ips']:
self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
mac_ipv4_pairs, mac_ipv6_pairs)
if not port['fixed_ips']:
mac_ipv4_pairs.append((port['mac_address'], None))
mac_ipv6_pairs.append((port['mac_address'], None))
self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
mac_ipv4_pairs, ipv4_rules)
self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
mac_ipv6_pairs, ipv6_rules)
def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
        # Note(nati) Drop DHCP packets sent from the VM
ipv4_rules += [comment_rule('-p udp -m udp --sport 67 --dport 68 '
'-j DROP', comment=ic.DHCP_SPOOF)]
ipv6_rules += [comment_rule('-p udp -m udp --sport 547 --dport 546 '
'-j DROP', comment=None)]
def _accept_inbound_icmpv6(self):
# Allow multicast listener, neighbor solicitation and
# neighbor advertisement into the instance
icmpv6_rules = []
for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' %
icmp6_type]
return icmpv6_rules
def _select_sg_rules_for_port(self, port, direction):
"""Select rules from the security groups the port is member of."""
port_sg_ids = port.get('security_groups', [])
port_rules = []
for sg_id in port_sg_ids:
for rule in self.sg_rules.get(sg_id, []):
if rule['direction'] == direction:
if self.enable_ipset:
port_rules.append(rule)
else:
port_rules.extend(
self._expand_sg_rule_with_remote_ips(
rule, port, direction))
return port_rules
def _expand_sg_rule_with_remote_ips(self, rule, port, direction):
"""Expand a remote group rule to rule per remote group IP."""
remote_group_id = rule.get('remote_group_id')
if remote_group_id:
ethertype = rule['ethertype']
port_ips = port.get('fixed_ips', [])
for ip in self.sg_members[remote_group_id][ethertype]:
if ip not in port_ips:
ip_rule = rule.copy()
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
ip_prefix = str(netaddr.IPNetwork(ip).cidr)
ip_rule[direction_ip_prefix] = ip_prefix
yield ip_rule
else:
yield rule
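    # Illustration of the expansion above (hypothetical data): with
    # sg_members = {'sg-1': {'IPv4': ['10.0.0.2', '10.0.0.3']}}, a rule
    # {'remote_group_id': 'sg-1', 'ethertype': 'IPv4', ...} is yielded
    # once per member IP, each copy carrying source_ip_prefix (ingress)
    # or dest_ip_prefix (egress) set to '10.0.0.2/32' or '10.0.0.3/32',
    # skipping any IP that belongs to the port itself.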
def _get_remote_sg_ids(self, port, direction=None):
sg_ids = port.get('security_groups', [])
remote_sg_ids = {constants.IPv4: [], constants.IPv6: []}
for sg_id in sg_ids:
for rule in self.sg_rules.get(sg_id, []):
if not direction or rule['direction'] == direction:
remote_sg_id = rule.get('remote_group_id')
ether_type = rule.get('ethertype')
if remote_sg_id and ether_type:
remote_sg_ids[ether_type].append(remote_sg_id)
return remote_sg_ids
def _add_rules_by_security_group(self, port, direction):
# select rules for current port and direction
security_group_rules = self._select_sgr_by_direction(port, direction)
security_group_rules += self._select_sg_rules_for_port(port, direction)
# make sure ipset members are updated for remote security groups
if self.enable_ipset:
remote_sg_ids = self._get_remote_sg_ids(port, direction)
self._update_ipset_members(remote_sg_ids)
# split groups by ip version
# for ipv4, iptables command is used
# for ipv6, iptables6 command is used
ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
security_group_rules)
ipv4_iptables_rules = []
ipv6_iptables_rules = []
# include fixed egress/ingress rules
if direction == EGRESS_DIRECTION:
self._add_fixed_egress_rules(port,
ipv4_iptables_rules,
ipv6_iptables_rules)
elif direction == INGRESS_DIRECTION:
ipv6_iptables_rules += self._accept_inbound_icmpv6()
# include IPv4 and IPv6 iptable rules from security group
ipv4_iptables_rules += self._convert_sgr_to_iptables_rules(
ipv4_sg_rules)
ipv6_iptables_rules += self._convert_sgr_to_iptables_rules(
ipv6_sg_rules)
# finally add the rules to the port chain for a given direction
self._add_rules_to_chain_v4v6(self._port_chain_name(port, direction),
ipv4_iptables_rules,
ipv6_iptables_rules)
def _add_fixed_egress_rules(self, port, ipv4_iptables_rules,
ipv6_iptables_rules):
self._spoofing_rule(port,
ipv4_iptables_rules,
ipv6_iptables_rules)
self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules)
def _update_ipset_members(self, security_group_ids):
for ip_version, sg_ids in security_group_ids.items():
for sg_id in sg_ids:
current_ips = self.sg_members[sg_id][ip_version]
self.ipset.set_members(sg_id, ip_version, current_ips)
def _generate_ipset_rule_args(self, sg_rule, remote_gid):
ethertype = sg_rule.get('ethertype')
ipset_name = self.ipset.get_name(remote_gid, ethertype)
if not self.ipset.set_exists(remote_gid, ethertype):
#NOTE(mangelajo): ipsets for empty groups are not created
# thus we can't reference them.
return None
ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')]
args = self._generate_protocol_and_port_args(sg_rule)
args += ['-m set', '--match-set', ipset_name, ipset_direction]
args += ['-j RETURN']
return args
def _generate_protocol_and_port_args(self, sg_rule):
args = self._protocol_arg(sg_rule.get('protocol'))
args += self._port_arg('sport',
sg_rule.get('protocol'),
sg_rule.get('source_port_range_min'),
sg_rule.get('source_port_range_max'))
args += self._port_arg('dport',
sg_rule.get('protocol'),
sg_rule.get('port_range_min'),
sg_rule.get('port_range_max'))
return args
def _generate_plain_rule_args(self, sg_rule):
# These arguments MUST be in the format iptables-save will
# display them: source/dest, protocol, sport, dport, target
# Otherwise the iptables_manager code won't be able to find
# them to preserve their [packet:byte] counts.
args = self._ip_prefix_arg('s', sg_rule.get('source_ip_prefix'))
args += self._ip_prefix_arg('d', sg_rule.get('dest_ip_prefix'))
args += self._generate_protocol_and_port_args(sg_rule)
args += ['-j RETURN']
return args
def _convert_sg_rule_to_iptables_args(self, sg_rule):
remote_gid = sg_rule.get('remote_group_id')
if self.enable_ipset and remote_gid:
return self._generate_ipset_rule_args(sg_rule, remote_gid)
else:
return self._generate_plain_rule_args(sg_rule)
def _convert_sgr_to_iptables_rules(self, security_group_rules):
iptables_rules = []
self._drop_invalid_packets(iptables_rules)
self._allow_established(iptables_rules)
for rule in security_group_rules:
args = self._convert_sg_rule_to_iptables_args(rule)
if args:
iptables_rules += [' '.join(args)]
iptables_rules += [comment_rule('-j $sg-fallback',
comment=ic.UNMATCHED)]
return iptables_rules
def _drop_invalid_packets(self, iptables_rules):
# Always drop invalid packets
        iptables_rules += [comment_rule('-m state --state INVALID -j DROP',
                                        comment=ic.INVALID_DROP)]
return iptables_rules
def _allow_established(self, iptables_rules):
# Allow established connections
iptables_rules += [comment_rule(
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=ic.ALLOW_ASSOC)]
return iptables_rules
def _protocol_arg(self, protocol):
if not protocol:
return []
iptables_rule = ['-p', protocol]
# iptables always adds '-m protocol' for udp and tcp
if protocol in ['udp', 'tcp']:
iptables_rule += ['-m', protocol]
return iptables_rule
def _port_arg(self, direction, protocol, port_range_min, port_range_max):
if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6']
or not port_range_min):
return []
if protocol in ['icmp', 'icmpv6']:
# Note(xuhanp): port_range_min/port_range_max represent
# icmp type/code when protocol is icmp or icmpv6
# icmp code can be 0 so we cannot use "if port_range_max" here
if port_range_max is not None:
return ['--%s-type' % protocol,
'%s/%s' % (port_range_min, port_range_max)]
return ['--%s-type' % protocol, '%s' % port_range_min]
elif port_range_min == port_range_max:
return ['--%s' % direction, '%s' % (port_range_min,)]
else:
return ['-m', 'multiport',
'--%ss' % direction,
'%s:%s' % (port_range_min, port_range_max)]
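    # Illustrative results of _port_arg given the logic above (self
    # omitted; example values only):
    #   _port_arg('dport', 'tcp', 22, 22)     -> ['--dport', '22']
    #   _port_arg('dport', 'tcp', 8000, 9000) -> ['-m', 'multiport',
    #                                              '--dports', '8000:9000']
    #   _port_arg('dport', 'icmp', 8, 0)      -> ['--icmp-type', '8/0']
    #   _port_arg('dport', 'icmp', 8, None)   -> ['--icmp-type', '8']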
def _ip_prefix_arg(self, direction, ip_prefix):
        # NOTE(nati): source_group_id is converted to a list of
        # source_ip_prefix values on the server side
if ip_prefix:
return ['-%s' % direction, ip_prefix]
return []
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))
def filter_defer_apply_on(self):
if not self._defer_apply:
self.iptables.defer_apply_on()
self._pre_defer_filtered_ports = dict(self.filtered_ports)
self._pre_defer_unfiltered_ports = dict(self.unfiltered_ports)
self.pre_sg_members = dict(self.sg_members)
self.pre_sg_rules = dict(self.sg_rules)
self._defer_apply = True
def _remove_unused_security_group_info(self):
"""Remove any unnecessary local security group info or unused ipsets.
This function has to be called after applying the last iptables
        rules, so that we are at a point where no iptables rule depends
on an ipset we're going to delete.
"""
filtered_ports = self.filtered_ports.values()
remote_sgs_to_remove = self._determine_remote_sgs_to_remove(
filtered_ports)
for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove):
self._clear_sg_members(ip_version, remote_sg_ids)
if self.enable_ipset:
self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids)
self._remove_unused_sg_members()
# Remove unused security group rules
for remove_group_id in self._determine_sg_rules_to_remove(
filtered_ports):
self.sg_rules.pop(remove_group_id, None)
def _determine_remote_sgs_to_remove(self, filtered_ports):
"""Calculate which remote security groups we don't need anymore.
We do the calculation for each ip_version.
"""
sgs_to_remove_per_ipversion = {constants.IPv4: set(),
constants.IPv6: set()}
remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion(
filtered_ports)
for ip_version, remote_group_id_set in (
six.iteritems(remote_group_id_sets)):
sgs_to_remove_per_ipversion[ip_version].update(
set(self.pre_sg_members) - remote_group_id_set)
return sgs_to_remove_per_ipversion
def _get_remote_sg_ids_sets_by_ipversion(self, filtered_ports):
"""Given a port, calculates the remote sg references by ip_version."""
remote_group_id_sets = {constants.IPv4: set(),
constants.IPv6: set()}
for port in filtered_ports:
remote_sg_ids = self._get_remote_sg_ids(port)
for ip_version, sg_ids in six.iteritems(remote_sg_ids):
remote_group_id_sets[ip_version].update(sg_ids)
return remote_group_id_sets
def _determine_sg_rules_to_remove(self, filtered_ports):
"""Calculate which security groups need to be removed.
        We find out by taking our previous sg rule group ids and
        removing those still associated with the given set of ports.
"""
port_group_ids = self._get_sg_ids_set_for_ports(filtered_ports)
return set(self.pre_sg_rules) - port_group_ids
def _get_sg_ids_set_for_ports(self, filtered_ports):
"""Get the port security group ids as a set."""
port_group_ids = set()
for port in filtered_ports:
port_group_ids.update(port.get('security_groups', []))
return port_group_ids
def _clear_sg_members(self, ip_version, remote_sg_ids):
"""Clear our internal cache of sg members matching the parameters."""
for remote_sg_id in remote_sg_ids:
if self.sg_members[remote_sg_id][ip_version]:
self.sg_members[remote_sg_id][ip_version] = []
def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids):
"""Remove system ipsets matching the provided parameters."""
for remote_sg_id in remote_sg_ids:
self.ipset.destroy(remote_sg_id, ip_version)
def _remove_unused_sg_members(self):
"""Remove sg_member entries where no IPv4 or IPv6 is associated."""
for sg_id in list(self.sg_members.keys()):
sg_has_members = (self.sg_members[sg_id][constants.IPv4] or
self.sg_members[sg_id][constants.IPv6])
if not sg_has_members:
del self.sg_members[sg_id]
def filter_defer_apply_off(self):
if self._defer_apply:
self._defer_apply = False
self._remove_chains_apply(self._pre_defer_filtered_ports,
self._pre_defer_unfiltered_ports)
self._setup_chains_apply(self.filtered_ports,
self.unfiltered_ports)
self.iptables.defer_apply_off()
self._remove_unused_security_group_info()
self._pre_defer_filtered_ports = None
self._pre_defer_unfiltered_ports = None
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))
def _get_device_name(self, port):
return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]
def _get_br_device_name(self, port):
return ('qvb' + port['device'])[:LINUX_DEV_LEN]
def _get_jump_rule(self, port, direction):
if direction == INGRESS_DIRECTION:
device = self._get_br_device_name(port)
else:
device = self._get_device_name(port)
jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % (
device, port['zone_id'])
return jump_rule
def _add_raw_chain_rules(self, port, direction):
if port['zone_id']:
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)
def _remove_raw_chain_rules(self, port, direction):
if port['zone_id']:
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)
def _add_chain(self, port, direction):
super(OVSHybridIptablesFirewallDriver, self)._add_chain(port,
direction)
if direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
self._add_raw_chain_rules(port, direction)
def _remove_chain(self, port, direction):
super(OVSHybridIptablesFirewallDriver, self)._remove_chain(port,
direction)
if direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
self._remove_raw_chain_rules(port, direction)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import logging
from common import modelzoo
import mxnet as mx
from mxnet.contrib.quantization import *
def download_calib_dataset(dataset_url, calib_dataset, logger=None):
if logger is not None:
logger.info('Downloading calibration dataset from %s to %s' % (dataset_url, calib_dataset))
mx.test_utils.download(dataset_url, calib_dataset)
def download_model(model_name, logger=None):
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(dir_path, 'model')
if logger is not None:
logger.info('Downloading model %s... into path %s' % (model_name, model_path))
    return modelzoo.download_model(model_name, model_path)
def save_symbol(fname, sym, logger=None):
if logger is not None:
logger.info('Saving symbol into file at %s' % fname)
sym.save(fname)
def save_params(fname, arg_params, aux_params, logger=None):
if logger is not None:
logger.info('Saving params into file at %s' % fname)
save_dict = {('arg:%s' % k): v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k): v.as_in_context(cpu()) for k, v in aux_params.items()})
mx.nd.save(fname, save_dict)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a calibrated quantized model from a FP32 model')
parser.add_argument('--ctx', type=str, default='gpu')
parser.add_argument('--model', type=str, choices=['imagenet1k-resnet-152', 'imagenet1k-inception-bn'],
help='currently only supports imagenet1k-resnet-152 or imagenet1k-inception-bn')
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--label-name', type=str, default='softmax_label')
parser.add_argument('--calib-dataset', type=str, default='data/val_256_q90.rec',
help='path of the calibration dataset')
parser.add_argument('--image-shape', type=str, default='3,224,224')
parser.add_argument('--data-nthreads', type=int, default=60,
help='number of threads for data decoding')
parser.add_argument('--num-calib-batches', type=int, default=10,
help='number of batches for calibration')
parser.add_argument('--exclude-first-conv', action='store_true', default=True,
                        help='exclude the first conv layer from quantization since the'
' number of channels is usually not a multiple of 4 in that layer'
' which does not satisfy the requirement of cuDNN')
parser.add_argument('--shuffle-dataset', action='store_true', default=True,
help='shuffle the calibration dataset')
parser.add_argument('--shuffle-chunk-seed', type=int, default=3982304,
help='shuffling chunk seed, see'
' https://mxnet.incubator.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'
' for more details')
parser.add_argument('--shuffle-seed', type=int, default=48564309,
help='shuffling seed, see'
' https://mxnet.incubator.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'
' for more details')
parser.add_argument('--calib-mode', type=str, default='entropy',
help='calibration mode used for generating calibration table for the quantized symbol; supports'
' 1. none: no calibration will be used. The thresholds for quantization will be calculated'
' on the fly. This will result in inference speed slowdown and loss of accuracy'
' in general.'
' 2. naive: simply take min and max values of layer outputs as thresholds for'
' quantization. In general, the inference accuracy worsens with more examples used in'
' calibration. It is recommended to use `entropy` mode as it produces more accurate'
' inference results.'
' 3. entropy: calculate KL divergence of the fp32 output and quantized output for optimal'
' thresholds. This mode is expected to produce the best inference accuracy of all three'
' kinds of quantized models if the calibration dataset is representative enough of the'
' inference dataset.')
parser.add_argument('--quantized-dtype', type=str, default='int8',
choices=['int8', 'uint8'],
help='quantization destination data type for input data')
args = parser.parse_args()
if args.ctx == 'gpu':
ctx = mx.gpu(0)
elif args.ctx == 'cpu':
ctx = mx.cpu(0)
else:
raise ValueError('ctx %s is not supported in this script' % args.ctx)
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
logger.info('shuffle_dataset=%s' % args.shuffle_dataset)
calib_mode = args.calib_mode
logger.info('calibration mode set to %s' % calib_mode)
# download calibration dataset
if calib_mode != 'none':
download_calib_dataset('http://data.mxnet.io/data/val_256_q90.rec', args.calib_dataset)
# download model
prefix, epoch = download_model(model_name=args.model, logger=logger)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# get batch size
batch_size = args.batch_size
logger.info('batch size = %d for calibration' % batch_size)
# get number of batches for calibration
num_calib_batches = args.num_calib_batches
if calib_mode != 'none':
logger.info('number of batches = %d for calibration' % num_calib_batches)
# get number of threads for decoding the dataset
data_nthreads = args.data_nthreads
# get image shape
image_shape = args.image_shape
exclude_first_conv = args.exclude_first_conv
excluded_sym_names = []
if args.model == 'imagenet1k-resnet-152':
rgb_mean = '0,0,0'
if args.ctx == 'gpu':
calib_layer = lambda name: name.endswith('_output') and (name.find('conv') != -1
or name.find('sc') != -1
or name.find('fc') != -1)
else:
calib_layer = lambda name: name.endswith('_output') and (name.find('conv') != -1
or name.find('sc') != -1)
excluded_sym_names += ['flatten0', 'fc1']
if exclude_first_conv:
excluded_sym_names += ['conv0']
elif args.model == 'imagenet1k-inception-bn':
rgb_mean = '123.68,116.779,103.939'
if args.ctx == 'gpu':
calib_layer = lambda name: name.endswith('_output') and (name.find('conv') != -1
or name.find('fc') != -1)
else:
calib_layer = lambda name: name.endswith('_output') and (name.find('conv') != -1)
excluded_sym_names += ['flatten', 'fc1']
if exclude_first_conv:
excluded_sym_names += ['conv_1']
else:
raise ValueError('model %s is not supported in this script' % args.model)
label_name = args.label_name
logger.info('label_name = %s' % label_name)
data_shape = tuple([int(i) for i in image_shape.split(',')])
logger.info('Input data shape = %s' % str(data_shape))
logger.info('rgb_mean = %s' % rgb_mean)
rgb_mean = [float(i) for i in rgb_mean.split(',')]
mean_args = {'mean_r': rgb_mean[0], 'mean_g': rgb_mean[1], 'mean_b': rgb_mean[2]}
if calib_mode == 'none':
logger.info('Quantizing FP32 model %s' % args.model)
qsym, qarg_params, aux_params = quantize_model(sym=sym, arg_params=arg_params, aux_params=aux_params,
ctx=ctx, excluded_sym_names=excluded_sym_names,
calib_mode=calib_mode, quantized_dtype=args.quantized_dtype,
logger=logger)
sym_name = '%s-symbol.json' % (prefix + '-quantized')
save_symbol(sym_name, qsym, logger)
else:
logger.info('Creating ImageRecordIter for reading calibration dataset')
data = mx.io.ImageRecordIter(path_imgrec=args.calib_dataset,
label_width=1,
preprocess_threads=data_nthreads,
batch_size=batch_size,
data_shape=data_shape,
label_name=label_name,
rand_crop=False,
rand_mirror=False,
shuffle=args.shuffle_dataset,
shuffle_chunk_seed=args.shuffle_chunk_seed,
seed=args.shuffle_seed,
**mean_args)
cqsym, qarg_params, aux_params = quantize_model(sym=sym, arg_params=arg_params, aux_params=aux_params,
ctx=ctx, excluded_sym_names=excluded_sym_names,
calib_mode=calib_mode, calib_data=data,
num_calib_examples=num_calib_batches * batch_size,
calib_layer=calib_layer, quantized_dtype=args.quantized_dtype,
logger=logger)
if calib_mode == 'entropy':
suffix = '-quantized-%dbatches-entropy' % num_calib_batches
elif calib_mode == 'naive':
suffix = '-quantized-%dbatches-naive' % num_calib_batches
else:
            raise ValueError('unknown calibration mode %s received, only supports `none`, `naive`, and `entropy`'
                             % calib_mode)
sym_name = '%s-symbol.json' % (prefix + suffix)
save_symbol(sym_name, cqsym, logger)
param_name = '%s-%04d.params' % (prefix + '-quantized', epoch)
save_params(param_name, qarg_params, aux_params, logger)
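    # A minimal sketch of the 'naive' calibration mode described in the
    # --calib-mode help above (illustrative only; the actual thresholds
    # are computed inside mxnet.contrib.quantization, and np here is
    # assumed to be an imported numpy):
    #
    #     def naive_threshold(layer_output):
    #         # the largest absolute activation value becomes the
    #         # symmetric quantization threshold for the layer
    #         return float(np.max(np.abs(layer_output.asnumpy())))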
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Scenario Model
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ScenarioModel",
"S3ScenarioAssetModel",
"S3ScenarioHRModel",
"S3ScenarioMapModel",
"S3ScenarioOrganisationModel",
"S3ScenarioSiteModel",
"S3ScenarioTaskModel",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3ScenarioModel(S3Model):
"""
Scenario Model
http://eden.sahanafoundation.org/wiki/BluePrintScenario
Link tables are in separate classes to increase performance & allow
the system to be more modular
"""
names = ["scenario_scenario",
"scenario_scenario_id",
]
def model(self):
T = current.T
db = current.db
add_components = self.add_components
# ---------------------------------------------------------------------
# Scenarios
#
# Scenarios are Templates for Incidents to plan what resources are required
#
tablename = "scenario_scenario"
self.define_table(tablename,
self.event_incident_type_id(),
Field("name", notnull=True,
                                length=64, # Mayon compatibility
label=T("Name")),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
# Open Map Config to set the default Location
create_next=URL(args=["[id]", "config"]),
deduplicate=self.scenario_duplicate,
)
# CRUD strings
ADD_SCENARIO = T("New Scenario")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_SCENARIO,
title_display = T("Scenario Details"),
title_list = T("Scenarios"),
title_update = T("Edit Scenario"),
title_upload = T("Import Scenarios"),
label_list_button = T("List Scenarios"),
label_delete_button = T("Delete Scenario"),
msg_record_created = T("Scenario added"),
msg_record_modified = T("Scenario updated"),
msg_record_deleted = T("Scenario deleted"),
msg_list_empty = T("No Scenarios currently registered"))
# Components
add_components(tablename,
# Tasks
project_task={"link": "scenario_task",
"joinby": "scenario_id",
"key": "task_id",
# @ToDo: Widget to handle embedded LocationSelector
#"actuate": "embed",
"actuate": "link",
"autocomplete": "name",
"autodelete": False,
},
# Human Resources
hrm_human_resource={"link": "scenario_human_resource",
"joinby": "scenario_id",
"key": "human_resource_id",
# @ToDo: Widget to handle embedded AddPersonWidget
#"actuate": "embed",
"actuate": "link",
"autocomplete": "name",
"autodelete": False,
},
# Assets
asset_asset={"link": "scenario_asset",
"joinby": "scenario_id",
"key": "asset_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Facilities
scenario_site="scenario_id",
# Organisations
org_organisation={"link": "scenario_organisation",
"joinby": "scenario_id",
"key": "organisation_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Map Config as a component of Scenarios
gis_config={"link": "scenario_config",
"joinby": "scenario_id",
"multiple": False,
"key": "config_id",
"actuate": "replace",
"autocomplete": "name",
"autodelete": True,
},
)
scenario_id = S3ReusableField("scenario_id", "reference %s" % tablename,
sortby="name",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "scenario_scenario.id",
self.scenario_represent,
orderby="scenario_scenario.name",
sort=True)),
represent = self.scenario_represent,
label = T("Scenario"),
ondelete = "SET NULL",
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3AutocompleteWidget()
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Scenario"),
# current.messages.AUTOCOMPLETE_HELP))
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage(
scenario_scenario_id = scenario_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
return Storage(
scenario_scenario_id = S3ReusableField("scenario_id",
"integer",
readable=False,
writable=False),
)
# ---------------------------------------------------------------------
@staticmethod
def scenario_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages["NONE"]
db = current.db
table = db.scenario_scenario
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
@staticmethod
def scenario_duplicate(item):
"""
Deduplication of Scenarios
"""
if item.tablename != "scenario_scenario":
return
data = item.data
name = data.get("name", None)
table = item.table
query = (table.name == name)
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
item.id = _duplicate.id
item.data.id = _duplicate.id
item.method = item.METHOD.UPDATE
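    # Illustration (hypothetical import): importing a scenario whose name
    # matches an existing scenario_scenario record resolves to an UPDATE
    # of that record rather than the creation of a duplicate row.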
# =============================================================================
class S3ScenarioAssetModel(S3Model):
"""
Link Assets to Scenarios
"""
names = ["scenario_asset"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Assets
# @ToDo: Use generic Supply Items not Asset instances? (Typed resources)
# Depends on the scale of the scenario!
# So support both...
# @ToDo: Search Widget
tablename = "scenario_asset"
self.define_table(tablename,
self.scenario_scenario_id(),
self.asset_asset_id(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Asset"),
title_display = T("Asset Details"),
title_list = T("Assets"),
title_update = T("Edit Asset"),
label_list_button = T("List Assets"),
label_delete_button = T("Remove Asset from this scenario"),
msg_record_created = T("Asset added"),
msg_record_modified = T("Asset updated"),
msg_record_deleted = T("Asset removed"),
msg_list_empty = T("No assets currently registered in this scenario"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# =============================================================================
class S3ScenarioHRModel(S3Model):
"""
Link Human Resources (Staff/Volunteers) to Scenarios
"""
names = ["scenario_human_resource"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Staff/Volunteers
# @ToDo: Use Positions, not individual HRs (Typed resources?)
# @ToDo: Search Widget
tablename = "scenario_human_resource"
self.define_table(tablename,
self.scenario_scenario_id(),
self.hrm_human_resource_id(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Human Resource"),
title_display = T("Human Resource Details"),
title_list = T("Human Resources"),
title_update = T("Edit Human Resource"),
label_list_button = T("List Human Resources"),
label_delete_button = T("Remove Human Resource from this scenario"),
msg_record_created = T("Human Resource added"),
msg_record_modified = T("Human Resource updated"),
msg_record_deleted = T("Human Resource removed"),
msg_list_empty = T("No Human Resources currently registered in this scenario"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# =============================================================================
class S3ScenarioMapModel(S3Model):
"""
Link Map Configs to Scenarios
"""
names = ["scenario_config"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Link Table for Map Config used in this Scenario
# @ToDo: Widget suitable for a 1-1 relationship where we can assume
# that the Config is pre-created
tablename = "scenario_config"
self.define_table(tablename,
self.scenario_scenario_id(),
self.gis_config_id(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Map Configuration"),
title_display = T("Map Configuration Details"),
title_list = T("Map Configurations"),
title_update = T("Edit Map Configuration"),
label_list_button = T("List Map Configurations"),
label_delete_button = T("Remove Map Configuration from this scenario"),
msg_record_created = T("Map Configuration added"),
msg_record_modified = T("Map Configuration updated"),
msg_record_deleted = T("Map Configuration removed"),
msg_list_empty = T("No Map Configurations currently registered in this scenario"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# =============================================================================
class S3ScenarioOrganisationModel(S3Model):
"""
Link Organisations to Scenarios
- people to keep informed
- people to mobilise
"""
names = ["scenario_organisation"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Organisations
# @ToDo: Search Widget
tablename = "scenario_organisation"
self.define_table(tablename,
self.scenario_scenario_id(),
self.org_organisation_id(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Organization"),
title_display = T("Organization Details"),
title_list = T("Organizations"),
title_update = T("Edit Organization"),
label_list_button = T("List Organizations"),
label_delete_button = T("Remove Organization from this scenario"),
msg_record_created = T("Organization added"),
msg_record_modified = T("Organization updated"),
msg_record_deleted = T("Organization removed"),
msg_list_empty = T("No organizations currently registered in this scenario"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# =============================================================================
class S3ScenarioSiteModel(S3Model):
"""
Link Sites (Facilities) to Scenarios
"""
names = ["scenario_site"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Facilities
# @ToDo: Search Widget
tablename = "scenario_site"
self.define_table(tablename,
self.scenario_scenario_id(),
self.org_site_id,
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Facility"),
title_display = T("Facility Details"),
title_list = T("Facilities"),
title_update = T("Edit Facility"),
label_list_button = T("List Facilities"),
label_delete_button = T("Remove Facility from this scenario"),
msg_record_created = T("Facility added"),
msg_record_modified = T("Facility updated"),
msg_record_deleted = T("Facility removed"),
msg_list_empty = T("No facilities currently registered in this scenario"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# =============================================================================
class S3ScenarioTaskModel(S3Model):
"""
Link Tasks to Scenarios
@ToDo: Task Templates (like CAP Templates)
"""
names = ["scenario_task"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Tasks
# Standing Tasks required for this Scenario
# @ToDo: Search Widget
tablename = "scenario_task"
self.define_table(tablename,
self.scenario_scenario_id(),
self.project_task_id(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Task"),
title_display = T("Task Details"),
title_list = T("Tasks"),
title_update = T("Edit Task"),
label_list_button = T("List Tasks"),
label_delete_button = T("Remove Task from this scenario"),
msg_record_created = T("Task added"),
msg_record_modified = T("Task updated"),
msg_record_deleted = T("Task removed"),
msg_list_empty = T("No tasks currently registered in this scenario"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# END =========================================================================
|
|
import csv
from IPython.display import Image
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn import cross_validation
from sklearn import ensemble
from sklearn import metrics
import seaborn as sns
import re
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
import yaml
#from treeinterpreter import treeinterpreter as ti
import model_pipeline_script
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-tf','--training_table',help='Contract data file')
parser.add_argument('-pf','--prediction_table',help='File for prediction',default='')
parser.add_argument('-ac','--allegation_category',help='allegation category for prediction (contracts)',default='')
parser.add_argument('-fl','--feature_sets_log_file',
help='Feature Set Log File, typically feature_sets_log.yaml',
default='feature_sets_log.yaml')
parser.add_argument('-wf','--output_file',help='File to write output (ranked list)')
parser.add_argument('-pred_id','--predict_table_id',help='Identifier for reading feature tables',default='')
parser.add_argument('-train_id','--train_table_id',help='Identifier for reading feature tables')
args = parser.parse_args()
def main():
clf = GradientBoostingClassifier(n_estimators = 1000, min_samples_split=15, learning_rate = 0.1, max_depth=160)
feature_set_id = '59'
feature_sets_file = args.feature_sets_log_file
feature_set_dict = {}
with open(feature_sets_file, 'r') as stream:
feature_set_dict = yaml.load(stream)
feature_set = feature_set_dict[feature_set_id]
engine = model_pipeline_script.get_engine()
con = engine.connect()
if args.prediction_table != '':
contract_flag = True
else:
contract_flag = False
contracts_data = pd.read_sql(args.training_table,engine)
if contract_flag:
prediction_data = pd.read_sql(args.prediction_table,engine)
print contracts_data.columns
    # process training data
contracts_data['amt_standardized'] = contracts_data['amount_standardized']
contracts_data['contract_signing_date'] = pd.to_datetime(contracts_data['contract_signing_date'])
#Subsetting on only main allegation outcomes
train_data = contracts_data[(contracts_data['allegation_outcome'] == 'Substantiated') |
(contracts_data['allegation_outcome'] == 'Unfounded') |
(contracts_data['allegation_outcome'] == 'Unsubstantiated')]
    train_data,col_group_dict_train = model_pipeline_script.join_features(engine,con,train_data,args.train_table_id)
col_group_dict_train,col_group_keys_train = model_pipeline_script.define_feature_sets(col_group_dict_train)
if contract_flag:
        # process prediction data
prediction_data['amt_standardized'] = prediction_data['amount_standardized']
prediction_data['contract_signing_date'] = pd.to_datetime(prediction_data['contract_signing_date'])
prediction_data['allegation_category']=args.allegation_category
prediction_data,col_group_dict_predict = model_pipeline_script.join_features(engine,con,prediction_data,args.predict_table_id)
col_group_dict_predict,col_group_keys_predict = model_pipeline_script.define_feature_sets(col_group_dict_predict)
train_df = train_data[ train_data['allegation_outcome'].notnull() ]
if not contract_flag:
predict_df = train_data[ train_data['allegation_outcome'].isnull() ]
predict_df.drop('allegation_outcome',1,inplace=True)
else:
predict_df = prediction_data
feature_set_new = []
for feat_set in feature_set:
if 'cntrcts_splr_ftr_set_train' in feat_set:
feat_set = feat_set.replace('cntrcts_splr_ftr_set_train','cntrcts_splr_ftr_set_' + args.train_table_id)
feature_set_new.append(feat_set)
feature_set = feature_set_new
df_features_train,y_train = model_pipeline_script.select_features(train_df,col_group_dict_train,feature_set)
print 'feat_sets:'
if args.predict_table_id != '':
feature_set_new = []
for feat_set in feature_set:
print feat_set
if 'cntrcts_splr_ftr_set_' + args.train_table_id in feat_set:
feat_set = feat_set.replace('cntrcts_splr_ftr_set_' + args.train_table_id,'cntrcts_splr_ftr_set_' + args.predict_table_id)
feature_set_new.append(feat_set)
feature_set = feature_set_new
print 'shape: '
print predict_df.shape,feature_set
if contract_flag:
df_features_predict,y_predict = model_pipeline_script.select_features(predict_df,col_group_dict_predict,feature_set)
else:
df_features_predict,y_predict = model_pipeline_script.select_features(predict_df,col_group_dict_train,feature_set)
print df_features_predict.shape
df_to_write = df_features_train.merge(pd.DataFrame(y_train),left_index=True,right_index=True)
df_to_write.to_csv('features_and_outcomes.csv')
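    # Align the training and prediction frames on their shared feature
    # columns so the model is fit and applied over an identical feature
    # set, in the same column order.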
matching_cols = [val for val in df_features_train.columns if val in set(df_features_predict.columns)]
print len(matching_cols),len(df_features_train.columns),len(df_features_predict.columns)
df_features_train = df_features_train[matching_cols]
df_features_predict = df_features_predict[matching_cols]
x_train = np.array(df_features_train)
y_train = np.array(y_train)
x_train = x_train.astype(float)
x_predict = np.array(df_features_predict)
x_predict = x_predict.astype(float)
print 'Fitting....'
clf.fit(x_train,y_train)
print 'Predicting...'
y_pred = clf.predict(x_predict)
y_proba= clf.predict_proba(x_predict).T[1]
#code for printing out top features
#try:
# print 'Feature importance...'
# print df_features_train.columns,df_features_train.shape
# top_features = model_pipeline_script.get_feature_importance(clf,x_train,y_train,df_features_train.columns,nfeatures=50)
# print top_features
#feat_idx = []
#for feat in top_features:
# print feat
# idx =
# model_pipeline_script.decision_surface_plot(clf,df_features_train,y_train,top_features)
# except IOError:
# ''
#code for plotting distribution of prediction scores
# plt.hist(y_proba,bins=30)
# if contract_flag:
# plt.title('Prediction Scores on Contracts')
# else:
# plt.title('Prediction Scores on Uninvestigated Complaints')
# plt.xlabel('Prediction Score')
# if contract_flag:
# plt.ylabel('Number of Contracts')
# else:
# plt.ylabel('Number of Complaints')
# plt.show()
prediction_data = predict_df
prediction_data['prediction_score'] = y_proba
grouped = prediction_data[['country','prediction_score']].groupby('country').aggregate(['mean','median','std','count'])
grouped.columns = [' '.join(col).strip() for col in grouped.columns.values]
# print prediction_data.columns
# prediction_data[['country','prediction_score']].to_sql('prediction_scores_complaints_by_country_nocountryfeatures',engine,if_exists='replace')
if contract_flag:
output_df=prediction_data[['wb_contract_number','fiscal_year','region','country','project_id','project_name','contract_description','supplier','borrower_contract_reference_number','amount','prediction_score']]
else:
output_df=prediction_data[['wb_contract_number','fiscal_year','region','country','project_id','project_name','contract_description','supplier','borrower_contract_reference_number','amount','allegation_category','prediction_score']]
if '.csv' not in args.output_file:
output_file = args.output_file + '.csv'
output_table = args.output_file
else:
output_file = args.output_file
output_table = re.sub(r'\.csv$', '', args.output_file)
output_table_array = output_table.split("/")
print output_table_array
output_table = output_table_array[len(output_table_array)-1]
output_df.to_csv(output_file,encoding='utf-8')
if len(output_table) > 63:
output_table = output_table[:63]
output_df.to_sql(output_table,engine,if_exists='replace')
# grouped.to_sql('contract_set_w_prediction_nocountries',engine,if_exists='replace')
if __name__ == "__main__":
main()
|
|
import os
import sys
import urllib.request, urllib.error, urllib.parse
import time
import subprocess
import threading
import re
import zipfile
import shutil
import ssl
import codecs
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir))
python_path = os.path.join(root_path, 'python3')
noarch_lib = os.path.join(python_path, 'lib', 'noarch')
sys.path.append(noarch_lib)
from instances import xlog
import config
import update
data_root = os.path.join(root_path, 'data')
if not os.path.isdir(data_root):
os.mkdir(data_root)
download_path = os.path.join(data_root, 'downloads')
if not os.path.isdir(download_path):
os.mkdir(download_path)
progress = {}  # url => {"size": int, "downloaded": int, "status": "downloading"|"canceled"|"finished"|"failed"}
progress["update_status"] = "Idle"
def get_opener(retry=0):
if retry == 0:
opener = urllib.request.build_opener()
return opener
else:
return update.get_opener()
def download_file(url, filename):
if url not in progress:
progress[url] = {}
progress[url]["status"] = "downloading"
progress[url]["size"] = 1
progress[url]["downloaded"] = 0
else:
if progress[url]["status"] == "downloading":
xlog.warn("url in downloading, %s", url)
return False
for i in range(0, 2):
try:
xlog.info("download %s to %s, retry:%d", url, filename, i)
opener = get_opener(i)
req = opener.open(url, timeout=30)
progress[url]["size"] = int(req.headers.get('content-length') or 0)
chunk_len = 65536
downloaded = 0
with open(filename, 'wb') as fp:
while True:
chunk = req.read(chunk_len)
if not chunk:
break
fp.write(chunk)
downloaded += len(chunk)
progress[url]["downloaded"] = downloaded
if downloaded != progress[url]["size"]:
xlog.warn("download size:%d, need size:%d, download fail.", downloaded, progress[url]["size"])
continue
else:
progress[url]["status"] = "finished"
return True
except (urllib.error.URLError, ssl.SSLError) as e:
xlog.warn("download %s to %s URL fail:%r", url, filename, e)
continue
except Exception as e:
xlog.exception("download %s to %s fail:%r", url, filename, e)
continue
progress[url]["status"] = "failed"
return False
def parse_readme_versions(readme_file):
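    """Scan a README for XX-Net codeload zip links and return a list of
    [zip_url, version] pairs once two are found; raises otherwise."""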
versions = []
try:
lines = codecs.open(readme_file,"r",'utf8').readlines()
        p = re.compile(r'https://codeload\.github\.com/XX-net/XX-Net/zip/([0-9]+)\.([0-9]+)\.([0-9]+)')
for line in lines:
m = p.match(line)
if m:
version = m.group(1) + "." + m.group(2) + "." + m.group(3)
versions.append([m.group(0), version])
if len(versions) == 2:
return versions
except Exception as e:
xlog.exception("xxnet_version fail:%r", e)
raise "get_version_fail:" % readme_file
def current_version():
readme_file = os.path.join(root_path, "README.md")
try:
versions = parse_readme_versions(readme_file)
return versions[0][1]
except:
return "get_version_fail"
def get_github_versions():
readme_url = "https://raw.githubusercontent.com/XX-net/XX-Net/master/README.md"
readme_target = os.path.join(download_path, "README.md")
if not download_file(readme_url, readme_target):
raise IOError("get README %s fail:" % readme_url)
versions = parse_readme_versions(readme_target)
return versions
def sha1_file(filename):
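    """Return the SHA-1 hex digest of filename, or False if it cannot be read."""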
import hashlib
BLOCKSIZE = 65536
hasher = hashlib.sha1()
try:
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
except:
return False
def download_overwrite_new_version(xxnet_version):
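    """Fetch the release zip for xxnet_version, unzip it under the downloads
    dir, and copy over any file whose SHA-1 differs from the installed copy."""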
xxnet_url = 'https://codeload.github.com/XX-net/XX-Net/zip/%s' % xxnet_version
xxnet_zip_file = os.path.join(download_path, "XX-Net-%s.zip" % xxnet_version)
xxnet_unzip_path = os.path.join(download_path, "XX-Net-%s" % xxnet_version)
progress["update_status"] = "Downloading %s" % xxnet_url
    if not download_file(xxnet_url, xxnet_zip_file):
        progress["update_status"] = "Download Fail."
        raise Exception("download xxnet zip fail:%s" % xxnet_url)
    xlog.info("update download %s finished.", xxnet_zip_file)
xlog.info("update start unzip")
progress["update_status"] = "Unziping"
try:
        with zipfile.ZipFile(xxnet_zip_file, "r") as dz:
            dz.extractall(download_path)
except Exception as e:
xlog.warn("unzip %s fail:%r", xxnet_zip_file, e)
progress["update_status"] = "Unzip Fail:%s" % e
raise
xlog.info("update finished unzip")
progress["update_status"] = "Over writing"
try:
for root, subdirs, files in os.walk(xxnet_unzip_path):
relate_path = root[len(xxnet_unzip_path)+1:]
for subdir in subdirs:
target_path = os.path.join(root_path, relate_path, subdir)
if not os.path.isdir(target_path):
xlog.info("mkdir %s", target_path)
os.mkdir(target_path)
if config.get(["update", "uuid"], '') == 'test' and "launcher" in relate_path:
                # for debug: don't overwrite the launcher dir
continue
for filename in files:
src_file = os.path.join(root, filename)
dst_file = os.path.join(root_path, relate_path, filename)
if not os.path.isfile(dst_file) or sha1_file(src_file) != sha1_file(dst_file):
xlog.info("copy %s => %s", src_file, dst_file)
shutil.copy(src_file, dst_file)
except Exception as e:
xlog.warn("update over write fail:%r", e)
progress["update_status"] = "Over write Fail:%r" % e
raise
xlog.info("update file finished.")
os.remove(xxnet_zip_file)
shutil.rmtree(xxnet_unzip_path, ignore_errors=True)
def restart_xxnet():
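    """Stop all modules and the control web server, spawn a fresh start.py,
    then exit this process."""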
import module_init
module_init.stop_all()
import web_control
web_control.stop()
current_path = os.path.dirname(os.path.abspath(__file__))
start_script = os.path.join(current_path, "start.py")
subprocess.Popen([sys.executable, start_script])
time.sleep(10)
os._exit(0)
def update_version(version):
try:
download_overwrite_new_version(version)
progress["update_status"] = "Restarting"
xlog.info("update try restart xxnet")
restart_xxnet()
except Exception as e:
xlog.warn("update version %s fail:%r", version, e)
def start_update_version(version):
if progress["update_status"] != "Idle" and "Fail" not in progress["update_status"]:
return progress["update_status"]
progress["update_status"] = "Start update"
th = threading.Thread(target=update_version, args=(version,))
th.start()
return True
def clean_old_file():
# These files moved to lib path
# old file need remove if exist.
def delete_file(file):
try:
os.remove(file)
except:
pass
delete_file(os.path.join(root_path, "gae_proxy", "local", "simple_http_server.py"))
delete_file(os.path.join(root_path, "gae_proxy", "local", "simple_http_server.pyc"))
delete_file(os.path.join(root_path, "gae_proxy", "local", "xlog.py"))
delete_file(os.path.join(root_path, "gae_proxy", "local", "xlog.pyc"))
clean_old_file()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 NTT
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import os
import mox
from oslo.config import cfg
from nova import context
from nova import db
from nova.network import driver
from nova.network import linux_net
from nova.openstack.common import fileutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
HOST = "testhost"
instances = {'00000000-0000-0000-0000-0000000000000000':
{'id': 0,
'uuid': '00000000-0000-0000-0000-0000000000000000',
'host': 'fake_instance00',
'created_at': 'fakedate',
'updated_at': 'fakedate',
'hostname': 'fake_instance00'},
'00000000-0000-0000-0000-0000000000000001':
{'id': 1,
'uuid': '00000000-0000-0000-0000-0000000000000001',
'host': 'fake_instance01',
'created_at': 'fakedate',
'updated_at': 'fakedate',
'hostname': 'fake_instance01'}}
addresses = [{"address": "10.0.0.1"},
{"address": "10.0.0.2"},
{"address": "10.0.0.3"},
{"address": "10.0.0.4"},
{"address": "10.0.0.5"},
{"address": "10.0.0.6"}]
networks = [{'id': 0,
'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_server': '0.0.0.0',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2'},
{'id': 1,
'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'label': 'test1',
'injected': False,
'multi_host': True,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_server': '0.0.0.0',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_id': 0,
'allocated': True,
'leased': True,
'virtual_interface_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
{'id': 1,
'network_id': 1,
'address': '192.168.1.100',
'instance_id': 0,
'allocated': True,
'leased': True,
'virtual_interface_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
{'id': 2,
'network_id': 1,
'address': '192.168.0.101',
'instance_id': 1,
'allocated': True,
'leased': True,
'virtual_interface_id': 2,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
{'id': 3,
'network_id': 0,
'address': '192.168.1.101',
'instance_id': 1,
'allocated': True,
'leased': True,
'virtual_interface_id': 3,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
{'id': 4,
'network_id': 0,
'address': '192.168.0.102',
'instance_id': 0,
'allocated': True,
'leased': False,
'virtual_interface_id': 4,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
{'id': 5,
'network_id': 1,
'address': '192.168.1.102',
'instance_id': 1,
'allocated': True,
'leased': False,
'virtual_interface_id': 5,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []}]
vifs = [{'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
{'id': 2,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
{'id': 3,
'address': 'DE:AD:BE:EF:00:03',
'uuid': '00000000-0000-0000-0000-0000000000000003',
'network_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
{'id': 4,
'address': 'DE:AD:BE:EF:00:04',
'uuid': '00000000-0000-0000-0000-0000000000000004',
'network_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
{'id': 5,
'address': 'DE:AD:BE:EF:00:05',
'uuid': '00000000-0000-0000-0000-0000000000000005',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}]
def get_associated(context, network_id, host=None, address=None):
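    """In-memory stand-in for db.network_get_associated_fixed_ips (stubbed in
    via LinuxNetworkTestCase.setUp), filtering the fixed_ips fixtures by
    network, host and address."""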
result = []
for datum in fixed_ips:
if (datum['network_id'] == network_id and datum['allocated']
and datum['instance_uuid'] is not None
and datum['virtual_interface_id'] is not None):
instance = instances[datum['instance_uuid']]
if host and host != instance['host']:
continue
if address and address != datum['address']:
continue
cleaned = {}
cleaned['address'] = datum['address']
cleaned['instance_uuid'] = datum['instance_uuid']
cleaned['network_id'] = datum['network_id']
cleaned['vif_id'] = datum['virtual_interface_id']
vif = vifs[datum['virtual_interface_id']]
cleaned['vif_address'] = vif['address']
cleaned['instance_hostname'] = instance['hostname']
cleaned['instance_updated'] = instance['updated_at']
cleaned['instance_created'] = instance['created_at']
cleaned['allocated'] = datum['allocated']
cleaned['leased'] = datum['leased']
result.append(cleaned)
return result
class LinuxNetworkTestCase(test.TestCase):
def setUp(self):
super(LinuxNetworkTestCase, self).setUp()
self.driver = driver.load_network_driver()
self.driver.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=True)
def get_vifs(_context, instance_uuid):
return [vif for vif in vifs if vif['instance_uuid'] ==
instance_uuid]
def get_instance(_context, instance_id):
return instances[instance_id]
self.stubs.Set(db, 'virtual_interface_get_by_instance', get_vifs)
self.stubs.Set(db, 'instance_get', get_instance)
self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)
def _test_add_snat_rule(self, expected):
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'snat')
self.assertEqual(rule, expected)
self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
'add_rule', verify_add_rule)
linux_net.add_snat_rule('10.0.0.0/24')
def test_add_snat_rule(self):
self.flags(routing_source_ip='10.10.10.1')
expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
'-j SNAT --to-source 10.10.10.1 -o eth0')
self._test_add_snat_rule(expected)
def test_add_snat_rule_snat_range(self):
self.flags(routing_source_ip='10.10.10.1',
force_snat_range=['10.10.10.0/24'])
expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 '
'-j SNAT --to-source 10.10.10.1 -o eth0')
self._test_add_snat_rule(expected)
def test_update_dhcp_for_nw00(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(self.driver, 'write_to_file')
self.mox.StubOutWithMock(fileutils, 'ensure_tree')
self.mox.StubOutWithMock(os, 'chmod')
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.driver.update_dhcp(self.context, "eth0", networks[0])
def test_update_dhcp_for_nw01(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(self.driver, 'write_to_file')
self.mox.StubOutWithMock(fileutils, 'ensure_tree')
self.mox.StubOutWithMock(os, 'chmod')
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.driver.update_dhcp(self.context, "eth0", networks[0])
def test_get_dhcp_hosts_for_nw00(self):
self.flags(use_single_default_gateway=True)
expected = (
"DE:AD:BE:EF:00:00,fake_instance00.novalocal,"
"192.168.0.100,net:NW-0\n"
"DE:AD:BE:EF:00:03,fake_instance01.novalocal,"
"192.168.1.101,net:NW-3\n"
"DE:AD:BE:EF:00:04,fake_instance00.novalocal,"
"192.168.0.102,net:NW-4"
)
actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0])
self.assertEquals(actual_hosts, expected)
def test_get_dhcp_hosts_for_nw01(self):
self.flags(use_single_default_gateway=True)
self.flags(host='fake_instance01')
expected = (
"DE:AD:BE:EF:00:02,fake_instance01.novalocal,"
"192.168.0.101,net:NW-2\n"
"DE:AD:BE:EF:00:05,fake_instance01.novalocal,"
"192.168.1.102,net:NW-5"
)
actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1])
self.assertEquals(actual_hosts, expected)
def test_get_dns_hosts_for_nw00(self):
expected = (
"192.168.0.100\tfake_instance00.novalocal\n"
"192.168.1.101\tfake_instance01.novalocal\n"
"192.168.0.102\tfake_instance00.novalocal"
)
actual_hosts = self.driver.get_dns_hosts(self.context, networks[0])
self.assertEquals(actual_hosts, expected)
def test_get_dns_hosts_for_nw01(self):
expected = (
"192.168.1.100\tfake_instance00.novalocal\n"
"192.168.0.101\tfake_instance01.novalocal\n"
"192.168.1.102\tfake_instance01.novalocal"
)
actual_hosts = self.driver.get_dns_hosts(self.context, networks[1])
self.assertEquals(actual_hosts, expected)
def test_get_dhcp_opts_for_nw00(self):
expected_opts = 'NW-3,3\nNW-4,3'
actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
self.assertEquals(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw01(self):
self.flags(host='fake_instance01')
expected_opts = "NW-5,3"
actual_opts = self.driver.get_dhcp_opts(self.context, networks[1])
self.assertEquals(actual_opts, expected_opts)
def test_get_dhcp_leases_for_nw00(self):
timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
leases = self.driver.get_dhcp_leases(self.context, networks[0])
leases = leases.split('\n')
for lease in leases:
lease = lease.split(' ')
data = get_associated(self.context, 0, address=lease[2])[0]
self.assertTrue(data['allocated'])
self.assertTrue(data['leased'])
            self.assertTrue(int(lease[0]) > seconds_since_epoch)
self.assertTrue(lease[1] == data['vif_address'])
self.assertTrue(lease[2] == data['address'])
self.assertTrue(lease[3] == data['instance_hostname'])
self.assertTrue(lease[4] == '*')
def test_get_dhcp_leases_for_nw01(self):
self.flags(host='fake_instance01')
timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
leases = self.driver.get_dhcp_leases(self.context, networks[1])
leases = leases.split('\n')
for lease in leases:
lease = lease.split(' ')
data = get_associated(self.context, 1, address=lease[2])[0]
self.assertTrue(data['allocated'])
self.assertTrue(data['leased'])
            self.assertTrue(int(lease[0]) > seconds_since_epoch)
self.assertTrue(lease[1] == data['vif_address'])
self.assertTrue(lease[2] == data['address'])
self.assertTrue(lease[3] == data['instance_hostname'])
self.assertTrue(lease[4] == '*')
def test_dhcp_opts_not_default_gateway_network(self):
expected = "NW-0,3"
data = get_associated(self.context, 0)[0]
actual = self.driver._host_dhcp_opts(data)
self.assertEquals(actual, expected)
def test_host_dhcp_without_default_gateway_network(self):
expected = ','.join(['DE:AD:BE:EF:00:00',
'fake_instance00.novalocal',
'192.168.0.100'])
data = get_associated(self.context, 0)[0]
actual = self.driver._host_dhcp(data)
self.assertEquals(actual, expected)
def test_host_dns_without_default_gateway_network(self):
expected = "192.168.0.100\tfake_instance00.novalocal"
data = get_associated(self.context, 0)[0]
actual = self.driver._host_dns(data)
self.assertEquals(actual, expected)
def test_linux_bridge_driver_plug(self):
"""Makes sure plug doesn't drop FORWARD by default.
Ensures bug 890195 doesn't reappear.
"""
def fake_execute(*args, **kwargs):
return "", ""
self.stubs.Set(utils, 'execute', fake_execute)
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'FORWARD')
self.assertIn('ACCEPT', rule)
self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
'add_rule', verify_add_rule)
driver = linux_net.LinuxBridgeInterfaceDriver()
driver.plug({"bridge": "br100", "bridge_interface": "eth0"},
"fakemac")
def test_vlan_override(self):
"""Makes sure vlan_interface flag overrides network bridge_interface.
Allows heterogeneous networks a la bug 833426
"""
driver = linux_net.LinuxBridgeInterfaceDriver()
info = {}
@classmethod
def test_ensure(_self, vlan, bridge, interface, network, mac_address):
info['passed_interface'] = interface
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_vlan_bridge', test_ensure)
network = {
"bridge": "br100",
"bridge_interface": "base_interface",
"vlan": "fake"
}
self.flags(vlan_interface="")
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "base_interface")
self.flags(vlan_interface="override_interface")
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "override_interface")
driver.plug(network, "fakemac")
def test_flat_override(self):
"""Makes sure flat_interface flag overrides network bridge_interface.
Allows heterogeneous networks a la bug 833426
"""
driver = linux_net.LinuxBridgeInterfaceDriver()
info = {}
@classmethod
def test_ensure(_self, bridge, interface, network, gateway):
info['passed_interface'] = interface
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_bridge', test_ensure)
network = {
"bridge": "br100",
"bridge_interface": "base_interface",
}
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "base_interface")
self.flags(flat_interface="override_interface")
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "override_interface")
def _test_dnsmasq_execute(self, extra_expected=None):
network_ref = {'id': 'fake',
'label': 'fake',
'multi_host': False,
'cidr': '10.0.0.0/24',
'netmask': '255.255.255.0',
'dns1': '8.8.4.4',
'dhcp_start': '1.0.0.2',
'dhcp_server': '10.0.0.1'}
def fake_execute(*args, **kwargs):
executes.append(args)
return "", ""
self.stubs.Set(linux_net, '_execute', fake_execute)
self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
dev = 'br100'
default_domain = CONF.dhcp_domain
for domain in ('', default_domain):
executes = []
CONF.dhcp_domain = domain
linux_net.restart_dhcp(self.context, dev, network_ref)
expected = ['env',
'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
'NETWORK_ID=fake',
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % CONF.dnsmasq_config_file,
'--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
"--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'],
network_ref['dhcp_start'],
network_ref['netmask'],
CONF.dhcp_lease_time),
'--dhcp-lease-max=256',
'--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % CONF.dhcpbridge,
'--leasefile-ro']
if CONF.dhcp_domain:
expected.append('--domain=%s' % CONF.dhcp_domain)
if extra_expected:
expected += extra_expected
self.assertEqual([tuple(expected)], executes)
def test_dnsmasq_execute(self):
self._test_dnsmasq_execute()
def test_dnsmasq_execute_dns_servers(self):
self.flags(dns_server=['1.1.1.1', '2.2.2.2'])
expected = [
'--no-hosts',
'--no-resolv',
'--server=1.1.1.1',
'--server=2.2.2.2',
]
self._test_dnsmasq_execute(expected)
def test_dnsmasq_execute_use_network_dns_servers(self):
self.flags(use_network_dns_servers=True)
expected = [
'--no-hosts',
'--no-resolv',
'--server=8.8.4.4',
]
self._test_dnsmasq_execute(expected)
def test_isolated_host(self):
self.flags(fake_network=False,
share_dhcp_address=True)
# NOTE(vish): use a fresh copy of the manager for each test
self.stubs.Set(linux_net, 'iptables_manager',
linux_net.IptablesManager())
self.stubs.Set(linux_net, 'binary_name', 'test')
executes = []
inputs = []
def fake_execute(*args, **kwargs):
executes.append(args)
process_input = kwargs.get('process_input')
if process_input:
inputs.append(process_input)
return "", ""
self.stubs.Set(utils, 'execute', fake_execute)
driver = linux_net.LinuxBridgeInterfaceDriver()
@classmethod
def fake_ensure(_self, bridge, interface, network, gateway):
return bridge
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_bridge', fake_ensure)
iface = 'eth0'
dhcp = '192.168.1.1'
network = {'dhcp_server': dhcp,
'bridge': 'br100',
'bridge_interface': iface}
driver.plug(network, 'fakemac')
expected = [
('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
expected_inputs = [
'-A test-FORWARD -m physdev --physdev-in %s '
'-d 255.255.255.255 -p udp --dport 67 -j DROP' % iface,
'-A test-FORWARD -m physdev --physdev-out %s '
'-d 255.255.255.255 -p udp --dport 67 -j DROP' % iface,
'-A test-FORWARD -m physdev --physdev-in %s '
'-d 192.168.1.1 -j DROP' % iface,
'-A test-FORWARD -m physdev --physdev-out %s '
'-s 192.168.1.1 -j DROP' % iface,
]
for inp in expected_inputs:
self.assertTrue(inp in inputs[0])
executes = []
inputs = []
@classmethod
def fake_remove(_self, bridge, gateway):
return
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'remove_bridge', fake_remove)
driver.unplug(network)
expected = [
('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
for inp in expected_inputs:
self.assertFalse(inp in inputs[0])
def test_isolated_host_iptables_logdrop(self):
# Ensure that a different drop action for iptables doesn't change
# the drop action for ebtables.
self.flags(fake_network=False,
share_dhcp_address=True,
iptables_drop_action='LOGDROP')
# NOTE(vish): use a fresh copy of the manager for each test
self.stubs.Set(linux_net, 'iptables_manager',
linux_net.IptablesManager())
self.stubs.Set(linux_net, 'binary_name', 'test')
executes = []
inputs = []
def fake_execute(*args, **kwargs):
executes.append(args)
process_input = kwargs.get('process_input')
if process_input:
inputs.append(process_input)
return "", ""
self.stubs.Set(utils, 'execute', fake_execute)
driver = linux_net.LinuxBridgeInterfaceDriver()
@classmethod
def fake_ensure(_self, bridge, interface, network, gateway):
return bridge
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_bridge', fake_ensure)
iface = 'eth0'
dhcp = '192.168.1.1'
network = {'dhcp_server': dhcp,
'bridge': 'br100',
'bridge_interface': iface}
driver.plug(network, 'fakemac')
expected = [
('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
expected_inputs = [
('-A test-FORWARD -m physdev --physdev-in %s '
'-d 255.255.255.255 -p udp --dport 67 -j LOGDROP' % iface),
('-A test-FORWARD -m physdev --physdev-out %s '
'-d 255.255.255.255 -p udp --dport 67 -j LOGDROP' % iface),
('-A test-FORWARD -m physdev --physdev-in %s '
'-d 192.168.1.1 -j LOGDROP' % iface),
('-A test-FORWARD -m physdev --physdev-out %s '
'-s 192.168.1.1 -j LOGDROP' % iface),
]
for inp in expected_inputs:
self.assertTrue(inp in inputs[0])
executes = []
inputs = []
@classmethod
def fake_remove(_self, bridge, gateway):
return
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'remove_bridge', fake_remove)
driver.unplug(network)
expected = [
('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
for inp in expected_inputs:
self.assertFalse(inp in inputs[0])
def _test_initialize_gateway(self, existing, expected, routes=''):
self.flags(fake_network=False)
executes = []
def fake_execute(*args, **kwargs):
executes.append(args)
if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
return existing, ""
if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show':
return routes, ""
self.stubs.Set(utils, 'execute', fake_execute)
network = {'dhcp_server': '192.168.1.1',
'cidr': '192.168.1.0/24',
'broadcast': '192.168.1.255',
'cidr_v6': '2001:db8::/64'}
self.driver.initialize_gateway_device('eth0', network)
self.assertEqual(executes, expected)
def test_initialize_gateway_moves_wrong_ip(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_resets_route(self):
routes = ("default via 192.168.0.1 dev eth0\n"
"192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n")
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'route', 'del', 'default', 'dev', 'eth0'),
('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'route', 'add', 'default', 'via', '192.168.0.1',
'dev', 'eth0'),
('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254',
'dev', 'eth0', 'proto', 'static'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected, routes)
def test_initialize_gateway_no_move_right_ip(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_add_if_blank(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected)
def test_ensure_floating_no_duplicate_forwards(self):
ln = linux_net
self.stubs.Set(ln.iptables_manager, 'apply', lambda: None)
self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None)
net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'}
ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net)
ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net)
two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net)
dup_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
self.assertEqual(two_forward_rules, dup_forward_rules)
def test_apply_ran(self):
manager = linux_net.IptablesManager()
manager.iptables_apply_deferred = False
self.mox.StubOutWithMock(manager, '_apply')
manager._apply()
self.mox.ReplayAll()
empty_ret = manager.apply()
self.assertEqual(empty_ret, None)
def test_apply_not_run(self):
manager = linux_net.IptablesManager()
manager.iptables_apply_deferred = True
self.mox.StubOutWithMock(manager, '_apply')
self.mox.ReplayAll()
manager.apply()
def test_deferred_unset_apply_ran(self):
manager = linux_net.IptablesManager()
manager.iptables_apply_deferred = True
self.mox.StubOutWithMock(manager, '_apply')
manager._apply()
self.mox.ReplayAll()
manager.defer_apply_off()
self.assertFalse(manager.iptables_apply_deferred)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Deals with K-mers and K-mer distribution from reads or genome
"""
import os.path as op
import sys
import logging
import numpy as np
from jcvi.graphics.base import plt, asciiplot, set_human_axis, savefig, \
markup, panel_labels, normalize_axes, set_ticklabels_helvetica
from jcvi.formats.fasta import Fasta
from jcvi.formats.base import BaseFile, must_open, get_number
from jcvi.utils.cbook import thousands, percentage
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, \
need_update, Popen, PIPE
KMERYL, KSOAP, KALLPATHS = range(3)
class KmerSpectrum (BaseFile):
def __init__(self, histfile):
self.load_data(histfile)
def load_data(self, histfile):
self.data = []
self.totalKmers = 0
self.hist = {}
kformat = self.guess_format(histfile)
kformats = ("Meryl", "Soap", "AllPaths")
logging.debug("Guessed format: {0}".format(kformats[kformat]))
fp = open(histfile)
for rowno, row in enumerate(fp):
if row[0] == '#':
continue
if kformat == KSOAP:
K = rowno + 1
counts = int(row.strip())
else: # meryl histogram
K, counts = row.split()[:2]
K, counts = int(K), int(counts)
Kcounts = K * counts
self.totalKmers += Kcounts
self.hist[K] = Kcounts
self.data.append((K, counts))
def guess_format(self, histfile):
# Guess the format of the Kmer histogram
fp = open(histfile)
for row in fp:
if row.startswith("# 1:"):
return KALLPATHS
if len(row.split()) == 1:
return KSOAP
return KMERYL
def get_xy(self, vmin=1, vmax=100):
self.counts = sorted((a, b) for a, b in self.hist.items() \
if vmin <= a <= vmax)
return zip(*self.counts)
def analyze(self, ploidy=2, K=23, covmax=1000000):
"""
Analyze Kmer spectrum, calculations derived from
allpathslg/src/kmers/KmerSpectra.cc
"""
from math import sqrt
data = self.data
kf_ceil = max(K for (K, c) in data)
if kf_ceil > covmax:
exceeds = sum(1 for (K, c) in data if K > covmax)
logging.debug("A total of {0} distinct K-mers appear > "
"{1} times. Ignored ...".format(exceeds, covmax))
kf_ceil = covmax
nkf = kf_ceil + 1
a = [0] * nkf
for kf, c in data:
if kf > kf_ceil:
continue
a[kf] = c
ndk = a # number of distinct kmers
nk = [k * c for k, c in enumerate(a)] # number of kmers
cndk = [0] * nkf # cumulative number of distinct kmers
cnk = [0] * nkf # cumulative number of kmers
for kf in xrange(1, nkf):
cndk[kf] = cndk[kf - 1] + .5 * (ndk[kf - 1] + ndk[kf])
cnk [kf] = cnk [kf - 1] + .5 * (nk [kf - 1] + nk [kf])
# Separate kmer spectrum in 5 regions based on the kf
# 1 ... kf_min1 : bad kmers with low frequency
# kf_min1 ... kf_min2 : good kmers CN = 1/2 (SNPs)
# kf_min2 ... kf_min3 : good kmers CN = 1
# kf_min3 ... kf_hi : good kmers CN > 1 (repetitive)
# kf_hi ... inf : bad kmers with high frequency
# min1: find first minimum
_kf_min1 = 10
while (_kf_min1 - 1 >= 2 and nk[_kf_min1 - 1] < nk[_kf_min1]):
_kf_min1 -= 1
while (_kf_min1 <= kf_ceil and nk [_kf_min1 + 1] < nk[_kf_min1]):
_kf_min1 += 1
# max2: find absolute maximum mx2 above first minimum min1
_kf_max2 = _kf_min1
for kf in xrange(_kf_min1 + 1, int(0.8 * kf_ceil)):
if nk[kf] > nk[_kf_max2]:
_kf_max2 = kf
# max2: resetting max2 for cases of very high polymorphism
if ploidy == 2:
ndk_half = ndk[_kf_max2 / 2]
ndk_double = ndk[_kf_max2 * 2]
if ndk_double > ndk_half:
_kf_max2 *= 2
# max1: SNPs local maximum max1 as half global maximum max2
_kf_max1 = _kf_max2 / 2
# min2: SNPs local minimum min2 between max1 and max2
_kf_min2 = _kf_max1 * (2 * ndk[_kf_max1] + ndk[_kf_max2]) / \
(ndk[_kf_max1] + ndk[_kf_max2])
# min1: refine between min1 and max2/2
for kf in xrange(_kf_min1 + 1, _kf_max1):
if (nk[kf] < nk[_kf_min1]):
_kf_min1 = kf
# min3: not a minimum, really. upper edge of main peak
_kf_min3 = _kf_max2 * 3 / 2
print >> sys.stderr, "kfs:", _kf_min1, _kf_max1, \
_kf_min2, _kf_max2, _kf_min3
self.min1 = _kf_min1
self.max1 = _kf_max1
self.min2 = _kf_min2
self.max2 = _kf_max2
self.min3 = _kf_min3
# Define maximum kf above which we neglect data
_kf_hi = _kf_max2 * sqrt(4 * ndk[2 * _kf_max2] * _kf_max2) \
if 2 * _kf_max2 < len(ndk) else \
_kf_max2 * sqrt(4 * ndk[len(ndk) - 1] * _kf_max2)
_kf_hi = int(_kf_hi)
if _kf_hi > kf_ceil:
_kf_hi = kf_ceil
_nk_total = cnk[len(cnk) - 1]
_nk_bad_low_kf = cnk[_kf_min1]
_nk_good_uniq = cnk[_kf_min3] - cnk[_kf_min2]
_nk_bad_high_kf = _nk_total - cnk[_kf_hi]
_ndk_good_snp = cndk[_kf_min2] - cndk[_kf_min1]
_ndk_good_uniq = cndk[_kf_min3] - cndk[_kf_min2]
# kmer coverage C_k
_kf_ave_uniq = _nk_good_uniq * 1. / _ndk_good_uniq
_genome_size = (_nk_total - _nk_bad_low_kf - _nk_bad_high_kf) / \
_kf_ave_uniq
_genome_size_unique = _ndk_good_uniq + _ndk_good_snp / 2
_genome_size_repetitive = _genome_size - _genome_size_unique
_coverage = _nk_total / _genome_size if _genome_size else 0
# SNP rate estimation, assumes uniform distribution of SNPs over the
# genome and accounts for the reduction in SNP kmer counts when
# polymorphism is very high
if ploidy == 2:
_d_SNP = 1. / (1. - (1. - .5 * _ndk_good_snp / _genome_size) ** (1. / K)) \
if _ndk_good_snp > 0 else 1000000
G = int(_genome_size)
G1 = int(_genome_size_unique)
GR = int(_genome_size_repetitive)
coverage = int(_coverage)
m = "Kmer (K={0}) Spectrum Analysis\n".format(K)
m += "Genome size estimate = {0}\n".format(thousands(G))
m += "Genome size estimate CN = 1 = {0} ({1})\n".format(thousands(G1),
percentage(G1, G))
m += "Genome size estimate CN > 1 = {0} ({1})\n".format(thousands(GR),
percentage(GR, G))
m += "Coverage estimate: {0} x\n".format(coverage)
self.repetitive = "Repeats: {0} percent".format(GR * 100 / G)
if ploidy == 2:
d_SNP = int(_d_SNP)
self.snprate = "SNP rate ~= 1/{0}".format(d_SNP)
else:
self.snprate = "SNP rate not computed (Ploidy = {0})".format(ploidy)
m += self.snprate + '\n'
self.genomesize = int(round(self.totalKmers * 1. / self.max2))
print >> sys.stderr, m
def main():
actions = (
('jellyfish', 'dump histogram using `jellyfish`'),
('meryl', 'dump histogram using `meryl`'),
('histogram', 'plot the histogram based on meryl K-mer distribution'),
('multihistogram', 'plot histogram across a set of K-mer sizes'),
        # These form a pipeline to count K-mers for a given FASTA sequence
('dump', 'convert FASTA sequences to list of K-mers'),
('bin', 'serialize counts to bitarrays'),
('bincount', 'count K-mers in the bin'),
('count', 'run dump - jellyfish - bin - bincount in serial'),
('logodds', 'compute log likelihood between two db'),
('model', 'model kmer distribution given error rate'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def model(args):
"""
%prog model erate
Model kmer distribution given error rate. See derivation in FIONA paper:
<http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>
"""
from scipy.stats import binom, poisson
p = OptionParser(model.__doc__)
p.add_option("-k", default=23, type="int", help="Kmer size")
p.add_option("--cov", default=50, type="int", help="Expected coverage")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
erate, = args
erate = float(erate)
cov = opts.cov
k = opts.k
xy = []
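    # Mixture model from the FIONA paper: P(Y_k = c) = sum_{i=0..k}
    # Binom(i; k, erate) * Poisson(c; mu_i), where mu_i is the expected
    # coverage of a k-mer carrying exactly i errors (computed below).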
    # The range includes c=0, although it is unclear what a coverage of zero means
for c in xrange(0, cov * 2 + 1):
Prob_Yk = 0
for i in xrange(k + 1):
# Probability of having exactly i errors
pi_i = binom.pmf(i, k, erate)
# Expected coverage of kmer with exactly i errors
mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i)
# Probability of seeing coverage of c
Prob_Yk_i = poisson.pmf(c, mu_i)
# Sum i over 0, 1, ... up to k errors
Prob_Yk += pi_i * Prob_Yk_i
xy.append((c, Prob_Yk))
x, y = zip(*xy)
asciiplot(x, y, title="Model")
def logodds(args):
"""
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
"""
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cnt1, cnt2 = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
scf, c1 = row.split()
c2 = d[scf]
c1, c2 = float(c1), float(c2)
c1 += 1
c2 += 1
score = int(100 * (log(c1) - log(c2)))
print "{0}\t{1}".format(scf, score)
def get_K(jfdb):
"""
Infer K from jellyfish db.
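    e.g. a database prefix like "jf-K23" (the naming scheme used by
    jellyfish() in this module) yields K=23.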
"""
j = jfdb.rsplit('_', 1)[0].rsplit('-', 1)[-1]
assert j[0] == 'K'
return int(j[1:])
def count(args):
"""
%prog count fastafile jf.db
Run dump - jellyfish - bin - bincount in serial.
"""
from bitarray import bitarray
p = OptionParser(count.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, jfdb = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open("tmp", "w")
proc = Popen(cmd, stdin=PIPE, stdout=t)
t.flush()
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print >> proc.stdin, "\n".join(kmers)
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = ".".join((fastafile, jfdb, "bin"))
fw = open(binfile, "w")
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c))
a.tofile(fw)
logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile))
fw.close()
sh("rm {0}".format(t.name))
logging.debug("Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".\
format(K, fastafile, jfdb, binfile))
cntfile = ".".join((fastafile, jfdb, "cnt"))
bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)])
logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
def bincount(args):
"""
%prog bincount fastafile binfile
Count K-mers in the bin.
"""
from bitarray import bitarray
from jcvi.formats.sizes import Sizes
p = OptionParser(bincount.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, binfile = args
K = opts.K
fp = open(binfile)
a = bitarray()
a.fromfile(fp)
f = Sizes(fastafile)
tsize = 0
fw = must_open(opts.outfile, "w")
for name, seqlen in f.iter_sizes():
ksize = seqlen - K + 1
b = a[tsize: tsize + ksize]
bcount = b.count()
print >> fw, "\t".join(str(x) for x in (name, bcount))
tsize += ksize
def bin(args):
"""
%prog bin filename filename.bin
Serialize counts to bitarrays.
"""
from bitarray import bitarray
p = OptionParser(bin.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
inp, outp = args
fp = must_open(inp)
fw = must_open(outp, "w")
a = bitarray()
for row in fp:
c = row.split()[-1]
a.append(int(c))
a.tofile(fw)
fw.close()
def make_kmers(seq, K):
seq = str(seq).upper().replace("N", "A")
seqlen = len(seq)
for i in xrange(seqlen - K + 1):
yield seq[i: i + K]
def dump(args):
"""
%prog dump fastafile
Convert FASTA sequences to list of K-mers.
"""
p = OptionParser(dump.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
K = opts.K
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print >> fw, "\n".join(kmers)
fw.close()
def jellyfish(args):
"""
%prog jellyfish [*.fastq|*.fasta]
Run jellyfish to dump histogram to be used in kmer.histogram().
"""
from jcvi.apps.base import getfilesize
from jcvi.utils.cbook import human_size
p = OptionParser(jellyfish.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.add_option("--coverage", default=40, type="int",
help="Expected sequence coverage [default: %default]")
p.add_option("--prefix", default="jf",
help="Database prefix [default: %default]")
p.add_option("--nohist", default=False, action="store_true",
help="Do not print histogram [default: %default]")
p.set_home("jellyfish")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
K = opts.K
coverage = opts.coverage
totalfilesize = sum(getfilesize(x) for x in fastqfiles)
fq = fastqfiles[0]
pf = opts.prefix
gzip = fq.endswith(".gz")
hashsize = totalfilesize / coverage
logging.debug("Total file size: {0}, hashsize (-s): {1}".\
format(human_size(totalfilesize,
a_kilobyte_is_1024_bytes=True), hashsize))
jfpf = "{0}-K{1}".format(pf, K)
jfdb = jfpf
fastqfiles = " ".join(fastqfiles)
jfcmd = op.join(opts.jellyfish_home, "jellyfish")
cmd = jfcmd
cmd += " count -t {0} -C -o {1}".format(opts.cpus, jfpf)
cmd += " -s {0} -m {1}".format(hashsize, K)
if gzip:
cmd = "gzip -dc {0} | ".format(fastqfiles) + cmd + " /dev/fd/0"
else:
cmd += " " + fastqfiles
if need_update(fastqfiles, jfdb):
sh(cmd)
if opts.nohist:
return
jfhisto = jfpf + ".histogram"
cmd = jfcmd + " histo -t 64 {0} -o {1}".format(jfdb, jfhisto)
if need_update(jfdb, jfhisto):
sh(cmd)
def meryl(args):
"""
%prog meryl merylfile
    Run meryl to dump a histogram to be used in kmer.histogram(). The merylfile
    is the file ending in .mcidx or .mcdat.
"""
p = OptionParser(meryl.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
merylfile, = args
pf, sf = op.splitext(merylfile)
outfile = pf + ".histogram"
cmd = "meryl -Dh -s {0}".format(pf)
sh(cmd, outfile=outfile)
return outfile
def multihistogram(args):
"""
%prog multihistogram *.histogram species
    Plot the histogram based on a set of K-mer histograms. The method follows
    Star et al.'s approach (Atlantic cod genome paper).
"""
p = OptionParser(multihistogram.__doc__)
p.add_option("--kmin", default=15, type="int",
help="Minimum K-mer size, inclusive")
p.add_option("--kmax", default=30, type="int",
help="Maximum K-mer size, inclusive")
p.add_option("--vmin", default=2, type="int",
help="Minimum value, inclusive")
p.add_option("--vmax", default=100, type="int",
help="Maximum value, inclusive")
opts, args, iopts = p.set_image_options(args, figsize="10x5", dpi=300)
histfiles = args[:-1]
species = args[-1]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
A = fig.add_axes([.08, .12, .38, .76])
B = fig.add_axes([.58, .12, .38, .76])
lines = []
legends = []
genomesizes = []
for histfile in histfiles:
ks = KmerSpectrum(histfile)
x, y = ks.get_xy(opts.vmin, opts.vmax)
K = get_number(op.basename(histfile).split(".")[0].split("-")[-1])
if not opts.kmin <= K <= opts.kmax:
continue
line, = A.plot(x, y, '-', lw=1)
lines.append(line)
legends.append("K = {0}".format(K))
ks.analyze(K=K)
genomesizes.append((K, ks.genomesize / 1e6))
leg = A.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(.5)
title = "{0} genome K-mer histogram".format(species)
A.set_title(markup(title))
xlabel, ylabel = "Coverage (X)", "Counts"
A.set_xlabel(xlabel)
A.set_ylabel(ylabel)
set_human_axis(A)
title = "{0} genome size estimate".format(species)
B.set_title(markup(title))
x, y = zip(*genomesizes)
B.plot(x, y, "ko", mfc='w')
t = np.linspace(opts.kmin - .5, opts.kmax + .5, 100)
p = np.poly1d(np.polyfit(x, y, 2))
B.plot(t, p(t), "r:")
xlabel, ylabel = "K-mer size", "Estimated genome size (Mb)"
B.set_xlabel(xlabel)
B.set_ylabel(ylabel)
set_ticklabels_helvetica(B)
labels = ((.04, .96, 'A'), (.54, .96, 'B'))
panel_labels(root, labels)
normalize_axes(root)
imagename = species + ".multiK.pdf"
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
def histogram(args):
"""
%prog histogram meryl.histogram species K
    Plot the histogram based on the meryl K-mer distribution; species and K are
    only used to annotate the graphic. Find totalKmers by running
    kmer.meryl().
"""
p = OptionParser(histogram.__doc__)
p.add_option("--vmin", dest="vmin", default=1, type="int",
help="minimum value, inclusive [default: %default]")
p.add_option("--vmax", dest="vmax", default=100, type="int",
help="maximum value, inclusive [default: %default]")
p.add_option("--pdf", default=False, action="store_true",
help="Print PDF instead of ASCII plot [default: %default]")
p.add_option("--coverage", default=0, type="int",
help="Kmer coverage [default: auto]")
p.add_option("--nopeaks", default=False, action="store_true",
help="Do not annotate K-mer peaks")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
histfile, species, N = args
ascii = not opts.pdf
peaks = not opts.nopeaks
N = int(N)
ks = KmerSpectrum(histfile)
ks.analyze(K=N)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = ks.max2 if not coverage else coverage
Genome_size = int(round(Total_Kmers * 1. / Kmer_coverage))
Total_Kmers_msg = "Total {0}-mers: {1}".format(N, thousands(Total_Kmers))
Kmer_coverage_msg = "{0}-mer coverage: {1}".format(N, Kmer_coverage)
Genome_size_msg = "Estimated genome size: {0:.1f}Mb".\
format(Genome_size / 1e6)
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print >> sys.stderr, msg
x, y = ks.get_xy(opts.vmin, opts.vmax)
title = "{0} genome {1}-mer histogram".format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (6, 6))
plt.plot(x, y, 'g-', lw=2, alpha=.5)
ax = plt.gca()
if peaks:
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for x, y in ks.counts if x in t]
x, y = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, 'ko', lw=2, mec='k', mfc='w')
ax.text(ks.max1, tcounts[ks.max1], "SNP peak", va="top")
ax.text(ks.max2, tcounts[ks.max2], "Main peak")
tc = "gray"
axt = ax.transAxes
ax.text(.95, .95, Total_Kmers_msg, color=tc, transform=axt, ha="right")
ax.text(.95, .9, Kmer_coverage_msg, color=tc, transform=axt, ha="right")
ax.text(.95, .85, Genome_size_msg, color=tc, transform=axt, ha="right")
ax.text(.95, .8, Repetitive_msg, color=tc, transform=axt, ha="right")
ax.text(.95, .75, SNPrate_msg, color=tc, transform=axt, ha="right")
ymin, ymax = ax.get_ylim()
ymax = ymax * 7 / 6
ax.set_title(markup(title), color='r')
ax.set_ylim((ymin, ymax))
xlabel, ylabel = "Coverage (X)", "Counts"
ax.set_xlabel(xlabel, color='r')
ax.set_ylabel(ylabel, color='r')
set_human_axis(ax)
imagename = histfile.split(".")[0] + ".pdf"
savefig(imagename, dpi=100)
return Genome_size
if __name__ == '__main__':
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The DirichletMultinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
__all__ = [
"DirichletMultinomial",
]
_dirichlet_multinomial_sample_note = """For each batch of counts,
`value = [n_0, ..., n_{k-1}]`, `P[value]` is the probability that after
sampling `self.total_count` draws from this Dirichlet-Multinomial distribution,
the number of draws falling in class `j` is `n_j`. Since this definition is
[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables),
different orderings of the draws yield the same counts, so the probability
includes a combinatorial coefficient.
Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.concentration` and `self.total_count`."""
class DirichletMultinomial(distribution.Distribution):
"""Dirichlet-Multinomial compound distribution.
The Dirichlet-Multinomial distribution is parameterized by a (batch of)
length-`k` `concentration` vectors (`k > 1`) and a `total_count` number of
trials, i.e., the number of trials per draw from the DirichletMultinomial. It
is defined over a (batch of) length-`k` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is
identically the Beta-Binomial distribution when `k = 2`.
#### Mathematical Details
The Dirichlet-Multinomial is a distribution over `k`-class counts, i.e., a
length-`k` vector of non-negative integer `counts = n = [n_0, ..., n_{k-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z
Z = Beta(alpha) / N!
```
where:
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `total_count = N`, `N` a positive integer,
* `N!` is `N` factorial, and,
* `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the
[multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
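  For example, with `concentration = [1., 1.]` and `total_count = 2`, the pmf
  is uniform (`1/3` each) over the three outcomes `[2, 0]`, `[1, 1]`, `[0, 2]`.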
Dirichlet-Multinomial is a [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its
samples are generated as follows.
1. Choose class probabilities:
`probs = [p_0,...,p_{k-1}] ~ Dir(concentration)`
2. Draw integers:
`counts = [n_0,...,n_{k-1}] ~ Multinomial(total_count, probs)`
The last `concentration` dimension parametrizes a single Dirichlet-Multinomial
distribution. When calling distribution functions (e.g., `dist.prob(counts)`),
`concentration`, `total_count` and `counts` are broadcast to the same shape.
The last dimension of `counts` corresponds to a single Dirichlet-Multinomial
distribution.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Examples
```python
alpha = [1, 2, 3]
n = 2
dist = DirichletMultinomial(n, alpha)
```
Creates a 3-class distribution in which the 3rd class is the most likely to be
drawn.
The distribution functions can be evaluated on counts.
```python
# counts same shape as alpha.
counts = [0, 0, 2]
dist.prob(counts) # Shape []
# alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match counts.
counts = [[1, 1, 0], [1, 0, 1]]
dist.prob(counts) # Shape [2]
# alpha will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
alpha = [[1, 2, 3], [4, 5, 6]] # Shape [2, 3]
n = [3, 3]
dist = DirichletMultinomial(n, alpha)
# counts will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.
counts = [2, 1, 0]
dist.prob(counts) # Shape [2]
```
"""
# TODO(b/27419586) Change docstring for dtype of concentration once int
# allowed.
def __init__(self,
total_count,
concentration,
validate_args=False,
allow_nan_stats=True,
name="DirichletMultinomial"):
"""Initialize a batch of DirichletMultinomial distributions.
Args:
total_count: Non-negative floating point tensor, whose dtype is the same
as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with
`m >= 0`. Defines this as a batch of `N1 x ... x Nm` different
Dirichlet multinomial distributions. Its components should be equal to
integer values.
concentration: Positive floating point tensor, whose dtype is the
same as `n` with shape broadcastable to `[N1,..., Nm, k]` `m >= 0`.
Defines this as a batch of `N1 x ... x Nm` different `k` class Dirichlet
multinomial distributions.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[total_count, concentration]) as ns:
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
# we use the last dimension for the distribution, whereas
# the batch dimensions are the leading dimensions, which forces the
# distribution dimension to be defined explicitly (i.e. it cannot be
# created automatically by prepending). This forces enough explicitness.
# * All calls involving `counts` eventually require a broadcast between
# `counts` and concentration.
self._total_count = self._maybe_assert_valid_total_count(
ops.convert_to_tensor(total_count, name="total_count"),
validate_args)
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration,
name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(DirichletMultinomial, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=False,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._total_count,
self._concentration],
name=ns)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def concentration(self):
"""Concentration parameter; expected prior counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
    # Event shape depends only on `concentration`, not `total_count`.
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
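    # Sampling exploits the compound structure: k independent
    # Gamma(concentration) draws are, once normalized, a Dirichlet sample,
    # and `multinomial` accepts logits only up to an additive constant, so
    # the unnormalized log-gamma draws can be passed as logits directly.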
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
unnormalized_logits = array_ops.reshape(
math_ops.log(random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)),
shape=[-1, k])
draws = random_ops.multinomial(
logits=unnormalized_logits,
num_samples=n_draws,
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _log_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
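    # Marginalizing the Dirichlet prior analytically gives the compound pmf
    #   P(counts) = [Beta(concentration + counts) / Beta(concentration)]
    #               * (total_count choose counts)
    # where Beta is the multivariate beta function; hence the lbeta
    # difference plus the log-combinations term below.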
ordered_prob = (
special_math_ops.lbeta(self.concentration + counts)
- special_math_ops.lbeta(self.concentration))
return ordered_prob + distribution_util.log_combinations(
self.total_count, counts)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
return self.total_count * (self.concentration /
self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""The covariance for each batch member is defined as the following:
```none
Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
(n + alpha_0) / (1 + alpha_0)
```
where `concentration = alpha` and
`total_concentration = alpha_0 = sum_j alpha_j`.
The covariance between elements in a batch is defined as:
```none
Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
(n + alpha_0) / (1 + alpha_0)
```
""")
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (self.total_count * scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
# We must take care to expand back the last dim whenever we use the
# total_concentration.
c0 = self.total_concentration[..., array_ops.newaxis]
return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0))
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
check_ops.assert_rank_at_least(
concentration, 1,
message="Concentration parameter must have >=1 dimensions."),
check_ops.assert_less(
1, array_ops.shape(concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
def _maybe_assert_valid_total_count(self, total_count, validate_args):
if not validate_args:
return total_count
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
total_count,
message="total_count must be non-negative."),
distribution_util.assert_integer_form(
total_count,
message="total_count cannot contain fractional values."),
], total_count)
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
counts,
message="counts must be non-negative."),
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts last-dimension must sum to `self.total_count`"),
distribution_util.assert_integer_form(
counts,
message="counts cannot contain fractional components."),
], counts)
|
|
from serial import SerialException
from multiprocessing import Process, Pipe, Event
from collections import namedtuple
from time import sleep, time
from struct import pack, unpack
import numpy as np
import logging
import signal
from .. import config as c
from .tamp_serial import *
logger = logging.getLogger('tamproxy.controller')
class PacketController(Process):
# Constants
WINDOW_SIZE = c.host.window_size
ENABLE_TIMEOUT = c.host.enable_timeout
INITIAL_TIMEOUT = c.host.initial_timeout
SRTT_ALPHA = c.host.srtt_alpha
RTTDEV_ALPHA = c.host.rttdev_alpha
SRTT_GAIN = c.host.srtt_gain
RTTDEV_GAIN = c.host.rttdev_gain
SERIAL_RETRIES = c.host.serial_retries
SERIAL_RETRY_TIMEOUT = c.host.serial_retry_timeout
PACK_FORMAT = c.host.pack_format
UNPACK_FORMAT = c.host.unpack_format
RESET_MSG = c.host.reset_msg
START_BYTE = c.packet.start_byte
DEVICELIST_CODE = c.devices.devicelist.code
HELLO_PAYLOAD = c.devices.devicelist.hello_code
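    # Signed 16-bit difference gives a wraparound-aware ordering of packet
    # ids: a positive result means `b` is logically ahead of `a`, even when
    # the uint16 counter has overflowed in between.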
    compare_pids = lambda self, a, b: np.int16(b - a)
def __init__(self):
self._stop = Event()
self._continuous = Event()
self.tserial = None
self.packet_parser = None
self.pipe_inside, self.pipe_outside = Pipe()
self.continuous_requests = set()
self.weighted_tdma_list = []
self.reset()
super(PacketController, self).__init__()
def reset(self, clear_pipe=False):
self.rtt_smoothed = None
self.rtt_deviation = None
self.timeout = self.INITIAL_TIMEOUT
self.en_route = dict() # pid -> packet, time_sent
self.receiving_buffer = dict() # pid -> sent_packet, payload
self.next_send_pid = np.uint16(0)
self.next_recv_pid = np.uint16(0)
self.weighted_tdma_pos = 0
self.packets_received = 0
if clear_pipe:
while self.pipe_inside.poll():
self.pipe_inside.recv()
def connect(self):
self.reset(True)
raw_hello_packet = self.encode_raw_packet(0, self.DEVICELIST_CODE,
self.HELLO_PAYLOAD)
hello_response_length = len(raw_hello_packet) - 1
self.tserial = TAMPSerial(raw_hello_packet, hello_response_length)
self.packet_parser = PacketParser(self.tserial)
self.pipe_inside.send((None, self.RESET_MSG))
def encode_raw_packet(self, pid, dest, payload):
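        # Wire layout (fields in PACK_FORMAT order, widths defined by config):
        # START_BYTE | pid | length | dest | payload, where `length` is the
        # payload size plus 5 header/framing bytes.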
pack_format = self.PACK_FORMAT.format(len(payload))
length = len(payload) + 5
return pack(pack_format, self.START_BYTE, pid, length, dest, payload)
    def set_continuous_enabled(self, enabled):
        if enabled:
            self._continuous.set()
            logger.info("Enabled continuous requests")
        else:
            self._continuous.clear()
            logger.info("Disabled continuous requests")
def get_new_packet_to_send(self):
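        # One-shot requests from the pipe are sent immediately; continuous
        # requests are replayed round-robin, with each request duplicated
        # `weight` times in weighted_tdma_list so it gets weighted airtime.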
# process new requests
while self.pipe_inside.poll():
packet_request = PacketRequest(*self.pipe_inside.recv())
if packet_request.is_continuous:
if not packet_request.remove_continuous:
self.continuous_requests.add(packet_request[:2])
self.weighted_tdma_list += (
[packet_request[:2]] * packet_request.weight)
else:
self.continuous_requests.discard(packet_request[:2])
else:
return packet_request[:2]
        # resend existing continuous_requests
while self.weighted_tdma_list and self._continuous.is_set():
key = self.weighted_tdma_list[self.weighted_tdma_pos]
if key in self.continuous_requests:
self.weighted_tdma_pos += 1
self.weighted_tdma_pos %= len(self.weighted_tdma_list)
return key
else:
# this item was removed
self.weighted_tdma_list.pop(self.weighted_tdma_pos)
if self.weighted_tdma_list:
self.weighted_tdma_pos += 1
self.weighted_tdma_pos %= len(self.weighted_tdma_list)
# nothing else to do
return None
def slide_window(self):
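        # Sliding-window send loop: first handle retransmission of timed-out
        # en-route packets; a brand-new packet is only pulled from the request
        # queue when the window (WINDOW_SIZE) has room.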
if self.ENABLE_TIMEOUT:
# resend all en_route packets that have timed out
for pid in self.en_route:
packet, time_sent = self.en_route[pid]
dt = time() - time_sent
if dt > self.timeout:
logger.debug("Packet timed out, will retransmit: "
"pid={}, packet={}, timeout={}"
.format(pid, packet, self.timeout)
)
self.en_route[pid] = (packet, time())
self.transmit(pid, *packet[:2])
return
# transmit the next packet
if len(self.en_route) < self.WINDOW_SIZE:
packet = self.get_new_packet_to_send()
if packet:
new_pid = self.next_send_pid
self.en_route[new_pid] = (packet, time())
self.transmit(new_pid, *packet)
self.next_send_pid += np.uint16(1)
def decode_raw_packet(self, raw_packet):
payload_length = len(raw_packet) - 4
unpack_format = self.UNPACK_FORMAT.format(payload_length)
start_byte, pid, length, payload = unpack(unpack_format, raw_packet)
return (np.uint16(pid), payload)
def transmit(self, pid, dest, payload):
if self.tserial.out_waiting > 50:
self.tserial.flush()
raw_packet = self.encode_raw_packet(pid, dest, payload)
self.tserial.write(raw_packet)
def receive(self):
raw_packets = self.packet_parser.receive()
packets = [self.decode_raw_packet(p) for p in raw_packets]
for pid, payload in packets:
if payload != self.HELLO_PAYLOAD:
self.process_packet(pid, payload)
return len(packets)
def process_packet(self, pid, payload):
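        # Three cases for an acknowledged pid: it is the next expected pid
        # (deliver it plus any buffered successors), it is ahead of the
        # expected pid (buffer it for in-order delivery), or it is behind
        # (deliver immediately).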
self.packets_received += 1
if pid not in self.en_route:
logger.debug("Retransmitted packet received: pid={}, payload={}"
.format(pid,payload))
return
sent_packet, time_sent = self.en_route.pop(pid)
if self.ENABLE_TIMEOUT:
self.update_timeout(time_sent)
if self.next_recv_pid == pid:
self.pipe_inside.send((sent_packet, payload))
i = 1
while pid + i in self.receiving_buffer:
self.pipe_inside.send(self.receiving_buffer.pop(pid + i))
i += 1
self.next_recv_pid = pid + np.uint16(i)
elif self.compare_pids(self.next_recv_pid, pid) > 0:
self.receiving_buffer[pid] = (sent_packet, payload)
else: self.pipe_inside.send((sent_packet, payload))
def update_timeout(self, time_sent):
"""
Update the timeout value to use for future packets, by combining the
smoothed round trip time, and the deviation in the round trip time
"""
rtt = time() - time_sent
if self.rtt_smoothed:
self.rtt_smoothed = rtt*self.SRTT_ALPHA + (1-self.SRTT_ALPHA)*self.rtt_smoothed
else:
self.rtt_smoothed = rtt
if self.rtt_deviation:
self.rtt_deviation = (abs(rtt-self.rtt_smoothed)*self.RTTDEV_ALPHA
+ (1-self.RTTDEV_ALPHA)*self.rtt_deviation)
else:
self.rtt_deviation = rtt
self.timeout = self.SRTT_GAIN*self.rtt_smoothed + self.RTTDEV_GAIN*self.rtt_deviation
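        # Worked example with hypothetical constants (not the config values):
        # with SRTT_ALPHA=0.125, RTTDEV_ALPHA=0.25, SRTT_GAIN=1, RTTDEV_GAIN=4,
        # rtt_smoothed=0.020s, rtt_deviation=0.004s and a new rtt of 0.030s:
        #   rtt_smoothed  = 0.125*0.030 + 0.875*0.020         = 0.02125
        #   rtt_deviation = 0.25*|0.030-0.02125| + 0.75*0.004 ~= 0.00519
        #   timeout       = 1*0.02125 + 4*0.00519             ~= 0.042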
def stop(self):
logger.info('stop requested')
self._stop.set()
def run(self):
# keyboard interrupt is handled by the main process
signal.signal(signal.SIGINT, signal.SIG_IGN)
i = self.SERIAL_RETRIES
while i >= 0:
try:
logger.info('Connecting to the teensy')
self.connect()
logger.info('Connected!')
i = self.SERIAL_RETRIES
while True:
self.slide_window()
self.receive()
if (self._stop.is_set()
and not self.pipe_inside.poll()
and not self.en_route):
logger.info('stopped')
return
except (IOError, SerialException,
SerialPortUnavailableException) as e:
logger.error(e)
i -= 1
if i == 0:
logger.critical('Giving up, hit maximum serial retries')
return
else:
logger.warn("Retrying connection in 1 second, "
"{} tries left".format(i))
sleep(self.SERIAL_RETRY_TIMEOUT)
continue
except SerialPortEstablishException as e:
logger.critical('Giving up - could not establish a connection')
return
class PacketParser(object):
MAX_PACKET_SIZE = c.packet.max_size
MIN_PACKET_SIZE = c.packet.min_response_size
START_BYTE = c.packet.start_byte
def __init__(self, tserial):
self.tserial = tserial
self.receive_buffer = []
self.error_flag = False
self.receive_length = self.MAX_PACKET_SIZE
self.raw_packets = []
def receive(self):
self.raw_packets = []
while True:
new_byte = self.tserial.read()
if not new_byte: break
if new_byte == chr(self.START_BYTE) and not self.receive_buffer:
# Starting a new raw packet
self.error_flag = False
self.receive_length = self.MAX_PACKET_SIZE
self.process_byte(new_byte)
elif not self.error_flag:
if self.receive_buffer: self.process_byte(new_byte)
else:
if new_byte in c.serial_errors:
self.raise_error_flag("Firmware: {}".format(
c.serial_errors[new_byte].msg))
else:
self.raise_error_flag("{}: {}".format(
c.serial_errors.N.msg, ord(new_byte)))
return self.raw_packets
def process_byte(self, byte):
self.receive_buffer.append(byte)
if len(self.receive_buffer) == 4:
            if ord(byte) > self.MAX_PACKET_SIZE:
                self.raise_error_flag("Specified response length is too long")
            elif ord(byte) < self.MIN_PACKET_SIZE:
                self.raise_error_flag("Specified response length is too short")
            else:
                self.receive_length = ord(byte)
elif len(self.receive_buffer) == self.receive_length:
# the packet is done
self.raw_packets.append("".join(self.receive_buffer))
self.receive_buffer = []
def raise_error_flag(self, msg):
logger.error(msg)
self.error_flag = True
self.receive_buffer = []
PacketRequest = namedtuple('PacketRequest',
["dest",
"payload",
"is_continuous",
"weight",
"remove_continuous"])
PacketRequest.__new__.__defaults__ = (False, 1, False)
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import adjacency
class adjacencies(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS adjacencies.
"""
__slots__ = ("_path_helper", "_extmethods", "__adjacency")
_yang_name = "adjacencies"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__adjacency = YANGDynClass(
base=YANGListType(
"system_id",
adjacency.adjacency,
yang_name="adjacency",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="system-id",
extensions=None,
),
is_container="list",
yang_name="adjacency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"adjacencies",
]
def _get_adjacency(self):
"""
Getter method for adjacency, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies/adjacency (list)
YANG Description: List of the local system's IS-IS adjacencies.
"""
return self.__adjacency
def _set_adjacency(self, v, load=False):
"""
Setter method for adjacency, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies/adjacency (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_adjacency is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adjacency() directly.
YANG Description: List of the local system's IS-IS adjacencies.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"system_id",
adjacency.adjacency,
yang_name="adjacency",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="system-id",
extensions=None,
),
is_container="list",
yang_name="adjacency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """adjacency must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("system_id",adjacency.adjacency, yang_name="adjacency", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='system-id', extensions=None), is_container='list', yang_name="adjacency", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__adjacency = t
if hasattr(self, "_set"):
self._set()
def _unset_adjacency(self):
self.__adjacency = YANGDynClass(
base=YANGListType(
"system_id",
adjacency.adjacency,
yang_name="adjacency",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="system-id",
extensions=None,
),
is_container="list",
yang_name="adjacency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
adjacency = __builtin__.property(_get_adjacency)
_pyangbind_elements = OrderedDict([("adjacency", adjacency)])
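# Usage sketch (hypothetical; pyangbind list entries are keyed, dict-like
# objects, and since this list is config false a backend populates it rather
# than user code):
#   adj = adjacencies()
#   for system_id in adj.adjacency:
#       entry = adj.adjacency[system_id]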
from . import adjacency
class adjacencies(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS adjacencies.
"""
__slots__ = ("_path_helper", "_extmethods", "__adjacency")
_yang_name = "adjacencies"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__adjacency = YANGDynClass(
base=YANGListType(
"system_id",
adjacency.adjacency,
yang_name="adjacency",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="system-id",
extensions=None,
),
is_container="list",
yang_name="adjacency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"adjacencies",
]
def _get_adjacency(self):
"""
Getter method for adjacency, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies/adjacency (list)
YANG Description: List of the local system's IS-IS adjacencies.
"""
return self.__adjacency
def _set_adjacency(self, v, load=False):
"""
Setter method for adjacency, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies/adjacency (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_adjacency is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adjacency() directly.
YANG Description: List of the local system's IS-IS adjacencies.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"system_id",
adjacency.adjacency,
yang_name="adjacency",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="system-id",
extensions=None,
),
is_container="list",
yang_name="adjacency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """adjacency must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("system_id",adjacency.adjacency, yang_name="adjacency", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='system-id', extensions=None), is_container='list', yang_name="adjacency", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__adjacency = t
if hasattr(self, "_set"):
self._set()
def _unset_adjacency(self):
self.__adjacency = YANGDynClass(
base=YANGListType(
"system_id",
adjacency.adjacency,
yang_name="adjacency",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="system-id",
extensions=None,
),
is_container="list",
yang_name="adjacency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
adjacency = __builtin__.property(_get_adjacency)
_pyangbind_elements = OrderedDict([("adjacency", adjacency)])
|
|
# tests.test_classifier.test_confusion_matrix
# Tests for the confusion matrix visualizer
#
# Author: Neal Humphrey
# Author: Benjamin Bengfort
# Created: Tue May 03 11:05:11 2017 -0700
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_confusion_matrix.py [] benjamin@bengfort.com $
"""
Tests for the confusion matrix visualizer
"""
##########################################################################
## Imports
##########################################################################
import sys
import pytest
import yellowbrick as yb
import numpy.testing as npt
import matplotlib.pyplot as plt
from yellowbrick.exceptions import ModelError
from yellowbrick.datasets import load_occupancy
from yellowbrick.classifier.confusion_matrix import *
from unittest.mock import patch
from tests.fixtures import Dataset, Split
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveRegressor
from sklearn.model_selection import train_test_split as tts
from sklearn.datasets import load_digits, make_classification
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope="class")
def digits(request):
"""
Creates a fixture of train and test splits for the sklearn digits dataset
    For ease of use, this returns a Dataset named tuple composed of two Split tuples.
"""
data = load_digits()
X_train, X_test, y_train, y_test = tts(
data.data, data.target, test_size=0.2, random_state=11
)
# Set a class attribute for digits
request.cls.digits = Dataset(Split(X_train, X_test), Split(y_train, y_test))
##########################################################################
## Test Cases
##########################################################################
@pytest.mark.usefixtures("digits")
class TestConfusionMatrix(VisualTestCase):
"""
Test ConfusionMatrix visualizer
"""
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_confusion_matrix(self):
"""
Integration test on digits dataset with LogisticRegression
"""
_, ax = plt.subplots()
model = LogisticRegression(random_state=93)
cm = ConfusionMatrix(model, ax=ax, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
self.assert_images_similar(cm, tol=10)
# Ensure correct confusion matrix under the hood
npt.assert_array_equal(
cm.confusion_matrix_,
np.array(
[
[38, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 35, 0, 0, 0, 0, 0, 0, 2, 0],
[0, 0, 39, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 38, 0, 0, 0, 0, 3, 0],
[0, 0, 0, 0, 40, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 27, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 29, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 35, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 31, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 36],
]
),
)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_no_classes_provided(self):
"""
Integration test on digits dataset with GaussianNB, no classes
"""
_, ax = plt.subplots()
model = GaussianNB()
cm = ConfusionMatrix(model, ax=ax, classes=None)
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
self.assert_images_similar(cm, tol=10)
# Ensure correct confusion matrix under the hood
npt.assert_array_equal(
cm.confusion_matrix_,
np.array(
[
[36, 0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 31, 0, 0, 0, 0, 0, 1, 3, 2],
[0, 1, 34, 0, 0, 0, 0, 0, 4, 0],
[0, 1, 0, 33, 0, 2, 0, 2, 3, 0],
[0, 0, 0, 0, 36, 0, 0, 5, 0, 0],
[0, 0, 0, 0, 0, 27, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 28, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 36, 0, 0],
[0, 3, 0, 1, 0, 1, 0, 4, 25, 0],
[1, 2, 0, 0, 1, 0, 0, 8, 3, 22],
]
),
)
def test_fontsize(self):
"""
Test confusion matrix with smaller fontsize on digits dataset with SVC
"""
_, ax = plt.subplots()
model = SVC(random_state=93)
cm = ConfusionMatrix(model, ax=ax, fontsize=8)
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
self.assert_images_similar(cm, tol=10)
def test_percent_mode(self):
"""
Test confusion matrix in percent mode on digits dataset with SVC
"""
_, ax = plt.subplots()
model = SVC(random_state=93)
cm = ConfusionMatrix(model, ax=ax, percent=True)
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
self.assert_images_similar(cm, tol=10)
        # Ensure correct confusion matrix under the hood (raw counts are
        # stored even in percent mode)
npt.assert_array_equal(
cm.confusion_matrix_,
np.array(
[
[38, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 37, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 39, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 38, 0, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 41, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 27, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 30, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 35, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 34, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 36],
]
),
)
@pytest.mark.xfail(reason="class filtering is not currently supported")
def test_class_filter_eg_zoom_in(self):
"""
Test filtering classes zooms in on the confusion matrix.
"""
_, ax = plt.subplots()
model = LogisticRegression(random_state=93)
cm = ConfusionMatrix(model, ax=ax, classes=[0, 1, 2])
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
self.assert_images_similar(cm, tol=10)
# Ensure correct confusion matrix under the hood
npt.assert_array_equal(
cm.confusion_matrix_, np.array([[38, 0, 0], [0, 35, 0], [0, 0, 39]])
)
def test_extra_classes(self):
"""
Assert that any extra classes raise an exception
"""
model = LogisticRegression(random_state=93)
cm = ConfusionMatrix(model, classes=[0, 1, 2, 11])
with pytest.raises(ModelError, match="could not decode"):
cm.fit(self.digits.X.train, self.digits.y.train)
def test_defined_mapping(self):
"""
Test mapping as label encoder to define tick labels
"""
_, ax = plt.subplots()
model = LogisticRegression(random_state=93)
classes = np.array(
[
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
)
mapping = {
0: "zero",
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
}
cm = ConfusionMatrix(model, ax=ax, encoder=mapping)
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
xlabels = np.array([l.get_text() for l in ax.get_xticklabels()])
npt.assert_array_equal(xlabels, classes)
ylabels = [l.get_text() for l in ax.get_yticklabels()]
ylabels.reverse()
ylabels = np.asarray(ylabels)
npt.assert_array_equal(ylabels, classes)
def test_inverse_mapping(self):
"""
Test LabelEncoder as label encoder to define tick labels
"""
_, ax = plt.subplots()
model = LogisticRegression(random_state=93)
le = LabelEncoder()
le.fit(
[
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
)
cm = ConfusionMatrix(model, ax=ax, encoder=le)
cm.fit(self.digits.X.train, self.digits.y.train)
cm.score(self.digits.X.test, self.digits.y.test)
xlabels = np.array([l.get_text() for l in ax.get_xticklabels()])
npt.assert_array_equal(xlabels, le.classes_)
ylabels = [l.get_text() for l in ax.get_yticklabels()]
ylabels.reverse()
ylabels = np.asarray(ylabels)
npt.assert_array_equal(ylabels, le.classes_)
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Test with Pandas DataFrame and Series input
"""
_, ax = plt.subplots()
# Load the occupancy dataset from fixtures
X, y = load_occupancy(return_dataset=True).to_pandas()
# Create train/test splits
splits = tts(X, y, test_size=0.2, random_state=8873)
X_train, X_test, y_train, y_test = splits
# Create confusion matrix
model = GaussianNB()
cm = ConfusionMatrix(model, ax=ax, classes=None)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
self.assert_images_similar(cm, tol=0.1)
# Ensure correct confusion matrix under the hood
npt.assert_array_equal(cm.confusion_matrix_, np.array([[3012, 114], [1, 985]]))
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
def test_quick_method(self):
"""
Test the quick method with a random dataset
"""
X, y = make_classification(
n_samples=400,
n_features=20,
n_informative=8,
n_redundant=8,
n_classes=2,
n_clusters_per_class=4,
random_state=27,
)
_, ax = plt.subplots()
model = DecisionTreeClassifier(random_state=25)
# compare the images
visualizer = confusion_matrix(model, X, y, ax=ax, show=False)
self.assert_images_similar(visualizer)
def test_isclassifier(self):
"""
Assert that only classifiers can be used with the visualizer.
"""
model = PassiveAggressiveRegressor()
message = (
"This estimator is not a classifier; "
"try a regression or clustering score visualizer instead!"
)
with pytest.raises(yb.exceptions.YellowbrickError, match=message):
ConfusionMatrix(model)
def test_score_returns_score(self):
"""
Test that ConfusionMatrix score() returns a score between 0 and 1
"""
# Load the occupancy dataset from fixtures
X, y = load_occupancy(return_dataset=True).to_numpy()
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42)
# Create and fit the visualizer
visualizer = ConfusionMatrix(LogisticRegression())
visualizer.fit(X_train, y_train)
# Score the visualizer
s = visualizer.score(X_test, y_test)
assert 0 <= s <= 1
def test_with_fitted(self):
"""
Test that visualizer properly handles an already-fitted model
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
model = LogisticRegression().fit(X, y)
classes = ["unoccupied", "occupied"]
with patch.object(model, "fit") as mockfit:
oz = ConfusionMatrix(model, classes=classes)
oz.fit(X, y)
mockfit.assert_not_called()
with patch.object(model, "fit") as mockfit:
oz = ConfusionMatrix(model, classes=classes, is_fitted=True)
oz.fit(X, y)
mockfit.assert_not_called()
with patch.object(model, "fit") as mockfit:
oz = ConfusionMatrix(model, classes=classes, is_fitted=False)
oz.fit(X, y)
mockfit.assert_called_once_with(X, y)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tests for python.tpu.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.tpu import feature_column as tpu_fc
def _initialized_session():
sess = session.Session()
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
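# Pattern used by the graph-mode tests below (a sketch): build the graph,
# then evaluate tensors inside an initialized session:
#   with _initialized_session():
#       result = some_tensor.eval()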
class EmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = tpu_fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
fc._LazyBuilder({
'aaa': sparse_input
}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
class SharedEmbeddingColumnTest(test.TestCase):
@test_util.deprecated_graph_mode_only
def test_defaults(self):
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNotNone(embedding_column_a.initializer)
self.assertIsNotNone(embedding_column_b.initializer)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a._var_scope_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_all_constructor_args(self):
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='var_scope_name')
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('my_initializer', embedding_column_a.initializer())
self.assertEqual('my_initializer', embedding_column_b.initializer())
self.assertEqual('var_scope_name',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('var_scope_name',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual('var_scope_name', embedding_column_a._var_scope_name)
self.assertEqual('var_scope_name', embedding_column_b._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
@test_util.deprecated_graph_mode_only
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array([
[2, -1, -1], # example 0, ids [2]
[0, 1, -1]
]) # example 1, ids [0, 1]
input_b = np.array([
[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]
]) # example 1, ids []
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
(1., 2.), # ids [0], embedding = [1, 2]
# example 1:
(0., 0.), # ids [], embedding = [0, 0]
)
# Build columns.
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a._get_dense_tensor(
fc._LazyBuilder(input_features))
embedding_lookup_b = embedding_column_b._get_dense_tensor(
fc._LazyBuilder(input_features))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python
"""Test of "New Hunt" wizard."""
from grr.gui import runtests_test
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flags
from grr.lib import output_plugin
from grr.lib import test_lib
from grr.lib.flows.general import processes
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import foreman as rdf_foreman
from grr.lib.rdfvalues import paths as rdf_paths
class DummyOutputPlugin(output_plugin.OutputPlugin):
"""An output plugin that sends an email for each response received."""
name = "dummy"
description = "Dummy do do."
args_type = processes.ListProcessesArgs
class TestNewHuntWizard(test_lib.GRRSeleniumTest):
"""Test the Cron view GUI."""
@staticmethod
def FindForemanRules(hunt, token):
fman = aff4.FACTORY.Open("aff4:/foreman", mode="r", aff4_type="GRRForeman",
token=token)
hunt_rules = []
rules = fman.Get(fman.Schema.RULES, [])
for rule in rules:
for action in rule.actions:
if action.hunt_id == hunt.urn:
hunt_rules.append(rule)
return hunt_rules
@staticmethod
def CreateHuntFixtureWithTwoClients():
token = access_control.ACLToken(username="test", reason="test")
# Ensure that clients list is empty
root = aff4.FACTORY.Open(aff4.ROOT_URN, token=token)
for client_urn in root.ListChildren():
if aff4.VFSGRRClient.CLIENT_ID_RE.match(client_urn.Basename()):
data_store.DB.DeleteSubject(client_urn, token=token)
# Add 2 distinct clients
client_id = "C.1%015d" % 0
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_id), "VFSGRRClient",
token=token)
fd.Set(fd.Schema.SYSTEM("Windows"))
fd.Set(fd.Schema.CLOCK(2336650631137737))
fd.Close()
client_id = "C.1%015d" % 1
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_id), "VFSGRRClient",
token=token)
fd.Set(fd.Schema.SYSTEM("Linux"))
fd.Set(fd.Schema.CLOCK(2336650631137737))
fd.Close()
def setUp(self):
super(TestNewHuntWizard, self).setUp()
with self.ACLChecksDisabled():
# Create a Foreman with an empty rule set.
with aff4.FACTORY.Create("aff4:/foreman", "GRRForeman", mode="rw",
token=self.token) as self.foreman:
self.foreman.Set(self.foreman.Schema.RULES())
self.foreman.Close()
def testNewHuntWizard(self):
with self.ACLChecksDisabled():
self.CreateHuntFixtureWithTwoClients()
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=ManageHunts]")
self.Click("css=a[grrtarget=ManageHunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsTextPresent, "What to run?")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > ins.jstree-icon")
self.Click("css=#_Filesystem > ins.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=.Wizard input[id=args-paths-0]")
# Change "path" and "pathtype" values
self.Type("css=.Wizard input[id=args-paths-0]", "/tmp")
self.Select("css=.Wizard select[id=args-pathtype]", "TSK")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
# Click on "Back" button and check that all the values in the form
# remain intact.
self.Click("css=.Wizard button.Back")
self.WaitUntil(self.IsElementPresent,
"css=.Wizard input#args-paths-0")
self.assertEqual("/tmp", self.GetValue(
"css=.Wizard input#args-paths-0"))
self.assertEqual(
"TSK", self.GetSelectedLabel("css=.Wizard select#args-pathtype"))
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
self.Click("css=.Wizard button:contains('Add Output Plugin')")
# Configure the hunt to send an email on results.
self.Select("css=.Wizard select[id=output_1-option]",
"Send an email for each result.")
self.Type("css=.Wizard input[id=output_1-email_address]",
"test@%s" % config_lib.CONFIG["Logging.domain"])
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Where to run?")
# Create 3 foreman rules
self.WaitUntil(
self.IsElementPresent,
"css=.Wizard select[id=rule_1-option]")
self.Select("css=.Wizard select[id=rule_1-option]",
"Regular Expressions")
self.Select("css=.Wizard select[id=rule_1-attribute_name]",
"System")
self.Type("css=.Wizard input[id=rule_1-attribute_regex]",
"Linux")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
self.Click("css=.Wizard button:contains('Add Rule')")
self.Select("css=.Wizard select[id=rule_2-option]",
"Integer Rule")
self.Select("css=.Wizard select[id=rule_2-attribute_name]",
"Clock")
self.Select("css=.Wizard select[id=rule_2-operator]",
"GREATER_THAN")
self.Type("css=.Wizard input[id=rule_2-value]",
"1336650631137737")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
self.Click("css=.Wizard button:contains('Add Rule')")
self.Select("css=.Wizard select[id=rule_3-option]",
"OSX")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
# Click on "Back" button
self.Click("css=.Wizard button.Back")
self.WaitUntil(self.IsTextPresent, "Output Processing")
# Click on "Next" button again and check that all the values that we've just
# entered remain intact.
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Where to run?")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Review")
# Check that the arguments summary is present.
self.WaitUntil(self.IsTextPresent, "Paths")
self.WaitUntil(self.IsTextPresent, "/tmp")
# Check that output plugins are shown.
self.assertTrue(self.IsTextPresent("EmailOutputPlugin"))
self.assertTrue(self.IsTextPresent("test@%s" %
config_lib.CONFIG["Logging.domain"]))
# Check that rules summary is present.
self.assertTrue(self.IsTextPresent("Regex rules"))
# Click on "Run" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent,
"Hunt was created!")
    # Close the window and check that the hunt object was created.
self.Click("css=button.Finish")
    # Select the newly created hunt.
self.Click("css=td:contains('GenericHunt')")
    # Check that correct details are displayed in the hunt details tab.
self.WaitUntil(self.IsTextPresent, "GenericHunt")
self.WaitUntil(self.IsTextPresent, "Flow args")
self.assertTrue(self.IsTextPresent("Paths"))
self.assertTrue(self.IsTextPresent("/tmp"))
self.assertTrue(self.IsTextPresent("EmailOutputPlugin"))
self.assertTrue(self.IsTextPresent("test@%s" %
config_lib.CONFIG["Logging.domain"]))
# Check that the hunt object was actually created
hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
hunts_list = list(hunts_root.OpenChildren())
self.assertEqual(len(hunts_list), 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.state.args.flow_runner_args.flow_name,
"FileFinder")
self.assertEqual(hunt.state.args.flow_args.paths[0], "/tmp")
self.assertEqual(hunt.state.args.flow_args.pathtype,
rdf_paths.PathSpec.PathType.TSK)
# self.assertEqual(hunt.state.args.flow_args.ignore_errors, True)
    self.assertEqual(hunt.state.args.output_plugins[0].plugin_name,
                     "EmailOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.Get(hunt.Schema.STATE), "PAUSED")
# Now try to start the hunt.
self.Click("css=button[name=RunHunt]")
# Note that hunt ACL controls are already tested in acl_manager_test.py.
# Run the hunt.
with self.ACLChecksDisabled():
with aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token) as hunt:
hunt.Run()
# Check that the hunt was created with correct rules
with self.ACLChecksDisabled():
hunt_rules = self.FindForemanRules(hunt, token=self.token)
self.assertEqual(len(hunt_rules), 1)
self.assertTrue(
abs(int(hunt_rules[0].expires - hunt_rules[0].created) -
31 * 24 * 60 * 60) <= 1)
self.assertEqual(len(hunt_rules[0].regex_rules), 2)
self.assertEqual(hunt_rules[0].regex_rules[0].path, "/")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_name, "System")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_regex, "Linux")
self.assertEqual(hunt_rules[0].regex_rules[1].path, "/")
self.assertEqual(hunt_rules[0].regex_rules[1].attribute_name, "System")
self.assertEqual(hunt_rules[0].regex_rules[1].attribute_regex, "Darwin")
self.assertEqual(len(hunt_rules[0].integer_rules), 1)
self.assertEqual(hunt_rules[0].integer_rules[0].path, "/")
self.assertEqual(hunt_rules[0].integer_rules[0].attribute_name, "Clock")
self.assertEqual(hunt_rules[0].integer_rules[0].operator,
rdf_foreman.ForemanAttributeInteger.Operator.GREATER_THAN)
self.assertEqual(hunt_rules[0].integer_rules[0].value, 1336650631137737)
def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# There should be no dummy output plugin visible.
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
self.WaitUntilNot(self.IsTextPresent, "Dummy do do")
def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
with test_lib.ConfigOverrider({
"AdminUI.new_hunt_wizard.default_output_plugin":
"DummyOutputPlugin"}):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Dummy output plugin should be added by default.
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
self.WaitUntil(self.IsTextPresent, "Dummy do do")
def testLabelsHuntRuleDisplaysAvailableLabels(self):
with self.ACLChecksDisabled():
with aff4.FACTORY.Open("C.0000000000000001", aff4_type="VFSGRRClient",
mode="rw", token=self.token) as client:
client.AddLabels("foo", owner="owner1")
client.AddLabels("bar", owner="owner2")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to output plugins page.
self.Click("css=.Wizard button.Next")
# Click 'Next' to go to hunt rules page.
self.Click("css=.Wizard button.Next")
# Select 'Clients With Label' rule.
self.Select("css=.Wizard select[id=rule_1-option]", "Clients With Label")
    # Check that there's an option present for label 'bar' (this option should
    # be selected) and for label 'foo'.
self.WaitUntil(self.IsElementPresent,
"css=.Wizard select[id=rule_1] option:selected[value=bar]")
self.WaitUntil(self.IsElementPresent,
"css=.Wizard select[id=rule_1] option[value=bar]")
def testLabelsHuntRuleCreatesForemanRegexRuleInResultingHunt(self):
with self.ACLChecksDisabled():
with aff4.FACTORY.Open("C.0000000000000001", mode="rw",
token=self.token) as client:
client.AddLabels("foo", owner="test")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to the output plugins page.
self.Click("css=.Wizard button.Next")
# Click 'Next' to go to the hunt rules page.
self.Click("css=.Wizard button.Next")
# Select 'Clients With Label' rule.
self.Select("css=.Wizard select[id=rule_1-option]", "Clients With Label")
self.Select("css=.Wizard select[id=rule_1]", "foo")
# Click 'Next' to go to the hunt overview page. Check that generated regexp
# is displayed there.
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "(.+,|\\A)foo(,.+|\\Z)")
# Click 'Next' to go to submit the hunt and wait until it's created.
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Hunt was created!")
# Get hunt's rules.
with self.ACLChecksDisabled():
hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
hunts_list = list(hunts_root.OpenChildren(mode="rw"))
hunt = hunts_list[0]
hunt.Run() # Run the hunt so that rules are added to the foreman.
hunt_rules = self.FindForemanRules(hunt, token=self.token)
self.assertEqual(len(hunt_rules), 1)
self.assertEqual(len(hunt_rules[0].regex_rules), 1)
self.assertEqual(hunt_rules[0].regex_rules[0].path, "/")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_name, "Labels")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_regex,
"(.+,|\\A)foo(,.+|\\Z)")
def testLabelsHuntRuleMatchesCorrectClients(self):
with self.ACLChecksDisabled():
client_ids = self.SetupClients(10)
with self.ACLChecksDisabled():
with aff4.FACTORY.Open("C.0000000000000001", mode="rw",
token=self.token) as client:
client.AddLabels("foo", owner="owner1")
client.AddLabels("bar", owner="owner2")
with aff4.FACTORY.Open("C.0000000000000007", mode="rw",
token=self.token) as client:
client.AddLabels("bar", owner="GRR")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to the output plugins page and then to hunt rules page.
self.Click("css=.Wizard button.Next")
self.Click("css=.Wizard button.Next")
# Select 'Clients With Label' rule.
self.Select("css=.Wizard select[id=rule_1-option]", "Clients With Label")
self.Select("css=.Wizard select[id=rule_1]", "foo")
# Click 'Next' to go to hunt overview page. Then click 'Next' to go to
# submit the hunt and wait until it's created.
self.Click("css=.Wizard button.Next")
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Hunt was created!")
with self.ACLChecksDisabled():
hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
hunts_list = list(hunts_root.OpenChildren(mode="rw"))
hunt = hunts_list[0]
hunt.Run() # Run the hunt so that rules are added to the foreman.
foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
for client_id in client_ids:
foreman.AssignTasksToClient(client_id)
# Check that hunt flow was started only on labeled clients.
for client_id in client_ids:
flows_count = len(list(aff4.FACTORY.Open(
client_id.Add("flows"), token=self.token).ListChildren()))
if (client_id == rdf_client.ClientURN("C.0000000000000001") or
client_id == rdf_client.ClientURN("C.0000000000000007")):
self.assertEqual(flows_count, 1)
else:
self.assertEqual(flows_count, 0)
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
# -*- coding: utf-8 -*-
"""
docker_registry.drivers.speedy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a speedy-based storage driver.
"""
import gevent
import threading
import os
import random
import string
import logging
import time
from . import speedy_pyclient
from docker_registry.core import driver
from docker_registry.core import exceptions
logger = logging.getLogger(__name__)
class _TempFile(object):
def __init__(self, mode='w+b', prefix='/tmp/'):
self.prefix = prefix
self.name = self._gen_file_name()
self.file = None
if self.name:
self.file = open(self.name, mode)
    def _gen_file_name(self):
        # Pick an unused name made of five random lowercase letters under
        # the configured prefix.
        while True:
            name = self.prefix + ''.join(random.sample(string.ascii_lowercase, 5))
            if not os.path.exists(name):
                return name
def close(self):
if self.file:
self.file.close()
os.remove(self.name)
class _SpeedyMultiPartUploadContext(object):
def __init__(self, path, conn, fragment_size, tmpdir):
self.path = path
self.conn = conn
self.fragment_size = fragment_size
self.tmpdir = tmpdir
self.fragment_tmp_file = []
self.cur_fragment = 0
self.cur_offset = 0
self._lock = threading.Lock()
self.success_parts = 0
self.failed_parts = 0
def _refresh_part_completed(self, status):
try:
self._lock.acquire()
if status == 0:
self.success_parts += 1
else:
self.failed_parts += 1
finally:
self._lock.release()
def _upload_part(self, fragment_index, bytes_range, last_fragment=False):
logger.debug("spawn _upload_part, path:%s, index:%d, bytesrange:%d-%d" % \
(self.path, fragment_index, bytes_range[0], bytes_range[1]))
f = self.fragment_tmp_file[fragment_index].file
f.seek(0)
try:
resp = self.conn.upload(self.path, data=f, fragment_index=fragment_index, bytes_range=bytes_range,
is_last=last_fragment)
if resp.status_code == 200:
logger.debug("_upload_part success, path:%s, index: %d" % \
(self.path, fragment_index))
self._refresh_part_completed(0)
else:
logger.warning("_upload_part failed, path: %s, index: %d, statuscode: %d" % \
(self.path, fragment_index, resp.status_code))
self._refresh_part_completed(1)
        except Exception:
logger.error("upload part failed, path:%s, index:%d, bytesrange:%d-%d" % \
(self.path, fragment_index, bytes_range[0], bytes_range[1]))
self._refresh_part_completed(2)
# delete tmp file after uploaded
self.fragment_tmp_file[fragment_index].close()
self.fragment_tmp_file[fragment_index] = None
logger.debug("exit upload_part, path: %s, fragment: %d" % (self.path, fragment_index))
def push_content(self, buf, more_data=True):
if len(self.fragment_tmp_file) <= self.cur_fragment:
self.fragment_tmp_file.append(_TempFile(mode='w+b', prefix=self.tmpdir))
f = self.fragment_tmp_file[self.cur_fragment].file
# seek to file end, and write data to file
f.seek(0, 2)
f.write(buf)
fsize = f.tell()
logger.debug("after push content fsize: %d" % fsize)
if fsize >= self.fragment_size or not more_data:
# upload this fragment
f.flush()
logger.debug("begin spawn upload part, fragment: %d" % self.cur_fragment)
gevent.spawn(self._upload_part, self.cur_fragment, (self.cur_offset, self.cur_offset + fsize),
not more_data)
logger.debug("end spawn upload part, fragment: %d" % self.cur_fragment)
self.cur_offset += fsize
self.cur_fragment += 1
def _check_error(self):
if self.failed_parts > 0:
self.conn.delete(self.path)
raise exceptions.UnspecifiedError("speedy upload failed")
def _finished_count(self):
try:
self._lock.acquire()
return self.success_parts + self.failed_parts
finally:
self._lock.release()
def wait_all_part_complete(self):
jobs = self.cur_fragment
logger.debug("begin wait all part finished. all jobs: %d" % jobs)
while True:
gevent.sleep(0.1)
if self._finished_count() >= jobs:
self._check_error()
return
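# Worked example (illustrative): with fragment_size = 16 MiB, a 40 MiB
# stream pushed through push_content() is uploaded as three fragments with
# bytes_range values
#   fragment 0: (0, 16777216)
#   fragment 1: (16777216, 33554432)
#   fragment 2: (33554432, 41943040)   # is_last=True
# wait_all_part_complete() then polls until success_parts + failed_parts == 3.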
class _SpeedyMultiPartDownloadContext(object):
iter_chunk_size = 16 * 1024
def __init__(self, path, conn, tmpdir):
self.path = path
self.conn = conn
self.tmpdir = tmpdir
self._fragment_tempfiles = []
self._cur_read_fragment = 0
self._cursor = 0
self._max_completed_index = 0
self._max_completed_byte = 0
self._max_completed_lock = threading.Lock()
self._error_lock = threading.Lock()
self._error = False
self._last_read_time = time.time()
self._last_read_lock = threading.Lock()
self._spawn_downloader()
def clear(self):
for f in self._fragment_tempfiles:
if f:
f.close()
def _get_last_read_time(self):
try:
self._last_read_lock.acquire()
return self._last_read_time
finally:
self._last_read_lock.release()
def _update_last_read_time(self):
try:
self._last_read_lock.acquire()
self._last_read_time = time.time()
finally:
self._last_read_lock.release()
def _set_error(self):
self._error_lock.acquire()
self._error = True
self._error_lock.release()
def _get_error(self):
try:
self._error_lock.acquire()
return self._error
finally:
self._error_lock.release()
def _downloader(self, fragments):
logger.debug("begin download multi parts.")
for fragment in fragments:
fragment_index = fragment['Index']
fragment_begin = fragment['Start']
fragment_end = fragment['End']
try:
logger.debug("begin download part fragment_index: %d" % fragment_index)
resp = self.conn.download(self.path, fragment_index, (fragment_begin, fragment_end), stream=False)
if resp.status_code == 200:
content = resp.content
self._fragment_tempfiles[fragment_index] = _TempFile(mode='w+b', prefix=self.tmpdir)
f = self._fragment_tempfiles[fragment_index].file
f.write(content)
# seek to 0, ready to be read
f.seek(0)
self._refresh_max_completed_byte(fragment_index, fragment_end)
logger.debug("download part success!!! fragment_index: %d" % fragment_index)
elif resp.status_code == 404:
logger.debug("download part, file not found, path: %s, frament_index: %d" % (self.path, fragment_index))
raise exceptions.FileNotFoundError("fetch_part FileNotFound!")
else:
logger.debug("mark else code: %d" % resp.status_code)
raise exceptions.UnspecifiedError("unexcept status code: %d" % resp.status_code)
            except Exception:
logger.error("download part failed, path: %s, index: %d" % (self.path, fragment_index))
self._set_error()
return
# docker read timeout
now = time.time()
if now - self._get_last_read_time() > 300:
logger.debug("speedy long time no read, reader maybe exited!!!")
self._set_error()
self.clear()
return
logger.debug("end download multi parts.")
def _spawn_downloader(self):
fsize, fragments = self.conn.getfileinfo(self.path)
self._fsize = fsize
if not fragments:
return
self._completed = [0] * len(fragments)
for i in range(0, len(fragments)):
self._fragment_tempfiles.append(None)
gevent.spawn(self._downloader, fragments)
def _refresh_max_completed_byte(self, fragment_index, fragment_end):
try:
self._max_completed_lock.acquire()
self._completed[fragment_index] = (fragment_index, fragment_end)
self._max_completed_index = fragment_index
self._max_completed_byte = fragment_end
finally:
self._max_completed_lock.release()
def _get_max_completed_byte(self):
try:
self._max_completed_lock.acquire()
return self._max_completed_byte
finally:
self._max_completed_lock.release()
def read(self, size):
if self._cursor >= self._fsize:
# Read completed and delete all tmpfiles
self.clear()
return ''
# wait data
if self._cursor + size > self._get_max_completed_byte():
while self._cursor >= self._get_max_completed_byte():
gevent.sleep(0.1)
if self._get_error():
break
if self._get_error():
self.clear()
raise RuntimeError("download failed, error flag is on. path: %s" % self.path)
# update last read time
logger.debug("speedy update last read time")
self._update_last_read_time()
logger.debug("max_completed bytes:%d, cur_fragment:%d" % (self._get_max_completed_byte(), self._cur_read_fragment))
sz = size
cur_fragment_info = self._completed[self._cur_read_fragment]
cur_fragment_left = cur_fragment_info[1] - self._cursor
if cur_fragment_left == 0:
# read next fragment file
self._fragment_tempfiles[self._cur_read_fragment].close()
self._fragment_tempfiles[self._cur_read_fragment] = None
self._cur_read_fragment += 1
elif cur_fragment_left <= sz:
sz = cur_fragment_left
f = self._fragment_tempfiles[self._cur_read_fragment].file
buf = f.read(sz)
self._cursor += len(buf)
if not buf:
            message = ('MultiPartDownloadContext; got an empty read on the buffer! '
'cursor={0}, size={1}; Transfer interrupted.'.format(
self._cursor, self._fsize))
raise RuntimeError(message)
return buf
class Storage(driver.Base):
buffer_size = 1 * 1024 * 1024
def __init__(self, path=None, config=None):
self.path = path or ""
# config tmp file dir
self.tmpdir = config.tmpdir
if not self.tmpdir:
self.tmpdir = "/tmp/"
logger.debug("tmpdir : %s" % self.tmpdir)
# config speedy
speedy_pyclient.init_speedy(config.storage_urls)
self.speedy_conn = speedy_pyclient.Connection()
# config speedy fragment size
fragmentsize_str = config.fragment_size
if not fragmentsize_str:
self.fragment_size = 16 * 1024 * 1024
else:
if fragmentsize_str[len(fragmentsize_str)-1] == 'M':
fragmentsize_str = fragmentsize_str[:-1]
self.fragment_size = int(fragmentsize_str) * 1024 * 1024
# run speedy heart beat routine
gevent.spawn(speedy_pyclient.speedy_heart_beater, int(config.heart_beat_interval))
def get_content(self, path):
logger.debug("get speedy content path: %s" % path)
_, fragments = self.speedy_conn.getfileinfo(path)
if len(fragments) != 1:
            logger.error('small-file fragment error! %s' % path)
            raise exceptions.UnspecifiedError("small file has invalid fragment info!")
fragment = fragments[0]
resp = self.speedy_conn.download(path, 0, (fragment['Start'], fragment['End']))
if resp.status_code == 200:
content = resp.content
logger.debug("get content len: %d" % len(content))
return content
elif resp.status_code == 404:
raise exceptions.FileNotFoundError('%s is not here' % path)
else:
logger.error("get content unexcept status code: %d" % resp.status_code)
raise exceptions.UnspecifiedError("get content unexcept status code: %d" % resp.status_code)
def put_content(self, path, content):
logger.debug("put content path: %s" % path)
bytes_range = (0, len(content))
resp = self.speedy_conn.upload(path, content, 0, bytes_range, is_last=True)
if resp.status_code != 200:
raise exceptions.UnspecifiedError("put speedy content failed: %s, status_code: %d"
% (path, resp.status_code))
def exists(self, path):
logger.debug("call exists path: %s" % path)
return self.speedy_conn.exists(path)
def remove(self, path):
logger.debug("remove path: %s" % path)
resp = self.speedy_conn.delete(path)
if resp.status_code == 204:
logger.debug("speedy remove success: %s" % path)
elif resp.status_code == 404:
logger.warning("speedy remove, file not found: %s" % path)
raise exceptions.FileNotFoundError("%s is not here" % path)
else:
logger.error("speedy remove, unexcept status code %d" % resp.status_code)
raise exceptions.UnspecifiedError("speedy remove, unexcept status code %d" % resp.status_code)
def get_size(self, path):
logger.debug("get size: %s" % path)
size, _ = self.speedy_conn.getfileinfo(path)
return size
def list_directory(self, path=None):
logger.debug("list directory path: %s" % path)
resp = self.speedy_conn.list_directory(path)
if resp.status_code == 200:
j = resp.json()
return j["file-list"]
elif resp.status_code == 404:
raise exceptions.FileNotFoundError("no such directory: %s" % path)
else:
raise exceptions.UnspecifiedError("unexcept status code: %d" % path)
def stream_read(self, path, bytes_range=None):
mc = _SpeedyMultiPartDownloadContext(path, self.speedy_conn, self.tmpdir)
while True:
buf = mc.read(self.buffer_size)
if not buf:
break
yield buf
def stream_write(self, path, fp):
speedy_mc = _SpeedyMultiPartUploadContext(path, self.speedy_conn, self.fragment_size, self.tmpdir)
more_data = True
buf = fp.read(self.buffer_size)
while True:
if not buf:
break
buf_more = fp.read(self.buffer_size)
if not buf_more:
more_data = False
speedy_mc.push_content(buf, more_data=more_data)
buf = buf_more
# wait all part upload completed or failed
speedy_mc.wait_all_part_complete()
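# --- Usage sketch (illustrative only; not part of the driver API). The
# ``config`` attributes mirror what Storage.__init__ above reads and are
# otherwise assumptions. ---
def _example_roundtrip(config):  # pragma: no cover
    """Exercise the small-file path of this driver."""
    storage = Storage(path="/registry", config=config)
    storage.put_content("images/layer_meta", "{}")
    assert storage.exists("images/layer_meta")
    return storage.get_content("images/layer_meta")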
|
|
from __future__ import absolute_import, unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
ADMIN_MENU_ORDER = (
("Content", ("links.Link", "generic.Keyword", "generic.ThreadedComment")),
("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
("Users", ("auth.User", "auth.Group",)),
)
EXTRA_MODEL_FIELDS = (
    # Each entry is a four-item sequence describing one injected field.
(
# Dotted path to field.
"mezzanine.generic.models.ThreadedComment.end",
# Dotted path to field class.
"django.db.models.FloatField",
# Positional args for field class.
("End",),
# Keyword args for field class.
{"blank": True, "null": True},
),
)
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.db.models.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"drum.links",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.accounts",
#"mezzanine.blog",
#"mezzanine.forms",
#"mezzanine.pages",
#"mezzanine.galleries",
#"mezzanine.twitter",
#"mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
########
# DRUM #
########
# Drum-specific Mezzanine settings
AUTH_PROFILE_MODULE = "links.Profile"
SITE_TITLE = "Drum"
RATINGS_RANGE = (-1, 1)
RATINGS_ACCOUNT_REQUIRED = True
COMMENTS_ACCOUNT_REQUIRED = True
ACCOUNTS_PROFILE_VIEWS_ENABLED = True
SEARCH_MODEL_CHOICES = ("links.Link",)
# Drum settings
ALLOWED_DUPLICATE_LINK_HOURS = 24 * 7 * 3
ITEMS_PER_PAGE = 20
LINK_REQUIRED = False
AUTO_TAG = True
SECRET_KEY = "pkh4d!@peo4z!0ntn$io*!n2tcagaeaawfzymbt=-0$t%#0me5"
NEVERCACHE_KEY = "pkh4d!@peo4z!0ntn$io*!n2tcagaeaawfzymbt=-0$t%#0me5"
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py, which should be
# ignored by your version control system, allowing settings to be
# defined per machine.
try:
from .local_settings import *
except ImportError as e:
if "local_settings" not in str(e):
raise e
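# A minimal local_settings.py sketch (illustrative; the values are
# assumptions and should be generated per machine, as noted above):
#
# DEBUG = True
# SECRET_KEY = "generate-a-unique-value-per-machine"
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "dev.db",
#     }
# }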
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class AgentPoolsOperations(object):
"""AgentPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster. The operation returns properties
of each agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2019_04_01.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the agent pool.
Gets the details of the agent pool by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AgentPool')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> LROPoller["_models.AgentPool"]:
"""Creates or updates an agent pool.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: Parameters supplied to the Create or Update an agent pool operation.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2019_04_01.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes an agent pool.
Deletes the agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
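# --- Usage sketch (illustrative only; ``ContainerServiceClient`` and the
# credential wiring are assumptions, not part of this generated module). ---
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.containerservice import ContainerServiceClient
#
# client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
# poller = client.agent_pools.begin_create_or_update(
#     resource_group_name="my-rg",
#     resource_name="my-cluster",
#     agent_pool_name="pool1",
#     parameters=agent_pool_model,
# )
# agent_pool = poller.result()  # blocks until the LRO completes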
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ComponentCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'error': 'str',
'message': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'error': 'error',
'message': 'message',
'status': 'status',
'type': 'type'
}
def __init__(self, error=None, message=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1ComponentCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._error = None
self._message = None
self._status = None
self._type = None
self.discriminator = None
if error is not None:
self.error = error
if message is not None:
self.message = message
self.status = status
self.type = type
@property
def error(self):
"""Gets the error of this V1ComponentCondition. # noqa: E501
Condition error code for a component. For example, a health check error code. # noqa: E501
:return: The error of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this V1ComponentCondition.
Condition error code for a component. For example, a health check error code. # noqa: E501
:param error: The error of this V1ComponentCondition. # noqa: E501
:type: str
"""
self._error = error
@property
def message(self):
"""Gets the message of this V1ComponentCondition. # noqa: E501
Message about the condition for a component. For example, information about a health check. # noqa: E501
:return: The message of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1ComponentCondition.
Message about the condition for a component. For example, information about a health check. # noqa: E501
:param message: The message of this V1ComponentCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def status(self):
"""Gets the status of this V1ComponentCondition. # noqa: E501
Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\". # noqa: E501
:return: The status of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ComponentCondition.
Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\". # noqa: E501
:param status: The status of this V1ComponentCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1ComponentCondition. # noqa: E501
Type of condition for a component. Valid value: \"Healthy\" # noqa: E501
:return: The type of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1ComponentCondition.
Type of condition for a component. Valid value: \"Healthy\" # noqa: E501
:param type: The type of this V1ComponentCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ComponentCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ComponentCondition):
return True
return self.to_dict() != other.to_dict()
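# --- Usage sketch (illustrative only; not part of the generated API). ---
def _example_component_condition():  # pragma: no cover
    """Build a condition and round-trip it through to_dict().

    ``status`` and ``type`` are the two required fields enforced by the
    setters above; the rest default to None.
    """
    cond = V1ComponentCondition(status="True", type="Healthy", message="ok")
    return cond.to_dict()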
|
|
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
- `hour` - previous hour. If run at 9:07am, will generate usage for
8-9am.
- `month` - previous month. If the script is run April 1, it will
generate usages for March 1 through March 31.
- `day` - previous day. If run on July 4th, it generates usages for
July 3rd.
- `year` - previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
from __future__ import print_function
import datetime
import iso8601
import sys
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
from cinder import context
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
def _time_error(LOG, begin, end):
if CONF.start_time:
begin = datetime.datetime.strptime(CONF.start_time,
"%Y-%m-%d %H:%M:%S")
if CONF.end_time:
end = datetime.datetime.strptime(CONF.end_time,
"%Y-%m-%d %H:%M:%S")
begin = begin.replace(tzinfo=iso8601.UTC)
end = end.replace(tzinfo=iso8601.UTC)
if not end > begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,
'end': end}
LOG.error(msg)
sys.exit(-1)
return begin, end
def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
"""volume_ref notify usage"""
try:
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_volume_usage(
admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
except Exception as exc_msg:
LOG.error("Exists volume notification failed: %s",
exc_msg, resource=volume_ref)
def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
"""snapshot_ref notify usage"""
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_snapshot_usage(
admin_context, snapshot_ref, 'exists', extra_info)
except Exception as exc_msg:
LOG.error("Exists snapshot notification failed: %s",
exc_msg, resource=snapshot_ref)
def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
"""backup_ref notify usage"""
try:
cinder.volume.utils.notify_about_backup_usage(
admin_context, backup_ref, 'exists', extra_info)
LOG.debug("Sent notification for <backup_id: %(backup_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'backup_id': backup_ref.id,
'project_id': backup_ref.project_id,
'extra_info': extra_info})
except Exception as exc_msg:
LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.created_at),
'audit_period_ending': str(obj_ref.created_at),
}
LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'create.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.deleted_at),
'audit_period_ending': str(obj_ref.deleted_at),
}
LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'delete.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context,
begin, end, notify_about_usage, type_id_str, type_name):
_notify_usage(LOG, obj_ref, extra_info, admin_context)
if CONF.send_actions:
if begin < obj_ref.created_at < end:
_create_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
if obj_ref.deleted_at and begin < obj_ref.deleted_at < end:
_delete_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
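# Illustrative outcome: a volume that existed during the audit window always
# gets an 'exists' notification; with --send_actions it additionally gets
# create.start/create.end if created within the window, and
# delete.start/delete.end if deleted within it.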
def main():
objects.register_all()
admin_context = context.get_admin_context()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
LOG = logging.getLogger("cinder")
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
begin, end = _time_error(LOG, begin, end)
LOG.info("Starting volume usage audit")
LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
{"begin_period": begin, "end_period": end})
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
volumes = objects.VolumeList.get_all_active_by_window(admin_context,
begin,
end)
LOG.info("Found %d volumes", len(volumes))
for volume_ref in volumes:
_obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
admin_context, begin, end,
cinder.volume.utils.notify_about_volume_usage,
"volume_id", "volume")
snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
begin, end)
LOG.info("Found %d snapshots", len(snapshots))
for snapshot_ref in snapshots:
_obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_snapshot_usage,
"snapshot_id", "snapshot")
backups = objects.BackupList.get_all_active_by_window(admin_context,
begin, end)
LOG.info("Found %d backups", len(backups))
for backup_ref in backups:
_obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_backup_usage,
"backup_id", "backup")
LOG.info("Volume usage audit completed")
|
|
#
#
# All Rights Reserved.
# Copyright 2011 OpenStack LLC.
# Copyright 2010 Jacob Kaplan-Moss
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import json
import types
import errno
import os
import time
import prettytable
from paxes_cinder.k2aclient import _
from paxes_cinder.k2aclient import utils
from paxes_cinder.k2aclient.v1.k2uom import K2Resource
from paxes_cinder.k2aclient.v1.k2web import K2WebResource
from paxes_cinder.k2aclient.openstack.common import strutils
import logging
_logger = logging.getLogger(__name__)
class K2Encoder(json.JSONEncoder):
def default(self, obj):
# if type(obj) is types.ListType:
if isinstance(obj, types.ListType):
ll = []
for li in obj:
if li is None:
ll.append("Null")
elif isinstance(li, types.StringType):
ll.append(li)
elif isinstance(li, K2Resource) or \
isinstance(li, K2WebResource):
ll.append(self.default(li))
else:
msg = (_("k2aclient:"
" during encoding"
" unexpected type: >%s<") %
li)
_logger.warn(msg)
# return "["+",".join(ll)+"]"
return ll
elif isinstance(obj, K2Resource) or isinstance(obj, K2WebResource):
outdct = {}
for k, v in obj.__dict__.iteritems():
if k[:1] == "_" and not k.startswith("_pattr_"):
pass
elif k == "_pattr_metadata":
if hasattr(obj, "id"):
outdct["id"] = obj.id
elif v is None:
# outdct[k] = None
# if not assigned, don't output
pass
elif k == "group":
outdct[k] = v
elif isinstance(v, types.StringType):
outdct[k[7:]] = v
else:
outdct[k[7:]] = self.default(v)
return outdct
return json.JSONEncoder.default(self, obj)
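# Typical usage of the encoder, as in the do_*_show commands below:
#   json.dump(resource, sys.stdout, sort_keys=True, indent=4, cls=K2Encoder)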
def print_k2_list(objs, fields, formatters={}):
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
data = getattr(o, field, '')
row.append(data)
pt.add_row(row)
if len(pt._rows) > 0:
print(strutils.safe_encode(pt.get_string(sortby=fields[0])))
######## UOM
def _print_cluster_list(clusters):
# simplify for output
for cluster in clusters:
cluster._ssp_ = None
if cluster.cluster_shared_storage_pool is not None:
cluster._ssp_ = cluster.cluster_shared_storage_pool.split("/")[-1]
print_k2_list(clusters, ['cluster_name', 'id', 'cluster_id', '_ssp_'])
def _print_logicalpartition_list(logicalpartitions):
print_k2_list(logicalpartitions, ['partition_name',
'id',
'partition_type',
'partition_id',
'partition_state'])
def _print_managedsystem_list(managedsystems):
print_k2_list(managedsystems, ['system_name',
'id',
'primary_ip_address',
'state'])
def _print_managementconsole_list(managementconsole):
print_k2_list(managementconsole, ['management_console_name',
'id'])
def _print_sharedstoragepool_list(sharedstoragepools):
print_k2_list(sharedstoragepools, ['storage_pool_name',
'id',
'capacity',
'free_space',
'over_commit_space',
'total_logical_unit_size',
'unique_device_id'])
def _print_sharedstoragepool_list_lus(sharedstoragepool, full=False):
lus = sharedstoragepool.logical_units.logical_unit
if not full:
print_k2_list(lus, ['unit_name',
'unit_capacity',
'thin_device',
'logical_unit_type'])
else:
print_k2_list(lus, ['unit_name',
'unique_device_id',
'cloned_from',
'unit_capacity',
'thin_device',
'logical_unit_type'])
def _print_virtualioserver_list(virtualioservers):
print_k2_list(virtualioservers, ['partition_name',
'id',
'partition_type',
'partition_id',
'partition_state'])
def _print_clientnetworkadapter_list(clientnetworkadapters):
print_k2_list(clientnetworkadapters, ['id',
'mac_address',
'port_vlan_id',
'virtual_switch_id',
'adapter_type',
'location_code',
'local_partition_id',
'virtual_slot_number'])
def _print_virtualnetwork_list(virtualnetworks):
print_k2_list(virtualnetworks, ['network_name',
'id',
'network_vlan_id',
'vswitch_id',
'tagged_network'])
def _print_virtualswitch_list(virtualswitchs):
print_k2_list(virtualswitchs, ['switch_name',
'id',
'switch_id',
'switch_mode'])
# Cluster
def do_cluster_list(cs, args):
"""Output a list of cluster."""
cluster_list = cs.cluster.list()
_print_cluster_list(cluster_list)
@utils.arg('cluster',
metavar='<cluster>',
help=_('Id of the cluster.'))
def do_cluster_show(cs, args):
"""Output details for a specific cluster."""
cluster = cs.cluster.get(args.cluster)
json.dump(cluster, sys.stdout, sort_keys=True, indent=4, cls=K2Encoder)
print ("\n")
@utils.arg('cluster',
metavar='<cluster_id>',
help=_('UUID of the cluster containing the LUs.'))
@utils.arg('source',
metavar='<source_udid>',
help=_('UDID of the source logicalunit.'))
@utils.arg('destination',
metavar='<destination_udid>',
help=_('UDID of the destination logicalunit.'))
def do_cluster_lulinkedclone(cs, args):
"""Clone a SharedStoragePool (SSP) LogicalUnit (LU)"""
cluster = cs.cluster.get(args.cluster)
cluster.api.lu_linked_clone(cluster, args.source, args.destination)
# LogicalPartition
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem to list logicalpartitions'))
def do_logicalpartition_list(cs, args):
"""Given a managedsystem, output its logicalpartitions."""
logicalpartitions = cs.logicalpartition.list(args.managedsystem)
_print_logicalpartition_list(logicalpartitions)
def do_logicalpartition_listasroot(cs, args):
"""Output a list of all logicalpartitions."""
logicalpartitions = cs.logicalpartition.listasroot()
_print_logicalpartition_list(logicalpartitions)
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('logicalpartition',
metavar='<logicalpartition>',
help=_('Id of the logicalpartition.'))
def do_logicalpartition_show(cs, args):
"""Output details for a specific logicalpartition """
"""under a given managedsystem."""
logicalpartition = cs.logicalpartition.get(args.managedsystem,
args.logicalpartition)
json.dump(logicalpartition, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('logicalpartition',
metavar='<logicalpartition>',
help=_('Id of the logicalpartition.'))
def do_logicalpartition_showasroot(cs, args):
"""Output details for a specific logicalpartition."""
logicalpartition = cs.logicalpartition.getasroot(args.logicalpartition)
json.dump(logicalpartition, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('logicalpartitions',
metavar='<logicalpartitions>',
help=_('UDIDs of the logicalpartition.'),
nargs='+')
def do_logicalpartition_delete(cs, args):
"""Delete logicalpartitions."""
for lpar in args.logicalpartitions:
cs.managedsystem.deletebyid(args.managedsystem,
"LogicalPartition",
lpar)
# ManagedSystem
def do_managedsystem_list(cs, args):
"""Output a list of managedsystem."""
managedsystem_list = cs.managedsystem.list()
_print_managedsystem_list(managedsystem_list)
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
def do_managedsystem_show(cs, args):
"""Output details for a specific managedsystem."""
managedsystem = cs.managedsystem.get(args.managedsystem)
json.dump(managedsystem, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
# ManagementConsole
def do_managementconsole_list(cs, args):
"""Output a list of managedsystem."""
managementconsole_list = cs.managementconsole.list()
_print_managementconsole_list(managementconsole_list)
@utils.arg('managementconsole',
metavar='<managementconsole>',
help=_('Id of the managementconsole.'))
def do_managementconsole_show(cs, args):
"""Output details for a specific managementconsole."""
managementconsole = cs.managementconsole.get(args.managementconsole)
json.dump(managementconsole, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('managementconsole',
metavar='<managementconsole>',
help=_('Id of the managementconsole.'))
@utils.arg('cmd',
metavar='<cmd>',
help=_('Command to run.'))
def do_managementconsole_cmd(cs, args):
"""Run command"""
# # could get mc from list
# mcs = cs.managementconsole.list()
# mc = cs.managementconsole.get(mcs[0].id)
mc = cs.managementconsole.get(args.managementconsole)
jresponse = cs.managementconsole.run_cmd(mc,
args.cmd)
json.dump(jresponse, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('managementconsole',
metavar='<managementconsole>',
help=_('Id of the managementconsole.'))
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('virtualioserver',
metavar='<virtualioserver_id>',
help=_('UUID of the virtualioserver.'))
@utils.arg('cmd',
metavar='<cmd>',
help=_('Command to run.'))
def do_managementconsole_cmd_vios(cs, args):
"""Run vios command"""
# # could get mc from list
# mcs = cs.managementconsole.list()
# mc = cs.managementconsole.get(mcs[0].id)
mc = cs.managementconsole.get(args.managementconsole)
ms = cs.managedsystem.get(args.managedsystem)
vios = cs.virtualioserver.get(ms.id, args.virtualioserver)
jresponse = cs.managementconsole.run_vios_cmd(mc,
ms,
vios,
args.cmd)
json.dump(jresponse, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
# SharedStoragePool
# @utils.arg('cluster',
# metavar='<cluster>',
# help=_('Id of the cluster to list sharedstoragepool'))
def do_sharedstoragepool_list(cs, args):
"""Output a list of sharedstoragepool."""
# sharedstoragepools = cs.sharedstoragepool.list(args.cluster)
sharedstoragepools = cs.sharedstoragepool.list()
_print_sharedstoragepool_list(sharedstoragepools)
# @utils.arg('cluster',
# metavar='<cluster>',
# help=_('Id of the cluster.'))
@utils.arg('sharedstoragepool',
metavar='<sharedstoragepool>',
help=_('Id of the sharedstoragepool.'))
def do_sharedstoragepool_show(cs, args):
"""Output details for a specific sharedstoragepool."""
sharedstoragepool = cs.sharedstoragepool.get(args.sharedstoragepool)
json.dump(sharedstoragepool, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('sharedstoragepool',
metavar='<sharedstoragepool_id>',
help=_('UUID of the sharedstoragepool.'))
@utils.arg('unitname',
metavar='<unit_name>',
help=_('Name of the new logicalunit.'))
@utils.arg('unitcapacity',
metavar='<unit_capacity>',
help=_('Capacity of the new logicalunit.'))
@utils.arg('--thin',
metavar='<True|False>',
help=_('Optional flag to control whether '
'the new logicalunit will be thick or thin'),
default=False)
@utils.arg('--lut',
metavar='<logical_unit_type>',
help=_('LogicalUnitType: VirtualIO_Disk | VirtualIO_Image'),
default="VirtualIO_Disk")
@utils.arg('--cf',
metavar='<cloned_from>',
help=_('Optional udid of LU to clone from, new if omitted'),
default=None)
def do_sharedstoragepool_update_append_lu(cs, args):
"""Add a logicalunit (LU) to a sharedstoragepool (SSP)"""
ssp = cs.sharedstoragepool.get(args.sharedstoragepool)
(new_lu, updated_ssp) = ssp.update_append_lu(args.unitname,
int(args.unitcapacity),
args.thin,
args.lut,
args.cf)
print ("For sharedstoragepool: >%s<, appended logical unit:" %
updated_ssp.id)
json.dump(new_lu, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('sharedstoragepool',
metavar='<sharedstoragepool_id>',
help=_('UUID of the sharedstoragepool.'))
@utils.arg('logicalunits',
metavar='<logicalunit_udids>',
help=_('UDIDs of the logicalunit.'),
nargs='+')
def do_sharedstoragepool_update_del_lus(cs, args):
"""Delete a logicalunits (LUs) from a sharedstoragepool (SSP)"""
ssp = cs.sharedstoragepool.get(args.sharedstoragepool)
ssp.update_del_lus(args.logicalunits)
@utils.arg('sharedstoragepool',
metavar='<sharedstoragepool_id>',
help=_('UUID of the sharedstoragepool.'))
@utils.arg('--full',
metavar='<True|False>',
help=_('Optional flag to control whether '
'full details should be output'),
default=False)
def do_sharedstoragepool_list_lus(cs, args):
"""List the logicalunits (LUs) in a sharedstoragepool (SSP)"""
ssp = cs.sharedstoragepool.get(args.sharedstoragepool)
_print_sharedstoragepool_list_lus(ssp, full=args.full)
# VirtualIOServer
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem to list virtualioservers'))
def do_virtualioserver_list(cs, args):
"""Output a list of virtualioservers."""
virtualioservers = cs.virtualioserver.list(args.managedsystem)
_print_virtualioserver_list(virtualioservers)
def do_virtualioserver_listasroot(cs, args):
"""Output a list of all virtualioservers."""
virtualioservers = cs.virtualioserver.listasroot()
_print_virtualioserver_list(virtualioservers)
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('virtualioserver',
metavar='<virtualioserver>',
help=_('Id of the virtualioserver.'))
@utils.arg('--xag',
metavar='<xag>',
action='append',
help=_('Optional extended attributes'
' Valid values include: All, Advanced, Hypervisor,'
' SystemNetwork, ViosStorage, ViosNetwork, ViosFCMapping,'
' ViosSCSIMapping, and None'
' May be used multiple times.'
' "All" if omitted'
),
default=[])
def do_virtualioserver_show(cs, args):
"""Output details for a virtualioserver."""
virtualioserver = cs.virtualioserver.get(args.managedsystem,
args.virtualioserver,
xag=args.xag)
json.dump(virtualioserver, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('virtualioserver',
metavar='<virtualioserver>',
help=_('Id of the virtualioserver.'))
def do_virtualioserver_showasroot(cs, args):
"""Output details for a virtualioserver."""
virtualioserver = cs.virtualioserver.getasroot(args.virtualioserver)
json.dump(virtualioserver, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
# @utils.arg('managedsystem',
# metavar='<managedsystem>',
# help=_('Id of the managedsystem.'))
# @utils.arg('virtualioserver',
# metavar='<virtualioserver>',
# help=_('Id of the virtualioserver.'))
# @utils.arg(
# '--output-file-name-root',
# metavar='<output-file-name-root>',
# default='/tmp/vios-rtu',
# help=_('Root file for output'))
# def do_virtualioserver_rtu(cs, args):
# """Output round trip details for a virtualioserver."""
#
# virtualioserver = cs.virtualioserver.get(args.managedsystem,
# args.virtualioserver)
#
# # get xml
# body = virtualioserver._k2resp.body
# with open(args.output_file_name_root + ".get.xml", 'w') as f:
# f.write(body)
#
# # json
# with open(args.output_file_name_root + ".json", 'w') as f:
# json.dump(virtualioserver, f, sort_keys=True, indent=4,
# cls=K2Encoder)
# f.write("\n")
#
# # updated xml
# element = v1k2creater.process_root("uom",
# v1k2creater.Mode.UPDATE,
# virtualioserver)
# xml = minidom.parseString(element.toxmlstring())
# with open(args.output_file_name_root + ".update.xml", 'w') as f:
# f.write(xml.toprettyxml(indent=' ' * 4))
#
# ######
# ms_id = args.managedsystem
# lpar_id = "1B071EC7-29A4-4717-9C15-B867BAE6BD7A"
# new_virtual_scsi_mapping = []
# print ("VSM edits:")
# for vsm in virtualioserver.virtual_scsi_mappings.virtual_scsi_mapping:
#
# ####
# # if vsm.client_adapter\
# # is not None and\
# # vsm.client_adapter.associated_logical_partition\
# # is not None:
# keep = True
# if vsm.associated_logical_partition is not None:
# parts = vsm.associated_logical_partition.split('/')
# cur_ms_id = parts[-3]
# cur_lpar_id = parts[-1]
# if cur_ms_id == ms_id and cur_lpar_id == lpar_id:
# keep = False
# msg = " will delete: vsm.server_adapter.adapter_name: >%s<"
# print (msg % (vsm.server_adapter.adapter_name,))
#
# if keep:
# new_virtual_scsi_mapping.append(vsm)
# msg = " will keep: vsm.server_adapter.adapter_name: >%s<"
# print (msg % (vsm.server_adapter.adapter_name,))
#
# # print ("Updated VSM will be:")
# # for vsm in new_virtual_scsi_mapping:
# # msg = " vsm.client_adapter.adapter_name: >%s<"
# # print (msg % (vsm.server_adapter.adapter_name,))
#
# if len(new_virtual_scsi_mapping) != \
# len(virtualioserver.virtual_scsi_mappings.virtual_scsi_mapping
# ):
# virtualioserver.virtual_scsi_mappings.virtual_scsi_mapping = \
# new_virtual_scsi_mapping
#
# # updated xml
# element = v1k2creater.process_root("uom",
# v1k2creater.Mode.UPDATE,
# virtualioserver)
# xml = minidom.parseString(element.toxmlstring())
# with open(args.output_file_name_root + ".update2.xml", 'w') as f:
# f.write(xml.toprettyxml(indent=' ' * 4))
# ClientNetworkAdapter
@utils.arg('logicalpartition',
metavar='<logicalpartition>',
help=_('Id of the logicalpartition to list clientnetworkadapters'))
def do_clientnetworkadapter_list(cs, args):
"""Given a logicalpartition, output its clientnetworkadapters."""
clientnetworkadapters = cs.clientnetworkadapter.list(args.logicalpartition)
_print_clientnetworkadapter_list(clientnetworkadapters)
@utils.arg('logicalpartition',
metavar='<logicalpartition>',
help=_('Id of the logicalpartition.'))
@utils.arg('clientnetworkadapter',
metavar='<clientnetworkadapter>',
help=_('Id of the clientnetworkadapter.'))
def do_clientnetworkadapter_show(cs, args):
"""Output details for a specific clientnetworkadapter """
"""under a given logicalpartition."""
clientnetworkadapter = \
cs.clientnetworkadapter.get(args.logicalpartition,
args.clientnetworkadapter)
json.dump(clientnetworkadapter, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('logicalpartition',
metavar='<logicalpartition>',
help=_('Id of the logicalpartition.'))
@utils.arg('clientnetworkadapters',
metavar='<clientnetworkadapters>',
help=_('UDIDs of the clientnetworkadapter.'),
nargs='+')
def do_clientnetworkadapter_delete(cs, args):
"""Delete clientnetworkadapter."""
for clientnetworkadapter in args.clientnetworkadapters:
cs.clientnetworkadapter.deletebyid(args.logicalpartition,
clientnetworkadapter)
# VirtualNetwork
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem to list virtualnetworks'))
def do_virtualnetwork_list(cs, args):
"""Given a managedsystem, output its virtualnetworks."""
virtualnetworks = cs.virtualnetwork.list(args.managedsystem)
_print_virtualnetwork_list(virtualnetworks)
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('virtualnetwork',
metavar='<virtualnetwork>',
help=_('Id of the virtualnetwork.'))
def do_virtualnetwork_show(cs, args):
"""Output details for a specific virtualnetwork """
"""under a given managedsystem."""
virtualnetwork = cs.virtualnetwork.get(args.managedsystem,
args.virtualnetwork)
json.dump(virtualnetwork, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('virtualnetworks',
metavar='<virtualnetworks>',
help=_('UDIDs of the virtualnetwork.'),
nargs='+')
def do_virtualnetwork_delete(cs, args):
"""Delete virtualnetwork."""
for virtualnetwork in args.virtualnetworks:
cs.virtualnetwork.deletebyid(args.managedsystem,
virtualnetwork)
# VirtualSwitch
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem to list virtualswitchs'))
def do_virtualswitch_list(cs, args):
"""Given a managedsystem, output its virtualswitchs."""
virtualswitchs = cs.virtualswitch.list(args.managedsystem)
_print_virtualswitch_list(virtualswitchs)
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('virtualswitch',
metavar='<virtualswitch>',
help=_('Id of the virtualswitch.'))
def do_virtualswitch_show(cs, args):
"""Output details for a specific virtualswitch """
"""under a given managedsystem."""
virtualswitch = cs.virtualswitch.get(args.managedsystem,
args.virtualswitch)
json.dump(virtualswitch, sys.stdout, sort_keys=True, indent=4,
cls=K2Encoder)
print ("\n")
@utils.arg('managedsystem',
metavar='<managedsystem>',
help=_('Id of the managedsystem.'))
@utils.arg('virtualswitchs',
metavar='<virtualswitchs>',
help=_('UDIDs of the virtualswitch.'),
nargs='+')
def do_virtualswitch_delete(cs, args):
"""Delete virtualnetwork."""
for virtualswitch in args.virtualswitchs:
cs.virtualswitch.deletebyid(args.managedsystem,
virtualswitch)
######## WEB
def _print_web_file_list(files):
print_k2_list(files, ['filename', 'id', 'file_uuid', 'file_enum_type'])
# WEB File
def do_web_file_list(cs, args):
"""Output a list of file."""
file_list = cs.web_file.list()
_print_web_file_list(file_list)
@utils.arg('file',
metavar='<file>',
help=_('Id of the file.'))
def do_web_file_show(cs, args):
"""Output details for a specific file."""
uomfile = cs.web_file.get(args.file)
json.dump(uomfile, sys.stdout, sort_keys=True, indent=4, cls=K2Encoder)
print ("\n")
@utils.arg('files',
metavar='<files>',
help=_('Ids of the files.'),
nargs='+')
def do_web_file_delete(cs, args):
"""Delete files."""
for webfile in args.files:
cs.web_file.deletebyid(webfile)
######## MISC
@utils.arg('uomresource',
metavar='<uomresource>',
help=_('/rest/api/uom<uomresource>.'))
@utils.arg(
'--output-file-name',
metavar='<output-file-name>',
default=None,
help=_('Send output to file'))
def do_uom_show(cs, args):
"""Output details for a UOM path."""
k2resp = cs.uom.get(args.uomresource)
if args.output_file_name:
f = open(args.output_file_name, "w")
f.write(k2resp.body)
f.close()
else:
print (k2resp.body)
@utils.arg(
'--cinder-conf-dir',
metavar='<cinder-conf-dir>',
default="/etc/cinder",
help=_('Read cinder ssp conf files and report on health'))
def do_paxes_check_sspconf(cs, args):
"""Output health details for PowerVC ssp resources."""
tset = cs.paxes.check_ssp_conf(args.cinder_conf_dir)
tforj = cs.paxes.result_output_as_dict(tset)
json.dump(tforj, sys.stdout, sort_keys=True, indent=4)
print("")
################################
# VIOS SNAP FUNCTION
def _prepdir(targetdir):
ct = time.localtime()
subdir = time.strftime("%Y-%m-%d_%H-%M-%S", ct)
return os.path.join(targetdir, subdir)
@utils.arg('cluster',
metavar='<cluster>',
help=_('Id of the cluster.'))
def do_cluster_vios_ips(cs, args):
"""Get VIOS ips for a specific cluster."""
cluster = cs.cluster.get(args.cluster)
vios_ips = cs.cluster.extract_vios_ips(cluster)
json.dump(vios_ips, sys.stdout)
print ("\n")
|
|
import signal, time, sys, glob, os, codecs, pybonjour, atexit, tornado
import inflection, copy, types
# Infrastructure
from event_emitter import EventEmitter
from print_job_queue import PrintJobQueue
# Routes
from construct_socket_handler import ConstructSocketHandler
from construct_job_upload_handler import ConstructJobUploadHandler
class ConstructServer(tornado.web.Application, EventEmitter):
def __init__(self, **kwargs):
self.printer = kwargs["printer"]
EventEmitter.__init__(self)
# Configuring the Web Server
if not kwargs["routes"]: kwargs["routes"] = []
routes = kwargs["routes"] + [
(r"/socket", ConstructSocketHandler),
(r"/jobs", ConstructJobUploadHandler),
]
server_settings = kwargs["server_settings"]
if server_settings == None: server_settings = {}
self.clients = {}
self.ioloop = tornado.ioloop.IOLoop.instance()
signal.signal(signal.SIGINT, self.sigint_handler)
tornado.web.Application.__init__(self, routes, **server_settings)
# Configuring the print job queue
self.jobs = PrintJobQueue(self)
self.jobs.listeners.add(self)
# Configuring the printer components
self.components = dict(
motors = dict(enabled = False),
jobs = dict(),
pause_between_prints = True,
sensor_poll_rate = 3000,
sessions_count = 0,
status = 'idle'
)
self.components = dict(self.components, **kwargs["settings"])
for t, default_vals in self.component_defaults.iteritems():
for key in kwargs["components"][inflection.pluralize(t)]:
self.components[key] = copy.deepcopy(default_vals)
self.components[key]["type"] = t
# Setting the printer's initial values
self.sensor_update_received = True
self.reset_timeout = 0
self.blockers = []
component_defaults = dict(
temp = dict(
current_temp = -1,
target_temp = 0,
target_temp_countdown = None,
blocking = False
),
fan = dict( speed = 255, enabled = False ),
conveyor = dict( speed = 255, enabled = False ),
axis = dict( position = 0 )
)
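# Illustrative __init__ input (hypothetical component names): passing
# components={'temps': ['e0'], 'fans': ['f0'], 'conveyors': ['c0'],
# 'axes': ['x']} seeds self.components['e0'], ['f0'], ['c0'] and ['x'] with
# deep copies of the matching component_defaults entry plus a 'type' key.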
def start(self):
_do = lambda *args: tornado.ioloop.PeriodicCallback(*args).start()
# Start the print queue and sensor polling
_do(self.jobs.iterate_print_job_loop, 300, self.ioloop)
_do(self.poll_temp, self.components['sensor_poll_rate'], self.ioloop)
# Initialize DNS-SD once the server is ready to go online
self.init_dns_sd()
# Start the server
self.listen(8888)
def init_dns_sd(self):
sdRef = pybonjour.DNSServiceRegister(name = None,
regtype = '_construct._tcp',
port = 8888,
domain = "local.")
atexit.register(self.cleanup_service, sdRef)
def cleanup_service(self, sdRef):
sdRef.close()
def sigint_handler(self, signum, frame):
print "exiting..."
self.ioloop.stop()
raise Exception("Ctrl+C")
def poll_temp(self):
# A number of conditions that must be met for us to send a temperature
# request to the printer. This safeguards this printer from being overloaded
# by temperature requests it cannot presently respond to.
c = (not self.sensor_update_received) or (time.time() < self.reset_timeout)
if c or len(self.blockers) > 0: return
# Requesting a temperature update from the printer
self.sensor_update_received = False
self.printer.request_sensor_update()
def set_blocking_temps(self, keys):
unblocked = [k for k in self.blockers if k not in keys]
for k in unblocked: self.update_c([k], False)
for k in keys: self.update_c([k], True)
self.blockers = keys
def set_sensor_update_received(self, value):
self.sensor_update_received = value
# Not thread safe
def set_reset_timeout(self, timeout):
self.reset_timeout = timeout
self.ioloop.add_timeout(timeout, lambda: self.c_set(['status'], 'idle'))
def c_add(self, target_path, data, internal= False):
if 'type' in data: self.fire("add_%s"%data['type'], data, target_path)
self.c_set(target_path, data, internal=internal, event="add")
def c_set(self, target_path, data, internal= False, event="change"):
parent = self.find_parent(target_path, requireKey=(event!="add"))
key = target_path[-1]
virtual = (key in parent) and type(parent[key]) == types.FunctionType
# If the value has not changed there is nothing to do.
if (key in parent) and parent[key] == data: return
# do not override virtual attributes. Just skip to firing the event.
if not virtual: parent[key] = data
if internal == False:
# targets without a parent type are fired internally as "my_key_change"
event_name = "%s_%s"%(key, event)
# targets with a parent type are fired internally as "type_my_key_change"
if 'type' in parent:
event_name = "%s_%s"(parent['type'], event_name)
self.fire(event_name, target_path[:-1], parent[key], data)
# Sending the event to all the websocket sessions
self.broadcast([dict(type= event, data= data, target= target_path)])
def c_get(self, target_path):
target_parent = self.find_parent(target_path, requireKey=True)
return target_parent[target_path.pop()]
def c_rm(self, target_path):
target_parent = self.find_parent(target_path, requireKey=True)
key = target_path.pop()
data = target_parent[key]
if 'type' in data: self.fire("rm_%s"%data['type'], data, target_path)
del target_parent[key]
self.broadcast([dict(type= "remove", target= target_path)])
def find_parent(self, path, requireKey=True):
parent = self.components
for i, key in enumerate(path):
if (not key in parent) and (requireKey != False):
raise Exception("Target does not exist: [%s]"%','.join(path))
if not i == len(path) - 1: parent = parent[key]
return parent
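# Example (assuming a temp component registered as 'e0'):
# c_set(['e0', 'target_temp'], 220) resolves find_parent to
# self.components['e0'], writes its 'target_temp' key, fires
# 'temp_target_temp_change' and broadcasts the change to all sessions.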
def build_initialized_event(self, client):
# Adding each component to the data (except jobs, it needs to be modified)
data = {k:v for k,v in self.components.iteritems() if not k in ['jobs']}
# Adding the jobs (minus their full text) and this sessions's uuid
data = dict(data, **dict(
session_uuid= client.session_uuid,
jobs= self.jobs.public_list()
))
return [dict(type= "initialized", data= data)]
def broadcast(self, events):
for id, client in self.clients.iteritems(): client.send(events)
def add_client(self, client):
self.clients[client.session_uuid] = client
self.c_set(['sessions_count'], len(self.clients))
def remove_client(self, client):
del self.clients[client.session_uuid]
self.c_set(['sessions_count'], len(self.clients))
# Commands
# --------------------------------------------------------------------------
def run_cmd(self, c):
status = self.c_get(['status'])
if status != "idle" and (c.cmd != "estop" and c.cmd.find("job") == -1):
raise Exception("Cannot run commands when %s"%status)
for d in [self.printer, self.jobs, self]:
if hasattr(d, c.method_name): delegate = d
return getattr(delegate, c.method_name)(*(c.args), **(c.kwargs))
def do_set(self, *args, **kwargs):
if(len(args) == 1 and args[0] == "temp"):
key = "target_temp"
for target, data in kwargs.iteritems(): self.c_set([target, key], data)
else:
for k, v in kwargs.iteritems(): self.set_speed_or_enabled(k, v)
def set_speed_or_enabled(self, k, v):
if (type(v) == bool): key = "enabled"
if (type(v) in [float, int]): key = "speed"
if k == "motors": target = "motors"
if k == "fan": target = "f0"
if k == "conveyor": target = "c0"
self.c_set([target, key], v)
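# Example: do_set(fan=True) maps to c_set(['f0', 'enabled'], True), and
# do_set('temp', e0=220) sets components['e0']['target_temp'] ('e0' assumes
# a temp component registered under that name).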
def do_change_job(self, **kwargs):
job_id = kwargs['id']
del kwargs['id']
for k, v in kwargs.iteritems(): self.c_set(['jobs', job_id, k], v)
def do_print(self):
if not self.printer.is_online(): raise Exception("Not online")
no_jobs_msg = "Nothing to print. Try adding a print job with add_job."
if len(self.jobs.list) == 0: raise Exception(no_jobs_msg)
self.c_set(['status'], 'printing')
def do_estop(self):
self.printer.do_estop()
self.c_set(['status'], 'estopped')
# Resetting all the printer's attributes
for target, attrs in self.components.iteritems():
if type(attrs) != dict: continue
if not ("type" in attrs and attrs["type"] in self.component_defaults):
continue
for key, data in self.component_defaults[attrs["type"]].iteritems():
self.c_set([target, key], data, internal = True)
|
|
#! /usr/bin/env python
"""Bulkloader for slow databases (Bizgres).
Idea is following:
- Script reads from queue a batch of urlencoded row changes.
Inserts/updates/deletes, maybe many per one row.
- It creates 3 lists: ins_list, upd_list, del_list.
If one row is changed several times, it keeps the latest.
- Lists are processed in the following way:
ins_list - COPY into main table
upd_list - COPY into temp table, UPDATE from there
del_list - COPY into temp table, DELETE from there
- One side-effect is that total order of how rows appear
changes, but per-row changes will be kept in order.
The speedup from the COPY will happen only if the batches are
large enough, so the ticks should happen only after a couple
of minutes.
"""
import sys, os, pgq, skytools
## several methods for applying data
# update as update
METH_CORRECT = 0
# update as delete/copy
METH_DELETE = 1
# merge ins_list and upd_list, do delete/copy
METH_MERGED = 2
# no good method for temp table check before 8.2
USE_LONGLIVED_TEMP_TABLES = False
def find_dist_fields(curs, fqtbl):
if not skytools.exists_table(curs, "pg_catalog.mpp_distribution_policy"):
return []
schema, name = fqtbl.split('.')
q = "select a.attname"\
" from pg_class t, pg_namespace n, pg_attribute a,"\
" mpp_distribution_policy p"\
" where n.oid = t.relnamespace"\
" and p.localoid = t.oid"\
" and a.attrelid = t.oid"\
" and a.attnum = any(p.attrnums)"\
" and n.nspname = %s and t.relname = %s"
curs.execute(q, [schema, name])
res = []
for row in curs.fetchall():
res.append(row[0])
return res
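# Example: on Greenplum/Bizgres, a table distributed by a hypothetical
# customer_id column yields ['customer_id']; on plain PostgreSQL, where
# mpp_distribution_policy does not exist, the result is [].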
def exists_temp_table(curs, tbl):
# correct way, works only on 8.2
q = "select 1 from pg_class where relname = %s and relnamespace = pg_my_temp_schema()"
# does not work with parallel case
#q = """
#select 1 from pg_class t, pg_namespace n
#where n.oid = t.relnamespace
# and pg_table_is_visible(t.oid)
# and has_schema_privilege(n.nspname, 'USAGE')
# and has_table_privilege(n.nspname || '.' || t.relname, 'SELECT')
# and substr(n.nspname, 1, 8) = 'pg_temp_'
# and t.relname = %s;
#"""
curs.execute(q, [tbl])
tmp = curs.fetchall()
return len(tmp) > 0
class TableCache:
"""Per-table data hander."""
def __init__(self, tbl):
"""Init per-batch table data cache."""
self.name = tbl
self.ev_list = []
self.pkey_map = {}
self.pkey_list = []
self.pkey_str = None
self.col_list = None
self.final_ins_list = []
self.final_upd_list = []
self.final_del_list = []
def add_event(self, ev):
"""Store new event."""
# op & data
ev.op = ev.ev_type[0]
ev.data = skytools.db_urldecode(ev.ev_data)
# get pkey column names
if self.pkey_str is None:
if len(ev.ev_type) > 2:
self.pkey_str = ev.ev_type.split(':')[1]
else:
self.pkey_str = ev.ev_extra2
if self.pkey_str:
self.pkey_list = self.pkey_str.split(',')
# get pkey value
if self.pkey_str:
pk_data = []
for k in self.pkey_list:
pk_data.append(ev.data[k])
ev.pk_data = tuple(pk_data)
elif ev.op == 'I':
# fake pkey, just to get them spread out
ev.pk_data = ev.id
else:
raise Exception('non-pk tables not supported: %s' % self.name)
# get full column list, detect added columns
if not self.col_list:
self.col_list = ev.data.keys()
elif self.col_list != ev.data.keys():
# ^ supposedly python guarantees same order in keys()
# find new columns
for c in ev.data.keys():
if c not in self.col_list:
for oldev in self.ev_list:
oldev.data[c] = None
self.col_list = ev.data.keys()
# add to list
self.ev_list.append(ev)
# keep all versions of row data
if ev.pk_data in self.pkey_map:
self.pkey_map[ev.pk_data].append(ev)
else:
self.pkey_map[ev.pk_data] = [ev]
def finish(self):
"""Got all data, prepare for insertion."""
del_list = []
ins_list = []
upd_list = []
for ev_list in self.pkey_map.values():
# rewrite list of I/U/D events to
# optional DELETE and optional INSERT/COPY command
exists_before = -1
exists_after = 1
for ev in ev_list:
if ev.op == "I":
if exists_before < 0:
exists_before = 0
exists_after = 1
elif ev.op == "U":
if exists_before < 0:
exists_before = 1
# exists_after = 1  # this shouldn't be needed
elif ev.op == "D":
if exists_before < 0:
exists_before = 1
exists_after = 0
else:
raise Exception('unknown event type: %s' % ev.op)
# skip short-lived rows
if exists_before == 0 and exists_after == 0:
continue
# take last event
ev = ev_list[-1]
# generate needed commands
if exists_before and exists_after:
upd_list.append(ev.data)
elif exists_before:
del_list.append(ev.data)
elif exists_after:
ins_list.append(ev.data)
# reorder cols
new_list = self.pkey_list[:]
for k in self.col_list:
if k not in self.pkey_list:
new_list.append(k)
self.col_list = new_list
self.final_ins_list = ins_list
self.final_upd_list = upd_list
self.final_del_list = del_list
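# Illustration of the collapsing above: a row receiving I -> U -> U within
# one batch ends with exists_before=0/exists_after=1, so only the last
# event's data lands in ins_list; a row receiving I -> D in the same batch
# is short-lived and produces no command at all.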
class BulkLoader(pgq.SerialConsumer):
def __init__(self, args):
pgq.SerialConsumer.__init__(self, "bulk_loader", "src_db", "dst_db", args)
def reload(self):
pgq.SerialConsumer.reload(self)
self.load_method = self.cf.getint("load_method", METH_CORRECT)
if self.load_method not in (0,1,2):
raise Exception("bad load_method")
self.remap_tables = {}
for map in self.cf.getlist("remap_tables", ''):
tmp = map.split(':')
tbl = tmp[0].strip()
new = tmp[1].strip()
self.remap_tables[tbl] = new
def process_remote_batch(self, src_db, batch_id, ev_list, dst_db):
"""Content dispatcher."""
# add events to per-table caches
tables = {}
for ev in ev_list:
tbl = ev.extra1
if not tbl in tables:
tables[tbl] = TableCache(tbl)
cache = tables[tbl]
cache.add_event(ev)
ev.tag_done()
# then process them
for tbl, cache in tables.items():
cache.finish()
self.process_one_table(dst_db, tbl, cache)
def process_one_table(self, dst_db, tbl, cache):
del_list = cache.final_del_list
ins_list = cache.final_ins_list
upd_list = cache.final_upd_list
col_list = cache.col_list
real_update_count = len(upd_list)
self.log.debug("process_one_table: %s (I/U/D = %d/%d/%d)" % (
tbl, len(ins_list), len(upd_list), len(del_list)))
if tbl in self.remap_tables:
old = tbl
tbl = self.remap_tables[tbl]
self.log.debug("Redirect %s to %s" % (old, tbl))
# hack: in merged mode, fold inserts into the update list so everything
# goes through the delete/copy path
if self.load_method == METH_MERGED:
upd_list += ins_list
ins_list = []
# check if interesting table
curs = dst_db.cursor()
if not skytools.exists_table(curs, tbl):
self.log.warning("Ignoring events for table: %s" % tbl)
return
# fetch distribution fields
dist_fields = find_dist_fields(curs, tbl)
extra_fields = []
for fld in dist_fields:
if fld not in cache.pkey_list:
extra_fields.append(fld)
self.log.debug("PKey fields: %s Extra fields: %s" % (
",".join(cache.pkey_list), ",".join(extra_fields)))
# create temp table
temp = self.create_temp_table(curs, tbl)
# where expr must have pkey and dist fields
klist = []
for pk in cache.pkey_list + extra_fields:
exp = "%s.%s = %s.%s" % (tbl, pk, temp, pk)
klist.append(exp)
whe_expr = " and ".join(klist)
# create del sql
del_sql = "delete from only %s using %s where %s" % (tbl, temp, whe_expr)
# create update sql
slist = []
key_fields = cache.pkey_list + extra_fields
for col in cache.col_list:
if col not in key_fields:
exp = "%s = %s.%s" % (col, temp, col)
slist.append(exp)
upd_sql = "update only %s set %s from %s where %s" % (
tbl, ", ".join(slist), temp, whe_expr)
# insert sql
colstr = ",".join(cache.col_list)
ins_sql = "insert into %s (%s) select %s from %s" % (tbl, colstr, colstr, temp)
# process deleted rows
if len(del_list) > 0:
self.log.info("Deleting %d rows from %s" % (len(del_list), tbl))
# delete old rows
q = "truncate %s" % temp
self.log.debug(q)
curs.execute(q)
# copy rows
self.log.debug("COPY %d rows into %s" % (len(del_list), temp))
skytools.magic_insert(curs, temp, del_list, col_list)
# delete rows
self.log.debug(del_sql)
curs.execute(del_sql)
self.log.debug("%s - %d" % (curs.statusmessage, curs.rowcount))
self.log.debug(curs.statusmessage)
if len(del_list) != curs.rowcount:
self.log.warning("Delete mismatch: expected=%s updated=%d"
% (len(del_list), curs.rowcount))
# process updated rows
if len(upd_list) > 0:
self.log.info("Updating %d rows in %s" % (len(upd_list), tbl))
# delete old rows
q = "truncate %s" % temp
self.log.debug(q)
curs.execute(q)
# copy rows
self.log.debug("COPY %d rows into %s" % (len(upd_list), temp))
skytools.magic_insert(curs, temp, upd_list, col_list)
if self.load_method == METH_CORRECT:
# update main table
self.log.debug(upd_sql)
curs.execute(upd_sql)
self.log.debug(curs.statusmessage)
# check count
if len(upd_list) != curs.rowcount:
self.log.warning("Update mismatch: expected=%s updated=%d"
% (len(upd_list), curs.rowcount))
else:
# delete from main table
self.log.debug(del_sql)
curs.execute(del_sql)
self.log.debug(curs.statusmessage)
# check count
if real_update_count != curs.rowcount:
self.log.warning("Update mismatch: expected=%s deleted=%d"
% (real_update_count, curs.rowcount))
# insert into main table
if 0:
# does not work due to a bizgres bug
self.log.debug(ins_sql)
curs.execute(ins_sql)
self.log.debug(curs.statusmessage)
else:
# copy again, into main table
self.log.debug("COPY %d rows into %s" % (len(upd_list), tbl))
skytools.magic_insert(curs, tbl, upd_list, col_list)
# process new rows
if len(ins_list) > 0:
self.log.info("Inserting %d rows into %s" % (len(ins_list), tbl))
skytools.magic_insert(curs, tbl, ins_list, col_list)
# delete remaining rows
if USE_LONGLIVED_TEMP_TABLES:
q = "truncate %s" % temp
else:
# fscking problems with long-lived temp tables
q = "drop table %s" % temp
self.log.debug(q)
curs.execute(q)
def create_temp_table(self, curs, tbl):
# create temp table for loading
tempname = tbl.replace('.', '_') + "_loadertmp"
# check if exists
if USE_LONGLIVED_TEMP_TABLES:
if exists_temp_table(curs, tempname):
self.log.debug("Using existing temp table %s" % tempname)
return tempname
# bizgres crashes on "on commit delete rows", so preserve rows instead
arg = "on commit preserve rows"
# create temp table for loading
q = "create temp table %s (like %s) %s" % (
tempname, tbl, arg)
self.log.debug("Creating temp table: %s" % q)
curs.execute(q)
return tempname
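# Example: for a hypothetical tbl 'public.orders' the temp table is named
# public_orders_loadertmp and created via
#   create temp table public_orders_loadertmp (like public.orders)
#       on commit preserve rows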
if __name__ == '__main__':
script = BulkLoader(sys.argv[1:])
script.start()
|
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import *
import os
try:
from dciclient.v1.api import context as dci_context
from dciclient.v1.api import job as dci_job
from dciclient.v1.api import remoteci as dci_remoteci
from dciclient.v1.api import topic as dci_topic
except ImportError:
dciclient_found = False
else:
dciclient_found = True
DOCUMENTATION = '''
---
module: dci_job
short_description: An Ansible module to interact with the /jobs endpoint of DCI
version_added: 2.2
options:
state:
required: false
default: present
description: Desired state of the resource
login:
required: false
description: User's DCI login
password:
required: false
description: User's DCI password
url:
required: false
description: DCI Control Server URL
topic:
required: false
description: Topic for which the job will be scheduled
remoteci:
required: false
description: RemoteCI for which the job will be scheduled
id:
required: false
description: ID of the job
comment:
required: false
description: Comment attached to the job
status:
required: false
description: Status to set on the job
configuration:
required: false
description: Configuration attached to the job
'''
EXAMPLES = '''
- name: Schedule a new job
dci_job:
remoteci: 'MyRCI'
- name: Update job
dci_job:
id: '{{ job_id }}'
comment: 'New comment for my job'
- name: Remove a job
dci_job:
state: absent
id: '{{ job_id }}'
'''
# TODO
RETURN = '''
'''
def get_details(module):
"""Method that retrieves the appropriate credentials. """
login_list = [module.params['login'], os.getenv('DCI_LOGIN')]
login = next((item for item in login_list if item is not None), None)
password_list = [module.params['password'], os.getenv('DCI_PASSWORD')]
password = next((item for item in password_list if item is not None), None)
url_list = [module.params['url'], os.getenv('DCI_CS_URL')]
url = next((item for item in url_list if item is not None), 'https://api.distributed-ci.io')
return login, password, url
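# Example: with DCI_LOGIN and DCI_PASSWORD exported and no module parameters
# given, get_details() falls back to the environment values and to the
# default https://api.distributed-ci.io control-server URL.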
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
# Authentication related parameters
#
login=dict(required=False, type='str'),
password=dict(required=False, type='str'),
url=dict(required=False, type='str'),
# Resource related parameters
#
id=dict(type='str'),
topic=dict(required=False, type='str'),
remoteci=dict(type='str'),
comment=dict(type='str'),
status=dict(type='str'),
configuration=dict(type='dict'),
),
)
if not dciclient_found:
module.fail_json(msg='The python dciclient module is required')
topic_list = [module.params['topic'], os.getenv('DCI_TOPIC')]
topic = next((item for item in topic_list if item is not None), None)
login, password, url = get_details(module)
if not login or not password:
module.fail_json(msg='login and/or password have not been specified')
ctx = dci_context.build_dci_context(url, login, password, 'Ansible')
# Action required: Delete the job matching the job id
# Endpoint called: /jobs/<job_id> DELETE via dci_job.delete()
#
# If the job exists and has been successfully deleted, changed is set
# to True; if the job does not exist, changed is set to False
if module.params['state'] == 'absent':
if not module.params['id']:
module.fail_json(msg='id parameter is required')
res = dci_job.get(ctx, module.params['id'])
if res.status_code not in [400, 401, 404, 422]:
kwargs = {
'id': module.params['id'],
'etag': res.json()['job']['etag']
}
res = dci_job.delete(ctx, **kwargs)
# Action required: Retrieve job information
# Endpoint called: /jobs/<job_id> GET via dci_job.get()
#
# Get job information
elif module.params['id'] and not module.params['comment'] and not module.params['status'] and not module.params['configuration']:
res = dci_job.get(ctx, module.params['id'])
# Action required: Update an existing job
# Endpoint called: /jobs/<job_id> PUT via dci_job.update()
#
# Update the job with the specified characteristics.
elif module.params['id']:
res = dci_job.get(ctx, module.params['id'])
if res.status_code not in [400, 401, 404, 422]:
kwargs = {
'id': module.params['id'],
'etag': res.json()['job']['etag']
}
if module.params['comment']:
kwargs['comment'] = module.params['comment']
if module.params['status']:
kwargs['status'] = module.params['status']
if module.params['configuration']:
kwargs['configuration'] = module.params['configuration']
res = dci_job.update(ctx, **kwargs)
# Action required: Schedule a new job
# Endpoint called: /jobs/schedule POST via dci_job.schedule()
#
# Schedule a new job against the DCI Control-Server
else:
topic_id = dci_topic.get(ctx, topic).json()['topic']['id']
remoteci = dci_remoteci.get(ctx, module.params['remoteci']).json()
remoteci_id = remoteci['remoteci']['id']
res = dci_job.schedule(ctx, remoteci_id, topic_id=topic_id)
if res.status_code not in [400, 401, 404, 422]:
res = dci_job.get_full_data(ctx, ctx.last_job_id)
try:
result = res.json()
if res.status_code == 404:
module.fail_json(msg='The resource does not exist')
if res.status_code in [400, 401, 422]:
result['changed'] = False
else:
result['changed'] = True
except AttributeError:
# Enter here if a new job has been scheduled; the return value of get_full_data is already JSON.
result = res
result['changed'] = True
result['job_id'] = ctx.last_job_id
except:
result = {}
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from schematics.exceptions import ValidationError
from schematics.transforms import whitelist, blacklist
from schematics.types import StringType, IntType, URLType, BooleanType
from schematics.types.compound import ModelType
from schematics.types.serializable import serializable
from barbecue import vnmax
# from zope.interface import implementer # TODO
from openprocurement.api.models import (
plain_role, listing_role, draft_role, schematics_default_role, schematics_embedded_role
)
from openprocurement.api.models import (
ListType, Period
)
from openprocurement.api.utils import (
get_now,
)
from openprocurement.api.constants import TZ
from openprocurement.api.validation import (
validate_items_uniq, validate_cpv_group
)
from openprocurement.tender.core.models import (
view_role, create_role, edit_role,
auction_view_role, auction_post_role, auction_patch_role, auction_role,
chronograph_role, chronograph_view_role,
)
from openprocurement.tender.core.models import (
validate_features_uniq, validate_lots_uniq
)
from openprocurement.tender.core.models import (
Value, Guarantee, ComplaintModelType, TenderAuctionPeriod,
PeriodEndRequired, Tender as BaseTender, Bid, ProcuringEntity,
Item, Award, Contract, Question, Cancellation, Feature,
Lot, Complaint,
)
from openprocurement.tender.core.utils import (
calc_auction_end_time
)
from openprocurement.tender.core.constants import (
CPV_ITEMS_CLASS_FROM, COMPLAINT_STAND_STILL_TIME
)
enquiries_role = (blacklist('owner_token', '_attachments', 'revisions', 'bids', 'numberOfBids') + schematics_embedded_role)
Administrator_role = whitelist('status', 'mode', 'procuringEntity', 'auctionPeriod', 'lots')
# class IBelowThresoldTender(ITender): # TODO
# """ Marker interface for belowThreshold tenders """
# @implementer(IBelowThresoldTender) # TODO
class Tender(BaseTender):
"""Data regarding tender process - publicly inviting prospective contractors
to submit bids for evaluation and selecting a winner or winners.
"""
class Options:
roles = {
'plain': plain_role,
'create': create_role,
'edit': edit_role,
'edit_draft': draft_role,
'edit_active.enquiries': edit_role,
'edit_active.tendering': whitelist(),
'edit_active.auction': whitelist(),
'edit_active.qualification': whitelist(),
'edit_active.awarded': whitelist(),
'edit_complete': whitelist(),
'edit_unsuccessful': whitelist(),
'edit_cancelled': whitelist(),
'view': view_role,
'listing': listing_role,
'auction_view': auction_view_role,
'auction_post': auction_post_role,
'auction_patch': auction_patch_role,
'draft': enquiries_role,
'active.enquiries': enquiries_role,
'active.tendering': enquiries_role,
'active.auction': auction_role,
'active.qualification': view_role,
'active.awarded': view_role,
'complete': view_role,
'unsuccessful': view_role,
'cancelled': view_role,
'chronograph': chronograph_role,
'chronograph_view': chronograph_view_role,
'Administrator': Administrator_role,
'default': schematics_default_role,
'contracting': whitelist('doc_id', 'owner'),
}
items = ListType(ModelType(Item), required=True, min_size=1, validators=[validate_items_uniq]) # The goods and services to be purchased, broken into line items wherever possible. Items should not be duplicated, but a quantity of 2 specified instead.
value = ModelType(Value, required=True) # The total estimated value of the procurement.
enquiryPeriod = ModelType(PeriodEndRequired, required=True) # The period during which enquiries may be made and will be answered.
tenderPeriod = ModelType(PeriodEndRequired, required=True) # The period when the tender is open for submissions. The end date is the closing date for tender submissions.
hasEnquiries = BooleanType() # A Yes/No field as to whether enquiries were part of tender process.
awardPeriod = ModelType(Period) # The date or period on which an award is anticipated to be made.
numberOfBidders = IntType() # The number of unique tenderers who participated in the tender
bids = ListType(ModelType(Bid), default=list()) # A list of all the companies who entered submissions for the tender.
procuringEntity = ModelType(ProcuringEntity, required=True) # The entity managing the procurement, which may be different from the buyer who is paying / using the items being procured.
awards = ListType(ModelType(Award), default=list())
contracts = ListType(ModelType(Contract), default=list())
auctionPeriod = ModelType(TenderAuctionPeriod, default={})
minimalStep = ModelType(Value, required=True)
questions = ListType(ModelType(Question), default=list())
complaints = ListType(ComplaintModelType(Complaint), default=list())
auctionUrl = URLType()
cancellations = ListType(ModelType(Cancellation), default=list())
features = ListType(ModelType(Feature), validators=[validate_features_uniq])
lots = ListType(ModelType(Lot), default=list(), validators=[validate_lots_uniq])
guarantee = ModelType(Guarantee)
procurementMethodType = StringType(default="belowThreshold")
procuring_entity_kinds = ['general', 'special', 'defense', 'other']
block_complaint_status = ['claim', 'answered', 'pending']
def __local_roles__(self):
roles = dict([('{}_{}'.format(self.owner, self.owner_token), 'tender_owner')])
for i in self.bids:
roles['{}_{}'.format(i.owner, i.owner_token)] = 'bid_owner'
return roles
def initialize(self):
if not self.enquiryPeriod.startDate:
self.enquiryPeriod.startDate = get_now()
if not self.tenderPeriod.startDate:
self.tenderPeriod.startDate = self.enquiryPeriod.endDate
now = get_now()
self.date = now
if self.lots:
for lot in self.lots:
lot.date = now
@serializable(serialize_when_none=False)
def next_check(self):
now = get_now()
checks = []
if self.status == 'active.enquiries' and self.tenderPeriod.startDate:
checks.append(self.tenderPeriod.startDate.astimezone(TZ))
elif self.status == 'active.enquiries' and self.enquiryPeriod.endDate:
checks.append(self.enquiryPeriod.endDate.astimezone(TZ))
elif self.status == 'active.tendering' and self.tenderPeriod.endDate:
checks.append(self.tenderPeriod.endDate.astimezone(TZ))
elif not self.lots and self.status == 'active.auction' and self.auctionPeriod and self.auctionPeriod.startDate and not self.auctionPeriod.endDate:
if now < self.auctionPeriod.startDate:
checks.append(self.auctionPeriod.startDate.astimezone(TZ))
elif now < calc_auction_end_time(self.numberOfBids, self.auctionPeriod.startDate).astimezone(TZ):
checks.append(calc_auction_end_time(self.numberOfBids, self.auctionPeriod.startDate).astimezone(TZ))
elif self.lots and self.status == 'active.auction':
for lot in self.lots:
if lot.status != 'active' or not lot.auctionPeriod or not lot.auctionPeriod.startDate or lot.auctionPeriod.endDate:
continue
if now < lot.auctionPeriod.startDate:
checks.append(lot.auctionPeriod.startDate.astimezone(TZ))
elif now < calc_auction_end_time(lot.numberOfBids, lot.auctionPeriod.startDate).astimezone(TZ):
checks.append(calc_auction_end_time(lot.numberOfBids, lot.auctionPeriod.startDate).astimezone(TZ))
elif not self.lots and self.status == 'active.awarded' and not any([
i.status in self.block_complaint_status
for i in self.complaints
]) and not any([
i.status in self.block_complaint_status
for a in self.awards
for i in a.complaints
]):
standStillEnds = [
a.complaintPeriod.endDate.astimezone(TZ)
for a in self.awards
if a.complaintPeriod.endDate
]
last_award_status = self.awards[-1].status if self.awards else ''
if standStillEnds and last_award_status == 'unsuccessful':
checks.append(max(standStillEnds))
elif self.lots and self.status in ['active.qualification', 'active.awarded'] and not any([
i.status in self.block_complaint_status and i.relatedLot is None
for i in self.complaints
]):
for lot in self.lots:
if lot['status'] != 'active':
continue
lot_awards = [i for i in self.awards if i.lotID == lot.id]
pending_complaints = any([
i['status'] in self.block_complaint_status and i.relatedLot == lot.id
for i in self.complaints
])
pending_awards_complaints = any([
i.status in self.block_complaint_status
for a in lot_awards
for i in a.complaints
])
standStillEnds = [
a.complaintPeriod.endDate.astimezone(TZ)
for a in lot_awards
if a.complaintPeriod.endDate
]
last_award_status = lot_awards[-1].status if lot_awards else ''
if not pending_complaints and not pending_awards_complaints and standStillEnds and last_award_status == 'unsuccessful':
checks.append(max(standStillEnds))
if self.status.startswith('active'):
from openprocurement.tender.core.utils import calculate_business_date
for complaint in self.complaints:
if complaint.status == 'claim' and complaint.dateSubmitted:
checks.append(calculate_business_date(complaint.dateSubmitted, COMPLAINT_STAND_STILL_TIME, self))
elif complaint.status == 'answered' and complaint.dateAnswered:
checks.append(calculate_business_date(complaint.dateAnswered, COMPLAINT_STAND_STILL_TIME, self))
for award in self.awards:
if award.status == 'active' and not any([i.awardID == award.id for i in self.contracts]):
checks.append(award.date)
for complaint in award.complaints:
if complaint.status == 'claim' and complaint.dateSubmitted:
checks.append(calculate_business_date(complaint.dateSubmitted, COMPLAINT_STAND_STILL_TIME, self))
elif complaint.status == 'answered' and complaint.dateAnswered:
checks.append(calculate_business_date(complaint.dateAnswered, COMPLAINT_STAND_STILL_TIME, self))
return min(checks).isoformat() if checks else None
@serializable
def numberOfBids(self):
"""A property that is serialized by schematics exports."""
return len(self.bids)
@serializable(serialized_name="value", type=ModelType(Value))
def tender_value(self):
return Value(dict(amount=sum([i.value.amount for i in self.lots]),
currency=self.value.currency,
valueAddedTaxIncluded=self.value.valueAddedTaxIncluded)) if self.lots else self.value
@serializable(serialized_name="guarantee", serialize_when_none=False, type=ModelType(Guarantee))
def tender_guarantee(self):
if self.lots:
lots_amount = [i.guarantee.amount for i in self.lots if i.guarantee]
if not lots_amount:
return self.guarantee
guarantee = {'amount': sum(lots_amount)}
lots_currency = [i.guarantee.currency for i in self.lots if i.guarantee]
guarantee['currency'] = lots_currency[0] if lots_currency else None
if self.guarantee:
guarantee['currency'] = self.guarantee.currency
return Guarantee(guarantee)
else:
return self.guarantee
@serializable(serialized_name="minimalStep", type=ModelType(Value))
def tender_minimalStep(self):
return Value(dict(amount=min([i.minimalStep.amount for i in self.lots]),
currency=self.minimalStep.currency,
valueAddedTaxIncluded=self.minimalStep.valueAddedTaxIncluded)) if self.lots else self.minimalStep
def validate_items(self, data, items):
cpv_336_group = items[0].classification.id[:3] == '336' if items else False
if not cpv_336_group and (data.get('revisions')[0].date if data.get('revisions') else get_now()) > CPV_ITEMS_CLASS_FROM and items and len(set([i.classification.id[:4] for i in items])) != 1:
raise ValidationError(u"CPV class of items should be identical")
else:
validate_cpv_group(items)
def validate_features(self, data, features):
if features and data['lots'] and any([
round(vnmax([
i
for i in features
if i.featureOf == 'tenderer' or i.featureOf == 'lot' and i.relatedItem == lot['id'] or i.featureOf == 'item' and i.relatedItem in [j.id for j in data['items'] if j.relatedLot == lot['id']]
]), 15) > 0.3
for lot in data['lots']
]):
raise ValidationError(u"Sum of max value of all features for lot should be less then or equal to 30%")
elif features and not data['lots'] and round(vnmax(features), 15) > 0.3:
raise ValidationError(u"Sum of max value of all features should be less then or equal to 30%")
def validate_auctionUrl(self, data, url):
if url and data['lots']:
raise ValidationError(u"url should be posted for each lot")
def validate_minimalStep(self, data, value):
if value and value.amount and data.get('value'):
if data.get('value').amount < value.amount:
raise ValidationError(u"value should be less than value of tender")
if data.get('value').currency != value.currency:
raise ValidationError(u"currency should be identical to currency of value of tender")
if data.get('value').valueAddedTaxIncluded != value.valueAddedTaxIncluded:
raise ValidationError(u"valueAddedTaxIncluded should be identical to valueAddedTaxIncluded of value of tender")
def validate_tenderPeriod(self, data, period):
if period and period.startDate and data.get('enquiryPeriod') and data.get('enquiryPeriod').endDate and period.startDate < data.get('enquiryPeriod').endDate:
raise ValidationError(u"period should begin after enquiryPeriod")
def validate_awardPeriod(self, data, period):
if period and period.startDate and data.get('auctionPeriod') and data.get('auctionPeriod').endDate and period.startDate < data.get('auctionPeriod').endDate:
raise ValidationError(u"period should begin after auctionPeriod")
if period and period.startDate and data.get('tenderPeriod') and data.get('tenderPeriod').endDate and period.startDate < data.get('tenderPeriod').endDate:
raise ValidationError(u"period should begin after tenderPeriod")
def validate_lots(self, data, value):
if len(set([lot.guarantee.currency for lot in value if lot.guarantee])) > 1:
raise ValidationError(u"lot guarantee currency should be identical to tender guarantee currency")
|
|
import base64
import pickle
from datetime import datetime, date
from decimal import Decimal, getcontext
from django.test import TestCase
from unittest import mock
from django import forms
from django.contrib.auth.models import User
from onmydesk.models import (Report, Scheduler, ReportNotSavedException,
output_file_handler)
class OutputFileHandlerTestCase(TestCase):
def test_call_must_return_filepath_changed(self):
my_handler = 'path.to.my.handler'
with mock.patch('onmydesk.models.app_settings.ONMYDESK_FILE_HANDLER', my_handler):
my_handler_mocked = mock.MagicMock(return_value='/tmp/filepath-changed.tsv')
with mock.patch('onmydesk.models.my_import', return_value=my_handler_mocked) as my_import_mocked:
self.assertEqual(
output_file_handler('/tmp/filepath.tsv'),
'/tmp/filepath-changed.tsv')
my_import_mocked.assert_called_once_with(my_handler)
my_handler_mocked.assert_called_once_with('/tmp/filepath.tsv')
def test_call_must_return_same_filepath_if_a_file_handler_not_exists(self):
with mock.patch('onmydesk.models.ONMYDESK_FILE_HANDLER', None):
self.assertEqual(output_file_handler('/tmp/filepath.tsv'), '/tmp/filepath.tsv')
class ReportTestCase(TestCase):
def setUp(self):
def my_output_file_handler(filepath):
return filepath
self.patch('onmydesk.models.output_file_handler', my_output_file_handler)
self.report_instance = mock.MagicMock()
self.report_instance.name = 'My Report'
self.report_instance.output_filepaths = ['/tmp/flunfa.tsv']
self.report_class = mock.MagicMock(return_value=self.report_instance)
self.report_class.name = 'My Report'
self.my_import_mocked = self.patch('onmydesk.models.my_import', return_value=self.report_class)
def test_to_string(self):
report = Report(report='my_report_class')
self.assertEqual(str(report), self.report_instance.name)
report.save()
self.assertEqual(str(report), '{} #{}'.format(self.report_instance.name, report.id))
def test_to_string_with_empty_report_must_return_generic_name(self):
report = Report()
self.assertEqual(str(report), 'Report object')
def test_process_must_call_process_from_report_class(self):
report = Report(report='my_report_class')
report.save()
report.process()
self.my_import_mocked.assert_called_once_with(report.report)
self.assertTrue(self.report_instance.process.called)
def test_process_with_not_saved_report_must_raise_a_exception(self):
report = Report(report='my_report_class')
self.assertRaises(ReportNotSavedException, report.process)
def test_process_must_store_filepaths_result(self):
self.report_instance.output_filepaths = [
'/tmp/flunfa-2.tsv',
'/tmp/flunfa-3.tsv',
]
report = Report(report='my_report_class')
report.save()
report.process()
self.assertEqual(
report.results, ';'.join(self.report_instance.output_filepaths))
def test_process_with_params_must_call_report_constructor_with_these_params(self):
report = Report(report='my_report_class')
params = {'type': 'whatever'}
report.set_params(params)
report.save()
report.process()
self.report_class.assert_called_once_with(params=params)
def test_process_must_set_status_as_processing_when_start(self):
self.patch('onmydesk.models.my_import', side_effect=Exception)
report = Report(report='my_report_class')
report.save()
self.assertEqual(report.status, Report.STATUS_PENDING)
try:
report.process()
except Exception:
pass
report = Report.objects.get(id=report.id)
self.assertEqual(report.status, Report.STATUS_PROCESSING)
def test_process_must_set_status_as_processed_after_report_process(self):
report = Report(report='my_report_class')
report.save()
report.process()
report = Report.objects.get(id=report.id)
self.assertEqual(report.status, Report.STATUS_PROCESSED)
def test_process_must_set_status_as_error_if_some_exception_is_raised(self):
self.report_instance.process.side_effect = Exception()
report = Report(report='my_report_class')
report.save()
self.assertRaises(Exception, report.process)
self.assertEqual(report.status, Report.STATUS_ERROR)
def test_process_must_set_process_time(self):
getcontext().prec = 5
start = Decimal(10.0000)
end = Decimal(15.1234)
self.patch('onmydesk.models.timer', side_effect=[start, end])
report = Report(report='my_report_class')
report.save()
report.process()
self.assertEqual(report.process_time, end - start)
def test_results_as_list_must_return_a_list(self):
expected_results = [
'/tmp/flunfa-2.tsv',
'/tmp/flunfa-3.tsv',
]
report = Report(report='my_report_class')
report.results = ';'.join(expected_results)
self.assertEqual(report.results_as_list, expected_results)
def test_results_as_list_must_return_empty_list_if_field_is_empty(self):
report = Report(report='my_report_class')
report.results = ''
self.assertEqual(report.results_as_list, [])
def test_set_params_must_serializer_info_and_store_on_params_attr(self):
report = Report(report='my_report_class')
self.assertIsNone(report.params)
params = {'param1': 1, 'somedate': datetime.now()}
expected_result = base64.b64encode(pickle.dumps(params))
report.set_params(params)
self.assertEqual(report.params, expected_result)
def test_get_params_must_return_unserialized_info(self):
params = {'param1': 1, 'somedate': datetime.now()}
report = Report(report='my_report_class')
report.params = base64.b64encode(pickle.dumps(params))
self.assertEqual(report.get_params(), params)
def test_get_params_returns_none_if_params_is_none(self):
report = Report(report='my_report_class')
report.params = None
self.assertIsNone(report.get_params())
@mock.patch('onmydesk.models.app_settings.ONMYDESK_DOWNLOAD_LINK_HANDLER', 'whatever')
def test_result_links(self):
self.report_instance.output_filepaths = [
'/tmp/flunfa-2.tsv',
'/tmp/flunfa-3.tsv',
]
report = Report(report='my_report_class')
report.save()
report.process()
def my_handler(filepath):
url_example = 'http://someplace.com/somepath{}'
return url_example.format(filepath)
with mock.patch('onmydesk.models.my_import', return_value=my_handler):
links = report.result_links
self.assertEqual(links, [my_handler(i) for i in self.report_instance.output_filepaths])
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
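# --- Illustrative sketch (not part of the original tests) ---
# The set_params/get_params tests above pin down the encoding of report
# parameters: pickled, then base64-encoded. A standalone round-trip:
_params = {'param1': 1}
_encoded = base64.b64encode(pickle.dumps(_params))
assert pickle.loads(base64.b64decode(_encoded)) == _params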
class SchedulerTestCase(TestCase):
def setUp(self):
class RangeDateForm(forms.Form):
my_date = forms.DateField()
other_filter = forms.CharField()
self.report_form = RangeDateForm
self.report_class = mock.MagicMock()
self.report_class.form = self.report_form
self.report_class.get_form.return_value = self.report_form
self.report_class.name = 'My Report'
self._patch('onmydesk.models.my_import', return_value=self.report_class)
def _patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def test_to_string(self):
scheduler = Scheduler(report='my_report_class')
with mock.patch('onmydesk.models.my_import', return_value=self.report_class):
self.assertEqual(str(scheduler), 'My Report')
scheduler.save()
self.assertEqual(str(scheduler), 'My Report #{}'.format(scheduler.id))
def test_to_string_with_empty_report_returns_generic_name(self):
scheduler = Scheduler()
self.assertEqual(str(scheduler), 'Scheduler object')
def test_set_params_must_serializer_info_and_store_on_params_attr(self):
scheduler = Scheduler()
params = {'teste': 'Alisson'}
scheduler.set_params(params)
expected_result = base64.b64encode(pickle.dumps(params))
self.assertEqual(scheduler.params, expected_result)
def test_get_params_must_return_unserialized_info(self):
params = {'param1': 1}
report = Scheduler()
report.params = base64.b64encode(pickle.dumps(params))
self.assertEqual(report.get_params(), params)
def test_get_processed_params_must_return_dictionary_with_parameters(self):
scheduler = Scheduler()
params = {'param1': 'First value'}
scheduler.set_params(params)
self.assertEqual(scheduler.get_processed_params(), params)
def test_get_processed_params_must_return_date_fields_processed(self):
scheduler = Scheduler(report='my_report_class')
scheduler.set_params({'my_date': 'D-2', 'other_filter': 'other_value'})
reference_date = date(2016, 4, 3)
expected_param = {'my_date': date(2016, 4, 1), 'other_filter': 'other_value'}
self.assertEqual(scheduler.get_processed_params(reference_date), expected_param)
def test_get_processed_params_must_return_none_if_params_is_none(self):
scheduler = Scheduler(report='my_report_class')
scheduler.params = None
self.assertIsNone(scheduler.get_processed_params())
def test_process_must_return_report(self):
scheduler = Scheduler(report='my_report_class')
result = scheduler.process()
self.assertIsInstance(result, Report)
def test_process_must_return_a_saved_report(self):
scheduler = Scheduler(report='my_report_class')
report = scheduler.process()
self.assertIsNotNone(report.id)
def test_process_must_return_a_saved_report_with_created_by_filled(self):
user = User.objects.create_user('Joao', 'joao.webmaster@webnastersonline.com.br', '123souwebmaster')
scheduler = Scheduler(report='my_report_class', created_by=user)
report = scheduler.process()
self.assertEqual(report.created_by, user)
def test_process_must_call_report_process(self):
scheduler = Scheduler(report='my_report_class')
with mock.patch('onmydesk.models.Report.process') as process_mocked:
scheduler.process()
self.assertTrue(process_mocked.called)
def test_process_must_notify_users(self):
scheduler = Scheduler(report='my_report_class', notify_emails='test@test.com,other@test.com')
with mock.patch('onmydesk.models.send_mail') as send_mail_mocked:
scheduler.process()
self.assertTrue(send_mail_mocked.called)
def test_process_must_not_notify_if_scheduler_has_no_emails(self):
scheduler = Scheduler(report='my_report_class')
with mock.patch('onmydesk.models.send_mail') as send_mail_mocked:
scheduler.process()
self.assertFalse(send_mail_mocked.called)
def test_process_must_use_given_reference_date(self):
self._patch('onmydesk.models.Report.process')
scheduler = Scheduler(report='my_report_class')
scheduler.set_params({'my_date': 'D-2', 'other_filter': 'other_value'})
my_date = date(2016, 5, 10)
with mock.patch('onmydesk.models.Report.set_params') as set_params:
scheduler.process(reference_date=my_date)
set_params.assert_called_once_with({'my_date': date(2016, 5, 8), 'other_filter': 'other_value'})
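# --- Illustrative sketch (not part of the original tests) ---
# The scheduler tests above expect 'D-2' with reference date 2016-04-03 to
# resolve to 2016-04-01, i.e. "reference date minus N days". A hypothetical
# helper capturing that convention (the real resolution lives inside
# onmydesk and may differ):
from datetime import timedelta
def _resolve_relative_date(value, reference_date):
    if isinstance(value, str) and value.startswith('D-'):
        return reference_date - timedelta(days=int(value[2:]))
    return value
assert _resolve_relative_date('D-2', date(2016, 4, 3)) == date(2016, 4, 1)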
|
|
from __future__ import absolute_import
import sys
from django import forms
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils.log import getLogger
from . import BrokenException, except_args
from .models import Article
def index_page(request):
"""Dummy index page"""
return HttpResponse('<html><body>Dummy page</body></html>')
def raises(request):
# Make sure that a callable that raises an exception in the stack frame's
# local vars won't hijack the technical 500 response. See:
# http://code.djangoproject.com/ticket/15025
def callable():
raise Exception
try:
raise Exception
except Exception:
return technical_500_response(request, *sys.exc_info())
def raises404(request):
resolver = get_resolver(None)
resolver.resolve('')
def raises403(request):
raise PermissionDenied
def redirect(request):
"""
Forces an HTTP redirect.
"""
return HttpResponseRedirect("target/")
def view_exception(request, n):
raise BrokenException(except_args[int(n)])
def template_exception(request, n):
return render_to_response('debug/template_exception.html',
{'arg': except_args[int(n)]})
# Some views to exercise the shortcuts
def render_to_response_view(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_request_context(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=RequestContext(request))
def render_to_response_view_with_mimetype(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, mimetype='application/x-rendertest')
def render_view(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_base_context(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=Context())
def render_view_with_content_type(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_view_with_status(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_current_app(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
# This should fail because we are passing both a current_app and a
# context_instance:
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app", context_instance=RequestContext(request))
def raises_template_does_not_exist(request):
# We need to inspect the HTML generated by the fancy 500 debug view but
# the test client ignores it, so we send it explicitly.
try:
return render_to_response('i_dont_exist.html')
except TemplateDoesNotExist:
return technical_500_response(request, *sys.exc_info())
def send_log(request, exc_info):
logger = getLogger('django.request')
# The default logging config has a logging filter to ensure admin emails are
# only sent with DEBUG=False, but since someone might choose to remove that
# filter, we still want to be able to test the behavior of error emails
# with DEBUG=True. So we need to remove the filter temporarily.
admin_email_handler = [
h for h in logger.handlers
if h.__class__.__name__ == "AdminEmailHandler"
][0]
orig_filters = admin_email_handler.filters
admin_email_handler.filters = []
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
admin_email_handler.filters = orig_filters
def non_sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
"""
Ignores all the filtering done by its parent class.
"""
def get_post_parameters(self, request):
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return tb_frame.f_locals.items()
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
request.exception_reporter_filter = UnsafeExceptionReporterFilter()
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
|
|
"""
Chromatic Adaptation Transforms
===============================
Defines various chromatic adaptation transforms (CAT):
- :attr:`colour.adaptation.CAT_XYZ_SCALING`: *XYZ Scaling* chromatic
adaptation transform.
- :attr:`colour.adaptation.CAT_VON_KRIES`: *Von Kries* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_BRADFORD`: *Bradford* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_SHARP`: *Sharp* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_FAIRCHILD`: *Fairchild* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_CMCCAT97`: *CMCCAT97* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_CMCCAT2000`: *CMCCAT2000* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_CAT02`: *CAT02* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_CAT02_BRILL2008`: *Brill and Susstrunk (2008)*
corrected CAT02 chromatic adaptation transform.
- :attr:`colour.adaptation.CAT_CAT16`: *CAT16* chromatic adaptation
transform.
- :attr:`colour.adaptation.CAT_BIANCO2010`: *Bianco and Schettini (2010)*
chromatic adaptation transform.
- :attr:`colour.adaptation.CAT_PC_BIANCO2010`:
*Bianco and Schettini PC (2010)* chromatic adaptation transform.
References
----------
- :cite:`Bianco2010a` : Bianco, S., & Schettini, R. (2010). Two new von Kries
based chromatic adaptation transforms found by numerical optimization.
Color Research & Application, 35(3), 184-192. doi:10.1002/col.20573
- :cite:`Brill2008a` : Brill, M. H., & Susstrunk, S. (2008). Repairing gamut
problems in CIECAM02: A progress report. Color Research & Application,
33(5), 424-426. doi:10.1002/col.20432
- :cite:`CIETC1-321994b` : CIE TC 1-32. (1994). CIE 109-1994 A Method of
Predicting Corresponding Colours under Different Chromatic and Illuminance
Adaptations. Commission Internationale de l'Eclairage.
ISBN:978-3-900734-51-0
- :cite:`Fairchild2013ba` : Fairchild, M. D. (2013). The Nayatani et al.
Model. In Color Appearance Models (3rd ed., pp. 4810-5085). Wiley.
ISBN:B00DAYO8E2
- :cite:`Fairchildb` : Fairchild, M. D. (n.d.). Fairchild YSh.
http://rit-mcsl.org/fairchild//files/FairchildYSh.zip
- :cite:`Li2007e` : Li, C., Perales, E., Luo, M. R., & Martinez-verdu, F.
(2007). The Problem with CAT02 and Its Correction.
https://pdfs.semanticscholar.org/b5a9/\
0215ad9a1fb6b01f310b3d64305f7c9feb3a.pdf
- :cite:`Li2017` : Li, C., Li, Z., Wang, Z., Xu, Y., Luo, M. R., Cui, G.,
Melgosa, M., Brill, M. H., & Pointer, M. (2017). Comprehensive color
solutions: CAM16, CAT16, and CAM16-UCS. Color Research & Application,
42(6), 703-718. doi:10.1002/col.22131
- :cite:`Lindbloom2009g` : Fairchild, M. D. (2013). Chromatic Adaptation
Models. In Color Appearance Models (3rd ed., pp. 4179-4252). Wiley.
ISBN:B00DAYO8E2
- :cite:`Nayatani1995a` : Nayatani, Y., Sobagaki, H., & Yano, K. H. T.
(1995). Lightness dependency of chroma scales of a nonlinear
color-appearance model and its latest formulation. Color Research &
Application, 20(3), 156-167. doi:10.1002/col.5080200305
- :cite:`Westland2012g` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
CMCCAT97. In Computational Colour Science Using MATLAB (2nd ed., p. 80).
ISBN:978-0-470-66569-5
- :cite:`Westland2012k` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
CMCCAT2000. In Computational Colour Science Using MATLAB (2nd ed., pp.
83-86). ISBN:978-0-470-66569-5
- :cite:`Wikipedia2007` : Wikipedia. (2007). CAT02. Retrieved February 24,
2014, from http://en.wikipedia.org/wiki/CIECAM02#CAT02
"""
from __future__ import annotations
import numpy as np
from colour.hints import NDArray
from colour.utilities import CaseInsensitiveMapping
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"CAT_XYZ_SCALING",
"CAT_VON_KRIES",
"CAT_BRADFORD",
"CAT_SHARP",
"CAT_FAIRCHILD",
"CAT_CMCCAT97",
"CAT_CMCCAT2000",
"CAT_CAT02",
"CAT_CAT02_BRILL2008",
"CAT_CAT16",
"CAT_BIANCO2010",
"CAT_PC_BIANCO2010",
"CHROMATIC_ADAPTATION_TRANSFORMS",
]
CAT_XYZ_SCALING: NDArray = np.array(np.identity(3)).reshape([3, 3])
"""
*XYZ Scaling* chromatic adaptation transform.
References
----------
:cite:`Lindbloom2009g`
"""
CAT_VON_KRIES: NDArray = np.array(
[
[0.4002400, 0.7076000, -0.0808100],
[-0.2263000, 1.1653200, 0.0457000],
[0.0000000, 0.0000000, 0.9182200],
]
)
"""
*Von Kries* chromatic adaptation transform.
References
----------
:cite:`CIETC1-321994b`, :cite:`Fairchild2013ba`, :cite:`Lindbloom2009g`,
:cite:`Nayatani1995a`
"""
CAT_BRADFORD: NDArray = np.array(
[
[0.8951000, 0.2664000, -0.1614000],
[-0.7502000, 1.7135000, 0.0367000],
[0.0389000, -0.0685000, 1.0296000],
]
)
"""
*Bradford* chromatic adaptation transform.
References
----------
:cite:`Lindbloom2009g`
"""
CAT_SHARP: NDArray = np.array(
[
[1.2694, -0.0988, -0.1706],
[-0.8364, 1.8006, 0.0357],
[0.0297, -0.0315, 1.0018],
]
)
"""
*Sharp* chromatic adaptation transform.
References
----------
:cite:`Bianco2010a`
"""
CAT_FAIRCHILD: NDArray = np.array(
[
[0.8562, 0.3372, -0.1934],
[-0.8360, 1.8327, 0.0033],
[0.0357, -0.0469, 1.0112],
]
)
"""
*Fairchild* chromatic adaptation transform.
References
----------
:cite:`Fairchildb`
"""
CAT_CMCCAT97: NDArray = np.array(
[
[0.8951, -0.7502, 0.0389],
[0.2664, 1.7135, 0.0685],
[-0.1614, 0.0367, 1.0296],
]
)
"""
*CMCCAT97* chromatic adaptation transform.
References
----------
:cite:`Westland2012g`
"""
CAT_CMCCAT2000: NDArray = np.array(
[
[0.7982, 0.3389, -0.1371],
[-0.5918, 1.5512, 0.0406],
[0.0008, 0.0239, 0.9753],
]
)
"""
*CMCCAT2000* chromatic adaptation transform.
References
----------
:cite:`Westland2012k`
"""
CAT_CAT02: NDArray = np.array(
[
[0.7328, 0.4296, -0.1624],
[-0.7036, 1.6975, 0.0061],
[0.0030, 0.0136, 0.9834],
]
)
"""
*CAT02* chromatic adaptation transform.
References
----------
:cite:`Wikipedia2007`
"""
CAT_CAT02_BRILL2008: NDArray = np.array(
[
[0.7328, 0.4296, -0.1624],
[-0.7036, 1.6975, 0.0061],
[0.0000, 0.0000, 1.0000],
]
)
"""
*Brill and Susstrunk (2008)* corrected CAT02 chromatic adaptation
transform.
References
----------
:cite:`Brill2008a`, :cite:`Li2007e`
"""
CAT_CAT16: NDArray = np.array(
[
[0.401288, 0.650173, -0.051461],
[-0.250268, 1.204414, 0.045854],
[-0.002079, 0.048952, 0.953127],
]
)
"""
*CAT16* chromatic adaptation transform.
References
----------
:cite:`Li2017`
"""
CAT_BIANCO2010: NDArray = np.array(
[
[0.8752, 0.2787, -0.1539],
[-0.8904, 1.8709, 0.0195],
[-0.0061, 0.0162, 0.9899],
]
)
"""
*Bianco and Schettini (2010)* chromatic adaptation transform.
References
----------
:cite:`Bianco2010a`
"""
CAT_PC_BIANCO2010: NDArray = np.array(
[
[0.6489, 0.3915, -0.0404],
[-0.3775, 1.3055, 0.0720],
[-0.0271, 0.0888, 0.9383],
]
)
"""
*Bianco and Schettini PC (2010)* chromatic adaptation transform.
References
----------
:cite:`Bianco2010a`
Notes
-----
- This chromatic adaptation transform has no negative lobes.
"""
CHROMATIC_ADAPTATION_TRANSFORMS: CaseInsensitiveMapping = (
CaseInsensitiveMapping(
{
"XYZ Scaling": CAT_XYZ_SCALING,
"Von Kries": CAT_VON_KRIES,
"Bradford": CAT_BRADFORD,
"Sharp": CAT_SHARP,
"Fairchild": CAT_FAIRCHILD,
"CMCCAT97": CAT_CMCCAT97,
"CMCCAT2000": CAT_CMCCAT2000,
"CAT02": CAT_CAT02,
"CAT02 Brill 2008": CAT_CAT02_BRILL2008,
"CAT16": CAT_CAT16,
"Bianco 2010": CAT_BIANCO2010,
"Bianco PC 2010": CAT_PC_BIANCO2010,
}
)
)
CHROMATIC_ADAPTATION_TRANSFORMS.__doc__ = """
Chromatic adaptation transforms.
References
----------
:cite:`Bianco2010a`, :cite:`Brill2008a`, :cite:`Fairchildb`, :cite:`Li2007e`,
:cite:`Li2017`, :cite:`Lindbloom2009g`, :cite:`Westland2012g`,
:cite:`Westland2012k`, :cite:`Wikipedia2007`
"""
|
|
from datetime import datetime
import jsonschema
from slamon_afm.models import db, Agent, AgentCapability, Task
from slamon_afm.tests.afm_test import AFMTest
class TestPolling(AFMTest):
task_request_response_schema = {
'type': 'object',
'properties': {
'tasks': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'task_id': {
'type': 'string',
'pattern': '^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'
},
'task_type': {'type': 'string'},
'task_version': {'type': 'integer'},
'task_data': {'type': 'object'}
},
'required': ['task_id', 'task_type', 'task_version', 'task_data'],
'additionalProperties': False
}
},
'return_time': {
'type': 'string'
}
},
'required': ['tasks', 'return_time'],
'additionalProperties': False
}
def test_poll_tasks_non_json_request(self):
"""Test a non-JSON request"""
assert self.test_app.post('/tasks', expect_errors=True).status_int == 400
def test_poll_tasks_empty_request(self):
assert self.test_app.post_json('/tasks', {}, expect_errors=True).status_int == 400
assert self.test_app.post_json('/tasks/', {}, expect_errors=True).status_int == 400
def test_poll_tasks_empty(self):
"""Test if task polling behaves when no tasks are available."""
resp = self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 5
})
jsonschema.validate(resp.json, TestPolling.task_request_response_schema)
self.assertEqual(len(resp.json['tasks']), 0)
def test_poll_tasks_claim_one(self):
"""Test if task is correctly claimed."""
task = Task()
task.uuid = 'de305d54-75b4-431b-adb2-eb6b9e546013'
task.test_id = 'de305d54-75b4-431b-adb2-eb6b9e546013'
task.type = 'task-type-1'
task.version = 1
task.data = "{}"
db.session.add(task)
db.session.commit()
resp = self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 5
})
jsonschema.validate(resp.json, TestPolling.task_request_response_schema)
self.assertEqual(len(resp.json['tasks']), 1)
def test_poll_task_capability_change(self):
self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 5
})
agent = db.session.query(Agent).filter(Agent.uuid == 'de305d54-75b4-431b-adb2-eb6b9e546013').one()
self.assertEqual(len(agent.capabilities), 2)
self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 2},
'task-type-3': {'version': 3},
'task-type-4': {'version': 4}
},
'max_tasks': 5
})
agent = db.session.query(Agent).filter(Agent.uuid == 'de305d54-75b4-431b-adb2-eb6b9e546013').one()
self.assertEqual(len(agent.capabilities), 3)
self.assertEqual(db.session.query(AgentCapability).filter(AgentCapability.agent_uuid == agent.uuid). \
filter(AgentCapability.type == 'task-type-1').one().version, 2)
self.assertEqual(db.session.query(AgentCapability).filter(AgentCapability.agent_uuid == agent.uuid). \
filter(AgentCapability.type == 'task-type-3').one().version, 3)
self.assertEqual(db.session.query(AgentCapability).filter(AgentCapability.agent_uuid == agent.uuid). \
filter(AgentCapability.type == 'task-type-4').one().version, 4)
def test_poll_tasks_invalid_data(self):
# Invalid protocol
assert self.test_app.post_json('/tasks', {
'protocol': 'invalid',
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 1
}, expect_errors=True).status_int == 400
# Another invalid protocol - so far only protocol version 1 is supported
assert self.test_app.post_json('/tasks', {
'protocol': 5,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 1
}, expect_errors=True).status_int == 400
# Invalid agent_id
assert self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'invalid_agent',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 1
}, expect_errors=True).status_int == 400
assert self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 'many_tasks'
}, expect_errors=True).status_int == 400
# Extra fields
assert self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 'many_tasks',
'extra_field': 1234
}, expect_errors=True).status_int == 400
# Extra field inside agent_location
assert self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18',
'somewhere': 'else'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 'many_tasks'
}, expect_errors=True).status_int == 400
def test_poll_tasks_missing_data(self):
# Missing max_tasks
assert self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
}
}, expect_errors=True).status_int == 400
# Missing agent_id
assert self.test_app.post_json('/tasks', {
'protocol': 1,
'agent_name': 'Agent 007',
'agent_location': {
'country': 'FI',
'region': '18'
},
'agent_time': '2012-04-23T18:25:43.511Z',
'agent_capabilities': {
'task-type-1': {'version': 1},
'task-type-2': {'version': 2}
},
'max_tasks': 5
}, expect_errors=True).status_int == 400
class TestPushing(AFMTest):
def test_push_response_non_json(self):
assert self.test_app.post('/tasks/response', expect_errors=True).status_int == 400
assert self.test_app.post('/tasks/response/', expect_errors=True).status_int == 400
def test_push_response_empty(self):
assert self.test_app.post_json('/tasks/response', {}, expect_errors=True).status_int == 400
assert self.test_app.post_json('/tasks/response/', {}, expect_errors=True).status_int == 400
def test_push_response(self):
task = Task()
task.uuid = 'de305d54-75b4-431b-adb2-eb6b9e546013'
task.test_id = 'de305d54-75b4-431b-adb2-eb6b9e546013'
task.claimed = datetime.utcnow()
db.session.add(task)
db.session.commit()
r = self.test_app.post_json('/tasks/response', {
'protocol': 1,
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_data': {
'key': 'value',
'another_key': 5
}
})
print(r)
task = db.session.query(Task).filter(Task.uuid == 'de305d54-75b4-431b-adb2-eb6b9e546013').one()
self.assertIsNotNone(task.completed)
self.assertIsNotNone(task.result_data)
self.assertIsNone(task.failed)
self.assertIsNone(task.error)
task.completed = None
task.result_data = None
db.session.commit()
self.test_app.post_json('/tasks/response', {
'protocol': 1,
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_error': 'Something went terribly wrong'
})
task = db.session.query(Task).filter(Task.uuid == 'de305d54-75b4-431b-adb2-eb6b9e546013').one()
assert task.completed is None
assert task.result_data is None
assert task.failed is not None
assert task.error is not None
def test_push_response_invalid(self):
# Invalid task id
assert self.test_app.post_json('/tasks/response', {
'protocol': 1,
'task_id': 5,
'task_data': {
'key': 'value',
'another_key': 5
}
}, expect_errors=True).status_int == 400
# Missing data and error
assert self.test_app.post_json('/tasks/response', {
'protocol': 1,
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546012'
}, expect_errors=True).status_int == 400
# Wrong type for error
assert self.test_app.post_json('/tasks/response', {
'protocol': 1,
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_error': 5
}, expect_errors=True).status_int == 400
# Task that doesn't exist
assert self.test_app.post_json('/tasks/response', {
'protocol': 1,
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_data': {
'key': 'value',
'another_key': 5
}
}, expect_errors=True).status_int == 400
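# --- Illustrative sketch (not part of the original tests) ---
# A hypothetical agent-side call posting a task result with the payload shape
# the tests above exercise; the requests dependency and base URL are
# assumptions made for the example.
import requests
def push_task_result(task_id, task_data, afm_url='http://localhost:8080'):
    return requests.post(afm_url + '/tasks/response', json={
        'protocol': 1,
        'task_id': task_id,
        'task_data': task_data
    })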
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron import subnetpool
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests.openstack.neutron import inline_templates
from heat.tests import utils
class NeutronSubnetPoolTest(common.HeatTestCase):
def setUp(self):
super(NeutronSubnetPoolTest, self).setUp()
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
self.find_resource = self.patchobject(neutronV20,
'find_resourceid_by_name_or_id',
return_value='new_test')
def create_subnetpool(self, status='COMPLETE'):
self.t = template_format.parse(inline_templates.SPOOL_TEMPLATE)
self.stack = utils.parse_stack(self.t)
resource_defns = self.stack.t.resource_definitions(self.stack)
rsrc = subnetpool.SubnetPool('sub_pool', resource_defns['sub_pool'],
self.stack)
if status == 'FAILED':
self.patchobject(neutronclient.Client, 'create_subnetpool',
side_effect=qe.NeutronClientException(
status_code=500))
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: resources.sub_pool: '
'An unknown exception occurred.',
six.text_type(error))
else:
self.patchobject(neutronclient.Client, 'create_subnetpool',
return_value={'subnetpool': {
'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
}})
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, status), rsrc.state)
return rsrc
def test_validate_prefixlen_min_gt_max(self):
self.t = template_format.parse(inline_templates.SPOOL_TEMPLATE)
props = self.t['resources']['sub_pool']['properties']
props['min_prefixlen'] = 28
props['max_prefixlen'] = 24
self.stack = utils.parse_stack(self.t)
rsrc = self.stack['sub_pool']
errMessage = ('Illegal prefix bounds: max_prefixlen=24, '
'min_prefixlen=28.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(errMessage, six.text_type(error))
def test_validate_prefixlen_default_gt_max(self):
self.t = template_format.parse(inline_templates.SPOOL_TEMPLATE)
props = self.t['resources']['sub_pool']['properties']
props['default_prefixlen'] = 28
props['max_prefixlen'] = 24
self.stack = utils.parse_stack(self.t)
rsrc = self.stack['sub_pool']
errMessage = ('Illegal prefix bounds: max_prefixlen=24, '
'default_prefixlen=28.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(errMessage, six.text_type(error))
def test_validate_prefixlen_min_gt_default(self):
self.t = template_format.parse(inline_templates.SPOOL_TEMPLATE)
props = self.t['resources']['sub_pool']['properties']
props['min_prefixlen'] = 28
props['default_prefixlen'] = 24
self.stack = utils.parse_stack(self.t)
rsrc = self.stack['sub_pool']
errMessage = ('Illegal prefix bounds: min_prefixlen=28, '
'default_prefixlen=24.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(errMessage, six.text_type(error))
def test_validate_minimal(self):
self.t = template_format.parse(inline_templates.SPOOL_MINIMAL_TEMPLATE)
self.stack = utils.parse_stack(self.t)
rsrc = self.stack['sub_pool']
self.assertIsNone(rsrc.validate())
def test_create_subnetpool(self):
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
def test_create_subnetpool_failed(self):
self.create_subnetpool('FAILED')
def test_delete_subnetpool(self):
self.patchobject(neutronclient.Client, 'delete_subnetpool')
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_delete_subnetpool_not_found(self):
self.patchobject(neutronclient.Client, 'delete_subnetpool',
side_effect=qe.NotFound(status_code=404))
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_delete_subnetpool_resource_id_none(self):
delete_pool = self.patchobject(neutronclient.Client,
'delete_subnetpool')
rsrc = self.create_subnetpool()
rsrc.resource_id = None
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
delete_pool.assert_not_called()
def test_update_subnetpool(self):
update_subnetpool = self.patchobject(neutronclient.Client,
'update_subnetpool')
rsrc = self.create_subnetpool()
self.patchobject(rsrc, 'physical_resource_name',
return_value='the_new_sp')
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
props = {
'name': 'the_new_sp',
'prefixes': [
'10.1.0.0/16',
'10.2.0.0/16'],
'address_scope': 'new_test',
'default_quota': '16',
'default_prefixlen': '24',
'min_prefixlen': '24',
'max_prefixlen': '28',
'is_default': False,
}
update_dict = props.copy()
update_dict['name'] = 'the_new_sp'
update_dict['address_scope_id'] = update_dict.pop('address_scope')
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
# with name
self.assertIsNone(rsrc.handle_update(update_snippet, {}, props))
# without name
props['name'] = None
self.assertIsNone(rsrc.handle_update(update_snippet, {}, props))
self.assertEqual(2, update_subnetpool.call_count)
update_subnetpool.assert_called_with(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'subnetpool': update_dict})
def test_update_subnetpool_no_prop_diff(self):
update_subnetpool = self.patchobject(neutronclient.Client,
'update_subnetpool')
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
rsrc.t)
self.assertIsNone(rsrc.handle_update(update_snippet, {}, {}))
update_subnetpool.assert_not_called()
def test_update_subnetpool_validate_prefixes(self):
update_subnetpool = self.patchobject(neutronclient.Client,
'update_subnetpool')
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
prefix_old = rsrc.properties['prefixes']
props = {
'name': 'the_new_sp',
'prefixes': ['10.5.0.0/16']
}
prefix_new = props['prefixes']
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
errMessage = ('Property prefixes updated value %(value1)s '
'should be superset of existing value %(value2)s.'
% dict(value1=sorted(prefix_new),
value2=sorted(prefix_old)))
error = self.assertRaises(exception.StackValidationFailed,
rsrc.handle_update,
update_snippet, {}, props)
self.assertEqual(errMessage, six.text_type(error))
update_subnetpool.assert_not_called()
props = {
'name': 'the_new_sp',
'prefixes': ['10.0.0.0/8',
'10.6.0.0/16'],
}
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
self.assertIsNone(rsrc.handle_update(update_snippet, {}, props))
update_subnetpool.assert_called_once_with(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'subnetpool': props})
def test_update_subnetpool_update_address_scope(self):
update_subnetpool = self.patchobject(neutronclient.Client,
'update_subnetpool')
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
props = {
'name': 'the_new_sp',
'address_scope': 'new_test',
'prefixes': ['10.0.0.0/8',
'10.6.0.0/16'],
}
update_dict = {
'name': 'the_new_sp',
'address_scope_id': 'new_test',
'prefixes': ['10.0.0.0/8',
'10.6.0.0/16'],
}
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
self.assertIsNone(rsrc.handle_update(update_snippet, {}, props))
self.assertEqual(3, self.find_resource.call_count)
update_subnetpool.assert_called_once_with(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'subnetpool': update_dict})
def test_update_subnetpool_remove_address_scope(self):
update_subnetpool = self.patchobject(neutronclient.Client,
'update_subnetpool')
rsrc = self.create_subnetpool()
ref_id = rsrc.FnGetRefId()
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)
props = {
'name': 'the_new_sp',
'prefixes': ['10.0.0.0/8',
'10.6.0.0/16'],
}
props_diff = {'address_scope': None}
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
self.assertIsNone(rsrc.handle_update(update_snippet, {}, props_diff))
self.assertEqual(2, self.find_resource.call_count)
update_subnetpool.assert_called_once_with(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'subnetpool': props_diff})
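# --- Illustrative sketch (not part of the original tests) ---
# The three validation tests above pin down the subnet pool invariant
# min_prefixlen <= default_prefixlen <= max_prefixlen. A hypothetical
# standalone check producing the same style of message:
def _check_prefix_bounds(min_prefixlen, default_prefixlen, max_prefixlen):
    if min_prefixlen > max_prefixlen:
        raise ValueError('Illegal prefix bounds: max_prefixlen=%s, '
                         'min_prefixlen=%s.' % (max_prefixlen, min_prefixlen))
    if default_prefixlen > max_prefixlen:
        raise ValueError('Illegal prefix bounds: max_prefixlen=%s, '
                         'default_prefixlen=%s.' % (max_prefixlen, default_prefixlen))
    if min_prefixlen > default_prefixlen:
        raise ValueError('Illegal prefix bounds: min_prefixlen=%s, '
                         'default_prefixlen=%s.' % (min_prefixlen, default_prefixlen))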
|
|
from random import choice
import mimetypes
import pyrax
import re
import swiftclient
from gzip import GzipFile
from StringIO import StringIO
from django.core.files.base import File, ContentFile
from django.core.files.storage import Storage
from .cumulus_settings import CUMULUS
######### FROM DJANGO-CUMULUS UNRELEASED v1.1 #########
HEADER_PATTERNS = tuple((re.compile(p), h) for p, h in CUMULUS.get("HEADERS", {}))
def sync_headers(cloud_obj, headers={}, header_patterns=HEADER_PATTERNS):
"""
Overwrites the given cloud_obj's headers with the ones given as ``headers``
and adds additional headers as defined in the HEADERS setting depending on
the cloud_obj's file name.
"""
# don't set headers on directories
content_type = getattr(cloud_obj, "content_type", None)
if content_type == "application/directory":
return
matched_headers = {}
for pattern, pattern_headers in header_patterns:
if pattern.match(cloud_obj.name):
matched_headers.update(pattern_headers.copy())
# preserve headers already set
matched_headers.update(cloud_obj.headers)
# explicitly set headers overwrite matches and already set headers
matched_headers.update(headers)
if matched_headers != cloud_obj.headers:
cloud_obj.headers = matched_headers
cloud_obj.sync_metadata()
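# --- Illustrative sketch (not part of the original module) ---
# sync_headers above merges headers with increasing precedence: pattern
# matches from the HEADERS setting, then headers already on the object,
# then explicitly passed headers. The same precedence with plain dicts:
_merged = {}
_merged.update({"Cache-Control": "max-age=60"})   # from a pattern match
_merged.update({"Cache-Control": "max-age=300"})  # already on the object
_merged.update({"Cache-Control": "no-cache"})     # explicitly passed
assert _merged["Cache-Control"] == "no-cache"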
def get_gzipped_contents(input_file):
"""
Returns a gzipped version of a previously opened file's buffer.
"""
zbuf = StringIO()
zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
zfile.write(input_file.read())
zfile.close()
return ContentFile(zbuf.getvalue())
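# --- Illustrative sketch (not part of the original module) ---
# Round-trip of get_gzipped_contents above: gzip a buffer, then gunzip it the
# same way SwiftclientStorageFile.read does for gzip-encoded objects.
_gzipped = get_gzipped_contents(StringIO("payload"))
assert GzipFile(mode="rb", fileobj=StringIO(_gzipped.read())).read() == "payload"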
class SwiftclientStorage(Storage):
"""
Custom storage for Swiftclient.
"""
default_quick_listdir = True
api_key = CUMULUS["API_KEY"]
auth_url = CUMULUS["AUTH_URL"]
region = CUMULUS["REGION"]
connection_kwargs = {}
container_name = CUMULUS["CONTAINER"]
use_snet = CUMULUS["SERVICENET"]
username = CUMULUS["USERNAME"]
ttl = CUMULUS["TTL"]
use_ssl = CUMULUS["USE_SSL"]
use_pyrax = CUMULUS["USE_PYRAX"]
def __init__(self, username=None, api_key=None, container=None,
connection_kwargs=None, container_uri=None):
"""
Initializes the settings for the connection and container.
"""
if username is not None:
self.username = username
if api_key is not None:
self.api_key = api_key
if container is not None:
self.container_name = container
if connection_kwargs is not None:
self.connection_kwargs = connection_kwargs
# connect
if CUMULUS["USE_PYRAX"]:
if CUMULUS["PYRAX_IDENTITY_TYPE"]:
pyrax.set_setting("identity_type", CUMULUS["PYRAX_IDENTITY_TYPE"])
pyrax.set_credentials(self.username, self.api_key)
def __getstate__(self):
"""
Return a picklable representation of the storage.
"""
return {
"username": self.username,
"api_key": self.api_key,
"container_name": self.container_name,
"use_snet": self.use_snet,
"connection_kwargs": self.connection_kwargs
}
def _get_connection(self):
if not hasattr(self, "_connection"):
if CUMULUS["USE_PYRAX"]:
public = not self.use_snet # invert
self.ord_connection = pyrax.connect_to_cloudfiles(region="ORD",
public=public)
self.dfw_connection = pyrax.connect_to_cloudfiles(region="DFW",
public=public)
if CUMULUS["REGION"] == "ORD":
self._connection = self.ord_connection
else:
self._connection = self.dfw_connection
else:
self._connection = swiftclient.Connection(
authurl=CUMULUS["AUTH_URL"],
user=CUMULUS["USERNAME"],
key=CUMULUS["API_KEY"],
snet=CUMULUS["SERVICENET"],
auth_version=CUMULUS["AUTH_VERSION"],
tenant_name=CUMULUS["AUTH_TENANT_NAME"],
)
return self._connection
def _set_connection(self, value):
self._connection = value
connection = property(_get_connection, _set_connection)
def _get_container(self):
"""
Gets or creates the container.
"""
if not hasattr(self, "_container"):
if CUMULUS["USE_PYRAX"]:
self._container = self.connection.create_container(self.container_name)
else:
self._container = None
return self._container
def _set_container(self, container):
"""
Sets the container (and, if needed, the configured TTL on it), making
the container publicly available.
"""
if CUMULUS["USE_PYRAX"]:
if container.cdn_ttl != self.ttl or not container.cdn_enabled:
container.make_public(ttl=self.ttl)
if hasattr(self, "_container_public_uri"):
delattr(self, "_container_public_uri")
self._container = container
container = property(_get_container, _set_container)
def _get_container_url(self):
if self.use_ssl and CUMULUS["CONTAINER_SSL_URI"]:
self._container_public_uri = CUMULUS["CONTAINER_SSL_URI"]
elif self.use_ssl:
self._container_public_uri = self.container.cdn_ssl_uri
elif CUMULUS["CONTAINER_URI"]:
self._container_public_uri = CUMULUS["CONTAINER_URI"]
else:
self._container_public_uri = self.container.cdn_uri
if CUMULUS["CNAMES"] and self._container_public_uri in CUMULUS["CNAMES"]:
self._container_public_uri = CUMULUS["CNAMES"][self._container_public_uri]
return self._container_public_uri
container_url = property(_get_container_url)
def _get_object(self, name):
"""
Helper function to retrieve the requested Object.
"""
if name not in self.container.get_object_names():
return False
else:
return self.container.get_object(name)
def _open(self, name, mode="rb"):
"""
Returns the SwiftclientStorageFile.
"""
return SwiftclientStorageFile(storage=self, name=name)
def _save(self, name, content):
"""
Uses the Swiftclient service to write ``content`` to a remote
file (called ``name``).
"""
# Checks if the content_type is already set.
# Otherwise uses the mimetypes library to guess.
if hasattr(content.file, "content_type"):
content_type = content.file.content_type
else:
mime_type, encoding = mimetypes.guess_type(name)
content_type = mime_type
headers = {"Content-Type": content_type}
# gzip the file if its of the right content type
if content_type in CUMULUS.get("GZIP_CONTENT_TYPES", []):
content_encoding = headers["Content-Encoding"] = "gzip"
else:
content_encoding = None
if CUMULUS["USE_PYRAX"]:
# TODO set headers
if content_encoding == "gzip":
content = get_gzipped_contents(content)
self.connection.store_object(container=self.container.name,
obj_name=name,
data=content.read(),
content_type=content_type,
content_encoding=content_encoding,
etag=None)
else:
# TODO gzipped content when using swift client
self.connection.put_object(self.container.name, name,
content, headers=headers)
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
Deleting a model doesn't delete associated files: bit.ly/12s6Oox
"""
try:
self.connection.delete_object(self.container.name, name)
except pyrax.exceptions.ClientException as exc:
if exc.http_status == 404:
pass
else:
raise
def exists(self, name):
"""
Returns True if a file referenced by the given name already
exists in the storage system, or False if the name is
available for a new file.
"""
return bool(self._get_object(name))
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
return self._get_object(name).total_bytes
def url(self, name):
"""
Returns an absolute URL where the content of each file can be
accessed directly by a web browser.
"""
return "{0}/{1}".format(self.container_url, name)
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple;
the first being an empty list of directories (not available
for quick-listing), the second being a list of filenames.
If the list of directories is required, use the full_listdir method.
"""
files = []
if path and not path.endswith("/"):
path = "{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
files.append(name[path_len:])
return ([], files)
def full_listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple
of lists; the first item being directories, the second item
being files.
"""
dirs = set()
files = []
if path and not path.endswith("/"):
path = "{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
name = name[path_len:]
slash = name[1:-1].find("/") + 1
if slash:
dirs.add(name[:slash])
elif name:
files.append(name)
dirs = list(dirs)
dirs.sort()
return (dirs, files)
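# Example of the difference between the two listing methods, assuming the
# container holds "css/site.css" and "js/app.js" (hypothetical names):
#
#     storage.listdir("")       # -> ([], ["css/site.css", "js/app.js"])
#     storage.full_listdir("")  # -> (["css", "js"], [])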
class SwiftclientStaticStorage(SwiftclientStorage):
"""
Subclasses SwiftclientStorage to automatically set the container
to the one specified in CUMULUS["STATIC_CONTAINER"]. This provides
the ability to specify a separate storage backend for Django's
collectstatic command.
To use, make sure CUMULUS["STATIC_CONTAINER"] is set to something other
than CUMULUS["CONTAINER"]. Then, tell Django's staticfiles app by setting
STATICFILES_STORAGE = "cumulus.storage.SwiftclientStaticStorage".
"""
container_name = CUMULUS["STATIC_CONTAINER"]
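# A minimal sketch of wiring this storage into a Django settings module
# (the container names are hypothetical):
#
# CUMULUS = {
#     ...
#     "CONTAINER": "media",
#     "STATIC_CONTAINER": "static",
# }
# STATICFILES_STORAGE = "cumulus.storage.SwiftclientStaticStorage"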
class SwiftclientStorageFile(File):
    # ``closed`` is provided as a property near the end of this class, so
    # no class-level default is needed here.
def __init__(self, storage, name, *args, **kwargs):
self._storage = storage
self._pos = 0
super(SwiftclientStorageFile, self).__init__(file=None, name=name,
*args, **kwargs)
def _get_pos(self):
return self._pos
def _get_size(self):
if not hasattr(self, "_size"):
self._size = self._storage.size(self.name)
return self._size
def _set_size(self, size):
self._size = size
size = property(_get_size, _set_size)
def _get_file(self):
if not hasattr(self, "_file"):
self._file = self._storage._get_object(self.name)
self._file.tell = self._get_pos
return self._file
def _set_file(self, value):
if value is None:
if hasattr(self, "_file"):
del self._file
else:
self._file = value
file = property(_get_file, _set_file)
def read(self, chunk_size=-1):
"""
        Reads the specified chunk_size, or the whole file if chunk_size is
        negative (the default). If reading the whole file and the
        content-encoding is gzip, also gunzip the read content.
"""
if self._pos == self._get_size() or chunk_size == 0:
return ""
if chunk_size < 0:
meta, data = self.file.get(include_meta=True)
if meta.get('content-encoding', None) == 'gzip':
zbuf = StringIO(data)
zfile = GzipFile(mode="rb", fileobj=zbuf)
data = zfile.read()
else:
data = self.file.get(chunk_size=chunk_size).next()
self._pos += len(data)
return data
def chunks(self, chunk_size=None):
"""
        Returns an iterator over the file's contents, yielding chunks of
        chunk_size bytes (defaulting to DEFAULT_CHUNK_SIZE).
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.file.get(chunk_size=chunk_size)
def open(self, *args, **kwargs):
"""
Opens the cloud file object.
"""
self._pos = 0
    def close(self, *args, **kwargs):
        """
        Resets the read position so the file can be re-read.
        """
        self._pos = 0
@property
def closed(self):
return not hasattr(self, "_file")
def seek(self, pos):
self._pos = pos
######### END FROM DJANGO-CUMULUS UNRELEASED v1.1 #########
class MultiContainerCloudFilesStorage(SwiftclientStorage):
"""
Custom storage for Rackspace Cloud Files.
"""
active_containers = CUMULUS['ACTIVE_CONTAINERS']
all_containers = CUMULUS['ALL_CONTAINERS']
def _open(self, name, mode='rb'):
name = self.set_current_container(name)
return super(MultiContainerCloudFilesStorage, self)._open(name, mode)
def _save(self, name, content):
name = super(MultiContainerCloudFilesStorage, self)._save(name, content)
return "%s/%s" % (self.container.name, name)
def delete(self, name):
name = self.set_current_container(name)
return super(MultiContainerCloudFilesStorage, self).delete(name)
    def exists(self, name):
        # Choose a random active container so that new saves are spread
        # across containers: Django's storage API calls exists() (via
        # get_available_name) right before _save(), which then writes to
        # the container selected here.
        self.set_random_container()
        return super(MultiContainerCloudFilesStorage, self).exists(name)
def url(self, name):
name = self.set_current_container(name)
return super(MultiContainerCloudFilesStorage, self).url(name)
def modified_time(self, name):
name = self.set_current_container(name)
return super(MultiContainerCloudFilesStorage, self).modified_time(name)
def set_current_container(self, name):
"""
Set the current container based on the first folder portion of 'name'
"""
container_name, separator, new_name = name.partition("/")
if container_name in self.all_containers:
if self.container.name != container_name:
# Get region and set the connection
self.set_connection_by_container_name(container_name)
# Then get and set the container
self.container = self.connection.get_container(container_name)
if hasattr(self, '_container_public_uri'):
delattr(self, '_container_public_uri')
return new_name
else: # Else we need to use the default container
if self.container.name != self.container_name:
# Get region and set the connection
self.set_connection_by_container_name(self.container_name)
# Then get and set the container
self.container = self.connection.get_container(self.container_name)
if hasattr(self, '_container_public_uri'):
delattr(self, '_container_public_uri')
return name
def set_random_container(self):
"""
Set the container to a random container for load balancing
"""
container_name = choice(self.active_containers)
# Get region and set the connection
self.set_connection_by_container_name(container_name)
# Then get and set the container
self.container = self.connection.get_container(container_name)
if hasattr(self, '_container_public_uri'):
delattr(self, '_container_public_uri')
    def set_connection_by_container_name(self, container_name):
        # Touch the lazy ``connection`` property first; this appears to
        # ensure the regional connections below are initialised before
        # one of them is swapped in.
        self.connection
if CUMULUS['CONTAINER_REGIONS'][container_name] == "ORD":
self.connection = self.ord_connection
else:
self.connection = self.dfw_connection
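# A minimal sketch of the settings this multi-container storage expects
# (container and region names are hypothetical):
#
# CUMULUS = {
#     ...
#     "ACTIVE_CONTAINERS": ["uploads-1", "uploads-2"],  # targets for new saves
#     "ALL_CONTAINERS": ["uploads-1", "uploads-2", "uploads-legacy"],
#     "CONTAINER_REGIONS": {
#         "uploads-1": "ORD",
#         "uploads-2": "DFW",
#         "uploads-legacy": "DFW",
#     },
# }
#
# Because _save() prefixes the stored name with the container it landed in,
# values persisted to the database look like "uploads-1/path/to/file.jpg",
# and set_current_container() later routes reads back to the right container.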