gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
"""QuizReports API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from base import BaseCanvasAPI
from base import BaseModel
class QuizReportsAPI(BaseCanvasAPI):
    """QuizReports API Version 1.0."""

    def __init__(self, *args, **kwargs):
        """Init method for QuizReportsAPI: delegate setup, attach a logger."""
        super(QuizReportsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("pycanvas.QuizReportsAPI")

    def retrieve_all_quiz_reports(self, quiz_id, course_id, includes_all_versions=None):
        """
        Retrieve all quiz reports.

        Returns a list of all available reports.
        """
        # Required path parameters.
        route = {"course_id": course_id, "quiz_id": quiz_id}
        form = {}
        query = {}
        # Optional: whether to retrieve reports that consider all the
        # submissions or only the most recent. Defaults to false, ignored
        # for item_analysis reports.
        if includes_all_versions is not None:
            query["includes_all_versions"] = includes_all_versions
        self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=query, data=form, **route))
        return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**route), data=form, params=query, all_pages=True)

    def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None):
        """
        Create a quiz report.

        Create and return a new report for this quiz. If a previously
        generated report matches the arguments and is still current (i.e.
        there have been no new submissions), it will be returned.

        *Responses*

        * <code>400 Bad Request</code> if the specified report type is invalid
        * <code>409 Conflict</code> if a quiz report of the specified type is
          already being generated
        """
        # Required path parameters.
        route = {"course_id": course_id, "quiz_id": quiz_id}
        form = {}
        query = {}
        # Required: the type of report to be generated.
        self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"])
        form["quiz_report[report_type]"] = quiz_report_report_type
        # Optional: whether the report should consider all submissions or
        # only the most recent. Defaults to false, ignored for item_analysis.
        if quiz_report_includes_all_versions is not None:
            form["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions
        # Optional: whether the output should include documents for the file
        # and/or progress objects associated with this report.
        # (Note: JSON-API only)
        if include is not None:
            self._validate_enum(include, ["file", "progress"])
            form["include"] = include
        self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=query, data=form, **route))
        return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**route), data=form, params=query, single_item=True)

    def get_quiz_report(self, id, quiz_id, course_id, include=None):
        """
        Get a quiz report.

        Returns the data for a single quiz report.
        """
        # Required path parameters.
        route = {"course_id": course_id, "quiz_id": quiz_id, "id": id}
        form = {}
        query = {}
        # Optional: whether the output should include documents for the file
        # and/or progress objects associated with this report.
        # (Note: JSON-API only)
        if include is not None:
            self._validate_enum(include, ["file", "progress"])
            query["include"] = include
        self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id} with query params: {params} and form data: {data}".format(params=query, data=form, **route))
        return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id}".format(**route), data=form, params=query, single_item=True)

    def abort_generation_of_report_or_remove_previously_generated_one(self, id, quiz_id, course_id):
        """
        Abort the generation of a report, or remove a previously generated one.

        This API allows you to cancel a previous request you issued for a
        report to be generated. Or in the case of an already generated report,
        you'd like to remove it, perhaps to generate it another time with an
        updated version that provides new features.

        You must check the report's generation status before attempting to use
        this interface. See the "workflow_state" property of the QuizReport's
        Progress object for more information. Only when the progress reports
        itself in a "queued" state can the generation be aborted.

        *Responses*

        - <code>204 No Content</code> if your request was accepted
        - <code>422 Unprocessable Entity</code> if the report is not being
          generated or can not be aborted at this stage
        """
        # Required path parameters.
        route = {"course_id": course_id, "quiz_id": quiz_id, "id": id}
        form = {}
        query = {}
        self.logger.debug("DELETE /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id} with query params: {params} and form data: {data}".format(params=query, data=form, **route))
        return self.generic_request("DELETE", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id}".format(**route), data=form, params=query, no_data=True)
def _local_only_property(name, doc):
    """Build a ``property`` for a Quizreport field backed by ``_<name>``.

    The getter returns the locally stored value. The setter updates the
    local object only and logs a warning, because assigning to a model
    attribute does NOT push the change to the remote Canvas instance.
    This replaces thirteen near-identical hand-written property pairs.
    """
    attr = "_" + name

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        # ``Logger.warn`` is deprecated; use ``Logger.warning``.
        # The message text is unchanged from the original implementation.
        self.logger.warning("Setting values on %s will NOT update the remote Canvas instance." % name)
        setattr(self, attr, value)

    return property(_get, _set, doc=doc)


class Quizreport(BaseModel):
    """Quizreport Model."""

    def __init__(self, file=None, progress_url=None, report_type=None, readable_type=None, url=None, created_at=None, updated_at=None, generatable=None, anonymous=None, progress=None, quiz_id=None, includes_all_versions=None, id=None):
        """Init method for Quizreport class.

        All fields default to None; they are normally populated from an
        API response payload.
        """
        self._file = file
        self._progress_url = progress_url
        self._report_type = report_type
        self._readable_type = readable_type
        self._url = url
        self._created_at = created_at
        self._updated_at = updated_at
        self._generatable = generatable
        self._anonymous = anonymous
        self._progress = progress
        self._quiz_id = quiz_id
        self._includes_all_versions = includes_all_versions
        self._id = id
        self.logger = logging.getLogger('pycanvas.Quizreport')

    file = _local_only_property("file", "if the report has finished generating, a File object that represents it. refer to the Files API for more information about the format.")
    progress_url = _local_only_property("progress_url", "if the report has not yet finished generating, a URL where information about its progress can be retrieved. refer to the Progress API for more information (Note: not available in JSON-API format).")
    report_type = _local_only_property("report_type", "which type of report this is possible values: 'student_analysis', 'item_analysis'.")
    readable_type = _local_only_property("readable_type", "a human-readable (and localized) version of the report_type.")
    url = _local_only_property("url", "the API endpoint for this report.")
    created_at = _local_only_property("created_at", "when the report was created.")
    updated_at = _local_only_property("updated_at", "when the report was last updated.")
    generatable = _local_only_property("generatable", "boolean indicating whether the report can be generated, which is true unless the quiz is a survey one.")
    anonymous = _local_only_property("anonymous", "boolean indicating whether the report is for an anonymous survey. if true, no student names will be included in the csv.")
    progress = _local_only_property("progress", "if the report is being generated, a Progress object that represents the operation. Refer to the Progress API for more information about the format. (Note: available only in JSON-API format).")
    quiz_id = _local_only_property("quiz_id", "the ID of the quiz.")
    includes_all_versions = _local_only_property("includes_all_versions", "boolean indicating whether the report represents all submissions or only the most recent ones for each student.")
    id = _local_only_property("id", "the ID of the quiz report.")
| |
from __future__ import print_function
# Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import os
import os.path as op
import warnings
from nose.tools import assert_true, assert_raises, assert_equal
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from scipy import stats
from itertools import product
from mne import io, Epochs, read_events, pick_types
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import get_score_funcs
from mne.io.meas_info import Info
from mne.utils import (set_log_file, _TempDir, requires_sklearn, slow_test,
run_tests_if_main)
warnings.simplefilter('always')  # enable b/c these tests throw warnings

# Paths to the small fixture files shipped with the mne test data.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')

# Shared epoching / cropping parameters for all tests below.
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6

# Score functions that cannot be applied to ICA sources in these tests.
score_funcs_unsuited = ['pointbiserialr', 'ansari']

try:
    from sklearn.utils.validation import NonBLASDotWarning
    warnings.simplefilter('error', NonBLASDotWarning)
except ImportError:
    # Older/newer sklearn may not expose NonBLASDotWarning; the stricter
    # filter is merely a nicety. (Was a bare ``except:`` which would also
    # have swallowed unrelated errors.)
    pass
@requires_sklearn
def test_ica_full_data_recovery():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = io.Raw(raw_fname).crop(0.5, stop, False)
    raw.preload_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    evoked = epochs.average()
    n_channels = 5
    # Keep pristine copies of the data to compare against after apply().
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    for method in ['fastica']:
        # (n_components, n_pca_components, expect_exact_recovery):
        # recovery is checked as exact only when no PCA dimensionality
        # reduction happens (n_pca_components == n_channels).
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            # --- Raw: fit, apply with nothing excluded, compare data ---
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method, max_iter=1)
            with warnings.catch_warnings(record=True):  # convergence
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw, exclude=[], copy=True)
            if ok:
                assert_allclose(data[:n_channels], raw2._data[:n_channels],
                                rtol=1e-10, atol=1e-15)
            else:
                # PCA truncation must visibly change the data.
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert_true(np.max(diff) > 1e-14)
            # --- Epochs: same round-trip check ---
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components)
            with warnings.catch_warnings(record=True):  # convergence
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs, exclude=[], copy=True)
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels], data2,
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)
            # --- Evoked: apply the epochs-fitted ICA to the average ---
            evoked2 = ica.apply(evoked, exclude=[], copy=True)
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels], data2,
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)
    # An unknown decomposition method must be rejected at construction time.
    assert_raises(ValueError, ICA, method='pizza-decomposision')
@requires_sklearn
def test_ica_rank_reduction():
    """Test that ICA + PCA application reduces the rank of the data"""
    # (The original docstring/comment here were copy-pasted from
    # test_ica_full_data_recovery.)
    raw = io.Raw(raw_fname).crop(0.5, stop, False)
    raw.preload_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)

        # Data starts at full rank over the picked channels.
        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components difficult
        # to predict. rank_after often seems to be 1 higher than
        # n_pca_components
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
@requires_sklearn
def test_ica_reset():
    """Test ICA resetting"""
    raw = io.Raw(raw_fname).crop(0.5, stop, False)
    raw.preload_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    # Attributes that only exist on a fitted ICA instance.
    fitted_attrs = ['_pre_whitener', 'unmixing_matrix_', 'mixing_matrix_',
                    'n_components_', 'n_samples_', 'pca_components_',
                    'pca_explained_variance_', 'pca_mean_']
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=3, max_pca_components=3, n_pca_components=3,
                  method='fastica', max_iter=1)
        ica.fit(raw, picks=picks)
    # After fitting, every run-time attribute must be present ...
    assert_true(all(hasattr(ica, name) for name in fitted_attrs))
    ica._reset()
    # ... and after a reset, none of them may remain.
    assert_true(not any(hasattr(ica, name) for name in fitted_attrs))
@requires_sklearn
def test_ica_core():
    """Test ICA on raw and epochs"""
    raw = io.Raw(raw_fname).crop(1.5, stop, False)
    raw.preload_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    # XXX. The None cases helped revealing bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # Parameter grid for the fits exercised below.
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = ['fastica']
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)
    # # test init catchers
    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
    assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method, max_iter=1)
        # __contains__ must refuse queries before fitting.
        assert_raises(ValueError, ica.__contains__, 'mag')
        print(ica)  # to test repr
        # test fit checker
        assert_raises(RuntimeError, ica.get_sources, raw)
        assert_raises(RuntimeError, ica.get_sources, epochs)
        # test decomposition
        with warnings.catch_warnings(record=True):  # convergence
            ica.fit(raw, picks=pcks, start=start, stop=stop)
            repr(ica)  # to test repr
        assert_true('mag' in ica)  # should now work without error
        # test re-fit: same random_state must reproduce the unmixing matrix
        unmixing1 = ica.unmixing_matrix_
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
        sources = ica.get_sources(raw)[:, :][0]
        assert_true(sources.shape[0] == ica.n_components_)
        # test preload filter: apply() must refuse non-preloaded data
        raw3 = raw.copy()
        raw3.preload = False
        assert_raises(ValueError, ica.apply, raw3,
                      include=[1, 2])
        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0)
        with warnings.catch_warnings(record=True):
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr
        sources = ica.get_sources(epochs).get_data()
        assert_true(sources.shape[1] == ica.n_components_)
        # score_sources must reject a target of the wrong length.
        assert_raises(ValueError, ica.score_sources, epochs,
                      target=np.arange(1))
        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        assert_raises(ValueError, ica.apply, epochs3,
                      include=[1, 2])
    # test for bug with whitener updating: apply(copy=True) must not
    # mutate the fitted pre-whitener even if the data changed.
    _pre_whitener = ica._pre_whitener.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs, copy=True)
    assert_array_equal(_pre_whitener, ica._pre_whitener)
    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    assert_raises(RuntimeError, ica.fit, epochs)
    # Any non-Raw/Epochs/Evoked input must be rejected.
    offender = 1, 2, 3,
    assert_raises(ValueError, ica.get_sources, offender)
    assert_raises(ValueError, ica.fit, offender)
    assert_raises(ValueError, ica.apply, offender)
@slow_test
@requires_sklearn
def test_ica_additional():
    """Test additional ICA functionality (I/O, scoring, artifact search)"""
    tempdir = _TempDir()
    stop2 = 500
    raw = io.Raw(raw_fname).crop(1.5, stop, False)
    raw.preload_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None, random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=True, exclude='bads')
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)

    test_cov2 = deepcopy(test_cov)
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, start=start, stop=stop2)
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_true(len(w) == 2)
    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    # BUG FIX: the original ``assert_true(raw_._data.shape[1], n_samples)``
    # passed ``n_samples`` as the assertion *message*, so the check could
    # never fail; assert the equality explicitly.
    # NOTE(review): fitting above uses ``raw`` while the length check is on
    # ``raw_`` -- possibly ``raw_`` was intended; confirm before changing.
    assert_equal(raw_._data.shape[1], n_samples)
    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)
    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        # exclude lists must survive a save/read round-trip
        for exclude in [[], [0]]:
            ica.exclude = [0]
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)

        # apply() with an explicit exclude must not alter ica.exclude
        ica.exclude = []
        ica.apply(raw, exclude=[1])
        assert_true(ica.exclude == [])

        ica.exclude = [0, 1]
        ica.apply(raw, exclude=[1])
        assert_true(ica.exclude == [0, 1])

        # excluded components appear as bads on the exported source Raw
        ica_raw = ica.get_sources(raw)
        assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                    ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.filter(4, 20)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.notch_filter([10])
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     '_pre_whitener']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        # read-back ICA must produce the same sources and cleaned data
        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
                      method='ctps')
        assert_raises(ValueError, ica.find_bads_ecg, raw,
                      method='crazy-coupling')

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)
        # mark a second channel as EOG (kind 202) -> scores becomes a list
        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])
    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])
    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = io.Raw(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)
@requires_sklearn
def test_run_ica():
    """Test run_ica function"""
    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
    # Criterion values (variance / kurtosis / skew indices) crossed with
    # the ECG / EOG channel choices.
    criteria = (None, -1, slice(2), [0, 1])
    channels = (None, 'MEG 1531')
    for idx, ch_name in product(criteria, channels):
        warnings.simplefilter('always')
        with warnings.catch_warnings(record=True):
            run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
                    stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
                    skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
def test_ica_reject_buffer():
    """Test ICA data raw buffer rejection"""
    tempdir = _TempDir()
    raw = io.Raw(raw_fname).crop(1.5, stop, False)
    raw.preload_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    # Plant an artifact so that one buffer exceeds the rejection threshold.
    raw._data[2, 1000:1005] = 5e-12
    drop_log = op.join(op.dirname(tempdir), 'ica_drop.log')
    set_log_file(drop_log, overwrite=True)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
                tstep=0.01, verbose=True)
    # The samples of the rejected buffer are excluded from the fit.
    n_decimated = raw._data[:5, ::2].shape[1]
    assert_true(n_decimated - 4 == ica.n_samples_)
    # Exactly one rejection must have been logged.
    with open(drop_log) as fid:
        detected = [line for line in fid if 'detected' in line]
    assert_equal(len(detected), 1)
@requires_sklearn
def test_ica_twice():
    """Test running ICA twice"""
    raw = io.Raw(raw_fname).crop(1.5, stop, False)
    raw.preload_data()
    picks = pick_types(raw.info, meg='grad', exclude='bads')
    # Fractional values select components by explained variance.
    n_components = 0.9
    max_pca_components = None
    n_pca_components = 1.1
    with warnings.catch_warnings(record=True):
        first = ICA(n_components=n_components,
                    max_pca_components=max_pca_components,
                    n_pca_components=n_pca_components, random_state=0)
        first.fit(raw, picks=picks, decim=3)
        cleaned = first.apply(raw, n_pca_components=n_pca_components)
        second = ICA(n_components=n_components,
                     max_pca_components=max_pca_components,
                     n_pca_components=1.0, random_state=0)
        second.fit(cleaned, picks=picks, decim=3)
        # Decomposing the cleaned data should yield the same dimensionality.
        assert_equal(first.n_components_, second.n_components_)
# Run this module's tests when executed directly (mne test-runner helper).
run_tests_if_main()
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUIKEPSK(NURESTObject):
    """ Represents a IKEPSK in the VSD

        Notes:
            Shared secret used during the authentication phase of IKE protocol.
    """

    # Names bambou uses to build the REST endpoints for this entity.
    __rest_name__ = "ikepsk"
    __resource_name__ = "ikepsks"

    ## Constants
    # Allowed values of the `entity_scope` attribute (see its choices below).
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """ Initializes a IKEPSK instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> ikepsk = NUIKEPSK(id=u'xxxx-xxx-xxx-xxx', name=u'IKEPSK')
                >>> ikepsk = NUIKEPSK(data=my_dict)
        """

        super(NUIKEPSK, self).__init__()

        # Read/Write Attributes
        self._name = None
        self._last_updated_by = None
        self._description = None
        self._signature = None
        self._signing_certificate_serial_number = None
        self._encrypted_psk = None
        self._encrypting_certificate_serial_number = None
        self._unencrypted_psk = None
        self._entity_scope = None
        self._associated_enterprise_id = None
        self._auto_created = None
        self._external_id = None

        # Declare how each local attribute maps to its remote (VSD API)
        # name and type; bambou uses these for (de)serialization.
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="signature", remote_name="signature", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="signing_certificate_serial_number", remote_name="signingCertificateSerialNumber", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="encrypted_psk", remote_name="encryptedPSK", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="encrypting_certificate_serial_number", remote_name="encryptingCertificateSerialNumber", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="unencrypted_psk", remote_name="unencryptedPSK", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="associated_enterprise_id", remote_name="associatedEnterpriseID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="auto_created", remote_name="autoCreated", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        # Child-object fetchers for the metadata attached to this entity.
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        # Apply constructor kwargs, including the special `data` dict.
        self._compute_args(**kwargs)

    # Properties

    @property
    def name(self):
        """ Get name value.

            Notes:
                Name of the Encryption Profile
        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.

            Notes:
                Name of the Encryption Profile
        """
        self._name = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def description(self):
        """ Get description value.

            Notes:
                Description of the IKEv2 Authentication
        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                Description of the IKEv2 Authentication
        """
        self._description = value

    @property
    def signature(self):
        """ Get signature value.

            Notes:
                Base64 Encoded private key signature
        """
        return self._signature

    @signature.setter
    def signature(self, value):
        """ Set signature value.

            Notes:
                Base64 Encoded private key signature
        """
        self._signature = value

    @property
    def signing_certificate_serial_number(self):
        """ Get signing_certificate_serial_number value.

            Notes:
                Serial Number of the certificate needed to verify the encrypted data

                This attribute is named `signingCertificateSerialNumber` in VSD API.
        """
        return self._signing_certificate_serial_number

    @signing_certificate_serial_number.setter
    def signing_certificate_serial_number(self, value):
        """ Set signing_certificate_serial_number value.

            Notes:
                Serial Number of the certificate needed to verify the encrypted data

                This attribute is named `signingCertificateSerialNumber` in VSD API.
        """
        self._signing_certificate_serial_number = value

    @property
    def encrypted_psk(self):
        """ Get encrypted_psk value.

            Notes:
                Base64 Encoded Encrypted PSK

                This attribute is named `encryptedPSK` in VSD API.
        """
        return self._encrypted_psk

    @encrypted_psk.setter
    def encrypted_psk(self, value):
        """ Set encrypted_psk value.

            Notes:
                Base64 Encoded Encrypted PSK

                This attribute is named `encryptedPSK` in VSD API.
        """
        self._encrypted_psk = value

    @property
    def encrypting_certificate_serial_number(self):
        """ Get encrypting_certificate_serial_number value.

            Notes:
                Serial Number of the certificate of the public key that encrypted this data

                This attribute is named `encryptingCertificateSerialNumber` in VSD API.
        """
        return self._encrypting_certificate_serial_number

    @encrypting_certificate_serial_number.setter
    def encrypting_certificate_serial_number(self, value):
        """ Set encrypting_certificate_serial_number value.

            Notes:
                Serial Number of the certificate of the public key that encrypted this data

                This attribute is named `encryptingCertificateSerialNumber` in VSD API.
        """
        self._encrypting_certificate_serial_number = value

    @property
    def unencrypted_psk(self):
        """ Get unencrypted_psk value.

            Notes:
                Unencrypted PSK

                This attribute is named `unencryptedPSK` in VSD API.
        """
        return self._unencrypted_psk

    @unencrypted_psk.setter
    def unencrypted_psk(self, value):
        """ Set unencrypted_psk value.

            Notes:
                Unencrypted PSK

                This attribute is named `unencryptedPSK` in VSD API.
        """
        self._unencrypted_psk = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def associated_enterprise_id(self):
        """ Get associated_enterprise_id value.

            Notes:
                The ID of the associated Enterprise

                This attribute is named `associatedEnterpriseID` in VSD API.
        """
        return self._associated_enterprise_id

    @associated_enterprise_id.setter
    def associated_enterprise_id(self, value):
        """ Set associated_enterprise_id value.

            Notes:
                The ID of the associated Enterprise

                This attribute is named `associatedEnterpriseID` in VSD API.
        """
        self._associated_enterprise_id = value

    @property
    def auto_created(self):
        """ Get auto_created value.

            Notes:
                Was this object autocreated from the connection

                This attribute is named `autoCreated` in VSD API.
        """
        return self._auto_created

    @auto_created.setter
    def auto_created(self, value):
        """ Set auto_created value.

            Notes:
                Was this object autocreated from the connection

                This attribute is named `autoCreated` in VSD API.
        """
        self._auto_created = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.protocols.haproxy.V2Parser}.
"""
from twisted.trial import unittest
from twisted.internet import address
from .._exceptions import InvalidProxyHeader
from .. import _v2parser
V2_SIGNATURE = b'\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A'
def _makeHeaderIPv6(sig=V2_SIGNATURE, verCom=b'\x21', famProto=b'\x21',
                    addrLength=b'\x00\x24',
                    addrs=((b'\x00' * 15) + b'\x01') * 2,
                    ports=b'\x1F\x90\x22\xB8'):
    """
    Assemble a PROXY protocol version 2 IPv6 header from raw byte fields.

    The defaults produce a valid V2 PROXY/TCP6 header describing a
    connection from [::1]:8080 to [::1]:8888.

    @param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
    @type sig: L{bytes}
    @param verCom: Protocol version and command. Defaults to V2 PROXY.
    @type verCom: L{bytes}
    @param famProto: Address family and protocol. Defaults to AF_INET6/STREAM.
    @type famProto: L{bytes}
    @param addrLength: Network-endian byte length of payload. Defaults to
        description of default addrs/ports.
    @type addrLength: L{bytes}
    @param addrs: Address payload. Defaults to C{::1} for source and
        destination.
    @type addrs: L{bytes}
    @param ports: Source and destination ports. Defaults to 8080 for source
        8888 for destination.
    @type ports: L{bytes}
    @return: A packet with header, addresses, and ports.
    @rtype: L{bytes}
    """
    fields = (sig, verCom, famProto, addrLength, addrs, ports)
    return b''.join(fields)
def _makeHeaderIPv4(sig=V2_SIGNATURE, verCom=b'\x21', famProto=b'\x11',
                    addrLength=b'\x00\x0C',
                    addrs=b'\x7F\x00\x00\x01\x7F\x00\x00\x01',
                    ports=b'\x1F\x90\x22\xB8'):
    """
    Assemble a PROXY protocol version 2 IPv4 header from raw byte fields.

    The defaults produce a valid V2 PROXY/TCP4 header describing a
    connection from 127.0.0.1:8080 to 127.0.0.1:8888.

    @param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
    @type sig: L{bytes}
    @param verCom: Protocol version and command. Defaults to V2 PROXY.
    @type verCom: L{bytes}
    @param famProto: Address family and protocol. Defaults to AF_INET/STREAM.
    @type famProto: L{bytes}
    @param addrLength: Network-endian byte length of payload. Defaults to
        description of default addrs/ports.
    @type addrLength: L{bytes}
    @param addrs: Address payload. Defaults to 127.0.0.1 for source and
        destination.
    @type addrs: L{bytes}
    @param ports: Source and destination ports. Defaults to 8080 for source
        8888 for destination.
    @type ports: L{bytes}
    @return: A packet with header, addresses, and ports.
    @rtype: L{bytes}
    """
    fields = (sig, verCom, famProto, addrLength, addrs, ports)
    return b''.join(fields)
def _makeHeaderUnix(sig=V2_SIGNATURE, verCom=b'\x21', famProto=b'\x31',
                    addrLength=b'\x00\xD8',
                    addrs=(b'\x2F\x68\x6F\x6D\x65\x2F\x74\x65\x73\x74\x73\x2F'
                           b'\x6D\x79\x73\x6F\x63\x6B\x65\x74\x73\x2F\x73\x6F'
                           b'\x63\x6B' + (b'\x00' * 82)) * 2):
    """
    Construct a version 2 UNIX socket header with custom bytes.

    @param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
    @type sig: L{bytes}
    @param verCom: Protocol version and command. Defaults to V2 PROXY.
    @type verCom: L{bytes}
    @param famProto: Address family and protocol. Defaults to AF_UNIX/STREAM.
    @type famProto: L{bytes}
    @param addrLength: Network-endian byte length of payload. Defaults to 216
        bytes: two 108-byte null-padded socket paths.
    @type addrLength: L{bytes}
    @param addrs: Address payload. Defaults to C{/home/tests/mysockets/sock}
        for source and destination paths.
    @type addrs: L{bytes}
    @return: A packet with header and addresses. UNIX headers carry no
        port fields.
    @rtype: L{bytes}
    """
    return sig + verCom + famProto + addrLength + addrs
class V2ParserTests(unittest.TestCase):
    """
    Tests for L{twisted.protocols.haproxy.V2Parser} behaviour.
    """

    def test_happyPathIPv4(self):
        """
        A well-formed IPv4 header parses without error.
        """
        self.assertTrue(_v2parser.V2Parser.parse(_makeHeaderIPv4()))

    def test_happyPathIPv6(self):
        """
        A well-formed IPv6 header parses without error.
        """
        self.assertTrue(_v2parser.V2Parser.parse(_makeHeaderIPv6()))

    def test_happyPathUnix(self):
        """
        A well-formed UNIX header parses without error.
        """
        self.assertTrue(_v2parser.V2Parser.parse(_makeHeaderUnix()))

    def test_invalidSignature(self):
        """
        An invalid signature block raises InvalidProxyHeader.
        """
        packet = _makeHeaderIPv4(sig=b'\x00' * 12)
        self.assertRaises(
            InvalidProxyHeader, _v2parser.V2Parser.parse, packet)

    def test_invalidVersion(self):
        """
        An invalid version raises InvalidProxyHeader.
        """
        packet = _makeHeaderIPv4(verCom=b'\x11')
        self.assertRaises(
            InvalidProxyHeader, _v2parser.V2Parser.parse, packet)

    def test_invalidCommand(self):
        """
        An invalid command raises InvalidProxyHeader.
        """
        packet = _makeHeaderIPv4(verCom=b'\x23')
        self.assertRaises(
            InvalidProxyHeader, _v2parser.V2Parser.parse, packet)

    def test_invalidFamily(self):
        """
        An invalid family raises InvalidProxyHeader.
        """
        packet = _makeHeaderIPv4(famProto=b'\x40')
        self.assertRaises(
            InvalidProxyHeader, _v2parser.V2Parser.parse, packet)

    def test_invalidProto(self):
        """
        An invalid protocol raises InvalidProxyHeader.
        """
        packet = _makeHeaderIPv4(famProto=b'\x24')
        self.assertRaises(
            InvalidProxyHeader, _v2parser.V2Parser.parse, packet)

    def test_localCommandIpv4(self):
        """
        The LOCAL command yields no endpoint data for IPv4 connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv4(verCom=b'\x20'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_localCommandIpv6(self):
        """
        The LOCAL command yields no endpoint data for IPv6 connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv6(verCom=b'\x20'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_localCommandUnix(self):
        """
        The LOCAL command yields no endpoint data for UNIX connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderUnix(verCom=b'\x20'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_proxyCommandIpv4(self):
        """
        The PROXY command yields IPv4 endpoint addresses.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv4(verCom=b'\x21'))
        self.assertTrue(result.source)
        self.assertIsInstance(result.source, address.IPv4Address)
        self.assertTrue(result.destination)
        self.assertIsInstance(result.destination, address.IPv4Address)

    def test_proxyCommandIpv6(self):
        """
        The PROXY command yields IPv6 endpoint addresses.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv6(verCom=b'\x21'))
        self.assertTrue(result.source)
        self.assertIsInstance(result.source, address.IPv6Address)
        self.assertTrue(result.destination)
        self.assertIsInstance(result.destination, address.IPv6Address)

    def test_proxyCommandUnix(self):
        """
        The PROXY command yields UNIX endpoint addresses.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderUnix(verCom=b'\x21'))
        self.assertTrue(result.source)
        self.assertIsInstance(result.source, address.UNIXAddress)
        self.assertTrue(result.destination)
        self.assertIsInstance(result.destination, address.UNIXAddress)

    def test_unspecFamilyIpv4(self):
        """
        An UNSPEC family yields no endpoint data for IPv4 connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv4(famProto=b'\x01'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_unspecFamilyIpv6(self):
        """
        An UNSPEC family yields no endpoint data for IPv6 connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv6(famProto=b'\x01'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_unspecFamilyUnix(self):
        """
        An UNSPEC family yields no endpoint data for UNIX connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderUnix(famProto=b'\x01'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_unspecProtoIpv4(self):
        """
        An UNSPEC protocol yields no endpoint data for IPv4 connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv4(famProto=b'\x10'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_unspecProtoIpv6(self):
        """
        An UNSPEC protocol yields no endpoint data for IPv6 connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderIPv6(famProto=b'\x20'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_unspecProtoUnix(self):
        """
        An UNSPEC protocol yields no endpoint data for UNIX connections.
        """
        result = _v2parser.V2Parser.parse(_makeHeaderUnix(famProto=b'\x30'))
        self.assertFalse(result.source)
        self.assertFalse(result.destination)

    def test_overflowIpv4(self):
        """
        Bytes trailing an IPv4 header are preserved as overflow by feed().
        """
        trailer = b'TEST DATA\r\n\r\nTEST DATA'
        parser = _v2parser.V2Parser()
        result, remaining = parser.feed(_makeHeaderIPv4() + trailer)
        self.assertTrue(result)
        self.assertEqual(remaining, trailer)

    def test_overflowIpv6(self):
        """
        Bytes trailing an IPv6 header are preserved as overflow by feed().
        """
        trailer = b'TEST DATA\r\n\r\nTEST DATA'
        parser = _v2parser.V2Parser()
        result, remaining = parser.feed(_makeHeaderIPv6() + trailer)
        self.assertTrue(result)
        self.assertEqual(remaining, trailer)

    def test_overflowUnix(self):
        """
        Bytes trailing a UNIX header are preserved as overflow by feed().
        """
        trailer = b'TEST DATA\r\n\r\nTEST DATA'
        parser = _v2parser.V2Parser()
        result, remaining = parser.feed(_makeHeaderUnix() + trailer)
        self.assertTrue(result)
        self.assertEqual(remaining, trailer)

    def test_segmentTooSmall(self):
        """
        Feeding an initial payload of fewer than 16 bytes fails.
        """
        parser = _v2parser.V2Parser()
        self.assertRaises(
            InvalidProxyHeader, parser.feed, b'NEEDMOREDATA')
| |
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import uuid
import pika
import requests
import time
import urllib
from contextlib import contextmanager
from functools import wraps
from celery import Celery
from multiprocessing import Process
from cloudify.exceptions import NonRecoverableError
from cloudify.utils import setup_logger
from cloudify_rest_client import CloudifyClient
from cloudify_rest_client.executions import Execution
from os import path
from testenv.processes.manager_rest import MANAGER_REST_PORT
# Provider context pushed to the manager for each test environment:
# task retries are disabled so workflow failures surface immediately.
PROVIDER_CONTEXT = {
    'cloudify': {
        'workflows': {
            'task_retries': 0,
            'task_retry_interval': 0
        }
    }
}
PROVIDER_NAME = 'integration_tests'
# Celery client used to send tasks to agent workers via the local AMQP broker.
celery = Celery(broker='amqp://',
                backend='amqp://')
celery.conf.update(
    CELERY_TASK_SERIALIZER="json"
)
# Module-level logger for these test-environment utilities.
logger = setup_logger('testenv.utils')
def task_exists(name, *args):
    """Mock task-existence check used by tests.

    Returns True for any task name that does not contain 'non_existent';
    raises NonRecoverableError otherwise.
    """
    logger.info('task_exists invoked with : {0}'
                .format(args))
    if 'non_existent' not in name:
        return True
    logger.info('non_existent operation, raising NonRecoverableError')
    raise NonRecoverableError('non_existent operation [{0}]'.format(name))
def deploy_application(dsl_path,
                       timeout_seconds=30,
                       blueprint_id=None,
                       deployment_id=None,
                       wait_for_execution=True,
                       inputs=None):
    """
    A blocking method which deploys an application from the provided dsl
    path by running the 'install' workflow on the new deployment.
    """
    return deploy_and_execute_workflow(
        dsl_path=dsl_path,
        workflow_name='install',
        timeout_seconds=timeout_seconds,
        blueprint_id=blueprint_id,
        deployment_id=deployment_id,
        wait_for_execution=wait_for_execution,
        inputs=inputs)
def deploy(dsl_path, blueprint_id=None, deployment_id=None, inputs=None):
    """Upload the blueprint at ``dsl_path`` and create a deployment for it.

    Blocks until the deployment environment has been created. IDs default
    to fresh UUIDs when not supplied.
    """
    client = create_rest_client()
    blueprint = client.blueprints.upload(
        dsl_path, blueprint_id or str(uuid.uuid4()))
    if deployment_id is None:
        deployment_id = str(uuid.uuid4())
    deployment = client.deployments.create(
        blueprint.id, deployment_id, inputs=inputs)
    wait_for_deployment_creation_to_complete(deployment_id=deployment_id)
    return deployment
def wait_for_deployment_creation_to_complete(
        deployment_id, timeout_seconds=30):
    """Block until the deployment environment for ``deployment_id`` is ready."""
    do_retries(
        func=verify_deployment_environment_creation_complete,
        deployment_id=deployment_id,
        timeout_seconds=timeout_seconds)
def deploy_and_execute_workflow(dsl_path,
                                workflow_name,
                                timeout_seconds=240,
                                blueprint_id=None,
                                deployment_id=None,
                                wait_for_execution=True,
                                parameters=None,
                                inputs=None):
    """
    A blocking method which deploys an application from the provided dsl
    path and then runs ``workflow_name`` on the new deployment.

    :return: a ``(deployment, execution_id)`` tuple.
    """
    deployment = deploy(dsl_path, blueprint_id, deployment_id, inputs)
    execution = execute_workflow(
        workflow_name, deployment.id, parameters, timeout_seconds,
        wait_for_execution)
    return deployment, execution.id
def execute_workflow(workflow_name, deployment_id,
                     parameters=None,
                     timeout_seconds=240,
                     wait_for_execution=True):
    """
    Start ``workflow_name`` on ``deployment_id`` and, unless
    ``wait_for_execution`` is False, block until it reaches an end state.
    """
    client = create_rest_client()
    execution = client.executions.start(
        deployment_id, workflow_name, parameters=parameters or {})
    if wait_for_execution:
        wait_for_execution_to_end(execution,
                                  timeout_seconds=timeout_seconds)
    return execution
def verify_deployment_environment_creation_complete(deployment_id):
    """
    Raise RuntimeError unless the 'create_deployment_environment' workflow
    for ``deployment_id`` has terminated successfully.

    This is a workaround for waiting for the deployment environment
    creation to complete; the error message includes the tail of the
    celery management logs to ease debugging.
    """
    client = create_rest_client()
    execs = client.executions.list(deployment_id)
    complete = (execs and
                execs[0].status == Execution.TERMINATED and
                execs[0].workflow_id == 'create_deployment_environment')
    if complete:
        return
    from testenv import TestEnvironment  # avoid cyclic import
    logs = TestEnvironment.read_celery_management_logs() or ''
    logs = logs[len(logs) - 100000:]  # keep only the tail of the logs
    raise RuntimeError(
        "Expected a single execution for workflow "
        "'create_deployment_environment' with status 'terminated'; "
        "Found these executions instead: {0}.\nCelery log:\n{1}".format(
            json.dumps(execs, indent=2), logs))
def undeploy_application(deployment_id,
                         timeout_seconds=240,
                         delete_deployment=False):
    """
    A blocking method which runs the 'uninstall' workflow for
    ``deployment_id`` and optionally deletes the deployment afterwards.
    """
    client = create_rest_client()
    execution = client.executions.start(deployment_id, 'uninstall')
    wait_for_execution_to_end(execution, timeout_seconds=timeout_seconds)
    if execution.error and execution.error != 'None':
        raise RuntimeError(
            'Workflow execution failed: {0}'.format(execution.error))
    if delete_deployment:
        # give elasticsearch a moment before deleting the deployment
        time.sleep(5)
        client.deployments.delete(deployment_id)
def is_node_started(node_id):
    """Return True when node instance ``node_id`` reports state 'started'."""
    instance = create_rest_client().node_instances.get(node_id)
    return instance['state'] == 'started'
def create_rest_client():
    """Build a REST client pointed at the local manager REST service."""
    return CloudifyClient(host='localhost', port=MANAGER_REST_PORT)
def get_resource(resource):
    """
    Resolve a resource name to its absolute path.

    :param resource: resource name relative to /resources.
    :raises RuntimeError: if the resource file does not exist.
    """
    import resources
    base_dir = path.dirname(resources.__file__)
    full_path = path.join(base_dir, resource)
    if path.exists(full_path):
        return full_path
    raise RuntimeError("Resource '{0}' not found in: {1}".format(
        resource, full_path))
def wait_for_execution_to_end(execution, timeout_seconds=240):
    """
    Poll the manager until ``execution`` reaches an end state.

    :raises TimeoutException: if no end state is reached in time.
    :raises RuntimeError: if the execution ends in the FAILED state.
    """
    client = create_rest_client()
    give_up_at = time.time() + timeout_seconds
    while execution.status not in Execution.END_STATES:
        time.sleep(0.5)
        execution = client.executions.get(execution.id)
        if time.time() > give_up_at:
            raise TimeoutException(
                'Execution timed out: \n{0}'.format(
                    json.dumps(execution, indent=2)))
    if execution.status == Execution.FAILED:
        raise RuntimeError(
            'Workflow execution failed: {0} [{1}]'.format(
                execution.error, execution.status))
    return execution
def do_retries(func,
               timeout_seconds=10,
               exception_class=BaseException,
               **kwargs):
    """
    Call ``func(**kwargs)`` repeatedly until it returns without raising
    ``exception_class``, retrying every 0.5s. Once ``timeout_seconds``
    has elapsed the last exception propagates.
    """
    deadline = time.time() + timeout_seconds
    while True:
        try:
            func(**kwargs)
        except exception_class:
            if time.time() > deadline:
                raise
            time.sleep(0.5)
        else:
            return
def do_retries_boolean(func, timeout_seconds=10, **kwargs):
    """
    Call ``func(**kwargs)`` once a second until it returns a truthy value.

    :raises RuntimeError: if the deadline passes while the result is falsy.
    """
    deadline = time.time() + timeout_seconds
    while not func(**kwargs):
        if time.time() > deadline:
            raise RuntimeError(
                'function {0} did not return True in {1} seconds'
                .format(func.__name__, timeout_seconds)
            )
        time.sleep(1)
def timeout(seconds=60):
    """
    Decorator factory: run the wrapped function in a child process and
    raise TimeoutException if it is still alive after ``seconds``.

    Note the child is terminated on timeout, so any partial side effects
    in the parent process are not rolled back.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            worker = Process(None, func, None, args, kwargs)
            worker.start()
            worker.join(seconds)
            if not worker.is_alive():
                return
            worker.terminate()
            raise TimeoutException(
                'test timeout exceeded [timeout={0}'.format(seconds))
        return wraps(func)(wrapper)
    return decorator
def send_task(task, queue, args=None):
    """Send ``task`` to ``queue`` via celery, stripping the mock_plugins
    package prefix from its name first."""
    task_name = task.name.replace('mock_plugins.', '')
    return celery.send_task(name=task_name,
                            args=args,
                            queue=queue)
def publish_event(queue,
                  routing_key,
                  event,
                  exchange_name='cloudify-monitoring',
                  exchange_type='topic'):
    """
    Publish ``event`` (JSON-serialized) to a rabbitmq exchange on
    localhost, declaring the exchange/queue and binding them first.

    The connection is now closed in a ``finally`` block: the original
    leaked the AMQP socket whenever a declare/bind/publish call raised.
    """
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost'))
    try:
        channel = connection.channel()
        channel.exchange_declare(exchange=exchange_name,
                                 type=exchange_type,
                                 durable=False,
                                 auto_delete=True,
                                 internal=False)
        channel.queue_declare(
            queue=queue,
            auto_delete=True,
            durable=False,
            exclusive=False)
        channel.queue_bind(exchange=exchange_name,
                           queue=queue,
                           routing_key=routing_key)
        channel.basic_publish(exchange=exchange_name,
                              routing_key=routing_key,
                              body=json.dumps(event))
        channel.close()
    finally:
        connection.close()
def delete_provider_context():
    """Remove the stored provider context document from elasticsearch."""
    url = ('http://localhost:9200'
           '/cloudify_storage/provider_context/CONTEXT')
    requests.delete(url)
def restore_provider_context():
    """Reset the manager's provider context to the test defaults."""
    delete_provider_context()
    create_rest_client().manager.create_context(PROVIDER_NAME,
                                                PROVIDER_CONTEXT)
def wait_for_url(url, timeout=15):
    """
    Poll ``url`` until it answers HTTP 200, sleeping a second after each
    connection error.

    :raises RuntimeError: if it is still unavailable after ``timeout`` seconds.
    """
    deadline = time.time() + timeout
    while time.time() <= deadline:
        try:
            if urllib.urlopen(url).getcode() == 200:
                return
        except IOError:
            time.sleep(1)
    raise RuntimeError('Url {0} is not available (waited {1} '
                       'seconds)'.format(url, timeout))
def timestamp():
    """Return the current local time (strftime '%c') with spaces dashed,
    suitable for use in file names."""
    return time.strftime("%c").replace(' ', '-')
@contextmanager
def update_storage(ctx):
    """
    A context manager for updating plugin state.

    Yields the mutable per-deployment dict from the plugin's JSON storage
    file; mutations made by the caller are written back on exit.

    :param ctx: task invocation context
    """
    deployment_id = ctx.deployment.id
    plugin_name = ctx.plugin
    if plugin_name is None:
        # hack for tasks that are executed locally.
        # TODO - Aren't these tasks also a part of a plugin?
        # TODO - the ctx in this case should include the plugin name
        # TODO - as if it was a remote task.
        if ctx.task_name.startswith('worker_installer'):
            plugin_name = 'agent_installer'
        if ctx.task_name.startswith('plugin_installer'):
            plugin_name = 'plugin_installer'
    storage_file_path = os.path.join(
        os.environ['TEST_WORKING_DIR'],
        'plugins-storage',
        '{0}.json'.format(plugin_name)
    )
    # Create an empty storage file on first use. Fix: the original left
    # this handle open (fd leak) and possibly unflushed, so the read
    # immediately below could see an empty file; 'with' guarantees the
    # '{}' is flushed and the handle closed.
    if not os.path.exists(storage_file_path):
        with open(storage_file_path, 'w') as f:
            json.dump({}, f)
    with open(storage_file_path, 'r') as f:
        data = json.load(f)
    if deployment_id not in data:
        data[deployment_id] = {}
    yield data.get(deployment_id)
    with open(storage_file_path, 'w') as f:
        json.dump(data, f, indent=2)
        f.write(os.linesep)
class TimeoutException(Exception):
    """Raised when a blocking test-environment operation exceeds its timeout."""

    def __init__(self, message):
        # Store the message explicitly: Exception no longer provides a
        # `.message` attribute on Python 3, so the original __str__
        # raised AttributeError the moment the exception was printed.
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        return self.message
| |
from doom import wad
from doom.map.data import MapData
from nav.config import Config
from nav.mesh import Mesh
from navedit import pathfind
from util.vector import Vector2, Vector3
import cProfile
import camera
import pygame
import random
import render
import sys
# Display colors (RGBA).
COLOR_BACKGROUND = pygame.Color(0, 31, 63, 255)
COLOR_COLLISION_BOX = pygame.Color(127, 127, 127, 255)
COLOR_COLLISION_BOX_COLLIDE = pygame.Color(255, 255, 0, 255)
COLOR_TEXT = pygame.Color(255, 255, 255, 255)
# Viewer mode identifiers (not referenced elsewhere in this chunk).
MODE_INSPECT = 0
MODE_RENDER = 1
class Mouse(object):
    """Tracks mouse button state plus screen-space and map-space positions."""

    def __init__(self):
        # pygame button indices are 1-based, so reserve 6 slots.
        self.buttons = [False for _ in range(6)]
        self.pos = Vector2()
        self.map_pos = Vector2()
class Loop(object):
def __init__(self):
self.screen = None
self.camera = None
self.map_data = None
self.config = None
self.nav_grid = None
self.nav_mesh = None
self.pathfinder = None
self.path = None
self.point_start = None
self.point_end = None
pygame.font.init()
self.font = pygame.font.Font('04b_03__.ttf', 8)
self.mouse = Mouse()
self.keys = [False] * 512
    def loop_init(self):
        """Load the test map and its navigation mesh, then create the
        pygame display and the pathfinder. Returns True on completion."""
        # Hardcoded test assets.
        wad_file = 'test/dv.wad'
        map_lump = 'MAP05'
        mesh_file = 'test/dv_map05.dpm'
        configuration = None
        print 'Loading map...'
        wad_file = wad.WADReader(wad_file)
        self.map_data = MapData(wad_file, map_lump)
        # Load dataset for map.
        # Hexen-format maps use the 'zdoom' configuration, vanilla
        # format maps use 'doom'.
        if configuration == None:
            if self.map_data.is_hexen:
                configuration = 'zdoom'
            else:
                configuration = 'doom'
        print 'Loading {} configuration...'.format(configuration)
        self.config = Config('doompath.json', configuration)
        print 'Map setup...'
        self.map_data.setup(self.config)
        #print 'Creating navigation grid...'
        #self.nav_grid = Grid()
        print 'Reading navigation mesh...'
        self.nav_mesh = Mesh()
        self.nav_mesh.read(mesh_file, self.map_data)
        # Associate mesh areas with blockmap cells and drop empty cells.
        self.map_data.blockmap.generate_areas(self.nav_mesh)
        self.map_data.blockmap.prune_empty()
        print 'Creating display...'
        pygame.init()
        self.screen = pygame.display.set_mode((1280, 720))
        self.camera = camera.Camera(0, 0, 1280, 720, 1.0)
        self.center_map()
        #render.render_grid_init(self.nav_grid)
        self.pathfinder = pathfind.Pathfinder(self.nav_mesh)
        return True
    def benchmark_pathfinder(self):
        """Run 5000 pathfinding queries between random points (fixed RNG
        seed for reproducibility) and print per-query statistics."""
        random.seed(1751987)
        start = Vector3()
        end = Vector3()
        for _ in range(5000):
            # Random start/end inside the map bounding box, snapped to
            # the floor height at that position.
            start.x = random.randint(self.map_data.min.x, self.map_data.max.x)
            start.y = random.randint(self.map_data.min.y, self.map_data.max.y)
            start.z = self.map_data.get_floor_z(start.x, start.y)
            end.x = random.randint(self.map_data.min.x, self.map_data.max.x)
            end.y = random.randint(self.map_data.min.y, self.map_data.max.y)
            end.z = self.map_data.get_floor_z(end.x, end.y)
            path = self.pathfinder.find(start, end)
            if path is not None and self.pathfinder.nodes_visited > 0:
                # Efficiency: percentage of visited areas that ended up
                # on the final path.
                efficiency = round((len(path) / float(self.pathfinder.nodes_visited)) * 100, 1)
                print 'Visited {} areas, path is {} areas. {} distance. {}% efficiency.'.format(self.pathfinder.nodes_visited, len(path), self.pathfinder.distance, efficiency)
def loop_start(self):
update_display = True
while True:
event = pygame.event.wait()
if event.type == pygame.QUIT or self.keys[pygame.K_ESCAPE] == True:
break
elif event.type == pygame.MOUSEBUTTONDOWN:
self.mouse.buttons[event.button] = True
elif event.type == pygame.MOUSEBUTTONUP:
self.mouse.buttons[event.button] = False
if event.button == 1:
self.place_path_point(self.mouse.map_pos.x, self.mouse.map_pos.y)
update_display = True
elif event.type == pygame.MOUSEMOTION:
self.mouse.pos.x = event.pos[0]
self.mouse.pos.y = event.pos[1]
self.mouse.map_pos.x, self.mouse.map_pos.y = self.camera.screen_to_map(event.pos[0], event.pos[1])
self.mouse.map_pos.x = int(self.mouse.map_pos.x)
self.mouse.map_pos.y = int(self.mouse.map_pos.y)
update_display = True
if self.mouse.buttons[3] == True:
self.camera.move_relative(event.rel[0] / self.camera.zoom, event.rel[1] / self.camera.zoom)
update_display = True
elif event.type == pygame.KEYDOWN:
self.keys[event.key] = True
elif event.type == pygame.KEYUP:
self.keys[event.key] = False
if self.mouse.buttons[4] == True:
self.camera.set_zoom(self.camera.zoom / 0.92)
#self.camera.set_center(self.mouse.map_x, self.mouse.map_y)
update_display = True
elif self.mouse.buttons[5] == True:
self.camera.set_zoom(self.camera.zoom * 0.92)
#self.camera.set_center(self.mouse.map_x, self.mouse.map_y)
update_display = True
if update_display == True:
self.update_display()
update_display = False
    def place_path_point(self, x, y):
        """Place a path endpoint at map position (x, y): the first click
        sets the start, the second sets the end and runs the search."""
        z = self.map_data.get_floor_z(x, y)
        if self.point_start is None or self.point_end is not None:
            # No start yet, or a previous search is complete: start over.
            self.point_start = Vector3(x, y, z)
            self.point_end = None
        elif self.point_end is None:
            self.point_end = Vector3(x, y, z)
            # Clear per-area debug flags left over from the last search.
            for area in self.nav_mesh.areas:
                area.path = False
                area.visited = False
            self.path = self.pathfinder.find(self.point_start, self.point_end)
            if self.path is None:
                print 'No path could be found.'
            else:
                # Efficiency: percentage of visited areas on the final path.
                efficiency = round((len(self.path) / float(self.pathfinder.nodes_visited)) * 100, 1)
                print 'Visited {} areas, path is {} areas. {} distance. {}% efficiency.'.format(self.pathfinder.nodes_visited, len(self.path), self.pathfinder.distance, efficiency)
def update_display(self):
    """Redraw the whole frame: map, nav mesh, debug text, then the
    current path and its start/end markers, and finally flip the display."""
    sector = -1
    state = None
    areas = None
    connections = None
    elements = None
    # Alternative debug data sources, kept disabled:
    #sector = self.map_data.get_sector(self.mouse.map_pos.x, self.mouse.map_pos.y)
    self.screen.fill(COLOR_BACKGROUND)
    #elements = render.render_nav(self.nav_grid, self.screen, self.camera, self.mouse.map_pos)
    render.render_map(self.map_data, self.screen, self.camera, self.config, sector)
    # render_mesh returns the areas/connections under the mouse for the overlay.
    areas, connections = render.render_mesh(self.nav_mesh, self.map_data, self.screen, self.camera, self.mouse.map_pos)
    #state = self.render_collision_box()
    self.render_debug_text(connections, state, elements, areas)
    # Path and endpoint markers are drawn last so they sit on top.
    render.draw_connection_path(self.screen, self.camera, self.point_start, self.point_end, self.path)
    render.draw_point(self.screen, self.camera, self.point_start)
    render.draw_point(self.screen, self.camera, self.point_end)
    pygame.display.flip()
def render_collision_box(self):
    """Draw the player's collision box at the mouse position.

    The box color changes when the collider reports a collision.
    Returns the collider state object for the debug overlay.
    """
    map_x = self.mouse.map_pos.x
    map_y = self.mouse.map_pos.y
    map_z = self.map_data.get_floor_z(map_x, map_y)
    collision, state = self.nav_grid.collider.check_position(
        Vector3(map_x, map_y, map_z),
        self.config.player_radius,
        self.config.player_height,
    )
    color = COLOR_COLLISION_BOX if collision == False else COLOR_COLLISION_BOX_COLLIDE
    # Top-left corner of the box in map space, projected to the screen.
    screen_x, screen_y = self.camera.map_to_screen(
        map_x - self.config.player_radius,
        map_y - self.config.player_radius,
    )
    box_size = (self.config.player_radius * 2) * self.camera.zoom
    pygame.draw.rect(self.screen, color, pygame.Rect((screen_x, screen_y), (box_size, box_size)), 1)
    return state
def render_debug_text(self, connections, state, elements, areas):
    """Render the debug overlay text in the top-left corner of the screen."""
    # Mouse position in map coordinates on the first line.
    self.render_text('{}, {}'.format(self.mouse.map_pos.x, self.mouse.map_pos.y), 4, 4)
    x = 4
    y = 46
    if state is not None:
        line = 'floor z: {}, ceil z: {}, block line: {}, block thing: {}, special sector {}'.format(
            round(state.floorz, 2), round(state.ceilz, 2),
            state.blockline, state.blockthing, state.special_sector)
        self.render_text(line, 4, 20)
        y += 18
    # Each debug collection prints one item per line, moving down the screen.
    for collection in (elements, connections, areas):
        if collection is None:
            continue
        for item in collection:
            self.render_text(str(item), x, y)
            y += 18
def render_text(self, text, x, y):
    """Draw text at screen position (x, y), scaled up 2x for readability."""
    rendered = self.font.render(text, 0, COLOR_TEXT)
    doubled = pygame.transform.scale(
        rendered, (rendered.get_width() * 2, rendered.get_height() * 2))
    self.screen.blit(doubled, (x, y))
def center_map(self):
    """Center the camera on the map and zoom so the map fits the display.

    The zoom factor fits the map's largest dimension into the smaller
    display dimension, with a small margin.
    """
    largest_dimension = max(self.map_data.size.x, self.map_data.size.y)
    display_size = min(1280, 720)  # smaller of the two window dimensions
    fit_zoom = float(display_size) / float(largest_dimension) - 0.005
    center_x = self.map_data.min.x + self.map_data.size.x / 2
    center_y = self.map_data.min.y + self.map_data.size.y / 2
    self.camera.set_zoom(fit_zoom)
    self.camera.set_center(center_x, center_y)
if __name__ == '__main__':
    # Entry point: create the viewer loop and bail out if initialization fails.
    loop = Loop()
    if loop.loop_init() == False:
        sys.exit()
    # Profiling hook, kept for performance investigations:
    #cProfile.run('loop.benchmark_pathfinder()', sort=1)
    loop.loop_start()
| |
import numpy
from .core import get_constant
class Data:
    """
    Provides an interface to load data from files into
    [`numpy.ndarray`][1] objects.

    Example:

        #!python
        >>> data = Data()
        >>> data.path = 'path/to/data.csv'
        >>> data.scale = (1, 'kilo')
        >>> data.array

    [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
    """

    def __init__(self, name=None):
        # The identifier name for this object.
        self.name = name

    @property
    def name(self):
        """The identifier name for this object."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def path(self):
        """Path to the file containing data to load."""
        return self._path

    @path.setter
    def path(self, value):
        self._path = value

    @property
    def array(self):
        """
        Data as a [`numpy.ndarray`][1] in the form

            #!python
            [
                [ x1, x2, x3, ... ],
                [ y1, y2, y3, ... ]
            ]

        By default, if unset, this will be set on first access
        by calling `scipy_data_fitting.Data.load_data`.

        When loaded from file, the x and y values will be scaled according
        to `scipy_data_fitting.Data.scale`.

        [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
        """
        if not hasattr(self, '_array'):
            self._array = self.load_data()
        return self._array

    @array.setter
    def array(self, value):
        self._array = value

    @property
    def error(self):
        """
        Error associated with the data as a two element tuple `(x_error, y_error)`.
        Both `x_error` and `y_error` is a [`numpy.ndarray`][1] or `None`.

        The array dimensions depend on how the error is specified:

        1. Symmetric constant error: a zero dimensional array `error`.
        2. Asymmetric constant error: a one dimensional array
           `[lower_error, upper_error]`.
        3. Symmetric error varying per point: an array with length equal to the
           number of points; each element a zero dimensional array.
        4. Asymmetric error varying per point: an array with length equal to the
           number of points; each element `[lower_error, upper_error]`.

        This property can be set manually; for constant errors (cases 1 and 2)
        plain numbers and lists are converted to [`numpy.ndarray`][1] automatically.

        For per-point error (cases 3 and 4), errors are loaded from
        `scipy_data_fitting.Data.path` when `scipy_data_fitting.Data.error_columns`
        is set, in which case this is set on first access by calling
        `scipy_data_fitting.Data.load_error`. Otherwise it defaults to `(None, None)`.

        When loaded from file, the x and y values will be scaled according
        to `scipy_data_fitting.Data.scale`.

        [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
        """
        if not hasattr(self, '_error'):
            if any(v is not None for v in self.error_columns):
                self._error = self.load_error()
            else:
                # Deliberately not cached: errors can still be loaded later
                # once error_columns is assigned.
                return (None, None)
        return self._error

    @error.setter
    def error(self, value):
        self._error = tuple(
            numpy.array(v) if v is not None else None for v in value)

    @property
    def scale(self):
        """
        Tuple `(x_scale, y_scale)` that defines how to scale data imported by
        `scipy_data_fitting.Data.load_data` and `scipy_data_fitting.Data.load_error`.

        If a scale is specified as a string, it is treated as a named physical
        constant and converted to the corresponding number using
        [`scipy.constants`][1].

        [1]: http://docs.scipy.org/doc/scipy/reference/constants.html
        """
        if not hasattr(self, '_scale'):
            self._scale = (1, 1)
        return self._scale

    @scale.setter
    def scale(self, value):
        self._scale = tuple(get_constant(v) for v in value)

    @property
    def error_columns(self):
        """
        Two element tuple that defines which columns in the file given by
        `scipy_data_fitting.Data.path` hold the error values for each point.

        The first element corresponds to the x error, the second to the y error.
        Each element is an integer column index (zero-based), a two element
        tuple of integers `(lower, upper)`, or `None`.

        Examples:

            #!python
            # (x_error, y_error)
            (2, 3)

            # (x_error, no y_error)
            (2, None)

            # ((x_lower_error, x_upper_error), y_error)
            ((2, 3), 4)

            # ((x_lower_error, x_upper_error), (y_lower_error, y_upper_error))
            ((2, 3), (4, 5))

        Defaults to `(None, None)`.
        """
        if not hasattr(self, '_error_columns'):
            return (None, None)
        return self._error_columns

    @error_columns.setter
    def error_columns(self, value):
        self._error_columns = value

    @property
    def genfromtxt_args(self):
        """
        Passed as keyword arguments to [`numpy.genfromtxt`][1]
        when called by `scipy_data_fitting.Data.load_data`.

        Default:

            #!python
            {
                'unpack': True,
                'delimiter': ',',
                'usecols': (0, 1),
            }

        [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
        """
        if not hasattr(self, '_genfromtxt_args'):
            self._genfromtxt_args = {
                'unpack': True,
                'delimiter': ',',
                'usecols': (0, 1),
            }
        return self._genfromtxt_args

    @genfromtxt_args.setter
    def genfromtxt_args(self, value):
        self._genfromtxt_args = value

    @property
    def genfromtxt_args_error(self):
        """
        Passed as keyword arguments to [`numpy.genfromtxt`][1]
        when called by `scipy_data_fitting.Data.load_error`.

        Even if defined here, the `usecols` value is always reset based on
        `scipy_data_fitting.Data.error_columns` before being passed on.

        If not set, this defaults to a copy of
        `scipy_data_fitting.Data.genfromtxt_args` on first access.

        [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
        """
        if not hasattr(self, '_genfromtxt_args_error'):
            self._genfromtxt_args_error = self.genfromtxt_args.copy()
        return self._genfromtxt_args_error

    @genfromtxt_args_error.setter
    def genfromtxt_args_error(self, value):
        self._genfromtxt_args_error = value

    def load_data(self):
        """
        Loads data from `scipy_data_fitting.Data.path` using [`numpy.genfromtxt`][1]
        and returns a [`numpy.ndarray`][2].

        Data is scaled according to `scipy_data_fitting.Data.scale`.
        Arguments to [`numpy.genfromtxt`][1] are controlled
        by `scipy_data_fitting.Data.genfromtxt_args`.

        [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
        [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
        """
        array = numpy.genfromtxt(self.path, **self.genfromtxt_args)
        # Scale each row (x then y) by its corresponding factor.
        # (Was: loop variable shadowed the `scale` property and went unused.)
        for n, factor in enumerate(self.scale):
            array[n, :] *= factor
        return array

    def load_error(self):
        """
        Loads error values from `scipy_data_fitting.Data.path` using
        [`numpy.genfromtxt`][1] and returns a two element tuple where each
        element is of a form described by cases 3 and 4 in
        `scipy_data_fitting.Data.error`.

        The columns to import are set by `scipy_data_fitting.Data.error_columns`.
        Values are scaled according to `scipy_data_fitting.Data.scale`.
        Arguments to [`numpy.genfromtxt`][1] are controlled
        by `scipy_data_fitting.Data.genfromtxt_args_error`.

        [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
        """
        # Build the flat list of file columns to read.
        # (Was: `len(v) is 2` — identity comparison on ints; use `==`.)
        usecols = []
        for v in self.error_columns:
            if v is None:
                continue
            elif isinstance(v, int):
                usecols.append(v)
            elif len(v) == 2:
                usecols.extend(v)

        self.genfromtxt_args_error['usecols'] = tuple(usecols)
        array = numpy.genfromtxt(self.path, **self.genfromtxt_args_error)

        # Peel scaled rows off the loaded array in error_columns order.
        error = []
        for n, v in enumerate(self.error_columns):
            if v is None:
                error.append(None)
            elif isinstance(v, int):
                if len(usecols) == 1:
                    # Single column requested: genfromtxt returns a 1D array.
                    error.append(array * self.scale[n])
                else:
                    error.append(array[0] * self.scale[n])
                    array = numpy.delete(array, (0), axis=0)
            elif len(v) == 2:
                error.append(array[0:2] * self.scale[n])
                array = numpy.delete(array, (0, 1), axis=0)
        return tuple(error)
| |
"""
Diagnostics for regression estimations.
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Nicholas Malizia nicholas.malizia@asu.edu "
import pysal
from pysal.common import *
import scipy.sparse as SP
from math import sqrt
from utils import spmultiply, sphstack, spmin, spmax
# Public API of this diagnostics module.
__all__ = [
    "f_stat", "t_stat", "r2", "ar2", "se_betas", "log_likelihood", "akaike", "schwarz",
    "condition_index", "jarque_bera", "breusch_pagan", "white", "koenker_bassett", "vif", "likratiotest"]
def f_stat(reg):
    """
    Calculates the F-statistic and associated p-value for the joint
    significance of a regression. [Greene2003]_

    (For two stage least squares see f_stat_tsls)

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    fs_result : tuple
                value of the F statistic and its associated p-value
    """
    n_vars = reg.k          # number of independent variables (incl. constant)
    n_obs = reg.n           # number of observations
    ssr = reg.utu           # residual sum of squares
    # Explained sum of squares from the predicted values.
    ess = np.sum((reg.predy - reg.mean_y) ** 2)
    f_value = (ess / (n_vars - 1)) / (ssr / (n_obs - n_vars))
    p_value = stats.f.sf(f_value, n_vars - 1, n_obs - n_vars)
    return (f_value, p_value)
def t_stat(reg, z_stat=False):
    """
    Calculates the t-statistics (or z-statistics when ``z_stat`` is True)
    and associated p-values for the regression coefficients. [Greene2003]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model
    z_stat : boolean
             if True use the standard normal distribution instead of
             the Student-t distribution

    Returns
    -------
    ts_result : list of tuples
                one (statistic, p-value) tuple per coefficient
    """
    dof = reg.n - reg.k                 # degrees of freedom for the t test
    vm = reg.vm                         # coefficient variance matrix (k x k)
    std_err = np.sqrt(vm.diagonal())
    stat_values = reg.betas[:len(vm)].reshape(len(vm),) / std_err
    ts_result = []
    for value in stat_values:
        if z_stat:
            p = stats.norm.sf(abs(value)) * 2
        else:
            p = stats.t.sf(abs(value), dof) * 2
        ts_result.append((value, p))
    return ts_result
def r2(reg):
    """
    Calculates the coefficient of determination (R^2) for the
    regression. [Greene2003]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    r2_result : float
                value of the coefficient of determination
    """
    residual_ss = reg.utu
    total_ss = ((reg.y - reg.mean_y) ** 2).sum(0)
    # sum(0) yields a length-1 array; extract the scalar.
    return (1 - residual_ss / total_ss)[0]
def ar2(reg):
    """
    Calculates the adjusted R^2 for the regression, penalising for the
    number of explanatory variables. [Greene2003]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    ar2_result : float
                 R^2 adjusted for the number of explanatory variables
    """
    n_obs = reg.n
    n_vars = reg.k   # includes the constant
    return 1 - (1 - r2(reg)) * (n_obs - 1) / (n_obs - n_vars)
def se_betas(reg):
    """
    Calculates the standard errors of the regression coefficients:
    the square roots of the diagonal of the coefficient variance
    matrix. [Greene2003]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    se_result : array
                standard error of each coefficient (1 x k)
    """
    return np.sqrt(reg.vm.diagonal())
def log_likelihood(reg):
    """
    Calculates the maximized log-likelihood of the regression,
    assuming normally distributed errors. [Greene2003]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    ll_result : float
                log-likelihood value for the regression
    """
    n_obs = reg.n
    ssr = reg.utu
    sigma2 = ssr / n_obs   # ML estimate of the error variance
    return -0.5 * (n_obs * np.log(2 * math.pi)
                   + n_obs * np.log(sigma2)
                   + ssr / sigma2)
def akaike(reg):
    """
    Calculates the Akaike Information Criterion (AIC). [Akaike1974]_

    Uses the stored log-likelihood when the model was fit by maximum
    likelihood; otherwise (OLS) the criterion is computed from the
    residual sum of squares.

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    aic_result : scalar
                 value of the Akaike Information Criterion
    """
    n_vars = reg.k   # number of explanatory vars (including constant)
    try:
        # ML estimation: logll already exists (spatial coefficient
        # included in k).
        return 2.0 * n_vars - 2.0 * reg.logll
    except AttributeError:
        # OLS case.
        n_obs = reg.n
        ssr = reg.utu
        return 2 * n_vars + n_obs * (np.log((2 * np.pi * ssr) / n_obs) + 1)
def schwarz(reg):
    """
    Calculates the Schwarz (Bayesian) Information Criterion. [Schwarz1978]_

    Uses the stored log-likelihood when the model was fit by maximum
    likelihood; otherwise (OLS) the criterion is computed from the
    residual sum of squares.

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    bic_result : scalar
                 value of the Schwarz Information Criterion
    """
    n_obs = reg.n
    n_vars = reg.k   # includes the constant
    try:
        # ML case: logll already computed (spatial coefficient included in k).
        return n_vars * np.log(n_obs) - 2.0 * reg.logll
    except AttributeError:
        # OLS case.
        ssr = reg.utu
        return n_vars * np.log(n_obs) + n_obs * (np.log((2 * np.pi * ssr) / n_obs) + 1)
def condition_index(reg):
    """
    Calculates the multicollinearity condition index according to
    Belsley, Kuh and Welsch (1980) [Belsley1980]_: the square root of
    the ratio of the largest to smallest eigenvalue of the scaled
    cross-product matrix.

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    ci_result : float
                scalar value of the condition index

    NOTE(review): assumes reg exposes either `xtx` or `hth`; if neither
    is present this raises UnboundLocalError — confirm callers guarantee
    one of the two.
    """
    if hasattr(reg, 'xtx'):
        xtx = reg.xtx   # k x k projection matrix (includes constant)
    elif hasattr(reg, 'hth'):
        xtx = reg.hth   # k x k projection matrix (includes constant)
    # Scale each column by the corresponding diagonal element.
    eigenvalues = np.linalg.eigvals(xtx / np.diagonal(xtx))
    return sqrt(max(eigenvalues) / min(eigenvalues))
def jarque_bera(reg):
    """
    Jarque-Bera test for normality in the residuals. [Jarque1980]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    jb_result : dictionary
                'df'     : degrees of freedom for the test (always 2)
                'jb'     : value of the test statistic
                'pvalue' : p-value associated with the statistic
                           (chi^2 distributed with 2 df)
    """
    n = reg.n  # (scalar) number of observations
    u = reg.u  # (array) residuals from regression
    u2 = u ** 2
    u3 = u ** 3
    u4 = u ** 4
    mu2 = np.mean(u2)
    mu3 = np.mean(u3)
    mu4 = np.mean(u4)
    S = mu3 / (mu2 ** (1.5))  # skewness measure
    K = (mu4 / (mu2 ** 2))    # kurtosis measure
    jb = n * (((S ** 2) / 6) + ((K - 3) ** 2) / 24)
    # stats.chisqprob was removed from scipy; chi2.sf is the equivalent
    # survival function.
    pvalue = stats.chi2.sf(jb, 2)
    jb_result = {"df": 2, "jb": jb, 'pvalue': pvalue}
    return jb_result
def breusch_pagan(reg, z=None):
    """
    Calculates the Breusch-Pagan test statistic to check for
    heteroscedasticity. [Breusch1979]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model
    z : array
        optional input for specifying an alternative set of variables
        (Z) to explain the observed variance. By default this is a
        matrix of the squared explanatory variables (X**2), squaring to
        eliminate negative values.

    Returns
    -------
    bp_result : dictionary
                'df'     : degrees of freedom (number of Z columns - 1)
                'bp'     : value of the Breusch-Pagan statistic
                'pvalue' : p-value (chi^2 distributed with df)

    Notes
    -----
    x attribute in the reg object must have a constant term included.
    This is standard for spreg.OLS so no testing done to confirm
    constant.
    """
    e2 = reg.u ** 2
    n = reg.n
    ete = reg.utu
    # Standardized squared residuals: g = e^2 / (e'e / n) - 1.
    g = e2 / (ete / n) - 1.0
    # Must be `is None`: with an ndarray argument, `z == None` is an
    # elementwise comparison and truth-testing it raises ValueError.
    if z is None:
        z = spmultiply(reg.x, reg.x)
    n, p = z.shape
    # Identify duplicate columns in Z so they can be dropped.
    omitcolumn = []
    for i in range(p):
        current = z[:, i]
        for j in range(i + 1, p):
            if abs(current - z[:, j]).sum() == 0:
                omitcolumn.append(j)
    # Remove in reverse order to avoid renumbering of later columns.
    for c in sorted(set(omitcolumn), reverse=True):
        z = np.delete(z, c, 1)
    n, p = z.shape
    df = p - 1
    # LM statistic: 0.5 * g'Z (Z'Z)^-1 Z'g
    zt = np.transpose(z)
    gt = np.transpose(g)
    bp_array = 0.5 * np.dot(np.dot(np.dot(gt, z), la.inv(np.dot(zt, z))), np.dot(zt, g))
    bp = bp_array[0, 0]
    # chi2.sf replaces the removed stats.chisqprob.
    pvalue = stats.chi2.sf(bp, df)
    bp_result = {'df': df, 'bp': bp, 'pvalue': pvalue}
    return bp_result
def white(reg):
    """
    Calculates the White test to check for heteroscedasticity. [White1980]_

    Parameters
    ----------
    reg : regression object
          output instance from a regression model

    Returns
    -------
    white_result : dictionary
                   'df'     : degrees of freedom for the test
                   'wh'     : value of the White statistic
                   'pvalue' : p-value (chi^2 distributed with df);
                   or a string when the test cannot be computed because
                   of multicollinearity.

    Notes
    -----
    x attribute in the reg object must have a constant term included.
    This is standard for spreg.OLS so no testing done to confirm
    constant.
    """
    e = reg.u ** 2
    k = int(reg.k)
    n = int(reg.n)
    X = reg.x
    # The auxiliary regression is meaningless under severe
    # multicollinearity (condition index above 30, Belsley et al.).
    ci = condition_index(reg)
    if ci > 30:
        white_result = "Not computed due to multicollinearity."
        return white_result
    # Compute cross-products and squares of the regression variables.
    if type(X).__name__ == 'ndarray':
        A = np.zeros((n, (k * (k + 1)) // 2))
    elif type(X).__name__ == 'csc_matrix' or type(X).__name__ == 'csr_matrix':
        # this is probably inefficient
        A = SP.lil_matrix((n, (k * (k + 1)) // 2))
    else:
        # Py3-compatible raise (was the Python 2 `raise E, msg` form,
        # a syntax error under Python 3).
        raise Exception("unknown X type, %s" % type(X).__name__)
    counter = 0
    for i in range(k):
        for j in range(i, k):
            v = spmultiply(X[:, i], X[:, j], False)
            A[:, counter] = v
            counter += 1
    # Append the original variables.
    A = sphstack(X, A)  # note: this also converts a LIL to CSR
    n, k = A.shape
    # Identify any duplicate or constant columns in A.
    omitcolumn = []
    for i in range(k):
        current = A[:, i]
        # Remove all constant terms (a constant is added back later).
        if spmax(current) == spmin(current):
            omitcolumn.append(i)
        # Do not allow duplicates.
        for j in range(k):
            check = A[:, j]
            if i < j:
                test = abs(current - check).sum()
                if test == 0:
                    omitcolumn.append(j)
    omitcolumn = list(set(omitcolumn))
    # Now the identified columns must be removed.
    if type(A).__name__ == 'ndarray':
        A = np.delete(A, omitcolumn, 1)
    elif type(A).__name__ == 'csc_matrix' or type(A).__name__ == 'csr_matrix':
        # this is probably inefficient; list() is required on Python 3,
        # where range() has no remove().
        keepcolumn = list(range(k))
        for i in omitcolumn:
            keepcolumn.remove(i)
        A = A[:, keepcolumn]
    else:
        raise Exception("unknown A type, %s" % type(X).__name__)
    A = sphstack(np.ones((A.shape[0], 1)), A)  # add a constant back in
    n, k = A.shape
    # Conduct the auxiliary regression and calculate the statistic.
    import ols as OLS
    aux_reg = OLS.BaseOLS(e, A)
    aux_r2 = r2(aux_reg)
    wh = aux_r2 * n
    df = k - 1
    # chi2.sf replaces the removed stats.chisqprob.
    pvalue = stats.chi2.sf(wh, df)
    white_result = {'df': df, 'wh': wh, 'pvalue': pvalue}
    return white_result
def koenker_bassett(reg, z=None):
    """
    Calculates the Koenker-Bassett test statistic to check for
    heteroscedasticity. [Koenker1982]_ [Greene2003]_

    Parameters
    ----------
    reg             : regression output
                      output from an instance of a regression class
    z               : array
                      optional input for specifying an alternative set of
                      variables (Z) to explain the observed variance. By
                      default this is a matrix of the squared explanatory
                      variables (X**2) with a constant added to the first
                      column if not already present. In the default case,
                      the explanatory variables are squared to eliminate
                      negative values.

    Returns
    -------
    kb_result       : dictionary
                      contains the statistic (kb), degrees of freedom (df)
                      and the associated p-value (pvalue) for the test.
    kb              : float
                      scalar value for the Koenker-Bassett test statistic.
    df              : integer
                      degrees of freedom associated with the test
    pvalue          : float
                      p-value associated with the statistic (chi^2
                      distributed)

    Notes
    -----
    x attribute in the reg object must have a constant term included. This is
    standard for spreg.OLS so no testing done to confirm constant.

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> import diagnostics
    >>> from ols import OLS

    Read the DBF associated with the Columbus data.

    >>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")

    Create the dependent variable vector.

    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))

    Create the matrix of independent variables.

    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("HOVAL"))
    >>> X = np.array(X).T

    Run an OLS regression.

    >>> reg = OLS(y,X)

    Calculate the Koenker-Bassett test for heteroscedasticity.

    >>> testresult = diagnostics.koenker_bassett(reg)

    Print the degrees of freedom for the test.

    >>> testresult['df']
    2

    Print the test statistic.

    >>> print("%1.3f"%testresult['kb'])
    5.694

    Print the associated p-value.

    >>> print("%1.4f"%testresult['pvalue'])
    0.0580
    """
    # The notation here matches that of Greene (2003).
    u = reg.u ** 2
    n = reg.n
    k = reg.k
    x = reg.x
    ete = reg.utu
    #constant = constant_check(x)

    # Mean squared residual, centered squared residuals (g) and the variance
    # estimate (v) that scales the statistic.
    ubar = ete / n
    ubari = ubar * np.ones((n, 1))
    g = u - ubari
    v = (1.0 / n) * np.sum((u - ubar) ** 2)

    # Identity test, not equality: `z == None` broadcasts elementwise when z
    # is a numpy array and breaks the truth test.
    if z is None:
        x = reg.x
        #constant = constant_check(x)
        # if constant == False:
        #    z = np.hstack((np.ones((n,1)),x))**2
        # else:
        #    z = x**2
        z = spmultiply(x, x)
    else:
        #constant = constant_check(z)
        # if constant == False:
        #    z = np.hstack((np.ones((n,1)),z))
        pass
    n, p = z.shape

    # Check to identify any duplicate columns in Z
    omitcolumn = []
    for i in range(p):
        current = z[:, i]
        for j in range(p):
            check = z[:, j]
            if i < j:
                test = abs(current - check).sum()
                if test == 0:
                    omitcolumn.append(j)
    uniqueomit = set(omitcolumn)
    omitcolumn = list(uniqueomit)

    # Now the identified columns must be removed (done in reverse to
    # prevent renumbering)
    omitcolumn.sort()
    omitcolumn.reverse()
    for c in omitcolumn:
        z = np.delete(z, c, 1)
    n, p = z.shape
    df = p - 1

    # Auxiliary regression of g on Z: statistic is (1/v) * g'Z (Z'Z)^-1 Z'g.
    zt = np.transpose(z)
    gt = np.transpose(g)
    gtz = np.dot(gt, z)
    ztg = np.dot(zt, g)
    ztz = np.dot(zt, z)
    ztzi = la.inv(ztz)
    part1 = np.dot(gtz, ztzi)
    part2 = np.dot(part1, ztg)
    kb_array = (1.0 / v) * part2
    kb = kb_array[0, 0]
    pvalue = stats.chisqprob(kb, df)
    kb_result = {'kb': kb, 'df': df, 'pvalue': pvalue}
    return kb_result
def vif(reg):
    """
    Calculates the variance inflation factor for each independent variable.
    For the ease of indexing the results, the constant is currently
    included. This should be omitted when reporting the results to the
    output text. [Greene2003]_

    Parameters
    ----------
    reg             : regression object
                      output instance from a regression model

    Returns
    -------
    vif_result      : list of tuples
                      each tuple includes the vif and the tolerance, the
                      order of the variables corresponds to their order in
                      the reg.x matrix

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> import diagnostics
    >>> from ols import OLS

    Read the DBF associated with the Columbus data.

    >>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")

    Create the dependent variable vector.

    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))

    Create the matrix of independent variables.

    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("HOVAL"))
    >>> X = np.array(X).T

    Run an OLS regression.

    >>> reg = OLS(y,X)

    Calculate the variance inflation factor (VIF).

    >>> testresult = diagnostics.vif(reg)

    Select the tuple for the income variable.

    >>> incvif = testresult[1]

    Print the VIF for income.

    >>> print("%12.12f"%incvif[0])
    1.333117497189

    Print the tolerance for income.

    >>> print("%12.12f"%incvif[1])
    0.750121427487

    Repeat for the home value variable.

    >>> hovalvif = testresult[2]
    >>> print("%12.12f"%hovalvif[0])
    1.333117497189
    >>> print("%12.12f"%hovalvif[1])
    0.750121427487
    """
    X = reg.x
    n, k = X.shape
    # Deferred import (avoids a circular import with ols), but hoisted out of
    # the loop so it runs once rather than once per variable.
    import ols as OLS
    vif_result = []
    for j in range(k):
        # Auxiliary regression of column j on all remaining columns.
        Z = X.copy()
        Z = np.delete(Z, j, 1)
        y = X[:, j]
        aux = OLS.BaseOLS(y, Z)
        mean_y = aux.mean_y
        utu = aux.utu
        ss_tot = sum((y - mean_y) ** 2)
        if ss_tot == 0:
            # Column j is constant: the auxiliary R^2 (and hence the VIF)
            # is undefined.
            resj = pysal.MISSINGVALUE
        else:
            r2aux = 1 - utu / ss_tot
            tolj = 1 - r2aux
            vifj = 1 / tolj
            resj = (vifj, tolj)
        vif_result.append(resj)
    return vif_result
def constant_check(array):
    """
    Checks to see numpy array includes a constant.

    Parameters
    ----------
    array           : array
                      an array of variables to be inspected

    Returns
    -------
    constant        : boolean
                      true signifies the presence of a constant

    Example
    -------

    >>> import numpy as np
    >>> import pysal
    >>> import diagnostics
    >>> from ols import OLS
    >>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("HOVAL"))
    >>> X = np.array(X).T
    >>> reg = OLS(y,X)
    >>> diagnostics.constant_check(reg.x)
    True
    """
    n, k = array.shape
    # A column whose min equals its max holds a single repeated value,
    # i.e. a constant term.
    for col_idx in range(k):
        column = array[:, col_idx]
        if column.min() == column.max():
            return True
    return False
def likratiotest(reg0, reg1):
    """
    Likelihood ratio test statistic [Greene2003]_

    Parameters
    ----------
    reg0         : regression object for constrained model (H0)
    reg1         : regression object for unconstrained model (H1)

    Returns
    -------
    likratio     : dictionary
                   contains the statistic (likr), the degrees of
                   freedom (df) and the p-value (pvalue)
    likr         : float
                   likelihood ratio statistic
    df           : integer
                   degrees of freedom
    p-value      : float
                   p-value

    Examples
    --------
    >>> import numpy as np
    >>> import pysal as ps
    >>> import scipy.stats as stats
    >>> import pysal.spreg.ml_lag as lag

    Use the baltim sample data set

    >>> db = ps.open(ps.examples.get_path("baltim.dbf"),'r')
    >>> y_name = "PRICE"
    >>> y = np.array(db.by_col(y_name)).T
    >>> y.shape = (len(y),1)
    >>> x_names = ["NROOM","NBATH","PATIO","FIREPL","AC","GAR","AGE","LOTSZ","SQFT"]
    >>> x = np.array([db.by_col(var) for var in x_names]).T
    >>> ww = ps.open(ps.examples.get_path("baltim_q.gal"))
    >>> w = ww.read()
    >>> ww.close()
    >>> w.transform = 'r'

    OLS regression

    >>> ols1 = ps.spreg.OLS(y,x)

    ML Lag regression

    >>> mllag1 = lag.ML_Lag(y,x,w)

    >>> lr = likratiotest(ols1,mllag1)
    >>> print "Likelihood Ratio Test: {0:.4f} df: {1} p-value: {2:.4f}".format(lr["likr"],lr["df"],lr["p-value"])
    Likelihood Ratio Test: 44.5721 df: 1 p-value: 0.0000
    """
    try:
        likr = 2.0 * (reg1.logll - reg0.logll)
    except AttributeError:
        # Call-style raise is valid on both Python 2 and 3; the old
        # `raise Exception, "..."` comma form is Python-2-only syntax.
        raise Exception("Missing or improper log-likelihoods in regression objects")
    if likr < 0.0:  # always enforces positive likelihood ratio
        likr = -likr
    pvalue = stats.chisqprob(likr, 1)
    likratio = {"likr": likr, "df": 1, "p-value": pvalue}
    return likratio
def _test():
    """Run this module's doctest examples."""
    import doctest
    doctest.testmod()
# Execute the doctest suite when the module is run as a script.
if __name__ == '__main__':
    _test()
| |
# One Convergence, Inc. CONFIDENTIAL
# Copyright (c) 2012-2015, One Convergence, Inc., USA
# All Rights Reserved.
#
# All information contained herein is, and remains the property of
# One Convergence, Inc. and its suppliers, if any. The intellectual and
# technical concepts contained herein are proprietary to One Convergence,
# Inc. and its suppliers.
#
# Dissemination of this information or reproduction of this material is
# strictly forbidden unless prior written permission is obtained from
# One Convergence, Inc., USA
import logging
import subprocess
import netifaces
from netifaces import AF_LINK
from vyos_session import utils
logger = logging.getLogger(__name__)
utils.init_logger(logger)
class APIHandler(object):
    """Collects and parses firewall and VPN statistics from a VyOS router.

    All data is gathered by shelling out to the Vyatta operational-mode
    helper scripts and parsing their plain-text output.
    """

    def __init__(self):
        pass

    def run_command(self, command):
        """Run *command* in a shell and return its stdout.

        Returns None (after logging) if the command wrote anything to
        stderr.
        """
        proc = subprocess.Popen(command,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if err:
            logger.error("Unable to run command %s, ERROR- %s" %
                         (command, err))
            return None
        return out

    def _get_interface_name(self, interface_mac):
        """Return the local interface whose MAC equals *interface_mac*.

        Returns None when no interface matches.
        """
        interfaces = netifaces.interfaces()
        for interface in interfaces:
            try:
                mac_addr = netifaces.ifaddresses(interface)[AF_LINK][0]['addr']
                if mac_addr == interface_mac:
                    return interface
            except KeyError as keyerr:
                # Interface has no link-layer address entry; log and move on.
                logger.error('Unable to Parse Stats Data, ' +
                             'KeyError: {}'.format(keyerr))
        return None

    def parse_firewall_stats(self, interface, raw_stats):
        """Parse the firewall statistics table for *interface*.

        sample data for command show_firewall_detail.xsl :

        IPv4 Firewall "oc_fw_eth1":

        Active on (eth1,OUT)

        rule  action   proto     packets  bytes
        ----  ------   -----     -------  -----
        11    accept   tcp       476405   24805598
        condition - saddr 11.0.1.0/24 daddr 11.0.2.0/24 tcp dpt:22

        12    accept   icmp      1222414  101692572
        condition - saddr 11.0.1.0/24 daddr 11.0.2.0/24

        13    drop     udp       150770055788 DROP
        condition - saddr 11.0.2.0/24 daddr /*

        14    accept   tcp       3589762  238449000
        condition - saddr 11.0.1.0/24 daddr 11.0.2.0/24 tcp dpt:80

        10000 drop     all       0        0
        condition - saddr 0.0.0.0/0 daddr 0.0.0.0/0
        """
        firewall = {}
        firewalls = []
        firewall_start = False
        table = False
        status = None
        rule_keys = ['rulepriority', 'packets', 'bytes', 'action',
                     'source', 'destination']
        try:
            for line in raw_stats.split('\n'):
                words = line.split()
                if 'IPv4 Firewall' in line:
                    firewall_start = True
                if 'Active on' in line and interface in line and firewall_start:
                    status = "Active"
                    # e.g. "(eth1,OUT)" -> interface "eth1", direction "OUT"
                    (interface, direction) = words[2][1:-1].split(',')
                    firewall['interface'] = interface
                    firewall['dir'] = direction
                    firewall['rules'] = []
                elif len(words) >= 4:
                    if words[3] in ['ACCEPT', 'DROP'] and status == "Active":
                        table = True
                        rule = dict(zip(rule_keys, words))
                        firewall['rules'].append(rule)
                    elif table and status == "Active":
                        # Past the end of the rule table: enrich the rules
                        # with protocol / destination-port details, then stop.
                        command = ('/opt/vyatta/bin/vyatta-show-firewall.pl "all_all" ' +
                                   '/opt/vyatta/share/xsl/show_firewall_detail.xsl')
                        show_fw_data = self.run_command(command)
                        firewall = self.add_protocol_and_dest_port_info(firewall, show_fw_data)
                        logger.info("packed firewall \n %s" % firewall)
                        firewalls.append(firewall)
                        break
        except KeyError as keyerr:
            logger.error('Unable to Parse Firewall Stats Data, ' +
                         'KeyError: {}'.format(keyerr))
        except IndexError as inderr:
            logger.error('Unable to Parse Firewall Stats Data, ' +
                         'IndexError: {}'.format(inderr))
        return firewalls

    def add_protocol_and_dest_port_info(self, firewall, show_fw_data):
        """Augment each rule in *firewall* with 'protocol' and 'dest_port'.

        Correlates the statistics table with the detailed listing
        (*show_fw_data*) by rule priority.
        """
        firewall_started = False
        firewall_info_started = False
        firewall_matcher = "Active on (" + firewall['interface']
        firewall_info_end = "-------------"
        firewall_info = []
        for line in show_fw_data.split('\n'):
            if "IPv4 Firewall" in line:
                firewall_started = True
            if firewall_matcher in line:
                firewall_info_started = True
            if firewall_started and firewall_info_started:
                firewall_info.append(line)
            if firewall_started and firewall_info_started and firewall_info_end in line:
                break
        try:
            for rule in firewall.get('rules', []):
                for index, stats in enumerate(firewall_info):
                    # Equality, not identity: the previous `stats is not ''`
                    # only worked by accident of CPython string interning.
                    if stats != '':
                        extract_stats = stats.split()
                        if rule['rulepriority'] in extract_stats[0]:
                            rule['protocol'] = extract_stats[2]
                            # The "condition - ..." line follows the rule line
                            # and may carry a "dpt:<port>" token.
                            for key in firewall_info[index + 1].split():
                                if "dpt:" in key:
                                    rule['dest_port'] = key.split(':')[1]
                                    break
                            break
        except KeyError as keyerr:
            logger.error('Unable to Parse Firewall Stats Data, ' +
                         'KeyError: {}'.format(keyerr))
        except IndexError as inderr:
            logger.error('Unable to Parse Firewall Stats Data, ' +
                         'IndexError: {}'.format(inderr))
        return firewall

    def parse_vpn_s2s(self, raw_stats):
        """Parse site-to-site IPSec tunnel statistics.

        sample data for command show-ipsec-sa-detail :

        Peer IP:                192.168.20.194
        Peer ID:                120.0.0.2
        Local IP:               91.0.0.11
        Local ID:               91.0.0.11
        NAT Traversal:          no
        NAT Source Port:        n/a
        NAT Dest Port:          n/a

            Tunnel 1:
                State:                  up
                Inbound SPI:            c6621bd8
                Outbound SPI:           cbf2ab18
                Encryption:             aes128
                Hash:                   sha1
                PFS Group:              5

                Local Net:              90.0.0.0/24
                Local Protocol:         all
                Local Port:             all

                Remote Net:             120.0.0.0/24
                Remote Protocol:        all
                Remote Port:            all

                Inbound Bytes:          654.0
                Outbound Bytes:         504.0
                Active Time (s):        289
                Lifetime (s):           1800
        """
        s2s_connection = {}
        s2s_connections = []
        try:
            for line in raw_stats.split('\n'):
                key = ''
                value = ''
                if ':' in line:
                    # Split on the first colon only so values containing
                    # colons (e.g. timestamps) cannot raise ValueError.
                    key, value = line.split(":", 1)
                if 'Peer IP' in key:
                    s2s_connection['peerip'] = value.strip(" \t\n\r")
                elif 'Local IP' in key:
                    s2s_connection['localip'] = value.strip(" \t\n\r")
                elif "Tunnel" in key:
                    # NOTE(review): assumes a "Tunnel N:" header always
                    # precedes the Inbound/Outbound byte lines in CLI output.
                    s2s_connection['tunnels'] = []
                    tunnel_info = {'tunnel':
                                   key.strip(" \t\n\r").split(" ")[-1]}
                elif "Inbound Bytes" in key:
                    tunnel_info['in'] = value.strip(" \t\n\r")
                elif "Outbound Bytes" in key:
                    # Outbound Bytes is the last counter: close out the
                    # tunnel and the connection record.
                    tunnel_info['out'] = value.strip(" \t\n\r")
                    s2s_connection['tunnels'].append(tunnel_info)
                    s2s_connections.append(s2s_connection)
                    s2s_connection = {}
        except KeyError as keyerr:
            logger.error('Unable to Parse IPSec VPN Stats Data, ' +
                         'KeyError: {}'.format(keyerr))
        except IndexError as inderr:
            logger.error('Unable to Parse IPSec VPN Stats Data, ' +
                         'IndexError: {}'.format(inderr))
        return s2s_connections

    def parse_vpn_remote(self, raw_stats):
        """Parse OpenVPN remote-access client statistics.

        sample data for command vyatta-show-ovpn.pl --mode=server :

        OpenVPN server status on vtun0 []

        Client CN       Remote IP       Tunnel IP       TX byte RX byte Connected Since
        --------------- --------------- --------------- ------- ------- ------------------------
        UNDEF           192.168.2.81    192.168.200.4      8.0K    2.7K Tue Mar  8 09:01:05 2016
        """
        table = False
        remote_connection = {}
        remote_connections = []
        keys = ['clientCN', 'remoteip', 'tunnelip', 'in', 'out', 'connected_since']
        try:
            for line in raw_stats.split('\n'):
                if "Client CN" in line:
                    table = True
                elif len(line.split()) >= 5 and table and "---" not in line:
                    value_list = line.split()[:-5]
                    connected_since = " ".join(line.split()[5:])
                    # List comprehension instead of filter() so the .append
                    # below also works on Python 3, where filter() returns a
                    # lazy iterator rather than a list.
                    clients = [value for value in value_list if value.strip()]
                    clients.append(connected_since)
                    remote_connection = dict(zip(keys, clients))
                    remote_connections.append(remote_connection)
        except KeyError as keyerr:
            logger.error('Unable to Parse Remote VPN Stats Data, ' +
                         'KeyError: {}'.format(keyerr))
        except IndexError as inderr:
            logger.error('Unable to Parse Remote VPN Stats Data, ' +
                         'IndexError: {}'.format(inderr))
        return remote_connections

    def get_fw_stats(self, mac_address):
        """Return parsed firewall statistics for the interface with *mac_address*.

        sample data for command show_firewall_statistics.xsl :

        IPv4 Firewall "oc_fw_eth1":

        Active on (eth1,OUT)

        rule  packets   bytes     action  source              destination
        ----  -------   -----     ------  ------              -----------
        11    476.22K   24.80M    ACCEPT  11.0.1.0/24         11.0.2.0/24
        12    1.22M     101.66M   ACCEPT  11.0.1.0/24         11.0.2.0/24
        13    3.43G     150.73G   DROP    11.0.1.0/24         11.0.2.0/24
        14    3.59M     238.39M   ACCEPT  11.0.1.0/24         11.0.2.0/24
        10000 0         0         DROP    0.0.0.0/0           0.0.0.0/0
        """
        interface = None
        parsed_stats = {}
        command = ('/opt/vyatta/bin/vyatta-show-firewall.pl "all_all" ' +
                   '/opt/vyatta/share/xsl/show_firewall_statistics.xsl')
        raw_stats = self.run_command(command)
        interface = self._get_interface_name(mac_address)
        if not interface:
            logger.error('No interface available for mac address: %s' %
                         mac_address)
            return parsed_stats
        parsed_stats = self.parse_firewall_stats(interface, raw_stats)
        logger.info("Firewall stats Data, \n %s" % parsed_stats)
        return parsed_stats

    def get_vpn_stats(self):
        """Collect IPSec site-to-site and OpenVPN remote stats in one dict."""
        vpn_parsed_data = {}
        command = ('sudo /opt/vyatta/bin/sudo-users/vyatta-op-vpn.pl ' +
                   '--show-ipsec-sa-detail')
        raw_ipsec_stats = self.run_command(command)
        if raw_ipsec_stats:
            ipsec_parsed_data = self.parse_vpn_s2s(raw_ipsec_stats)
            if ipsec_parsed_data:
                vpn_parsed_data['ipsec'] = ipsec_parsed_data
            else:
                logger.warning("Empty IPSec VPN Stats")
        else:
            logger.warning("Empty IPSec VPN Stats")
        command = ('sudo /opt/vyatta/bin/sudo-users/vyatta-show-ovpn.pl ' +
                   '--mode=server')
        raw_remote_stats = self.run_command(command)
        if raw_remote_stats:
            remote_parsed_data = self.parse_vpn_remote(raw_remote_stats)
            if remote_parsed_data:
                vpn_parsed_data['remote'] = remote_parsed_data
            else:
                logger.warning("Empty Remote VPN Stats")
        else:
            logger.warning("Empty Remote VPN Stats")
        logger.info("VPN stats Data, \n %s" % vpn_parsed_data)
        return vpn_parsed_data
| |
import re
from In.core.object_meta import ObjectMetaBase
class ValuatorContainer(dict):
    '''Lazy cache of Valuator instances, keyed by valuator class name.

    Missing keys are resolved through the IN class registry and the
    resulting instance is memoized in the dict.
    '''
    def __missing__(self, key):
        valuator_class = IN.register.get_class(key, 'Valuator')
        instance = valuator_class()
        self[key] = instance
        return instance
class ValuatorEngine:
    '''Valuator class that valuate values based on validation rules.

    Instance available as IN.valuator
    '''

    # dict of all Valuator instances, created lazily on first lookup
    valuators = ValuatorContainer()

    def validate(self, value, rule):  # rule is ['type', args] or [[], [], []]
        '''Validate *value* against *rule*; returns [True] or [False, msg].

        #TODO: allow per false error message
        rule = [
            'And', [
                ['Length', '>', 6, 'The value length should be greater than 6.'],
                ['Not', [['Num']],
                ['Or', [
                    ['Email', 'Invalid email address.'],
                    ['Domain'],
                    ['Url', 'Invalid Url.'],
                ]],
            ]],
        ]
        '''
        if not rule:  # empty list
            return [True]
        try:
            firstitem = rule[0]
            item_type = type(firstitem)
            if item_type is str:  # ['type', args]
                args = rule[1:]
                result = self.valuators[firstitem].validate(value, *args)
                if not result[0]:
                    #return [False, args[-1]] # last item is error message
                    return result
            elif item_type is list:  # [[], [], []]: every sub-rule must pass
                for subrule in rule:
                    result = self.validate(value, subrule)  # recursive
                    if not result[0]:
                        return result
        except Exception as e:
            IN.logger.debug()
            return [False, str(e)]
        return [True]

    def __getattr__(self, key):
        # Cache the valuator under its own attribute name so later lookups
        # bypass __getattr__ entirely. (The previous code stored it under
        # the literal attribute 'key', which never acted as a cache.)
        valuator = self.valuators[key]
        setattr(self, key, valuator)
        return valuator
class ValuatorMeta(ObjectMetaBase):
    # Metaclass marker names used by the IN object registry to group
    # ValuatorBase subclasses under the 'Valuator' class type.
    __class_type_base_name__ = 'ValuatorBase'
    __class_type_name__ = 'Valuator'
class ValuatorBase(dict, metaclass = ValuatorMeta):
    '''Base class of all IN ValuatorBase.
    '''
    __allowed_children__ = None
    __default_child__ = None
    # Comparison table shared by rule classes such as Length and Equal.
    # l is the tested value, al the first bound, ml the optional second bound.
    # NOTE(review): '<>' evaluates `al < l > ml` (l greater than BOTH bounds)
    # and '><' evaluates `al > l < ml` (l smaller than both) -- confirm these
    # chained comparisons match the intended range semantics.
    ops = {
        '=' : lambda l, al, ml: l == al,
        '==' : lambda l, al, ml: l == al,
        '!=' : lambda l, al, ml: l != al,
        '>' : lambda l, al, ml: l > al,
        '<' : lambda l, al, ml: l < al,
        '>=' : lambda l, al, ml: l >= al,
        '<=' : lambda l, al, ml: l <= al,
        '<>' : lambda l, al, ml: al < l > ml,
        '><' : lambda l, al, ml: al > l < ml,
    }
    def validate(self, value):
        '''return value should be a list like [False, 'Error message.'] or [True]
        '''
        # Default: everything validates.
        return [True]
@IN.register('Valuator', type = 'Valuator')
class Valuator(ValuatorBase):
    '''Concrete base valuator registered with the IN framework.

    Subclasses implement validate(); this class adds no behavior itself.
    '''
    pass
class And(Valuator):
    # Placeholder combinator; inherits the always-true validate from the
    # base class. NOTE(review): no AND logic is implemented here yet.
    pass
class Or(Valuator):
    # Placeholder combinator; inherits the always-true validate from the
    # base class. NOTE(review): no OR logic is implemented here yet.
    pass
class Not(Valuator):
    def validate(self, value, rule, message = ''):
        '''Invert the wrapped rule: pass when it fails, fail when it passes.'''
        inner = IN.valuator.validate(value, rule[0])
        return [not inner[0], message]
class Empty(Valuator):
    def validate(self, value, message = ''):
        '''Pass only when value is falsy (empty).'''
        # Explicit [False, message] instead of echoing the value, so the
        # engine never re-evaluates a truthy payload as a result list.
        if value:
            return [False, message]
        return [True]
class NotEmpty(Valuator):
    def validate(self, value, message = ''):
        '''Pass only when value is truthy (non-empty).'''
        if not value:
            return [False, message]
        return [True]
class Length(Valuator):
    def validate(self, value, length = 0, op = '=', mlength = 0, message = ''):
        '''Compare len(value) against length (and mlength for range ops).'''
        try:
            passed = self.ops[op](len(value), length, mlength)
        except KeyError:
            # unknown operator: log and treat as failed
            IN.logger.debug()
            return [False, message]
        return [passed or False, message]
class Equal(Valuator):
    def validate(self, value, tvalue, op = '=', mvalue = 0, message = ''):
        '''Compare value against tvalue (and mvalue for range ops).'''
        try:
            passed = self.ops[op](value, tvalue, mvalue)
        except KeyError:
            # unknown operator: log and treat as failed
            IN.logger.debug()
            return [False, message]
        return [passed or False, message]
class Regx(Valuator):
    '''Valuator rule class that validates using a regular expression.'''

    re_compiled = {}  # pattern cache: each regex is compiled at most once

    def get_regx(self, regx):
        compiled = self.re_compiled.get(regx)
        if compiled is None:
            compiled = re.compile(regx)
            self.re_compiled[regx] = compiled
        return compiled

    def validate(self, value, regx, message = ''):
        # A Match object is truthy, None is falsy -- exactly what the
        # engine's result[0] check expects.
        match = self.get_regx(regx).match(value)
        return [match, message]
class Domain(Regx):
    '''Validate a domain/host name, including IDNA (internationalized) names.'''

    regex_host = r'(?:(?:[a-zA-Z0-9][a-zA-Z0-9\-]*)?[a-zA-Z0-9])'

    def validate(self, domain, message = ''):
        failure = [False, message]
        # Cheap structural checks first.
        if not (4 <= len(domain) <= 255):
            return failure
        if domain.endswith('.') or '.' not in domain:
            return failure
        # Round-trip through IDNA to reject names that cannot be encoded.
        try:
            domain = domain.encode('idna').decode('ascii')
            domain.encode('ascii').decode('idna')
        except Exception:
            return failure
        pattern = self.regex_host + r'(?:\.' + self.regex_host + r')*' + "$"
        if re.match(pattern, domain):
            return [True]
        return failure
class Email(Regx):
    '''Validate an email address: local part (ASCII or UTF-8) plus domain.'''

    regex = re.compile(r'^[A-Za-z0-9\.\+_-]')
    atext = r'a-zA-Z0-9_\.\-' # !#\$%&\'\*\+/=\?\^`\{\|\}~
    atext_utf8 = atext + r"\u0080-\U0010FFFF"
    regex_local = re.compile(''.join(('[', atext, ']+(?:\\.[', atext, ']+)*$')))
    regex_local_utf8 = re.compile(''.join(('[', atext_utf8, ']+(?:\\.[', atext_utf8, ']+)*$')))

    def validate(self, value, message = ''):
        pieces = value.split('@')
        if len(pieces) != 2:
            return [False, message]
        if not self.validate_local(pieces[0]):
            return [False, message]
        # Delegate the domain part to the Domain valuator; its result is
        # already in [True] / [False, msg] form.
        return IN.valuator.validate(pieces[1], ['Domain', message])

    def validate_local(self, local):
        '''Return True when the part before the @ is acceptable.'''
        if not local or len(local) > 64 or '..' in local:
            return False
        if re.match(self.regex_local, local):  # plain ASCII local part
            return True
        # Fall back to the UTF-8 pattern for internationalized addresses.
        if re.match(self.regex_local_utf8, local):
            return True
        return False
class Url(Regx):
    '''Placeholder URL valuator: accepts every value.'''
    def validate(self, value, message = ''):
        # Return the standard result list. Returning a bare True (as before)
        # made the engine's result[0] indexing raise TypeError, so every Url
        # rule was reported as failed.
        return [True]
class Alpha(Valuator):
    def validate(self, value, message = ''):
        '''Pass when value is a non-empty string of alphabetic characters.'''
        # Unbound str.isalpha: a non-str value raises TypeError, which the
        # engine converts into a failed validation.
        return [str.isalpha(value), message]
class AlphaNum(Valuator):
    def validate(self, value, message = ''):
        '''Pass when value is a non-empty alphanumeric string.'''
        return [str.isalnum(value), message]
class Digit(Valuator):
    def validate(self, value, message = ''):
        '''Pass when value is a non-empty string of digits.'''
        return [str.isdigit(value), message]
class Decimal(Valuator):
    def validate(self, value, message = ''):
        '''Pass when value is a non-empty string of decimal characters.'''
        return [str.isdecimal(value), message]
class Lower(Valuator):
    def validate(self, value, message = ''):
        '''Pass when all cased characters in value are lowercase.'''
        return [str.islower(value), message]
class Upper(Valuator):
    def validate(self, value, message = ''):
        '''Pass when all cased characters in value are uppercase.'''
        return [str.isupper(value), message]
class Numeric(Valuator):
    def validate(self, value, message = ''):
        '''Pass when value is a non-empty string of numeric characters.'''
        return [str.isnumeric(value), message]
class Space(Valuator):
    '''Pass when value consists solely of whitespace characters.'''
    def validate(self, value, message = ''):
        return [str.isspace(value), message]
class Startswith(Valuator):
    def validate(self, value, start, message = ''):
        '''Pass when str(value) begins with *start*.'''
        return [str(value).startswith(start), message]
class Endswith(Valuator):
    def validate(self, value, start, message = ''):
        '''Pass when str(value) ends with *start* (the suffix to match).'''
        return [str(value).endswith(start), message]
class In(Valuator):
    def validate(self, value, itr, message = ''):
        '''Pass when value is a member of the iterable *itr*.'''
        return [value in itr, message]
class INPath(Valuator):
    '''Check whether this string is a valid IN route.'''
    def validate(self, value, message = ''):
        # TODO: implement real route validation; accept everything for now.
        # Returning the standard [True] list -- a bare True (as before) made
        # the engine's result[0] indexing raise and fail every rule.
        return [True]
class NabarRole(Valuator):
    '''Check whether nabar has this role.'''
    def validate(self, value, message = ''):
        # TODO: implement real role checking; accept everything for now.
        # Returning the standard [True] list -- a bare True (as before) made
        # the engine's result[0] indexing raise and fail every rule.
        return [True]
class NabarAccess(Valuator):
    '''Check whether nabar has this access permissions.'''
    def validate(self, value):
        # TODO: implement real access checking; accept everything for now.
        # Returning the standard [True] list -- a bare True (as before) made
        # the engine's result[0] indexing raise and fail every rule.
        return [True]
class Callback(Valuator):
    '''call the Callback to valuate.'''
    def validate(self, value, message = ''):
        # TODO: invoke the configured callback; accept everything for now.
        # Returning the standard [True] list -- a bare True (as before) made
        # the engine's result[0] indexing raise and fail every rule.
        return [True]
#@IN.hook
#def __In_app_init__(app):
### set the valuator
#IN.valuator = ValuatorEngine()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations(object):
"""RouteFilterRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, config and (de)serializers for later requests."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request of the long-running delete.

        ``begin_delete`` wraps this call in an LROPoller; the response body
        is empty, so only the optional ``cls`` callback can produce a value.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = deleted, 202 = accepted (async), 204 = rule did not exist.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete produces no body; only the optional cls hook yields a value.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Final LRO state is reported via the Location header for this API.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        route_filter_name, # type: str
        rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.RouteFilterRule"
        """Gets the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteFilterRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_05_01.models.RouteFilterRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 carries a RouteFilterRule body; anything else is an error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        route_filter_rule_parameters,  # type: "_models.RouteFilterRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RouteFilterRule"
        """Issue the initial PUT request of the create-or-update long-running operation.

        Returns the deserialized RouteFilterRule from either a 200 (updated)
        or 201 (created) response.  Not called directly by users; the poller
        built by :meth:`begin_create_or_update` drives this to completion.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        # Map well-known HTTP failures to typed azure-core exceptions; callers
        # may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the rule model into the request body and send the PUT.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 -> existing rule updated, 201 -> rule created; the payload shape
        # is the same in both cases.
        if response.status_code == 200:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        rule_name,  # type: str
        route_filter_rule_parameters,  # type: "_models.RouteFilterRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.RouteFilterRule"]
        """Creates or updates a route in the specified route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the route filter rule.
        :type rule_name: str
        :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
         rule operation.
        :type route_filter_rule_parameters: ~azure.mgmt.network.v2020_05_01.models.RouteFilterRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either RouteFilterRule or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.RouteFilterRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial PUT when not resuming from a saved poller state;
        # cls is overridden so the raw pipeline response is kept for polling.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                route_filter_rule_parameters=route_filter_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call and must not leak
        # into the polling pipeline below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # polling=True -> default ARM poller; False -> fire-and-return; any
        # other value is treated as a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    def list_by_route_filter(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.RouteFilterRuleListResult"]
        """Gets all RouteFilterRules in a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.RouteFilterRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRuleListResult"]
        # Map well-known HTTP failures to typed azure-core exceptions; callers
        # may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET for one page: the first page uses the operation URL
            # plus query parameters; subsequent pages follow the service's
            # nextLink verbatim (it already embeds the query string).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_route_filter.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Split one page into (link-to-next-page, iterator-of-items).
            deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising a typed error on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}  # type: ignore
| |
"""
Actions which modifies the rate at which other Actions execute, usually by some geometric curve.
"""
from IntervalAction import *
from Geometry import *
from Color import *
import math
# TODO: have tables for math.pow and math.cos instead of doing calculations on the fly. might save some
# TODO: CPU cycles, and t is in [0,1], so it should be pretty manageable.
class AbstractEaseAction(AbstractIntervalAction):
    """
    An abstract L{AbstractAction} wrapper that modifies the rate at which the wrapped Action executes, usually by some geometric curve.
    """
    def __init__(self, action):
        """
        Initialization method.
        @param action: The wrapped Action.
        @type action: L{AbstractAction}
        """
        # The wrapper inherits its duration from the Action it decorates.
        AbstractIntervalAction.__init__(self, action._duration)
        self._action = action
    def start(self, owner):
        # Start the wrapper first, then propagate to the wrapped Action.
        AbstractIntervalAction.start(self, owner)
        self._action.start(owner)
    def stop(self):
        # Stop the wrapper first, then propagate to the wrapped Action.
        AbstractIntervalAction.stop(self)
        self._action.stop()
    def update(self, time):
        # Subclasses warp 'time' before delegating; the base class passes
        # it through unchanged.
        self._action.update(time)
    def reverse(self):
        """
        Returns a new copy of the L{AbstractEaseAction} whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{AbstractEaseAction}
        """
        reversedAction = self._action.reverse()
        return self.__class__(reversedAction)
class AbstractEaseRateAction(AbstractEaseAction):
    """
    An L{AbstractEaseAction} whose curve is determined by a given rate.
    """
    def __init__(self, action, rate=2.0):
        """
        Initialization method.
        @param action: The wrapped Action.
        @type action: L{AbstractAction}
        @param rate: The rate at which the curve will change. Default is C{2.0}.
        @type rate: Non-negative C{float}
        """
        AbstractEaseAction.__init__(self, action)
        self._rate = rate
    def getRate(self):
        """
        Returns the rate at which the curve will change.
        @return: The rate.
        @rtype: C{float}
        """
        return self._rate
    def setRate(self, rate):
        """
        Sets the rate at which the curve will change.
        @param rate: The rate.
        @type rate: Non-negative C{float}
        """
        self._rate = rate
    rate = property(getRate, setRate, doc="The rate at which the curve will change.")
    def reverse(self):
        """
        Returns a new copy of the L{AbstractEaseRateAction} whose wrapped Action is reversed and whose rate is inverted.
        @return: A new, reversed Action.
        @rtype: L{AbstractEaseRateAction}
        """
        # BUG FIX: use a float literal so that an integer rate (e.g. 3) does
        # not floor-divide to 0 under Python 2; EaseOut already guards the
        # same way with '1./self._rate'.  Unchanged for float rates.
        return self.__class__(self._action.reverse(), 1.0/self._rate)
class EaseIn(AbstractEaseRateAction):
    """
    An L{AbstractEaseRateAction} that starts slowly then increases speed as it nears completion.
    """
    def update(self, t):
        # t**rate hugs zero early and rises steeply as t approaches 1.
        self._action.update(math.pow(t, self._rate))
class EaseOut(AbstractEaseRateAction):
    """
    An L{AbstractEaseRateAction} that starts quickly then decreases speed as it nears completion.
    """
    def update(self, t):
        # The reciprocal exponent rises steeply at first, then flattens out.
        self._action.update(math.pow(t, 1./self._rate))
class EaseInOut(AbstractEaseRateAction):
    """
    An L{AbstractEaseRateAction} that starts slowly, speeds up to being 50% complete, then slows back down as it nears completion.
    """
    def update(self, t):
        # For even integer rates, (t-2)**rate is positive on the second half
        # of the curve, so that term has to be negated to keep the curve
        # continuous at t == 0.5; 'sign' encodes that correction.
        sign = 1
        r = int(self._rate)
        if (r%2) == 0:
            sign = -1
        # Map t from [0,1] onto [0,2]: the first half eases in, the second
        # half eases out (mirrored around the midpoint).
        t *= 2
        if t < 1:
            newTime = 0.5 * math.pow(t, self._rate)
        else:
            newTime = sign * 0.5 * (math.pow(t-2, self._rate) + sign * 2)
        self._action.update(newTime)
    def reverse(self):
        """
        Returns a reversed copy of this Action.
        @return: A new, reversed Action.
        @rtype: L{EaseInOut}
        """
        # Unlike the base class, the rate is NOT inverted: the in-out curve
        # is symmetric, so only the wrapped Action is reversed.
        return EaseInOut(self._action.reverse(), self._rate)
class EaseExponentialIn(AbstractEaseAction):
    """
    An L{AbstractEaseAction} that starts slowly then increases speed as it nears completion. It uses an exponential curve.
    """
    def update(self, t):
        # 2**(10*(t-1)) sweeps from ~1/1024 up to 1; clamp the t == 0
        # endpoint to exactly 0 so the curve starts at rest.
        warped = 0 if t == 0 else math.pow(2, 10 * (t - 1))
        self._action.update(warped)
    def reverse(self):
        """
        Returns an EaseExponentialOut whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseExponentialOut}
        """
        return EaseExponentialOut(self._action.reverse())
class EaseExponentialOut(AbstractEaseAction):
    """
    An L{AbstractEaseAction} that starts quickly then decreases speed as it nears completion. It uses an exponential curve.
    """
    def update(self, t):
        # 1 - 2**(-10t) rises fast then flattens; clamp the t == 1 endpoint
        # to exactly 1 so the curve finishes precisely.
        warped = 1 if t == 1 else -math.pow(2, -10*t) + 1
        self._action.update(warped)
    def reverse(self):
        """
        Returns an EaseExponentialIn whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseExponentialIn}
        """
        return EaseExponentialIn(self._action.reverse())
class EaseExponentialInOut(AbstractEaseAction):
    """
    An L{AbstractEaseAction} that starts slowly, speeds up to being 50% complete, then slows back down as it nears completion. It uses an exponential curve.
    """
    def update(self, t):
        # Rescale t onto [0,2]: exponential ease-in on the first half,
        # mirrored exponential ease-out on the second.
        t *= 2.0
        if t < 1:
            warped = 0.5 * math.pow(2, 10*(t-1))
        else:
            warped = 0.5 * (-math.pow(2, -10 * (t-1)) + 2)
        self._action.update(warped)
class EaseSineIn(AbstractEaseAction):
    """
    An L{AbstractEaseAction} that starts slowly then increases speed as it nears completion. It uses a sine curve.
    """
    def update(self, t):
        # Quarter cosine wave: starts at rest, finishes at full speed.
        self._action.update(-1*math.cos(t*math.pi/2) + 1)
    def reverse(self):
        """
        Returns an EaseSineOut whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseSineOut}
        """
        return EaseSineOut(self._action.reverse())
class EaseSineOut(AbstractEaseAction):
    """
    An L{AbstractEaseAction} that starts quickly then decreases speed as it nears completion. It uses a sine curve.
    """
    def update(self, t):
        # Quarter sine wave: starts at full speed, decelerates to rest.
        self._action.update(math.sin(t*math.pi/2))
    def reverse(self):
        """
        Returns an EaseSineIn whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseSineIn}
        """
        return EaseSineIn(self._action.reverse())
class EaseSineInOut(AbstractEaseAction):
    """
    An L{AbstractEaseAction} that starts slowly, speeds up to being 50% complete, then slows back down as it nears completion. It uses a sine curve.
    """
    def update(self, t):
        # Half cosine wave: slow at both endpoints, fastest at t == 0.5.
        self._action.update(-0.5*(math.cos(math.pi*t) - 1))
class AbstractEaseElastic(AbstractEaseAction):
    """
    An L{AbstractEaseAction} whose curve oscillates so as to produce an elastic effect.
    """
    def __init__(self, action, period=0.3):
        """
        Initialization method.
        @param action: The wrapped Action.
        @type action: L{AbstractAction}
        @param period: The period of the elastic oscillation. Default is C{0.3}.
        @type period: C{float}
        """
        AbstractEaseAction.__init__(self, action)
        self._period = period
    def getPeriod(self):
        """
        Returns the period of the elastic oscillation.
        @rtype: C{float}
        """
        return self._period
    def setPeriod(self, period):
        """
        Sets the period of the elastic oscillation.
        @type period: C{float}
        """
        self._period = period
    period = property(getPeriod, setPeriod)
    def reverse(self):
        """
        Subclasses must override this to return their reversed counterpart.
        """
        return None
class EaseElasticIn(AbstractEaseElastic):
    """
    An L{AbstractEaseAction} which oscillates at the beginning before reaching completion.
    """
    def update(self, t):
        # BUG FIX: the endpoints were tested with 'is' (identity), which is
        # unreliable for numbers -- 't is 0' is False for the float 0.0 and is
        # a CPython caching detail even for ints.  Use value equality so the
        # exact-endpoint shortcut actually fires.
        if t == 0 or t == 1:
            newTime = t
        else:
            # s shifts the sine so the oscillation lines up with the period.
            s = self._period / 4
            t = t-1
            newTime = -math.pow(2, 10*t) * math.sin((t-s) * 2 * math.pi / self._period)
        self._action.update(newTime)
    def reverse(self):
        """
        Returns an EaseElasticOut whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseElasticOut}
        """
        return EaseElasticOut(self._action.reverse(), self._period)
class EaseElasticOut(AbstractEaseElastic):
    """
    An L{AbstractEaseAction} which oscillates at the end as it reaches completion.
    """
    def update(self, t):
        # BUG FIX: replaced 't is 0 or t is 1' with value equality -- identity
        # comparison is False for the floats 0.0/1.0 and implementation
        # dependent for ints, so the endpoint shortcut never fired reliably.
        if t == 0 or t == 1:
            newTime = t
        else:
            # s shifts the sine so the oscillation lines up with the period.
            s = self._period / 4
            newTime = math.pow(2, -10*t) * math.sin((t-s) * 2 * math.pi / self._period) + 1
        self._action.update(newTime)
    def reverse(self):
        """
        Returns an EaseElasticIn whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseElasticIn}
        """
        return EaseElasticIn(self._action.reverse(), self._period)
class EaseElasticInOut(AbstractEaseElastic):
    """
    An L{AbstractEaseAction} which oscillates both at the beginning and as it reaches completion.
    """
    def update(self, t):
        # BUG FIX: replaced 't is 0 or t is 1' with value equality -- identity
        # comparison is False for the floats 0.0/1.0 and implementation
        # dependent for ints, so the endpoint shortcut never fired reliably.
        if t == 0 or t == 1:
            newTime = t
        else:
            # Rescale t onto [-1,1]: negative half eases in, positive half out.
            t *= 2
            s = self._period / 4
            t -= 1
            if t < 0:
                newTime = -0.5 * math.pow(2, 10*t) * math.sin((t-s) * 2 * math.pi / self._period)
            else:
                newTime = math.pow(2, -10*t) * math.sin((t-s) * 2 * math.pi / self._period) * 0.5 + 1
        self._action.update(newTime)
    def reverse(self):
        """
        Returns a reversed copy of this Action.
        @return: A new, reversed Action.
        @rtype: L{EaseElasticInOut}
        """
        return EaseElasticInOut(self._action.reverse(), self._period)
class AbstractEaseBounce(AbstractEaseAction):
    """
    An abstract L{AbstractEaseAction} whose curve oscillates so as to produce a bouncing effect.
    """
    def bounceTime(self, t):
        """
        Returns the modified time so as to emulate the bouncing effect.
        The curve is a piecewise parabola with four progressively smaller
        arcs (the classic Penner bounce easing).
        @param t: The percentage complete.
        @type t: Non-negative C{float}
        @return: The modified percentage complete.
        @rtype: C{float}
        """
        if t < 1./2.75:
            # first (largest) arc
            return 7.5625 * t * t
        if t < 2 / 2.75:
            # second arc, peaking at 0.75
            t -= 1.5 / 2.75
            return 7.5625 * t * t + 0.75
        if t < 2.5 / 2.75:
            # third arc, peaking at 0.9375
            t -= 2.25 / 2.75
            return 7.5625 * t * t + 0.9375
        # fourth (smallest) arc, peaking at 0.984375
        t -= 2.625 / 2.75
        return 7.5625 * t * t + 0.984375
class EaseBounceIn(AbstractEaseBounce):
    """
    An L{AbstractEaseAction} whose curve first bounces before reaching completion.
    """
    def update(self, t):
        # Run the bounce backwards in time and mirror it, so the bounces
        # happen at the start of the action.
        self._action.update(1 - self.bounceTime(1-t))
    def reverse(self):
        """
        Returns an EaseBounceOut whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseBounceOut}
        """
        return EaseBounceOut(self._action.reverse())
class EaseBounceOut(AbstractEaseBounce):
    """
    An L{AbstractEaseAction} whose curve bounces as it reaches completion.
    """
    def update(self, t):
        # The bounce curve runs forward, so bounces cluster near the end.
        self._action.update(self.bounceTime(t))
    def reverse(self):
        """
        Returns an EaseBounceIn whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseBounceIn}
        """
        return EaseBounceIn(self._action.reverse())
class EaseBounceInOut(AbstractEaseBounce):
    """
    An L{AbstractEaseAction} whose curve first bounces before reaching completion, then again bounces as it reaches completion.
    """
    def update(self, t):
        if t < 0.5:
            # first half: mirrored bounce compressed into [0, 0.5]
            warped = 0.5 * (1 - self.bounceTime(1 - t * 2))
        else:
            # second half: forward bounce compressed into [0.5, 1]
            warped = 0.5 * self.bounceTime(t*2-1) + 0.5
        self._action.update(warped)
class EaseBackIn(AbstractEaseAction):
    """
    An L{AbstractEaseAction} whose curve first reverses before reaching completion.
    """
    def update(self, t):
        # 1.70158 is Penner's constant giving roughly a 10% backtrack.
        s = 1.70158
        self._action.update(t * t * ((s+1)*t - s))
    def reverse(self):
        """
        Returns an EaseBackOut whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseBackOut}
        """
        return EaseBackOut(self._action.reverse())
class EaseBackOut(AbstractEaseAction):
    """
    An L{AbstractEaseAction} whose curve overshoots and then corrects as it reaches completion.
    """
    def update(self, t):
        # 1.70158 is Penner's constant giving roughly a 10% overshoot.
        s = 1.70158
        u = t - 1
        self._action.update(u * u * ((s+1)*u + s) + 1)
    def reverse(self):
        """
        Returns an EaseBackIn whose wrapped Action is reversed.
        @return: A new, reversed Action.
        @rtype: L{EaseBackIn}
        """
        return EaseBackIn(self._action.reverse())
class EaseBackInOut(AbstractEaseAction):
    """
    An L{AbstractEaseAction} whose curve first reverses then overshoots and corrects as it reaches completion.
    """
    def update(self, t):
        # The overshoot constant is scaled by 1.525 so each half of the
        # curve still overshoots by about 10%.
        s = 1.70158 * 1.525
        t *= 2
        if t < 1:
            self._action.update(t * t * ((s+1)*t - s) / 2)
            return
        t -= 2
        self._action.update(t * t * ((s+1)*t + s) / 2 + 1)
| |
#!/bin/python
######################################
# Generate contact maps contactCount.gz
#
# Author: Fabian Buske (22/05/2015)
######################################
import os, sys, re
import traceback
from optparse import OptionParser
import fileinput
import datetime
from readData import *
from quicksect import IntervalTree
import gzip
from scipy.sparse import lil_matrix
import numpy
# manage option and arguments processing
def main():
    # Parse and validate command-line options, then hand off to process().
    global options
    global args
    usage = '''usage: %prog [options] <contactCounts.gz>
reads a fithic contactCounts.gz file and produces a full 2D contact matrix per chromosome
    '''
    parser = OptionParser(usage)
    # NOTE(review): -q and -v share dest="verbose" with conflicting defaults;
    # with optparse the default registered last (False) wins, so the tool is
    # quiet unless -v is given -- confirm this is the intended behaviour.
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True,
                    help="don't print status messages to stdout")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                    help="print status messages to stdout")
    parser.add_option("-V", "--veryverbose", action="store_true", dest="vverbose", default=False,
                    help="print lots of status messages to stdout")
    parser.add_option("-P", "--CPU-processes", type="int", dest="cpus", default=-1,
                    help="number of CPU threads to use, -1 for all available [default -1]")
    parser.add_option("-O", "--onlycis", action="store_true", dest="onlycis", default=False,
                    help="only consider intra chromosomal contacts (cis)")
    parser.add_option("-r", "--resolution", type=int, dest="resolution", default=1000000,
                    help="size of a fragment in bp [default 1000000]")
    parser.add_option("-c", "--chromsizes", type="string", dest="chromSizes", default="",
                    help="tab separated file containing chromosome sizes")
    parser.add_option("-C", "--chrompattern", type="string", dest="chromPattern", default="",
                    help="pattern of chromosomes to filter for [default all]")
    parser.add_option("-F", "--fragmentFile", type="string", dest="fragmentFile", default="",
                    help="fragmentFile.gz, if given filters contacts based on mappability score, see mappabilityThreshold [default: %default]")
    parser.add_option("-T", "--mappabilityThreshold", type="float", dest="mappabilityThreshold", default=0.5,
                    help="minimum mappability threshold used to keep contact bins when generating the 2D contact maps [default: %default]")
    parser.add_option("-D", "--removeDiagonal", action="store_true", dest="removeDiagonal", default=False,
                    help="remove contacts within bin (diagonal)")
    parser.add_option("-o", "--outputDir", type="string", dest="outputDir", default="",
                    help="output directory [default: %default]")
    parser.add_option("-n", "--outputFilename", type="string", dest="outputFilename", default="",
                    help="output filename [default: extracted from first input file")
    parser.add_option("-t", "--tmpDir", type="string", dest="tmpDir", default="/tmp",
                    help="directory for temp files [default: %default]")
    parser.add_option("-s", "--sep", type="string", dest="separator", default=" ",
                    help="delimiter to use when reading the input [default: %default]")
    parser.add_option("--matrixFormat", type="string", dest="matrixFormat", default="HiCorrector",
                    help="either HiCorrector, tadbit or domaincall, [default: %default] ")
    parser.add_option("--inputIsFragmentPairs", action="store_true", dest="inputIsFragmentPairs", default=False,
                    help="input is a gzipped fragment pair file")
    parser.add_option("--inputIsReadPairs", type="string", dest="inputIsReadPairs", default="",
                    help="gzipped files with mapped read pair information, requires 4 column identifier corresponding to chrA,posA,chrB,posB,chrPrefix (separated buy comma), e.g. 2,3,6,7,chr")
    parser.add_option("-g", "--genomeFragmentFile", type="string", dest="genomeFragmentFile", default="",
                    help="file containing the genome fragments after digestion with the restriction enzyme(s), generated by hicup")
    (options, args) = parser.parse_args()
    # Input validation.  NOTE(review): parser.error() already exits with
    # status 2, so the sys.exit(1) calls below are dead code.
    if (len(args) < 1):
        parser.print_help()
        parser.error("[ERROR] Incorrect number of arguments, need a dataset")
    if (options.resolution < 1):
        parser.error("[ERROR] resolution must be a positive integer, was :"+str(options.resolution))
        sys.exit(1)
    elif (options.chromSizes == "" or not os.path.isfile(options.chromSizes)):
        parser.error("[ERROR] chromSizes not given or not existing, was :"+str(options.chromSizes))
        sys.exit(1)
    if (options.outputDir != ""):
        # ensure the output directory ends with a path separator
        options.outputDir += os.sep
    if (options.outputFilename == ""):
        # default the output name to the first input file's basename
        options.outputFilename=os.path.splitext(os.path.basename(args[0]))[0]
    if (options.verbose):
        print >> sys.stdout, "resolution: %s" % (options.resolution)
        print >> sys.stdout, "chromSizes: %s" % (options.chromSizes)
        print >> sys.stdout, "outputDir: %s" % (options.outputDir)
        print >> sys.stdout, "tmpDir: %s" % (options.tmpDir)
        print >> sys.stdout, "format: %s" % (options.matrixFormat)
    process()
def saveHiCorrectorMatrix(fragmentsMap , fragmentList, fragmentPairs, mappabilityFilterList, index_subset, matrixOutfile, indexOutfile):
    '''
    Writes the mappability-filtered contact matrix in the dense
    tab-separated layout expected by Hi-Corrector, plus the matching
    one-based index file listing the retained bins.
    '''
    # restrict the sparse matrix to the mappable bins only
    B = fragmentPairs.tolil()[index_subset, :][:, index_subset]
    if (options.removeDiagonal):
        # BUG FIX: lil_matrix.setdiag() modifies in place and returns None, so
        # the original 'B = B.setdiag(1).tocoo()' raised AttributeError.
        # Zero the diagonal instead -- removing self-contacts means 0, not 1
        # (setting it to 1 appears unintended; confirm against callers).
        B.setdiag(0)
    B = B.tocoo()
    if (options.verbose):
        print >> sys.stdout, "- save 2Dmatrix to %s " % (matrixOutfile)
    f_handle=open(matrixOutfile,'w')
    C = B.tocsr()
    # write one dense, tab-separated row per retained fragment
    for i in xrange(len(index_subset)):
        np.savetxt(f_handle, C[i].toarray(),fmt='%i', delimiter='\t')
    f_handle.close()
    # index file: running one-based counter over the mappable fragments
    f_handle=open(indexOutfile,'w')
    counter = 1
    for fragmentId in fragmentsMap.keys():
        if (mappabilityFilterList[fragmentId]>0):
            f_handle.write("%010d\n" % ( counter ))
            counter += 1
    f_handle.close()
def output(fragmentsMap , fragmentList, fragmentPairs, fragmentCount, fragmentsChrom, mappabilityFilterList):
    '''
    outputs the 2D contact matrix

    Dispatches on options.matrixFormat: "HiCorrector" writes genome-wide (or
    per-chromosome with --onlycis) dense matrices plus index files; any other
    value writes one per-chromosome matrix in domaincall/tadbit/plain layout.
    '''
    if (options.verbose):
        print >> sys.stdout, "- %s START : output data " % (timeStamp())
    # lazy loading
    from scipy.sparse import lil_matrix
    # chromosome name -> length in bp, restricted to the chromosome pattern
    chromlen={}
    for line in fileinput.input([options.chromSizes]):
        (chrom, chromsize) =line.split("\t")[0:2]
        # check if chromosome needs to be filtered out or not
        if (options.chromPattern != "" and not re.match("^"+options.chromPattern+"$", chrom)):
            continue
        chromlen[chrom]=int(chromsize)
    if (options.matrixFormat=="HiCorrector"):
        # create matric for HiCorrector
        if ( options.onlycis ):
            # one matrix per chromosome, keeping only mappable bins of that chrom
            for chr in fragmentsChrom.keys():
                matrixOutfile = options.outputDir+options.outputFilename+"."+chr+".matrix"
                indexOutfile = options.outputDir+options.outputFilename+"."+chr+".index"
                index_subset = np.intersect1d(mappabilityFilterList.nonzero()[0],range(fragmentsChrom[chr][0],fragmentsChrom[chr][1]), assume_unique=True)
                saveHiCorrectorMatrix(fragmentsMap , fragmentList, fragmentPairs, mappabilityFilterList, index_subset, matrixOutfile, indexOutfile)
        else:
            # one genome-wide matrix over all mappable bins
            matrixOutfile = options.outputDir+options.outputFilename+".matrix"
            indexOutfile = options.outputDir+options.outputFilename+".index"
            index_subset = mappabilityFilterList.nonzero()[0]
            saveHiCorrectorMatrix(fragmentsMap , fragmentList, fragmentPairs, mappabilityFilterList, index_subset, matrixOutfile, indexOutfile)
    else:
        B = fragmentPairs.tolil()
        # NOTE(review): this first loop writes plain per-chromosome matrices
        # that the second loop below immediately overwrites (same outfile3
        # names) -- it looks like leftover code; confirm before removing.
        for chr in fragmentsChrom.keys():
            # slice out the square intra-chromosomal sub-matrix
            C = B.tocsc()[:,fragmentsChrom[chr][0]:fragmentsChrom[chr][1]].tocsr()[fragmentsChrom[chr][0]:fragmentsChrom[chr][1],:]
            fragmentRange=fragmentsChrom[chr][1]-fragmentsChrom[chr][0]
            header=['d']+[ "%s%d" % i for i in zip(['r']*fragmentRange,range(fragmentRange))]
            if ( options.outputFilename != "" ):
                outfile3 = options.outputDir+options.outputFilename+"."+chr+".matrix"
            else:
                outfile3 = options.outputDir+os.path.basename(args[0])+"."+chr+".matrix"
            if (options.verbose):
                print >> sys.stdout, "- save 2Dmatrix for chromosome %s to %s " % (chr, outfile3)
            f_handle=open(outfile3,'w')
            for i in xrange(fragmentRange):
                np.savetxt(f_handle, C[i].toarray(),fmt='%i', delimiter='\t')
            f_handle.close()
        for chr in fragmentsChrom.keys():
            # slice out the square intra-chromosomal sub-matrix
            C = B.tocsc()[:,fragmentsChrom[chr][0]:fragmentsChrom[chr][1]].tocsr()[fragmentsChrom[chr][0]:fragmentsChrom[chr][1],:]
            fragmentRange=fragmentsChrom[chr][1]-fragmentsChrom[chr][0]
            header=['d']+[ "%s%d" % i for i in zip(['r']*fragmentRange,range(fragmentRange))]
            if ( options.outputFilename != "" ):
                outfile3 = options.outputDir+options.outputFilename+"."+chr+".matrix"
            else:
                outfile3 = options.outputDir+os.path.basename(args[0])+"."+chr+".matrix"
            if (options.verbose):
                print >> sys.stdout, "- save 2Dmatrix for chromosome %s to %s " % (chr, outfile3)
            f_handle=open(outfile3,'w')
            if (options.matrixFormat == "domaincall"):
                # domaincall rows are prefixed with a chrom/start/end bed-like triple
                for i in xrange(fragmentRange):
                    binStart = fragmentsMap[fragmentsChrom[chr][0]+i][1] - options.resolution/2
                    binEnd = binStart + options.resolution
                    f_handle.write(chr+"\t"+str(binStart)+"\t"+str(binEnd)+"\t")
                    numpy.savetxt(f_handle, C[i].toarray(),fmt='%i', delimiter='\t')
            elif (options.matrixFormat == "tadbit"):
                # tadbit expects a header row and row labels
                f_handle.write('\t'.join(header)+"\n")
                for i in xrange(fragmentRange):
                    f_handle.write(header[i+1]+"\t")
                    numpy.savetxt(f_handle, C[i].toarray(),fmt='%i', delimiter='\t')
            else:
                # plain dense matrix, no labels
                for i in xrange(fragmentRange):
                    np.savetxt(f_handle, C[i].toarray(),fmt='%i', delimiter='\t')
            f_handle.close()
    if (options.verbose):
        print >> sys.stdout, "- %s FINISHED: output data" % (timeStamp())
def process():
    """Top-level pipeline: bin read pairs into fragments and write matrices."""
    global options
    global args
    fragmentsMap, lookup_structure, fragmentCount, fragmentsChrom = createIntervalTreesFragmentResolution(options)
    # keep the full (non-triangular) matrix so rows and columns are symmetric
    triangular = False
    fragmentList, fragmentPairs = countReadsPerFragment(fragmentCount, lookup_structure, options, args, triangular)
    if options.fragmentFile != "":
        mappabilityFilterList = createMappabilityFilterFromFragmentFile(options.fragmentFile, options.mappabilityThreshold, fragmentCount)
    else:
        # no fragment file: treat every bin as mappable
        mappabilityFilterList = np.ones((fragmentCount,), dtype=np.uint8)
    output(fragmentsMap, fragmentList, fragmentPairs, fragmentCount, fragmentsChrom, mappabilityFilterList)
######################################
# main
######################################
if __name__ == "__main__":
    # entry point: parse the command line and run the pipeline
    main()
| |
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import subprocess
import sys
from datetime import datetime
from builtins import input
import argparse
import dateutil.parser
import airflow
from airflow import jobs, settings, utils
from airflow import configuration
from airflow.executors import DEFAULT_EXECUTOR
from airflow.models import DagBag, TaskInstance, DagPickle, DagRun
from airflow.utils import AirflowException, State
DAGS_FOLDER = os.path.expanduser(configuration.get('core', 'DAGS_FOLDER'))
# Common help text across subcommands
mark_success_help = "Mark jobs as succeeded without running them"
subdir_help = "File location or directory from which to look for the dag"
def process_subdir(subdir):
    """Expand a user-supplied DAG location and pin it under DAGS_FOLDER.

    Replaces a literal "DAGS_FOLDER" token with the configured folder,
    expands "~", and refuses paths outside the configured DAGS_FOLDER.
    Falsy input (None/empty) is returned unchanged.
    """
    dags_folder = os.path.expanduser(configuration.get("core", "DAGS_FOLDER"))
    if not subdir:
        return subdir
    subdir = os.path.expanduser(subdir)
    if "DAGS_FOLDER" in subdir:
        subdir = subdir.replace("DAGS_FOLDER", dags_folder)
    if dags_folder not in subdir:
        raise AirflowException(
            "subdir has to be part of your DAGS_FOLDER as defined in your "
            "airflow.cfg")
    return subdir
def log_to_stdout():
    """Attach a stdout StreamHandler to the root logger.

    Uses the configured LOGGING_LEVEL and a timestamped message format so
    CLI subcommands echo their progress to the terminal.
    """
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    root = logging.getLogger()
    root.setLevel(settings.LOGGING_LEVEL)
    root.addHandler(handler)
def backfill(args):
    """Run (or dry-run) a DAG over a date range from the command line.

    Looks the DAG up in the DagBag, optionally narrows it to the tasks
    matching ``--task_regex``, then either prints the task instances that
    would run (``--dry_run``) or executes the backfill via ``dag.run``.
    """
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dagbag = DagBag(process_subdir(args.subdir))
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    dag = dagbag.dags[args.dag_id]
    if args.start_date:
        args.start_date = dateutil.parser.parse(args.start_date)
    if args.end_date:
        args.end_date = dateutil.parser.parse(args.end_date)
    # If only one date is passed, using same as start and end
    args.end_date = args.end_date or args.start_date
    args.start_date = args.start_date or args.end_date
    if args.task_regex:
        # Narrow to matching tasks, pulling in upstream dependencies unless
        # --ignore_dependencies was given.
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_upstream=not args.ignore_dependencies)
    if args.dry_run:
        # NOTE: a dry run only renders each task for the start date.
        print("Dry run of DAG {0} on {1}".format(args.dag_id,
                                                 args.start_date))
        for task in dag.tasks:
            print("Task {0}".format(task.task_id))
            ti = TaskInstance(task, args.start_date)
            ti.dry_run()
    else:
        dag.run(
            start_date=args.start_date,
            end_date=args.end_date,
            mark_success=args.mark_success,
            include_adhoc=args.include_adhoc,
            local=args.local,
            # the config flag can force no-pickling even when the CLI did not
            donot_pickle=(args.donot_pickle or configuration.getboolean('core', 'donot_pickle')),
            ignore_dependencies=args.ignore_dependencies,
            pool=args.pool)
def trigger_dag(args):
    """Create an externally-triggered DagRun for the given dag_id/run_id.

    Refuses to create a duplicate when a DagRun with the same dag_id and
    run_id already exists.
    """
    log_to_stdout()
    session = settings.Session()
    # TODO: verify dag_id
    execution_date = datetime.now()
    dr = session.query(DagRun).filter(
        DagRun.dag_id==args.dag_id, DagRun.run_id==args.run_id).first()
    if dr:
        logging.error("This run_id already exists")
    else:
        trigger = DagRun(
            dag_id=args.dag_id,
            run_id=args.run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            external_trigger=True)
        session.add(trigger)
        logging.info("Created {}".format(trigger))
    # Commit runs in both branches; it is a no-op when nothing was added.
    # NOTE(review): session is never closed — presumably relies on the
    # Session lifecycle elsewhere; confirm.
    session.commit()
def run(args):
    """Run a single task instance: locally, raw (in-process), or queued on
    the executor.  Afterwards, optionally mirror the task log file to S3
    when core.S3_LOG_FOLDER is configured.
    """
    utils.pessimistic_connection_handling()

    # Per-task log file: BASE_LOG_FOLDER/<dag_id>/<task_id>/<execution_date iso>
    log = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
    directory = log + "/{args.dag_id}/{args.task_id}".format(args=args)
    if not os.path.exists(directory):
        os.makedirs(directory)
    args.execution_date = dateutil.parser.parse(args.execution_date)
    iso = args.execution_date.isoformat()
    filename = "{directory}/{iso}".format(**locals())

    # Snapshot any pre-existing local log (to help with S3 appends below).
    if os.path.exists(filename):
        with open(filename, 'r') as logfile:
            old_log = logfile.read()
    else:
        old_log = None

    subdir = process_subdir(args.subdir)
    logging.basicConfig(
        filename=filename,
        level=settings.LOGGING_LEVEL,
        format=settings.LOG_FORMAT)

    # Resolve the task either from the DagBag or from a pickled DAG.
    if not args.pickle:
        dagbag = DagBag(subdir)
        if args.dag_id not in dagbag.dags:
            msg = 'DAG [{0}] could not be found'.format(args.dag_id)
            logging.error(msg)
            raise AirflowException(msg)
        dag = dagbag.dags[args.dag_id]
        task = dag.get_task(task_id=args.task_id)
    else:
        session = settings.Session()
        logging.info('Loading pickle id {args.pickle}'.format(**locals()))
        dag_pickle = session.query(
            DagPickle).filter(DagPickle.id == args.pickle).first()
        if not dag_pickle:
            raise AirflowException("Who hid the pickle!? [missing pickle]")
        dag = dag_pickle.pickle
        task = dag.get_task(task_id=args.task_id)

    task_start_date = None
    if args.task_start_date:
        task_start_date = dateutil.parser.parse(args.task_start_date)
        task.start_date = task_start_date
    ti = TaskInstance(task, args.execution_date)

    if args.local:
        print("Logging into: " + filename)
        run_job = jobs.LocalTaskJob(
            task_instance=ti,
            mark_success=args.mark_success,
            force=args.force,
            pickle_id=args.pickle,
            task_start_date=task_start_date,
            ignore_dependencies=args.ignore_dependencies,
            pool=args.pool)
        run_job.run()
    elif args.raw:
        ti.run(
            mark_success=args.mark_success,
            force=args.force,
            ignore_dependencies=args.ignore_dependencies,
            job_id=args.job_id,
            pool=args.pool,
        )
    else:
        pickle_id = None
        if args.ship_dag:
            try:
                # Running remotely, so pickling the DAG
                session = settings.Session()
                pickle = DagPickle(dag)
                session.add(pickle)
                session.commit()
                pickle_id = pickle.id
                print((
                    'Pickled dag {dag} '
                    'as pickle_id:{pickle_id}').format(**locals()))
            except Exception as e:
                print('Could not pickle the DAG')
                print(e)
                raise e
        executor = DEFAULT_EXECUTOR
        executor.start()
        print("Sending to executor.")
        executor.queue_task_instance(
            ti,
            mark_success=args.mark_success,
            pickle_id=pickle_id,
            ignore_dependencies=args.ignore_dependencies,
            force=args.force)
        executor.heartbeat()
        executor.end()

    if configuration.get('core', 'S3_LOG_FOLDER').startswith('s3:'):
        import boto
        s3_log = filename.replace(log, configuration.get('core', 'S3_LOG_FOLDER'))
        # NOTE(review): lstrip strips a *character set*, so a bucket name
        # beginning with 's', '3', ':' or '/' would be mangled — confirm
        # buckets in use never start with those characters.
        bucket, key = s3_log.lstrip('s3:/').split('/', 1)
        if os.path.exists(filename):
            # get logs
            with open(filename, 'r') as logfile:
                new_log = logfile.read()

            # Remove the pre-existing portion (already in S3).  BUG FIX:
            # str.replace returns a new string; previously the result was
            # discarded so old content was re-uploaded on every run.
            if old_log:
                new_log = new_log.replace(old_log, '')

            try:
                s3 = boto.connect_s3()
                s3_key = boto.s3.key.Key(s3.get_bucket(bucket), key)

                # append new logs to old S3 logs, if available
                if s3_key.exists():
                    old_s3_log = s3_key.get_contents_as_string().decode()
                    new_log = old_s3_log + '\n' + new_log

                # send log to S3; best-effort — never fail the task on upload
                s3_key.set_contents_from_string(new_log)
            except Exception:
                print('Could not send logs to S3.')
def task_state(args):
    """Print the current state of a single TaskInstance.

    >>> airflow task_state tutorial sleep 2015-01-01
    success
    """
    args.execution_date = dateutil.parser.parse(args.execution_date)
    bag = DagBag(process_subdir(args.subdir))
    if args.dag_id not in bag.dags:
        raise AirflowException('dag_id could not be found')
    target = bag.dags[args.dag_id].get_task(task_id=args.task_id)
    instance = TaskInstance(target, args.execution_date)
    print(instance.current_state())
def list_dags(args):
    """Print the id of every DAG found under --subdir, one per line, sorted."""
    bag = DagBag(process_subdir(args.subdir))
    for dag_id in sorted(bag.dags):
        print(dag_id)
def list_tasks(args):
    """Print the task_ids of a DAG, as a sorted list or (-t) as a tree."""
    dagbag = DagBag(process_subdir(args.subdir))
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    dag = dagbag.dags[args.dag_id]
    if args.tree:
        dag.tree_view()
    else:
        # sorted() already returns a sorted list; the original sorted twice.
        tasks = sorted(t.task_id for t in dag.tasks)
        print("\n".join(tasks))
def test(args):
    """Run one task instance in test mode: deps skipped, state not recorded."""
    log_to_stdout()
    args.execution_date = dateutil.parser.parse(args.execution_date)
    bag = DagBag(process_subdir(args.subdir))
    if args.dag_id not in bag.dags:
        raise AirflowException('dag_id could not be found')
    target = bag.dags[args.dag_id].get_task(task_id=args.task_id)
    instance = TaskInstance(target, args.execution_date)
    if args.dry_run:
        instance.dry_run()
    else:
        instance.run(force=True, ignore_dependencies=True, test_mode=True)
def clear(args):
    """Clear task-instance state for a DAG over an optional date range."""
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    bag = DagBag(process_subdir(args.subdir))
    if args.dag_id not in bag.dags:
        raise AirflowException('dag_id could not be found')
    dag = bag.dags[args.dag_id]

    # Parse whichever date bounds were supplied (in place, like backfill).
    for attr in ('start_date', 'end_date'):
        raw = getattr(args, attr)
        if raw:
            setattr(args, attr, dateutil.parser.parse(raw))

    if args.task_regex:
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_downstream=args.downstream,
            include_upstream=args.upstream,
        )
    dag.clear(
        start_date=args.start_date,
        end_date=args.end_date,
        only_failed=args.only_failed,
        only_running=args.only_running,
        confirm_prompt=not args.no_confirm)
def webserver(args):
    """Start the web UI: Flask dev server with -d, otherwise gunicorn."""
    print(settings.HEADER)
    log_to_stdout()
    from airflow.www.app import cached_app
    app = cached_app(configuration)
    workers = args.workers or configuration.get('webserver', 'workers')
    if args.debug:
        print(
            "Starting the web server on port {0} and host {1}.".format(
                args.port, args.hostname))
        app.run(debug=True, port=args.port, host=args.hostname)
    else:
        # BUG FIX: a space was missing between the worker class and the word
        # "workers" (message read e.g. "4 syncworkers").
        print(
            'Running the Gunicorn server with {workers} {args.workerclass} '
            'workers on host {args.hostname} and port '
            '{args.port}...'.format(**locals()))
        sp = subprocess.Popen([
            'gunicorn', '-w', str(args.workers), '-k', str(args.workerclass),
            '-t', '120', '-b', args.hostname + ':' + str(args.port),
            'airflow.www.app:cached_app()'])
        sp.wait()
def scheduler(args):
    """Start a SchedulerJob, for one DAG or for everything under --subdir."""
    print(settings.HEADER)
    log_to_stdout()
    scheduler_job = jobs.SchedulerJob(
        dag_id=args.dag_id,
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle)
    scheduler_job.run()
def serve_logs(args):
    """Serve task log files over HTTP so remote workers' logs can be fetched.

    Runs a blocking Flask app exposing files under BASE_LOG_FOLDER at
    /log/<path>, listening on celery.WORKER_LOG_SERVER_PORT.
    """
    print("Starting flask")
    import flask
    flask_app = flask.Flask(__name__)

    # NOTE: the view function shadows the enclosing serve_logs() name; the
    # outer function is never referenced again, so this is harmless.
    @flask_app.route('/log/<path:filename>')
    def serve_logs(filename):
        log = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
        return flask.send_from_directory(
            log,
            filename,
            mimetype="application/json",
            as_attachment=False)
    WORKER_LOG_SERVER_PORT = \
        int(configuration.get('celery', 'WORKER_LOG_SERVER_PORT'))
    # Blocks until the process is killed.
    flask_app.run(
        host='0.0.0.0', port=WORKER_LOG_SERVER_PORT)
def worker(args):
    """Start a Celery worker, plus a sidecar `airflow serve_logs` process."""
    # Worker to serve static log files through this simple flask app
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
    sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    # NOTE: the local name shadows both this function and the imported module.
    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
    }
    # Blocks until the Celery worker exits; then tear down the log server.
    worker.run(**options)
    sp.kill()
def initdb(args):
    """Initialize the metadata database."""
    conn = configuration.get('core', 'SQL_ALCHEMY_CONN')
    print("DB: " + conn)
    utils.initdb()
    print("Done.")
def resetdb(args):
    """Drop and rebuild the metadata database after interactive confirmation."""
    print("DB: " + configuration.get('core', 'SQL_ALCHEMY_CONN'))
    answer = input(
        "This will drop existing tables if they exist. "
        "Proceed? (y/n)")
    if answer.upper() == "Y":
        logging.basicConfig(level=settings.LOGGING_LEVEL,
                            format=settings.SIMPLE_LOG_FORMAT)
        utils.resetdb()
    else:
        print("Bail.")
def upgradedb(args):
    """Apply pending migrations to the metadata database."""
    conn = configuration.get('core', 'SQL_ALCHEMY_CONN')
    print("DB: " + conn)
    utils.upgradedb()
def version(args):
    """Print the Airflow header followed by the version string."""
    print("{0} v{1}".format(settings.HEADER, airflow.__version__))
def flower(args):
    """Launch Celery Flower against the configured broker and wait on it."""
    broker_url = configuration.get('celery', 'BROKER_URL')
    args.port = args.port or configuration.get('celery', 'FLOWER_PORT')
    port_flag = '--port=' + args.port
    # An empty string is passed when no broker API is given (as before).
    api_flag = '--broker_api=' + args.broker_api if args.broker_api else ''
    proc = subprocess.Popen(['flower', '-b', broker_url, port_flag, api_flag])
    proc.wait()
def kerberos(args):
    """Run the kerberos ticket renewer in the foreground."""
    print(settings.HEADER)
    log_to_stdout()
    import airflow.security.kerberos as kerberos_module
    kerberos_module.run()
def get_parser():
    """Build the argparse parser: one subcommand per CLI action.

    Each subparser sets ``func`` to its handler so callers dispatch via
    ``args.func(args)``.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help='sub-command help')

    ht = "Run subsections of a DAG for a specified date range"
    parser_backfill = subparsers.add_parser('backfill', help=ht)
    parser_backfill.add_argument("dag_id", help="The id of the dag to run")
    parser_backfill.add_argument(
        "-t", "--task_regex",
        help="The regex to filter specific task_ids to backfill (optional)")
    parser_backfill.add_argument(
        "-s", "--start_date", help="Override start_date YYYY-MM-DD")
    parser_backfill.add_argument(
        "-e", "--end_date", help="Override end_date YYYY-MM-DD")
    parser_backfill.add_argument(
        "-m", "--mark_success",
        help=mark_success_help, action="store_true")
    parser_backfill.add_argument(
        "-l", "--local",
        help="Run the task using the LocalExecutor", action="store_true")
    parser_backfill.add_argument(
        "-x", "--donot_pickle",
        help=(
            "Do not attempt to pickle the DAG object to send over "
            "to the workers, just tell the workers to run their version "
            "of the code."),
        action="store_true")
    parser_backfill.add_argument(
        "-a", "--include_adhoc",
        help="Include dags with the adhoc parameter.", action="store_true")
    parser_backfill.add_argument(
        "-i", "--ignore_dependencies",
        help=(
            "Skip upstream tasks, run only the tasks "
            "matching the regexp. Only works in conjunction with task_regex"),
        action="store_true")
    parser_backfill.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_backfill.add_argument(
        "-p", "--pool", help="Pool to use to run the backfill")
    parser_backfill.add_argument(
        "-dr", "--dry_run", help="Perform a dry run", action="store_true")
    parser_backfill.set_defaults(func=backfill)

    ht = "Clear a set of task instance, as if they never ran"
    parser_clear = subparsers.add_parser('clear', help=ht)
    parser_clear.add_argument("dag_id", help="The id of the dag to run")
    parser_clear.add_argument(
        "-t", "--task_regex",
        help="The regex to filter specific task_ids to clear (optional)")
    parser_clear.add_argument(
        "-s", "--start_date", help="Override start_date YYYY-MM-DD")
    parser_clear.add_argument(
        "-e", "--end_date", help="Override end_date YYYY-MM-DD")
    ht = "Include upstream tasks"
    parser_clear.add_argument(
        "-u", "--upstream", help=ht, action="store_true")
    ht = "Only failed jobs"
    parser_clear.add_argument(
        "-f", "--only_failed", help=ht, action="store_true")
    ht = "Only running jobs"
    parser_clear.add_argument(
        "-r", "--only_running", help=ht, action="store_true")
    ht = "Include downstream tasks"
    parser_clear.add_argument(
        "-d", "--downstream", help=ht, action="store_true")
    parser_clear.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    # BUG FIX: --no_confirm previously reused the stale "Include downstream
    # tasks" help text from the -d option above.
    ht = "Do not request confirmation"
    parser_clear.add_argument(
        "-c", "--no_confirm", help=ht, action="store_true")
    parser_clear.set_defaults(func=clear)

    ht = "Trigger a DAG"
    parser_trigger_dag = subparsers.add_parser('trigger_dag', help=ht)
    parser_trigger_dag.add_argument("dag_id", help="The id of the dag to run")
    parser_trigger_dag.add_argument(
        "-r", "--run_id",
        help="Helps to identify this run")
    parser_trigger_dag.set_defaults(func=trigger_dag)

    ht = "Run a single task instance"
    parser_run = subparsers.add_parser('run', help=ht)
    parser_run.add_argument("dag_id", help="The id of the dag to run")
    parser_run.add_argument("task_id", help="The task_id to run")
    parser_run.add_argument(
        "execution_date", help="The execution date to run")
    parser_run.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_run.add_argument(
        "-s", "--task_start_date",
        help="Override the tasks's start_date (used internally)",)
    parser_run.add_argument(
        "-m", "--mark_success", help=mark_success_help, action="store_true")
    parser_run.add_argument(
        "-f", "--force",
        help="Force a run regardless or previous success",
        action="store_true")
    parser_run.add_argument(
        "-l", "--local",
        help="Runs the task locally, don't use the executor",
        action="store_true")
    parser_run.add_argument(
        "-r", "--raw",
        help=argparse.SUPPRESS,
        action="store_true")
    parser_run.add_argument(
        "--pool", help="Pool to use to run the task instance")
    parser_run.add_argument(
        "-i", "--ignore_dependencies",
        help="Ignore upstream and depends_on_past dependencies",
        action="store_true")
    parser_run.add_argument(
        "--ship_dag",
        help="Pickles (serializes) the DAG and ships it to the worker",
        action="store_true")
    parser_run.add_argument(
        "-p", "--pickle",
        help="Serialized pickle object of the entire dag (used internally)")
    parser_run.add_argument(
        "-j", "--job_id", help=argparse.SUPPRESS)
    parser_run.set_defaults(func=run)

    ht = (
        "Test a task instance. This will run a task without checking for "
        "dependencies or recording it's state in the database."
    )
    parser_test = subparsers.add_parser('test', help=ht)
    parser_test.add_argument("dag_id", help="The id of the dag to run")
    parser_test.add_argument("task_id", help="The task_id to run")
    parser_test.add_argument(
        "execution_date", help="The execution date to run")
    parser_test.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_test.add_argument(
        "-dr", "--dry_run", help="Perform a dry run", action="store_true")
    parser_test.set_defaults(func=test)

    ht = "Get the status of a task instance."
    parser_task_state = subparsers.add_parser('task_state', help=ht)
    parser_task_state.add_argument("dag_id", help="The id of the dag to check")
    parser_task_state.add_argument("task_id", help="The task_id to check")
    parser_task_state.add_argument(
        "execution_date", help="The execution date to check")
    parser_task_state.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_task_state.set_defaults(func=task_state)

    ht = "Start a Airflow webserver instance"
    parser_webserver = subparsers.add_parser('webserver', help=ht)
    parser_webserver.add_argument(
        "-p", "--port",
        default=configuration.get('webserver', 'WEB_SERVER_PORT'),
        type=int,
        help="Set the port on which to run the web server")
    parser_webserver.add_argument(
        "-w", "--workers",
        default=configuration.get('webserver', 'WORKERS'),
        type=int,
        help="Number of workers to run the webserver on")
    parser_webserver.add_argument(
        "-k", "--workerclass",
        default=configuration.get('webserver', 'WORKER_CLASS'),
        choices=['sync', 'eventlet', 'gevent', 'tornado'],
        help="The worker class to use for gunicorn")
    parser_webserver.add_argument(
        "-hn", "--hostname",
        default=configuration.get('webserver', 'WEB_SERVER_HOST'),
        help="Set the hostname on which to run the web server")
    ht = "Use the server that ships with Flask in debug mode"
    parser_webserver.add_argument(
        "-d", "--debug", help=ht, action="store_true")
    parser_webserver.set_defaults(func=webserver)

    # Typo fix: help previously read "Start a scheduler scheduler instance".
    ht = "Start a scheduler instance"
    parser_scheduler = subparsers.add_parser('scheduler', help=ht)
    parser_scheduler.add_argument(
        "-d", "--dag_id", help="The id of the dag to run")
    parser_scheduler.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_scheduler.add_argument(
        "-n", "--num_runs",
        default=None,
        type=int,
        help="Set the number of runs to execute before exiting")
    parser_scheduler.add_argument(
        "-p", "--do_pickle",
        default=False,
        help=(
            "Attempt to pickle the DAG object to send over "
            "to the workers, instead of letting workers run their version "
            "of the code."),
        action="store_true")
    parser_scheduler.set_defaults(func=scheduler)

    ht = "Initialize the metadata database"
    parser_initdb = subparsers.add_parser('initdb', help=ht)
    parser_initdb.set_defaults(func=initdb)

    ht = "Burn down and rebuild the metadata database"
    parser_resetdb = subparsers.add_parser('resetdb', help=ht)
    parser_resetdb.set_defaults(func=resetdb)

    ht = "Upgrade metadata database to latest version"
    parser_upgradedb = subparsers.add_parser('upgradedb', help=ht)
    parser_upgradedb.set_defaults(func=upgradedb)

    ht = "List the DAGs"
    parser_list_dags = subparsers.add_parser('list_dags', help=ht)
    parser_list_dags.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_list_dags.set_defaults(func=list_dags)

    ht = "List the tasks within a DAG"
    parser_list_tasks = subparsers.add_parser('list_tasks', help=ht)
    parser_list_tasks.add_argument(
        "-t", "--tree", help="Tree view", action="store_true")
    parser_list_tasks.add_argument(
        "dag_id", help="The id of the dag")
    parser_list_tasks.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_list_tasks.set_defaults(func=list_tasks)

    ht = "Start a Celery worker node"
    parser_worker = subparsers.add_parser('worker', help=ht)
    parser_worker.add_argument(
        "-q", "--queues",
        help="Comma delimited list of queues to serve",
        default=configuration.get('celery', 'DEFAULT_QUEUE'))
    parser_worker.add_argument(
        "-c", "--concurrency",
        type=int,
        help="The number of worker processes",
        default=configuration.get('celery', 'celeryd_concurrency'))
    parser_worker.set_defaults(func=worker)

    # Typo fix: help previously read "Serve logs generate by worker".
    ht = "Serve logs generated by worker"
    parser_logs = subparsers.add_parser('serve_logs', help=ht)
    parser_logs.set_defaults(func=serve_logs)

    ht = "Start a Celery Flower"
    parser_flower = subparsers.add_parser('flower', help=ht)
    parser_flower.add_argument(
        "-p", "--port", help="The port")
    parser_flower.add_argument(
        "-a", "--broker_api", help="Broker api")
    parser_flower.set_defaults(func=flower)

    parser_version = subparsers.add_parser('version', help="Show version")
    parser_version.set_defaults(func=version)

    ht = "Start a kerberos ticket renewer"
    parser_kerberos = subparsers.add_parser('kerberos', help=ht)
    parser_kerberos.add_argument(
        "-kt", "--keytab", help="keytab",
        nargs='?', default=configuration.get('kerberos', 'keytab'))
    parser_kerberos.add_argument(
        "principal", help="kerberos principal",
        nargs='?', default=configuration.get('kerberos', 'principal'))
    parser_kerberos.set_defaults(func=kerberos)

    return parser
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Benchmark concurrent tests for checking consistency of cache on object changes
CREATE
GET
PUT
DELETE
The threads are started for simultaneous GET/PUT/DELETE operation
"""
import requests
import json
import time
from datetime import datetime
from copy import deepcopy
import threading
# HTTP headers for each target deployment.  The 'Cookie' placeholder must be
# replaced with a valid authenticated session cookie before running.
localhost_headers = {
    'Accept': 'application/json',
    'Content-Type': 'application/json',
    'x-requested-by': 'gGRC',
    'Cookie': 'Please enter cookie'
}
ggrcdev_headers = {
    'Accept': 'application/json',
    'Content-Type': 'application/json',
    'x-requested-by': 'gGRC',
    'Cookie': 'Please enter cookie'
}
appspot_headers = {
    'Accept': 'application/json',
    'Content-Type': 'application/json',
    'x-requested-by': 'gGRC',
    'Cookie': 'Please enter cookie'
}
# POST body used by benchmark_create: one raw JSON payload per resource type.
create_resources = {
    'regulations': '{"regulation":{"kind":"Regulation","contact":{"id":1260,"t'
    'ype":"Person"},"title":"Benchmark Regulation","description":"Benchmark Re'
    'gulation","notes":"Benchmark Regulation CREATED","url":"","reference_url"'
    ':"","slug":"","start_date":"","end_date":"","status":null,"context":{"id"'
    ':null},"owners":[{"id":1260,"href":"/api/people/1260","type":"Person"}],"'
    'provisional_id":"provisional_6741102"}}'
}
# PUT bodies used by benchmark_update.  They hard-code object id 71 and differ
# only in the "notes" value ("UPDATED 1" vs "UPDATED 2") so successive updates
# are distinguishable when verifying cache consistency.
update_resources = {
    'regulations': '{"regulation":{"kind":"Regulation","contact":{"id":1260,"h'
    'ref":"/api/people/1260","type":"Person"},"description":"Benchmark Regulat'
    'ion","object_people":[],"program_directives":[],"controls":[],"url":"","t'
    'ype":"Regulation","status":"Draft","owners":[{"id":1260,"href":"/api/peop'
    'le/1260","type":"Person"}],"scope":"","directive_controls":[],"sections":'
    '[],"selfLink":"/api/regulations/71","programs":[],"created_at":"2014-03-2'
    '0T22:09:24Z","updated_at":"2014-03-20T22:09:24Z","object_owners":[{"statu'
    's":"Draft","modified_by":{"href":"/api/people/1260","id":1260,"type":"Per'
    'son"},"id":1083,"selfLink":"/api/object_owners/1083","person":{"href":"/a'
    'pi/people/1260","id":1260,"type":"Person"},"context":null,"created_at":"2'
    '014-03-20T22:09:24","updated_at":"2014-03-20T22:09:24","type":"ObjectOwne'
    'r","ownable":{"href":"/api/regulations/71","id":71,"type":"Regulation"}}]'
    ',"reference_url":"","organization":"","documents":[],"title":"Benchmark R'
    'egulation","objectives":[],"modified_by":{"id":1260,"href":"/api/people/1'
    '260","type":"Person"},"people":[],"id":71,"notes":"Benchmark Regulation U'
    'PDATED 1","version":"","viewLink":"/regulations/71","object_documents":[]'
    ',"related_sources":[],"related_destinations":[], "slug": "REGULATION-101"'
    ', "start_date":"","end_date":"","context":{"id":null}}}'
}
update_resources2 = {
    'regulations': '{"regulation":{"kind":"Regulation","contact":{"id":1260,"h'
    'ref":"/api/people/1260","type":"Person"},"description":"Benchmark Regulat'
    'ion","object_people":[],"program_directives":[],"controls":[],"url":"","t'
    'ype":"Regulation","status":"Draft","owners":[{"id":1260,"href":"/api/peop'
    'le/1260","type":"Person"}],"scope":"","directive_controls":[],"sections":'
    '[],"selfLink":"/api/regulations/71","programs":[],"created_at":"2014-03-2'
    '0T22:09:24Z","updated_at":"2014-03-20T22:09:24Z","object_owners":[{"statu'
    's":"Draft","modified_by":{"href":"/api/people/1260","id":1260,"type":"Per'
    'son"},"id":1083,"selfLink":"/api/object_owners/1083","person":{"href":"/a'
    'pi/people/1260","id":1260,"type":"Person"},"context":null,"created_at":"2'
    '014-03-20T22:09:24","updated_at":"2014-03-20T22:09:24","type":"ObjectOwne'
    'r","ownable":{"href":"/api/regulations/71","id":71,"type":"Regulation"}}]'
    ',"reference_url":"","organization":"","documents":[],"title":"Benchmark R'
    'egulation","objectives":[],"modified_by":{"id":1260,"href":"/api/people/1'
    '260","type":"Person"},"people":[],"id":71,"notes":"Benchmark Regulation U'
    'PDATED 2","version":"","viewLink":"/regulations/71","object_documents":[]'
    ',"related_sources":[],"related_destinations":[],"slug": "REGULATION-101",'
    '"start_date":"","end_date":"","context":{"id":null}}}'
}
# Maps a collection name (URL segment) to the singular JSON root key used in
# the request/response bodies above.
mapping_resource = {
    'regulations': 'regulation'
}
class TestGetThread(threading.Thread):
    """Thread that repeatedly GETs the given resources to probe the cache."""

    def __init__(self, name, data, loop_cnt):
        super(TestGetThread, self).__init__()
        self.name = name
        # Resource dict as produced by benchmark_create (ids/etags/...).
        self.data = data
        self.starttime = None
        self.endtime = None
        self.loop_cnt = loop_cnt

    def run(self):
        self.starttime = datetime.now()
        for cnt in range(self.loop_cnt):
            # print "Running GET Thread: " + self.name + " Iteration " + str(cnt+1)
            # Progress marker every 100 iterations.
            if not cnt % 100:
                print "GET Iteration " + str(cnt + 1) + " of " + str(self.loop_cnt)
            benchmark_get(self.data, 1, "Concurrency Test")
        self.endtime = datetime.now()
class TestPutThread(threading.Thread):
    """Thread that alternates PUT and verifying GET requests on the same
    resources, racing against TestGetThread."""

    def __init__(self, name, put_data, get_data, loop_cnt):
        super(TestPutThread, self).__init__()
        self.name = name
        # get_data: resource dict from benchmark_create (ids/etags/...).
        self.get_data = get_data
        # put_data: resource -> raw JSON payload string to send via PUT.
        self.put_data = put_data
        self.starttime = None
        self.endtime = None
        self.loop_cnt = loop_cnt

    def run(self):
        self.starttime = datetime.now()
        for cnt in range(self.loop_cnt):
            # print "Running PUT/GET Thread: " + self.name + " Iteration " +
            # str(cnt+1)
            if not cnt % 100:
                print "PUT/GET Iteration " + str(cnt + 1) + " of " + str(self.loop_cnt)
            # Rewrite each payload's "notes" so every iteration is unique.
            for resource, payload in self.put_data.items():
                json_payload = json.loads(payload)
                updated_notes = "Benchmark Regulation UPDATED#" + str(cnt + 1)
                json_payload[mapping_resource[resource]]['notes'] = updated_notes
                self.put_data[resource] = json.dumps(json_payload)
            # NOTE(review): relies on updated_notes leaking out of the loop
            # above; raises NameError if put_data is empty — confirm intended.
            benchmark_update(self.put_data, self.get_data, 1)
            benchmark_get(self.get_data, 1, "Concurrency GET Test", updated_notes)
        self.endtime = datetime.now()
def invoke_url(op, prefix, host, url, payload, headers, count):
    """Issue `count` HTTP requests of kind *op* ('post'/'get'/'put', anything
    else means DELETE) and return the last response, sleeping 1s per call.
    """
    response = None
    target = prefix + "://" + host + url
    for _ in range(count):
        if op == 'post':
            response = requests.post(target, data=payload, headers=headers)
        elif op == 'get':
            response = requests.get(target, headers=headers, params=payload)
        elif op == 'put':
            response = requests.put(target, data=payload, headers=headers)
        else:
            response = requests.delete(target, headers=headers)
        time.sleep(1)
    return response
# Known deployments; select the target by pointing targethost/headers at one
# of the pairs below.
localhost_url = "localhost:8080"
ggrcdev_url = "ggrc-dev.googleplex.com"
appspot_url = "grc-audit.appspot.com"
# headers=appspot_headers
# targethost=appspot_url
targethost = localhost_url
headers = localhost_headers
num_iterations = 1
# Cache-busting value sent as the '_' query parameter on GETs.
etag = 10023
# prefix="https"
prefix = "http"
def benchmark_delete(resource_data, num_iterations):
    """DELETE every recorded object, with conditional request headers.

    resource_data maps resource type -> dict with parallel 'ids', 'etag'
    and 'last-modified' lists (as produced by benchmark_create).
    """
    payload = None
    for resource, data in resource_data.items():
        # print "Test DELETE for resource: " + resource + " with ids " +
        # str(data['ids'])
        testurl = "/api/" + resource
        ids = data['ids']
        etags = data['etag']
        last_modified_items = data['last-modified']
        for cnt in range(len(ids)):
            id = ids[cnt]
            # Conditional headers ensure we only delete the revision we saw.
            delete_headers = deepcopy(headers)
            delete_headers['If-Match'] = etags[cnt]
            delete_headers['If-Unmodified-Since'] = last_modified_items[cnt]
            response = invoke_url('delete', prefix, targethost, testurl +
                                  "/" + str(id), payload, delete_headers,
                                  num_iterations)
            if response.status_code != 200:
                print "DELETE Failed: " + str(response.status_code)
def benchmark_get(resource_data, num_iterations, name, verify_notes=None):
for resource, data in resource_data.items():
# print "Test GET for owner: " + name + " resource: " + resource + " with
# ids " + str(data['ids'])
testurl = "/api/" + resource
ids = ""
idslen = len(data['ids'])
cnt = 0
for id in data['ids']:
cnt = cnt + 1
if cnt == idslen:
ids = ids + str(id)
else:
ids = ids + str(id) + ","
payload = {'id__in': ids, '_': str(etag)}
response = invoke_url('get', prefix, targethost,
testurl, payload, headers, num_iterations)
if response.status_code != 200:
print "GET Failed: " + str(response.status_code)
else:
json_response = json.loads(response.text)
responses = json_response[resource + '_collection'][resource]
for item in responses:
if verify_notes is not None:
# print "UPDATE Notes: " + verify_notes
if item["notes"] != verify_notes:
print "[WARN]: UPDATE Notes: " + verify_notes + "GET Notes: " + \
item["notes"]
def benchmark_create(resource_data, resource_cnt, num_iterations):
    """POST resource_cnt instances of each payload and record the results.

    Returns resource -> {'ids', 'etag', 'last-modified', 'json-response'}
    with parallel lists, or None as soon as any create fails.
    """
    resource_dict = {}
    for resource, payload in resource_data.items():
        testurl = "/api/" + resource
        resource_dict[resource] = {}
        resource_dict[resource]['ids'] = []
        resource_dict[resource]['etag'] = []
        resource_dict[resource]['last-modified'] = []
        resource_dict[resource]['json-response'] = []
        for cnt in range(resource_cnt):
            response = invoke_url('post', prefix, targethost,
                                  testurl, payload, headers, num_iterations)
            if response.status_code == 201:
                json_response = json.loads(response.text)
                # The response body has a single root key (the singular
                # resource name) wrapping the created object.
                for key, value in json_response.items():
                    resource_dict[resource]['ids'].append(value['id'])
                    resource_dict[resource]['etag'].append(response.headers['etag'])
                    resource_dict[resource][
                        'last-modified'].append(response.headers['last-modified'])
                    # print "Test CREATE for resource: " + resource + " with id " +
                    # str(value[u'id'])
            else:
                print "CREATE Failed: " + str(response.status_code)
                return None
    return resource_dict
def benchmark_update(resource_data, resource_dict, num_iterations):
    """PUT each payload against the recorded ids, refreshing stored etags.

    resource_data maps resource -> raw JSON payload; resource_dict is the
    bookkeeping structure from benchmark_create and is updated in place
    with the new etag/last-modified of every successful PUT.
    """
    for resource, payload in resource_data.items():
        testurl = "/api/" + resource
        ids = resource_dict[resource]['ids']
        # print "Test UPDATE for resource: " + resource + " with ids " + str(ids)
        etags = resource_dict[resource]['etag']
        last_modified_items = resource_dict[resource]['last-modified']
        for cnt in range(len(ids)):
            # print etags
            # print last_modified_items
            id = ids[cnt]
            # Conditional headers so concurrent writers get 409s, not clobbers.
            update_headers = deepcopy(headers)
            update_headers['If-Match'] = etags[cnt]
            update_headers['If-Unmodified-Since'] = str(last_modified_items[cnt])
            response = invoke_url('put', prefix, targethost, testurl +
                                  '/' + str(id), payload, update_headers,
                                  num_iterations)
            if response.status_code != 200:
                print "UPDATE Failed: " + str(response.status_code)
            else:
                resource_dict[resource]['etag'][cnt] = response.headers['etag']
                resource_dict[resource][
                    'last-modified'][cnt] = response.headers['last-modified']
                # print response.headers['etag']
                # print response.headers['last-modified']
def run_singlethreaded_tests():
    """Serial sanity pass: CREATE, then GET/PUT cycles, then DELETE."""
    print "Running single threaded benchmark tests create, GET, PUT, GET, DELETE"
    resource_dict = benchmark_create(create_resources, 1, 1)
    if resource_dict is not None:
        benchmark_get(resource_dict, 1, "Single Threaded GET Test")
        benchmark_update(update_resources, resource_dict, 1)
        benchmark_get(resource_dict, 1, "Single Threaded GET Test")
        benchmark_update(update_resources2, resource_dict, 1)
        benchmark_get(resource_dict, 1, "Single Threaded GET Test")
        benchmark_delete(resource_dict, 1)
    else:
        print "ERROR: Unable to run benchmark tests"
def run_concurrent_tests(loop_cnt):
    """Race a GET thread against a PUT/GET thread on the same objects.

    The GET thread runs loop_cnt + 20 iterations so it outlives the PUT
    thread; timings for both threads are printed at the end.
    """
    print "Running Benchmark Concurrent tests PUT/GET and GET..."
    resource_dict = benchmark_create(create_resources, 1, 1)
    if resource_dict is not None:
        get_threads = []
        put_threads = []
        for cnt in range(1):
            get_threads.append(TestGetThread(
                "GET Thread" + str(cnt + 1), resource_dict, loop_cnt + 20))
            put_threads.append(TestPutThread(
                "PUT Thread" + str(cnt + 1), update_resources, resource_dict,
                loop_cnt))
        for cnt in range(1):
            get_threads[cnt].start()
            put_threads[cnt].start()
        for cnt in range(1):
            get_threads[cnt].join()
            put_threads[cnt].join()
        # All threads are done; clean up the objects created above.
        benchmark_delete(resource_dict, 1)
        for cnt in range(1):
            print get_threads[cnt].name + " starttime: " + str(
                get_threads[cnt].starttime) + " endtime: " + str(
                get_threads[cnt].endtime)
            print put_threads[cnt].name + " starttime: " + str(
                put_threads[cnt].starttime) + " endtime: " + str(
                put_threads[cnt].endtime)
if __name__ == '__main__':
    # Serial sanity pass first, then the concurrent GET vs PUT/GET race.
    run_singlethreaded_tests()
    run_concurrent_tests(1000)
| |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for accessing nested attributes."""
import collections
# Public API of this module.
__all__ = [
    'get_innermost',
    'get_nested_objs',
    'get_outermost',
    'has_nested',
    'replace_innermost',
    'replace_outermost',
    'UnnestingWrapper',
]
def get_nested_objs(obj,
                    nested_key='__nested_attrs__',
                    fallback_attrs=(
                        'inner_results',
                        'accepted_results',
                        '_inner_kernel',  # used by TransformedTransitionKernel
                        'inner_kernel',
                    )):
  """Finds the list of nested objects inside an object's attributes.

  The attribute names to search are taken from `getattr(obj, nested_key)` when
  that attribute exists, and from `fallback_attrs` otherwise.  In either case
  the value may be a single string or a collection of strings.  The result is
  `[(attr, getattr(obj, attr)) for attr in <those names>]` with missing
  attributes omitted — so an object with none of the attributes yields `[]`.

  `nested_key` supports class- or object-level customization (a class can set
  e.g. `__nested_attrs__ = ('inner1', 'inner2')`), while `fallback_attrs`
  supports invocation-level customization.

  Args:
    obj: The object to find nested objects in.
    nested_key: A string naming an attribute on `obj` which, when present,
      holds the names of attributes to search. Default is `__nested_attrs__`.
    fallback_attrs: A string or collection of strings naming attributes to
      search when `nested_key` is absent. The default tuple works well for
      MCMC kernels and kernel results.

  Returns:
    pairs: A (possibly empty) list of (field name, nested results) pairs.
  """
  attr_names = getattr(obj, nested_key, fallback_attrs)
  if isinstance(attr_names, str):
    attr_names = (attr_names,)
  pairs = []
  for attr in attr_names:
    if hasattr(obj, attr):
      pairs.append((attr, getattr(obj, attr)))
  return pairs
def has_nested(obj, attr, nested_lookup_fn=get_nested_objs):
  """Reports whether `obj`, or any object nested inside it, has `attr`.

  Args:
    obj: The object to find (nested) attributes in.
    attr: A `string` attribute name to search for.
    nested_lookup_fn: A single-argument callable that returns a list of
      (attribute name, nested object) pairs. Defaults to `get_nested_objs`.

  Returns:
    has_nested: `True` if the attribute was found, `False` otherwise.
  """
  # Check the object itself, then recurse into each nested child.
  return hasattr(obj, attr) or any(
      has_nested(child, attr, nested_lookup_fn=nested_lookup_fn)
      for _, child in nested_lookup_fn(obj))
# Unique marker distinguishing "caller supplied no default" from an explicit
# `default=None` in `get_innermost` / `get_outermost`.
SENTINEL = object()
def get_innermost(obj, attr, default=SENTINEL,
                  nested_lookup_fn=get_nested_objs):
  """Returns the innermost (nested) attribute value named `attr`.

  Nested objects are traversed depth-first in post-order, so the deepest
  match wins; level-wise order is the list order returned from
  `nested_lookup_fn`.

  Args:
    obj: The object to find (nested) attributes in.
    attr: A `string` attribute name to search for.
    default: Value returned when `attr` exists neither on `obj` nor on any
      nested object. When omitted, a missing attribute raises instead.
    nested_lookup_fn: A single-argument callable that returns a list of
      (attribute name, nested object) pairs. Defaults to `get_nested_objs`.

  Returns:
    value: The (nested) attribute value, or `default` if it does not exist
      and `default` is set.

  Raises:
    AttributeError: if `attr` is not found and `default` is not specified.
  """
  # Deeper matches take precedence: try every child subtree before `obj`.
  for _, child in nested_lookup_fn(obj):
    try:
      return get_innermost(child, attr, nested_lookup_fn=nested_lookup_fn)
    except AttributeError:
      continue
  if hasattr(obj, attr):
    return getattr(obj, attr)
  if default is not SENTINEL:
    return default
  raise AttributeError('No attribute `' + attr + '` in nested results of '
                       + str(obj.__class__))
def get_outermost(obj, attr, default=SENTINEL,
                  nested_lookup_fn=get_nested_objs):
  """Returns the outermost (nested) attribute value named `attr`.

  Nested objects are traversed breadth-first, so the shallowest match wins;
  level-wise order is the list order returned from `nested_lookup_fn`.

  Args:
    obj: The object to find (nested) attributes in.
    attr: A `string` attribute name to search for.
    default: Value returned when `attr` exists neither on `obj` nor on any
      nested object. When omitted, a missing attribute raises instead.
    nested_lookup_fn: A single-argument callable that returns a list of
      (attribute name, nested object) pairs. Defaults to `get_nested_objs`.

  Returns:
    value: The (nested) attribute value, or `default` if it does not exist
      and `default` is set.

  Raises:
    AttributeError: if `attr` is not found and `default` is not specified.
  """
  pending = collections.deque([obj])
  while pending:
    current = pending.popleft()
    # Enqueue children so shallower objects are always examined first.
    pending.extend(child for _, child in nested_lookup_fn(current))
    if hasattr(current, attr):
      return getattr(current, attr)
  if default is not SENTINEL:
    return default
  raise AttributeError('No attribute `' + attr + '` in nested results of '
                       + str(obj.__class__))
def replace_innermost(ntuple, return_unused=False,
                      nested_lookup_fn=get_nested_objs, **kw):
  """Replaces (nested) fields in a `namedtuple`, deepest match first.

  Each attribute-value update in `**kw` is applied only to the innermost
  matching field; nested objects are traversed depth-first in post-order,
  with level-wise order determined by `nested_lookup_fn`.

  Args:
    ntuple: A `namedtuple` to replace (nested) fields in.
    return_unused: If `True`, also return the `dict` of attribute-value
      pairs in `**kw` that were not found and updated in `ntuple`.
    nested_lookup_fn: A single-argument callable that returns a list of
      (attribute name, nested object) pairs. Defaults to `get_nested_objs`.
    **kw: The attribute-value pairs to update.

  Returns:
    updated: A copy of `ntuple` with (nested) fields updated.
    unused: Only when `return_unused` is `True`: the attribute-value pairs
      in `**kw` that were not consumed.

  Raises:
    ValueError: if `return_unused=False` and attributes in `**kw` are not
      found in `ntuple` (raised by `ntuple._replace`).
  """
  # Recurse into children first so the deepest occurrence of each name
  # consumes the update. Replacements are collected separately (not merged
  # into `kw` mid-loop) so one child's replacement is never mistaken for an
  # update aimed at a later sibling.
  replaced_children = {}
  for fieldname, child in nested_lookup_fn(ntuple):
    new_child, kw = replace_innermost(
        child, return_unused=True, nested_lookup_fn=nested_lookup_fn, **kw)
    replaced_children[fieldname] = new_child
  kw.update(replaced_children)
  if return_unused:
    consumed = {name: kw[name] for name in ntuple._fields if name in kw}
    leftover = {name: value for name, value in kw.items()
                if name not in consumed}
    return ntuple._replace(**consumed), leftover
  return ntuple._replace(**kw)
def replace_outermost(ntuple, return_unused=False,
                      nested_lookup_fn=get_nested_objs, **kw):
  """Replace (nested) fields in a `namedtuple`.
  For each attribute-value update specified, this function only replaces the
  first matching attribute found. Nested objects are traversed breadth-first,
  with level-wise order determined by the list ordering returned from
  `nested_lookup_fn`.
  Args:
    ntuple: A `namedtuple` to replace (nested) fields in.
    return_unused: If `True`, return the `dict` of attribute-value pairs in
      `**kw` that were not found and updated in `ntuple`.
    nested_lookup_fn: A single-argument callable that returns a list of
      (attribute name, nested object) pairs. Defaults to `get_nested_objs`.
    **kw: The attribute-value pairs to update.
  Returns:
    updated: A copy of `ntuple` with (nested) fields updated.
    unused: If `return_unused` is `True`, the dictionary of attribute-value
      pairs in `**kw` that were not found and updated in `ntuple`.
  Raises:
    ValueError: if `return_unused=False` and attributes in `**kw` are not
      found in `ntuple`.
  """
  root = ntuple
  # Outermost-first: the root's own fields consume matching updates before
  # any nested object sees them.
  root_update = {k: kw[k] for k in root._fields if k in kw}
  kw = {k: v for k, v in kw.items() if k not in root_update}
  # Collect the updates to apply later by traversing breadth-first, but with
  # backlinks to parent updates.
  to_visit = collections.deque(
      [(root_update, field, child)
       for field, child in nested_lookup_fn(root)])
  inner_updates = []
  while to_visit and kw:
    parent_update, field, child = to_visit.popleft()
    child_update = {k: kw[k] for k in child._fields if k in kw}
    if child_update:
      # Remove consumed updates so deeper duplicates of a name are ignored.
      kw = {k: v for k, v in kw.items() if k not in child_update}
      inner_updates.append((parent_update, field, child, child_update))
    to_visit.extend(
        [(child_update, child_field, child_child)
         for child_field, child_child in nested_lookup_fn(child)])
  # Now apply updates in reverse order, propagating up to root.
  for parent_update, field, child, child_update in reversed(inner_updates):
    parent_update[field] = child._replace(**child_update)
  root = root._replace(**root_update)
  if not return_unused:
    if kw:
      raise ValueError(
          'Got unexpected (nested) field names: {}'.format(list(kw)))
    return root
  return root, kw
class UnnestingWrapper:
  """Exposes (nested) fields of a wrapped object as plain attributes.

  Attribute access is delegated to `get_innermost` (or `get_outermost`), so
  `wrapped.my_attr` searches the wrapped object and everything nested inside
  it. The wrapped object itself remains reachable as `_object`.

  Example usage:
  ```
  results = ...
  wrapped = UnnestingWrapper(results)
  wrapped.my_attr  # equivalent to `get_innermost(results, 'my_attr')
  # Use `_object` to get at the wrapped object.
  new_results = replace_innermost(wrapped._object, ...)
  ```
  """

  def __init__(self, obj, innermost=True):
    """Wraps `obj` so attribute access searches nested objects.

    Args:
      obj: The object to find nested objects in.
      innermost: Boolean. When `True`, attribute access uses
        `get_innermost`; otherwise uses `get_outermost`. Defaults to `True`.
    """
    self._object = obj
    self._innermost = innermost

  def __getattr__(self, attr):
    # Only invoked for names not found on the wrapper itself, so `_object`
    # and `_innermost` resolve through normal attribute lookup.
    lookup = get_innermost if self._innermost else get_outermost
    return lookup(self._object, attr)

  def __repr__(self):
    return 'UnnestingWrapper(innermost={}):\n{}'.format(
        self._innermost,
        repr(self._object))
| |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import mock
import testtools
from neutron.agent.linux import daemon
from neutron.common import exceptions
from neutron.tests import base
# Arbitrary file-descriptor number returned by the mocked os.open() in the
# Pidfile tests below.
FAKE_FD = 8
class FakeEntry(object):
    """Minimal stand-in for a pwd/grp database entry.

    Exposes a single attribute (e.g. ``pw_uid`` or ``gr_gid``) chosen at
    construction time.
    """
    def __init__(self, name, value):
        # Install the requested attribute directly on the instance.
        self.__dict__[name] = value
class TestPrivileges(base.BaseTestCase):
    """Unit tests for daemon.setuid/setgid/drop_privileges.

    Bug fix: three tests called ``log_critical.assert_once_with(...)``.
    That is not a real Mock method — on old mock releases such a call
    silently created a child mock and asserted nothing. Replaced with
    ``assert_called_once_with``.
    """
    def test_setuid_with_name(self):
        # A symbolic user name is resolved through pwd before os.setuid.
        with mock.patch('pwd.getpwnam', return_value=FakeEntry('pw_uid', 123)):
            with mock.patch('os.setuid') as setuid_mock:
                daemon.setuid('user')
                setuid_mock.assert_called_once_with(123)
    def test_setuid_with_id(self):
        # A numeric string is used directly as the uid, with no pwd lookup.
        with mock.patch('os.setuid') as setuid_mock:
            daemon.setuid('321')
            setuid_mock.assert_called_once_with(321)
    def test_setuid_fails(self):
        with mock.patch('os.setuid', side_effect=OSError()):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.setuid, '321')
                # Fixed: was the no-op misspelling `assert_once_with`.
                log_critical.assert_called_once_with(mock.ANY)
    def test_setgid_with_name(self):
        with mock.patch('grp.getgrnam', return_value=FakeEntry('gr_gid', 123)):
            with mock.patch('os.setgid') as setgid_mock:
                daemon.setgid('group')
                setgid_mock.assert_called_once_with(123)
    def test_setgid_with_id(self):
        with mock.patch('os.setgid') as setgid_mock:
            daemon.setgid('321')
            setgid_mock.assert_called_once_with(321)
    def test_setgid_fails(self):
        with mock.patch('os.setgid', side_effect=OSError()):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.setgid, '321')
                # Fixed: was the no-op misspelling `assert_once_with`.
                log_critical.assert_called_once_with(mock.ANY)
    @mock.patch.object(os, 'setgroups')
    @mock.patch.object(daemon, 'setgid')
    @mock.patch.object(daemon, 'setuid')
    def test_drop_no_privileges(self, mock_setuid, mock_setgid,
                                mock_setgroups):
        # With no user/group requested, nothing should be changed.
        daemon.drop_privileges()
        for cursor in (mock_setuid, mock_setgid, mock_setgroups):
            self.assertFalse(cursor.called)
    @mock.patch.object(os, 'geteuid', return_value=0)
    @mock.patch.object(os, 'setgroups')
    @mock.patch.object(daemon, 'setgid')
    @mock.patch.object(daemon, 'setuid')
    def _test_drop_privileges(self, setuid, setgid, setgroups,
                              geteuid, user=None, group=None):
        # Shared helper: verifies which privilege-dropping calls happen for a
        # given user/group combination while running as (mocked) root.
        daemon.drop_privileges(user=user, group=group)
        if user:
            setuid.assert_called_once_with(user)
        else:
            self.assertFalse(setuid.called)
        if group:
            # Supplementary groups are cleared before the gid change.
            setgroups.assert_called_once_with([])
            setgid.assert_called_once_with(group)
        else:
            self.assertFalse(setgroups.called)
            self.assertFalse(setgid.called)
    def test_drop_user_privileges(self):
        self._test_drop_privileges(user='user')
    def test_drop_uid_privileges(self):
        self._test_drop_privileges(user='321')
    def test_drop_group_privileges(self):
        self._test_drop_privileges(group='group')
    def test_drop_gid_privileges(self):
        self._test_drop_privileges(group='654')
    def test_drop_privileges_without_root_permissions(self):
        # Dropping privileges requires euid 0; anything else must abort.
        with mock.patch('os.geteuid', return_value=1):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.drop_privileges, 'user')
                # Fixed: was the no-op misspelling `assert_once_with`.
                log_critical.assert_called_once_with(mock.ANY)
class TestPidfile(base.BaseTestCase):
    """Unit tests for daemon.Pidfile with the os and fcntl modules mocked."""
    def setUp(self):
        super(TestPidfile, self).setUp()
        self.os_p = mock.patch.object(daemon, 'os')
        self.os = self.os_p.start()
        self.os.open.return_value = FAKE_FD
        self.fcntl_p = mock.patch.object(daemon, 'fcntl')
        self.fcntl = self.fcntl_p.start()
        self.fcntl.flock.return_value = 0
    def test_init(self):
        # Give the mocked os module the real flag values so the expected
        # call can be computed from the genuine constants.
        self.os.O_CREAT = os.O_CREAT
        self.os.O_RDWR = os.O_RDWR
        daemon.Pidfile('thefile', 'python')
        self.os.open.assert_called_once_with('thefile', os.O_CREAT | os.O_RDWR)
        self.fcntl.flock.assert_called_once_with(FAKE_FD, self.fcntl.LOCK_EX |
                                                 self.fcntl.LOCK_NB)
    def test_init_open_fail(self):
        self.os.open.side_effect = IOError
        with mock.patch.object(daemon.sys, 'stderr') as stderr:
            with testtools.ExpectedException(SystemExit):
                daemon.Pidfile('thefile', 'python')
            # Fixed: the original called `sys.assert_has_calls(...)` on the
            # REAL sys module (which has no such attribute), so the check
            # could never work. Assert on the patched stderr instead —
            # Pidfile presumably reports the error before exiting; confirm
            # against daemon.Pidfile.
            self.assertTrue(stderr.write.called)
    def test_unlock(self):
        p = daemon.Pidfile('thefile', 'python')
        p.unlock()
        # First flock is the constructor's exclusive lock, second the unlock.
        self.fcntl.flock.assert_has_calls([
            mock.call(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB),
            mock.call(FAKE_FD, self.fcntl.LOCK_UN)]
        )
    def test_write(self):
        p = daemon.Pidfile('thefile', 'python')
        p.write(34)
        # The pid overwrites the file contents and is flushed to disk.
        self.os.assert_has_calls([
            mock.call.ftruncate(FAKE_FD, 0),
            mock.call.write(FAKE_FD, '34'),
            mock.call.fsync(FAKE_FD)]
        )
    def test_read(self):
        self.os.read.return_value = '34'
        p = daemon.Pidfile('thefile', 'python')
        self.assertEqual(34, p.read())
    def test_is_running(self):
        # NOTE: '__builtin__.open' is the Python 2 builtins module name.
        with mock.patch('__builtin__.open') as mock_open:
            p = daemon.Pidfile('thefile', 'python')
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = 'python'
            with mock.patch.object(p, 'read') as read:
                read.return_value = 34
                self.assertTrue(p.is_running())
        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
    def test_is_running_uuid_true(self):
        with mock.patch('__builtin__.open') as mock_open:
            p = daemon.Pidfile('thefile', 'python', uuid='1234')
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = 'python 1234'
            with mock.patch.object(p, 'read') as read:
                read.return_value = 34
                # The uuid in the cmdline matches, so it is "our" process.
                self.assertTrue(p.is_running())
        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
    def test_is_running_uuid_false(self):
        with mock.patch('__builtin__.open') as mock_open:
            p = daemon.Pidfile('thefile', 'python', uuid='6789')
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = 'python 1234'
            with mock.patch.object(p, 'read') as read:
                read.return_value = 34
                # Different uuid: the pid was recycled by another process.
                self.assertFalse(p.is_running())
        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
class TestDaemon(base.BaseTestCase):
    """Unit tests for daemon.Daemon; os and Pidfile are mocked out."""
    def setUp(self):
        super(TestDaemon, self).setUp()
        # Replace the whole os module inside daemon so no real process
        # control (fork/setsid/dup2/...) can happen during the tests.
        self.os_p = mock.patch.object(daemon, 'os')
        self.os = self.os_p.start()
        self.pidfile_p = mock.patch.object(daemon, 'Pidfile')
        self.pidfile = self.pidfile_p.start()
    def test_init(self):
        d = daemon.Daemon('pidfile')
        # Default process name used for the pidfile bookkeeping.
        self.assertEqual(d.procname, 'python')
    def test_fork_parent(self):
        # fork() returning nonzero means we are the parent -> _exit().
        self.os.fork.return_value = 1
        d = daemon.Daemon('pidfile')
        d._fork()
        self.os._exit.assert_called_once_with(mock.ANY)
    def test_fork_child(self):
        # fork() returning 0 means we are the child -> keep running.
        self.os.fork.return_value = 0
        d = daemon.Daemon('pidfile')
        self.assertIsNone(d._fork())
    def test_fork_error(self):
        # A failed fork is reported on stderr and exits the process.
        self.os.fork.side_effect = OSError(1)
        with mock.patch.object(daemon.sys, 'stderr'):
            with testtools.ExpectedException(SystemExit):
                d = daemon.Daemon('pidfile', 'stdin')
                d._fork()
    def test_daemonize(self):
        self.os.devnull = '/dev/null'
        d = daemon.Daemon('pidfile')
        with mock.patch.object(d, '_fork') as fork:
            with mock.patch.object(daemon, 'atexit') as atexit:
                with mock.patch.object(daemon, 'signal') as signal:
                    signal.SIGTERM = 15
                    with mock.patch.object(daemon, 'sys') as sys:
                        sys.stdin.fileno.return_value = 0
                        sys.stdout.fileno.return_value = 1
                        sys.stderr.fileno.return_value = 2
                        d.daemonize()
                    signal.signal.assert_called_once_with(15, d.handle_sigterm)
                atexit.register.assert_called_once_with(d.delete_pid)
            # Classic double-fork daemonization: fork, setsid, fork again.
            fork.assert_has_calls([mock.call(), mock.call()])
        # stdio (fds 0-2) is redirected to /dev/null before getpid().
        self.os.assert_has_calls([
            mock.call.chdir('/'),
            mock.call.setsid(),
            mock.call.umask(0),
            mock.call.dup2(mock.ANY, 0),
            mock.call.dup2(mock.ANY, 1),
            mock.call.dup2(mock.ANY, 2),
            mock.call.getpid()]
        )
    def test_delete_pid(self):
        self.pidfile.return_value.__str__.return_value = 'pidfile'
        d = daemon.Daemon('pidfile')
        d.delete_pid()
        self.os.remove.assert_called_once_with('pidfile')
    def test_handle_sigterm(self):
        d = daemon.Daemon('pidfile')
        with mock.patch.object(daemon, 'sys') as sys:
            d.handle_sigterm(15, 1234)
            # SIGTERM triggers a clean exit (status 0).
            sys.exit.assert_called_once_with(0)
    def test_start(self):
        self.pidfile.return_value.is_running.return_value = False
        d = daemon.Daemon('pidfile')
        with mock.patch.object(d, 'daemonize') as daemonize:
            with mock.patch.object(d, 'run') as run:
                d.start()
                run.assert_called_once_with()
                daemonize.assert_called_once_with()
    def test_start_running(self):
        # If the pidfile says we are already running, start() must bail out
        # without daemonizing.
        self.pidfile.return_value.is_running.return_value = True
        d = daemon.Daemon('pidfile')
        with mock.patch.object(daemon.sys, 'stderr'):
            with mock.patch.object(d, 'daemonize') as daemonize:
                with testtools.ExpectedException(SystemExit):
                    d.start()
                self.assertFalse(daemonize.called)
| |
<<<<<<< HEAD
<<<<<<< HEAD
"""Queues"""
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue',
'QueueFull', 'QueueEmpty']
import collections
import heapq
from . import events
from . import futures
from . import locks
from .tasks import coroutine
class QueueEmpty(Exception):
    """Raised by Queue.get_nowait() when no item is immediately available."""
class QueueFull(Exception):
    """Raised by Queue.put_nowait() when no free slot is immediately available."""
class Queue:
    """A queue, useful for coordinating producer and consumer coroutines.
    If maxsize is less than or equal to zero, the queue size is infinite. If it
    is an integer greater than 0, then "yield from put()" will block when the
    queue reaches maxsize, until an item is removed by get().
    Unlike the standard library Queue, you can reliably know this Queue's size
    with qsize(), since your single-threaded asyncio application won't be
    interrupted between calling qsize() and doing an operation on the Queue.
    """
    def __init__(self, maxsize=0, *, loop=None):
        # Bind to the current event loop unless one is supplied explicitly.
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._maxsize = maxsize
        # Futures.
        self._getters = collections.deque()
        # Pairs of (item, Future).
        self._putters = collections.deque()
        self._init(maxsize)
    # Storage-policy hooks: subclasses (PriorityQueue, LifoQueue) override
    # _init/_get/_put to change the ordering discipline.
    def _init(self, maxsize):
        self._queue = collections.deque()
    def _get(self):
        return self._queue.popleft()
    def _put(self, item):
        self._queue.append(item)
    def __repr__(self):
        return '<{} at {:#x} {}>'.format(
            type(self).__name__, id(self), self._format())
    def __str__(self):
        return '<{} {}>'.format(type(self).__name__, self._format())
    def _format(self):
        # Summarize internal state for __repr__/__str__.
        result = 'maxsize={!r}'.format(self._maxsize)
        if getattr(self, '_queue', None):
            result += ' _queue={!r}'.format(list(self._queue))
        if self._getters:
            result += ' _getters[{}]'.format(len(self._getters))
        if self._putters:
            result += ' _putters[{}]'.format(len(self._putters))
        return result
    def _consume_done_getters(self):
        # Delete waiters at the head of the get() queue who've timed out.
        while self._getters and self._getters[0].done():
            self._getters.popleft()
    def _consume_done_putters(self):
        # Delete waiters at the head of the put() queue who've timed out.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()
    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)
    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize
    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return not self._queue
    def full(self):
        """Return True if there are maxsize items in the queue.
        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize
    @coroutine
    def put(self, item):
        """Put an item into the queue.
        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.
        This method is a coroutine.
        """
        self._consume_done_getters()
        if self._getters:
            # A consumer is already waiting: hand the item over directly.
            assert not self._queue, (
                'queue non-empty, why are getters waiting?')
            getter = self._getters.popleft()
            # Use _put and _get instead of passing item straight to getter, in
            # case a subclass has logic that must run (e.g. JoinableQueue).
            self._put(item)
            # getter cannot be cancelled, we just removed done getters
            getter.set_result(self._get())
        elif self._maxsize > 0 and self._maxsize <= self.qsize():
            # Queue is full: park this producer on a Future until a
            # consumer frees a slot.
            waiter = futures.Future(loop=self._loop)
            self._putters.append((item, waiter))
            yield from waiter
        else:
            self._put(item)
    def put_nowait(self, item):
        """Put an item into the queue without blocking.
        If no free slot is immediately available, raise QueueFull.
        """
        self._consume_done_getters()
        if self._getters:
            # A consumer is already waiting: hand the item over directly.
            assert not self._queue, (
                'queue non-empty, why are getters waiting?')
            getter = self._getters.popleft()
            # Use _put and _get instead of passing item straight to getter, in
            # case a subclass has logic that must run (e.g. JoinableQueue).
            self._put(item)
            # getter cannot be cancelled, we just removed done getters
            getter.set_result(self._get())
        elif self._maxsize > 0 and self._maxsize <= self.qsize():
            raise QueueFull
        else:
            self._put(item)
    @coroutine
    def get(self):
        """Remove and return an item from the queue.
        If queue is empty, wait until an item is available.
        This method is a coroutine.
        """
        self._consume_done_putters()
        if self._putters:
            # A producer is blocked on a full queue: accept its item now.
            assert self.full(), 'queue not full, why are putters waiting?'
            item, putter = self._putters.popleft()
            self._put(item)
            # When a getter runs and frees up a slot so this putter can
            # run, we need to defer the put for a tick to ensure that
            # getters and putters alternate perfectly. See
            # ChannelTest.test_wait.
            self._loop.call_soon(putter._set_result_unless_cancelled, None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            # Queue is empty: park this consumer on a Future until a
            # producer delivers an item.
            waiter = futures.Future(loop=self._loop)
            self._getters.append(waiter)
            return (yield from waiter)
    def get_nowait(self):
        """Remove and return an item from the queue.
        Return an item if one is immediately available, else raise QueueEmpty.
        """
        self._consume_done_putters()
        if self._putters:
            # A producer is blocked on a full queue: accept its item now.
            assert self.full(), 'queue not full, why are putters waiting?'
            item, putter = self._putters.popleft()
            self._put(item)
            # Wake putter on next tick.
            # getter cannot be cancelled, we just removed done putters
            putter.set_result(None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty
class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).
    Entries are typically tuples of the form: (priority number, data).
    """
    def _init(self, maxsize):
        # Backing store is a plain list maintained as a binary heap.
        self._queue = []
    def _put(self, item, heappush=heapq.heappush):
        # `heappush` is bound as a default argument at definition time as a
        # lookup micro-optimization.
        heappush(self._queue, item)
    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)
class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""
    def _init(self, maxsize):
        # Backing store is a plain list used as a stack.
        self._queue = []
    def _put(self, item):
        self._queue.append(item)
    def _get(self):
        # Pop from the same end items are appended to (LIFO).
        return self._queue.pop()
class JoinableQueue(Queue):
    """A Queue that tracks outstanding work via task_done() and join()."""
    def __init__(self, maxsize=0, *, loop=None):
        super().__init__(maxsize=maxsize, loop=loop)
        # Count of items put() but not yet acknowledged via task_done();
        # `_finished` is set exactly when that count is zero.
        self._unfinished_tasks = 0
        self._finished = locks.Event(loop=self._loop)
        self._finished.set()
    def _format(self):
        summary = Queue._format(self)
        if self._unfinished_tasks:
            summary += ' tasks={}'.format(self._unfinished_tasks)
        return summary
    def _put(self, item):
        super()._put(item)
        # Each enqueued item is one more unit of outstanding work.
        self._unfinished_tasks += 1
        self._finished.clear()
    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers: for each get() used to fetch a task, a
        subsequent call to task_done() tells the queue that processing on
        the task is complete. When the outstanding count reaches zero, any
        pending join() calls unblock.

        Raises ValueError if called more times than there were items placed
        in the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if not self._unfinished_tasks:
            self._finished.set()
    @coroutine
    def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up on every put() and down on
        every task_done(); join() unblocks once it reaches zero.
        """
        if self._unfinished_tasks > 0:
            yield from self._finished.wait()
=======
"""Queues"""
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue',
'QueueFull', 'QueueEmpty']
import collections
import heapq
from . import events
from . import futures
from . import locks
from .tasks import coroutine
class QueueEmpty(Exception):
"""Exception raised when Queue.get_nowait() is called on a Queue object
which is empty.
"""
pass
class QueueFull(Exception):
"""Exception raised when the Queue.put_nowait() method is called on a Queue
object which is full.
"""
pass
class Queue:
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "yield from put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
# Futures.
self._getters = collections.deque()
# Pairs of (item, Future).
self._putters = collections.deque()
self._init(maxsize)
def _init(self, maxsize):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
def __repr__(self):
return '<{} at {:#x} {}>'.format(
type(self).__name__, id(self), self._format())
def __str__(self):
return '<{} {}>'.format(type(self).__name__, self._format())
def _format(self):
result = 'maxsize={!r}'.format(self._maxsize)
if getattr(self, '_queue', None):
result += ' _queue={!r}'.format(list(self._queue))
if self._getters:
result += ' _getters[{}]'.format(len(self._getters))
if self._putters:
result += ' _putters[{}]'.format(len(self._putters))
return result
def _consume_done_getters(self):
# Delete waiters at the head of the get() queue who've timed out.
while self._getters and self._getters[0].done():
self._getters.popleft()
def _consume_done_putters(self):
# Delete waiters at the head of the put() queue who've timed out.
while self._putters and self._putters[0][1].done():
self._putters.popleft()
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return not self._queue
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
@coroutine
def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
This method is a coroutine.
"""
self._consume_done_getters()
if self._getters:
assert not self._queue, (
'queue non-empty, why are getters waiting?')
getter = self._getters.popleft()
# Use _put and _get instead of passing item straight to getter, in
# case a subclass has logic that must run (e.g. JoinableQueue).
self._put(item)
# getter cannot be cancelled, we just removed done getters
getter.set_result(self._get())
elif self._maxsize > 0 and self._maxsize <= self.qsize():
waiter = futures.Future(loop=self._loop)
self._putters.append((item, waiter))
yield from waiter
else:
self._put(item)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
self._consume_done_getters()
if self._getters:
assert not self._queue, (
'queue non-empty, why are getters waiting?')
getter = self._getters.popleft()
# Use _put and _get instead of passing item straight to getter, in
# case a subclass has logic that must run (e.g. JoinableQueue).
self._put(item)
# getter cannot be cancelled, we just removed done getters
getter.set_result(self._get())
elif self._maxsize > 0 and self._maxsize <= self.qsize():
raise QueueFull
else:
self._put(item)
@coroutine
def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
This method is a coroutine.
"""
self._consume_done_putters()
if self._putters:
assert self.full(), 'queue not full, why are putters waiting?'
item, putter = self._putters.popleft()
self._put(item)
# When a getter runs and frees up a slot so this putter can
# run, we need to defer the put for a tick to ensure that
# getters and putters alternate perfectly. See
# ChannelTest.test_wait.
self._loop.call_soon(putter._set_result_unless_cancelled, None)
return self._get()
elif self.qsize():
return self._get()
else:
waiter = futures.Future(loop=self._loop)
self._getters.append(waiter)
return (yield from waiter)
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
self._consume_done_putters()
if self._putters:
assert self.full(), 'queue not full, why are putters waiting?'
item, putter = self._putters.popleft()
self._put(item)
# Wake putter on next tick.
# getter cannot be cancelled, we just removed done putters
putter.set_result(None)
return self._get()
elif self.qsize():
return self._get()
else:
raise QueueEmpty
class PriorityQueue(Queue):
"""A subclass of Queue; retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
"""
def _init(self, maxsize):
self._queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self._queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self._queue)
class LifoQueue(Queue):
"""A subclass of Queue that retrieves most recently added entries first."""
def _init(self, maxsize):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
class JoinableQueue(Queue):
    """A subclass of Queue with task_done() and join() methods."""

    def __init__(self, maxsize=0, *, loop=None):
        super().__init__(maxsize=maxsize, loop=loop)
        # Count of items put() but not yet acknowledged via task_done().
        self._unfinished_tasks = 0
        # Set whenever _unfinished_tasks hits zero; join() waits on it.
        self._finished = locks.Event(loop=self._loop)
        self._finished.set()

    def _format(self):
        # Extend the base summary with the outstanding-task count.
        result = Queue._format(self)
        if self._unfinished_tasks:
            result += ' tasks={}'.format(self._unfinished_tasks)
        return result

    def _put(self, item):
        super()._put(item)
        # Every enqueued item is one more task join() must wait for.
        self._unfinished_tasks += 1
        self._finished.clear()

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    @coroutine
    def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            yield from self._finished.wait()
# FIXME(review): unresolved git merge-conflict markers (">>>>>>> b875702c9c06ab5012e52ff4337439b03918f453" / "=======")
# were left in this file; the queues module content above and below is duplicated. Resolve the conflict and keep one copy.
"""Queues"""
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue',
'QueueFull', 'QueueEmpty']
import collections
import heapq
from . import events
from . import futures
from . import locks
from .tasks import coroutine
class QueueEmpty(Exception):
    """Raised when Queue.get_nowait() is called on a Queue that holds no items."""
class QueueFull(Exception):
    """Raised when Queue.put_nowait() is called on a Queue that is at maxsize."""
class Queue:
    """A queue, useful for coordinating producer and consumer coroutines.

    If maxsize is less than or equal to zero, the queue size is infinite. If it
    is an integer greater than 0, then "yield from put()" will block when the
    queue reaches maxsize, until an item is removed by get().

    Unlike the standard library Queue, you can reliably know this Queue's size
    with qsize(), since your single-threaded asyncio application won't be
    interrupted between calling qsize() and doing an operation on the Queue.
    """

    def __init__(self, maxsize=0, *, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._maxsize = maxsize

        # Futures.
        self._getters = collections.deque()
        # Pairs of (item, Future).
        self._putters = collections.deque()
        self._init(maxsize)

    def _init(self, maxsize):
        # Storage hook: subclasses (PriorityQueue, LifoQueue) override this
        # together with _get/_put to change the retrieval order.
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    def __repr__(self):
        return '<{} at {:#x} {}>'.format(
            type(self).__name__, id(self), self._format())

    def __str__(self):
        return '<{} {}>'.format(type(self).__name__, self._format())

    def _format(self):
        # Human-readable state summary shared by __repr__ and __str__.
        result = 'maxsize={!r}'.format(self._maxsize)
        if getattr(self, '_queue', None):
            result += ' _queue={!r}'.format(list(self._queue))
        if self._getters:
            result += ' _getters[{}]'.format(len(self._getters))
        if self._putters:
            result += ' _putters[{}]'.format(len(self._putters))
        return result

    def _consume_done_getters(self):
        # Delete waiters at the head of the get() queue who've timed out.
        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def _consume_done_putters(self):
        # Delete waiters at the head of the put() queue who've timed out.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the queue.

        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    @coroutine
    def put(self, item):
        """Put an item into the queue.

        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.

        This method is a coroutine.
        """
        # Drop getters whose futures were already cancelled/completed.
        self._consume_done_getters()
        if self._getters:
            assert not self._queue, (
                'queue non-empty, why are getters waiting?')

            getter = self._getters.popleft()

            # Use _put and _get instead of passing item straight to getter, in
            # case a subclass has logic that must run (e.g. JoinableQueue).
            self._put(item)

            # getter cannot be cancelled, we just removed done getters
            getter.set_result(self._get())

        elif self._maxsize > 0 and self._maxsize <= self.qsize():
            # Queue is full: park (item, waiter) until a getter makes room.
            waiter = futures.Future(loop=self._loop)

            self._putters.append((item, waiter))
            yield from waiter

        else:
            self._put(item)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        self._consume_done_getters()
        if self._getters:
            assert not self._queue, (
                'queue non-empty, why are getters waiting?')

            getter = self._getters.popleft()

            # Use _put and _get instead of passing item straight to getter, in
            # case a subclass has logic that must run (e.g. JoinableQueue).
            self._put(item)

            # getter cannot be cancelled, we just removed done getters
            getter.set_result(self._get())

        elif self._maxsize > 0 and self._maxsize <= self.qsize():
            raise QueueFull
        else:
            self._put(item)

    @coroutine
    def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.

        This method is a coroutine.
        """
        self._consume_done_putters()
        if self._putters:
            assert self.full(), 'queue not full, why are putters waiting?'
            item, putter = self._putters.popleft()
            self._put(item)

            # When a getter runs and frees up a slot so this putter can
            # run, we need to defer the put for a tick to ensure that
            # getters and putters alternate perfectly. See
            # ChannelTest.test_wait.
            self._loop.call_soon(putter._set_result_unless_cancelled, None)

            return self._get()

        elif self.qsize():
            return self._get()
        else:
            # Queue empty: suspend on a future that put() will resolve.
            waiter = futures.Future(loop=self._loop)

            self._getters.append(waiter)
            return (yield from waiter)

    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        self._consume_done_putters()
        if self._putters:
            assert self.full(), 'queue not full, why are putters waiting?'
            item, putter = self._putters.popleft()
            self._put(item)
            # Wake putter on next tick.

            # getter cannot be cancelled, we just removed done putters
            putter.set_result(None)

            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty
class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        # Plain list kept heap-ordered by _put/_get.
        self._queue = []

    def _put(self, item, heappush=heapq.heappush):
        # heappush bound as a default arg: avoids a global lookup per call.
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        # Pops the smallest (highest-priority) entry.
        return heappop(self._queue)
class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        # List used as a stack.
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        # pop() from the right end returns the most recently appended item.
        return self._queue.pop()
class JoinableQueue(Queue):
    """A subclass of Queue with task_done() and join() methods."""

    def __init__(self, maxsize=0, *, loop=None):
        super().__init__(maxsize=maxsize, loop=loop)
        # Items put() but not yet acknowledged through task_done().
        self._unfinished_tasks = 0
        # Event set when the unfinished count reaches zero; join() waits on it.
        self._finished = locks.Event(loop=self._loop)
        self._finished.set()

    def _format(self):
        # Append the outstanding-task count to the base summary.
        result = Queue._format(self)
        if self._unfinished_tasks:
            result += ' tasks={}'.format(self._unfinished_tasks)
        return result

    def _put(self, item):
        super()._put(item)
        # Each enqueued item is one more task join() must wait for.
        self._unfinished_tasks += 1
        self._finished.clear()

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    @coroutine
    def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            yield from self._finished.wait()
# FIXME(review): stray git merge-conflict marker (">>>>>>> b875702c9c06ab5012e52ff4337439b03918f453") — remove when resolving the duplicated queues module above.
| |
"""
Base implementation of the Page Object pattern.
See https://github.com/SeleniumHQ/selenium/wiki/PageObjects
and http://www.seleniumhq.org/docs/06_test_design_considerations.jsp#page-object-design-pattern
"""
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from functools import wraps
from contextlib import contextmanager
import logging
import os
import socket
import re
from textwrap import dedent
from urllib import parse
from lazy import lazy
from selenium.common.exceptions import WebDriverException
from .query import BrowserQuery, no_error
from .promise import Promise, EmptyPromise, BrokenPromise
from .a11y import AxeCoreAudit, AxsAudit
LOGGER = logging.getLogger(__name__)
# String that can be used to test for XSS vulnerabilities.
# Taken from https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#XSS_Locator.
XSS_INJECTION = "'';!--\"<XSS>=&{()}"
# When the injected string appears within an attribute (for instance, value of an input tag,
# or alt of an img tag), if it is properly escaped this is the format we will see from
# document.documentElement.innerHTML. To avoid false positives, we need to allow this
# specific string, which hopefully is unique/odd enough that it would never appear accidentally.
EXPECTED_ATTRIBUTE_FORMAT = re.compile(r'\'\';!--"<xss>=&{\(\)}')
XSS_HTML = "<xss"
class WrongPageError(WebDriverException):
    """
    The page object reports that we're on the wrong page!

    Raised by PageObject._verify_page() when is_browser_on_page()
    returns False for the current browser state.
    """
class PageLoadError(WebDriverException):
    """
    An error occurred while loading the page.

    Raised by PageObject.visit() for an invalid URL, a WebDriver/socket
    failure while navigating, or a timeout waiting for the page to load.
    """
class XSSExposureError(Exception):
    """
    An XSS issue has been found on the current page.

    Raised by PageObject._verify_xss_exposure() when injected XSS markers
    appear unescaped in the page's HTML.
    """
def no_selenium_errors(func):
    """
    Decorator to create an `EmptyPromise` check function that is satisfied
    only when `func` executes without a Selenium error.

    This protects against many common test failures due to timing issues.
    For example, accessing an element after it has been modified by JavaScript
    ordinarily results in a `StaleElementException`. Methods decorated
    with `no_selenium_errors` will simply retry if that happens, which makes tests
    more robust.

    Args:
        func (callable): The function to execute, with retries if an error occurs.

    Returns:
        Decorated function
    """
    # Preserve func's name/docstring on the wrapper, consistent with
    # pre_verify() below (the original wrapper lost them).
    @wraps(func)
    def _inner(*args, **kwargs):
        try:
            return_val = func(*args, **kwargs)
        except WebDriverException:
            # Returning False keeps the surrounding EmptyPromise polling
            # instead of aborting on a transient Selenium error.
            LOGGER.warning('Exception ignored during retry loop:', exc_info=True)
            return False
        else:
            return return_val

    return _inner
def unguarded(method):
    """
    Mark a PageObject method as unguarded.

    Unguarded methods don't verify that the PageObject is
    on the current browser page before they execute.

    Args:
        method (callable): The method to decorate.

    Returns:
        Decorated method
    """
    # Flag consumed by _PageObjectMetaclass, which skips wrapping any
    # attribute carrying it.
    setattr(method, '_unguarded', True)  # pylint: disable=protected-access
    return method
def pre_verify(method):
    """
    Decorator that calls self._verify_page() before executing the decorated method.

    Args:
        method (callable): The method to decorate.

    Returns:
        Decorated method
    """
    @wraps(method)
    def guarded(self, *args, **kwargs):
        # Raise WrongPageError (via _verify_page) before touching the page.
        self._verify_page()  # pylint: disable=protected-access
        return method(self, *args, **kwargs)

    return guarded
class _PageObjectMetaclass(ABCMeta):
    """
    Decorates any callable attributes of the class
    so that they call self._verify_page() before executing.

    Excludes any methods marked as unguarded with the @unguarded
    decorator, any methods starting with _, or in the list ALWAYS_UNGUARDED.
    """
    # These two are exempted because _verify_page() itself relies on them;
    # guarding them would verify the page before it could possibly load.
    ALWAYS_UNGUARDED = ['url', 'is_browser_on_page']

    def __new__(mcs, cls_name, cls_bases, cls_attrs, **kwargs):
        for name, attr in list(cls_attrs.items()):
            # Skip methods marked as unguarded
            if getattr(attr, '_unguarded', False) or name in mcs.ALWAYS_UNGUARDED:
                continue

            # Skip private methods
            if name.startswith('_'):
                continue

            # Skip class attributes that are classes themselves
            if isinstance(attr, type):
                continue

            is_property = isinstance(attr, property)

            # Skip non-callable attributes
            if not (callable(attr) or is_property):
                continue

            if is_property:
                # For properties, wrap each of the sub-methods separately
                property_methods = defaultdict(None)
                for fn_name in ('fdel', 'fset', 'fget'):
                    prop_fn = getattr(cls_attrs[name], fn_name, None)
                    if prop_fn is not None:
                        # Check for unguarded properties
                        if getattr(prop_fn, '_unguarded', False):
                            property_methods[fn_name] = prop_fn
                        else:
                            property_methods[fn_name] = pre_verify(prop_fn)
                # Rebuild the property from the (possibly wrapped) accessors.
                cls_attrs[name] = property(**property_methods)
            else:
                cls_attrs[name] = pre_verify(attr)
        return super().__new__(mcs, cls_name, cls_bases, cls_attrs)
class PageObject(metaclass=_PageObjectMetaclass):
    """
    Encapsulates user interactions with a specific part
    of a web application.

    The most important thing is this:
    Page objects encapsulate Selenium.

    If you find yourself writing CSS selectors in tests,
    manipulating forms, or otherwise interacting directly
    with the web UI, stop!

    Instead, put these in a :class:`PageObject` subclass :)

    PageObjects do their best to verify that they are only
    used when the browser is on a page containing the object.
    To do this, they will call :meth:`is_browser_on_page` before executing
    any of their methods, and raise a :class:`WrongPageError` if the
    browser isn't on the correct page.

    Generally, this is the right behavior. However, at times it
    will be useful to not verify the page before executing a method.
    In those cases, the method can be marked with the :func:`unguarded`
    decorator. Additionally, private methods (those beginning with `_`)
    are always unguarded.

    Class or instance properties are never guarded. However, methods
    marked with the :func:`property` are candidates for being guarded.
    To make them unguarded, you must mark the getter, setter, and deleter
    as :func:`unguarded` separately, and those decorators must be applied before
    the :func:`property` decorator.

    Correct::

        @property
        @unguarded
        def foo(self):
            return self._foo

    Incorrect::

        @unguarded
        @property
        def foo(self):
            return self._foo
    """

    def __init__(self, browser, *args, **kwargs):
        """
        Initialize the page object to use the specified browser instance.

        Args:
            browser (selenium.webdriver): The Selenium-controlled browser.

        Returns:
            PageObject
        """
        super().__init__(*args, **kwargs)
        self.browser = browser
        # Optional audits, opted into via environment variables.
        a11y_flag = os.environ.get('VERIFY_ACCESSIBILITY', 'False')
        self.verify_accessibility = a11y_flag.lower() == 'true'
        xss_flag = os.environ.get('VERIFY_XSS', 'False')
        self.verify_xss = xss_flag.lower() == 'true'

    @lazy
    def a11y_audit(self):
        """
        Initializes the a11y_audit attribute.
        """
        rulesets = {
            "axe_core": AxeCoreAudit,
            "google_axs": AxsAudit,
        }
        # NOTE: an unknown BOKCHOY_A11Y_RULESET value raises KeyError here.
        ruleset = rulesets[
            os.environ.get("BOKCHOY_A11Y_RULESET", 'axe_core')]
        return ruleset(self.browser, self.url)

    @abstractmethod
    def is_browser_on_page(self):
        """
        Check that we are on the right page in the browser.
        The specific check will vary from page to page,
        but usually this amounts to checking the:

        1) browser URL
        2) page title
        3) page headings

        Returns:
            A `bool` indicating whether the browser is on the correct page.
        """
        return False

    @property
    @abstractmethod
    def url(self):
        """
        Return the URL of the page. This may be dynamic,
        determined by configuration options passed to the
        page object's constructor.

        Some pages may not be directly accessible:
        perhaps the page object represents a "navigation"
        component that occurs on multiple pages.
        If this is the case, subclasses can return `None`
        to indicate that you can't directly visit the page object.
        """
        return None

    @unguarded
    def warning(self, msg):
        """
        Subclasses call this to indicate that something unexpected
        occurred while interacting with the page.

        Page objects themselves should never make assertions or
        raise exceptions, but they can issue warnings to make
        tests easier to debug.

        Args:
            msg (str): The message to log as a warning.

        Returns:
            None
        """
        # Logger is named after the concrete subclass for readable output.
        log = logging.getLogger(self.__class__.__name__)
        log.warning(msg)

    @unguarded
    def visit(self):
        """
        Open the page containing this page object in the browser.

        Some page objects may not provide a URL, in which case
        a `NotImplementedError` will be raised.

        Raises:
            PageLoadError: The page did not load successfully.
            NotImplementedError: The page object does not provide a URL to visit.

        Returns:
            PageObject
        """
        if self.url is None:
            raise NotImplementedError(f"Page {self} does not provide a URL to visit.")

        # Validate the URL
        if not self.validate_url(self.url):
            raise PageLoadError(f"Invalid URL: '{self.url}'")

        # Visit the URL
        try:
            self.browser.get(self.url)
        except (WebDriverException, socket.gaierror) as err:
            LOGGER.warning("Unexpected page load exception:", exc_info=True)
            raise PageLoadError("Could not load page '{!r}' at URL '{}'".format(
                self, self.url
            )) from err

        # Give the browser enough time to get to the page, then return the page object
        # so that the caller can chain the call with an action:
        # Example: FooPage.visit().do_something()
        #
        # A BrokenPromise will be raised if the page object's is_browser_on_page method
        # does not return True before timing out.
        try:
            return self.wait_for_page()
        except BrokenPromise as err:
            raise PageLoadError("Timed out waiting to load page '{!r}' at URL '{}'".format(
                self, self.url
            )) from err

    @classmethod
    @unguarded
    def validate_url(cls, url):
        """
        Return a boolean indicating whether the URL has a protocol and hostname.
        If a port is specified, ensure it is an integer.

        Arguments:
            url (str): The URL to check.

        Returns:
            Boolean indicating whether the URL has a protocol and hostname.
        """
        result = parse.urlsplit(url)

        # Check that we have a protocol and hostname
        if not result.scheme:
            LOGGER.warning("%s is missing a protocol", url)
            return False

        if not result.netloc:
            LOGGER.warning("%s is missing a hostname", url)
            return False

        # Check that the port is an integer
        try:
            if result.port is not None:
                int(result.port)
            elif result.netloc.endswith(':'):
                # Valid URLs do not end with colons.
                LOGGER.warning("%s has a colon after the hostname but no port", url)
                return False
        except ValueError:
            # result.port raises ValueError for a non-numeric port.
            LOGGER.warning("%s uses an invalid port", url)
            return False
        else:
            return True

    def _verify_page(self):
        """
        Ask the page object if we're on the right page;
        if not, raise a `WrongPageError`.
        """
        if not self.is_browser_on_page():
            msg = "Not on the correct page to use '{!r}' at URL '{}'".format(
                self, self.url
            )
            raise WrongPageError(msg)

    def _verify_xss_exposure(self):
        """
        Verify that there are no obvious XSS exposures on the page (based on test authors
        including XSS_INJECTION in content rendered on the page).
        If an xss issue is found, raise a 'XSSExposureError'.
        """
        # Use innerHTML to get dynamically injected HTML as well as server-side HTML.
        html_source = self.browser.execute_script(
            "return document.documentElement.innerHTML.toLowerCase()"
        )
        # Check taken from https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#XSS_Locator.
        all_hits_count = html_source.count(XSS_HTML)
        if all_hits_count > 0:
            # Subtract occurrences that match the properly-escaped attribute
            # form (EXPECTED_ATTRIBUTE_FORMAT) to avoid false positives.
            safe_hits_count = len(EXPECTED_ATTRIBUTE_FORMAT.findall(html_source))
            if all_hits_count > safe_hits_count:
                potential_hits = re.findall('<[^<]+<xss', html_source)
                raise XSSExposureError(
                    "{} XSS issue(s) found on page. Potential places are {}".format(
                        all_hits_count - safe_hits_count, potential_hits
                    )
                )

    @unguarded
    def wait_for_page(self, timeout=30):
        """
        Block until the page loads, then returns the page.
        Useful for ensuring that we navigate successfully to a particular page.

        Keyword Args:
            timeout (int): The number of seconds to wait for the page before timing out with an exception.

        Raises:
            BrokenPromise: The timeout is exceeded without the page loading successfully.
        """
        def _is_document_interactive():
            """
            Check the loading state of the document to ensure the document is in interactive mode
            """
            return self.browser.execute_script(
                "return document.readyState=='interactive'")

        def _is_document_ready():
            """
            Check the loading state of the document to ensure the document and all sub-resources
            have finished loading (the document load event has been fired.)
            """
            return self.browser.execute_script(
                "return document.readyState=='complete'")

        try:
            # Wait for page to load completely i.e. for document.readyState to become complete
            EmptyPromise(
                _is_document_ready,
                "The document and all sub-resources have finished loading.",
                timeout=timeout
            ).fulfill()
        except BrokenPromise:
            # pylint: disable=logging-format-interpolation
            LOGGER.warning(
                'document.readyState does not become complete for following url: {}'.format(self.url),
                exc_info=True
            )
            # If document.readyState does not become complete after a specific time relax the
            # condition and check for interactive state
            EmptyPromise(
                _is_document_interactive,
                "The document is in interactive mode.",
                timeout=timeout
            ).fulfill()

        # Then wait for the page object's own is_browser_on_page() check.
        result = Promise(
            lambda: (self.is_browser_on_page(), self), f"loaded page {self!r}",
            timeout=timeout
        ).fulfill()

        if self.verify_accessibility:
            self.a11y_audit.check_for_accessibility_errors()

        return result

    @unguarded
    def q(self, **kwargs):  # pylint: disable=invalid-name
        """
        Construct a query on the browser.

        Example usages:

        .. code:: python

            self.q(css="div.foo").first.click()
            self.q(xpath="/foo/bar").text

        Keyword Args:
            css: A CSS selector.
            xpath: An XPath selector.

        Returns:
            BrowserQuery
        """
        # Every query is an opportunity to scan for XSS when enabled.
        if self.verify_xss:
            self._verify_xss_exposure()
        return BrowserQuery(self.browser, **kwargs)

    @contextmanager
    def handle_alert(self, confirm=True):
        """
        Context manager that ensures alerts are dismissed.

        Example usage:

        .. code:: python

            with self.handle_alert():
                self.q(css='input.submit-button').first.click()

        Keyword Args:
            confirm (bool): Whether to confirm or cancel the alert.

        Returns:
            None
        """
        # Before executing the `with` block, stub the confirm/alert functions
        script = dedent("""
            window.confirm = function() {{ return {0}; }};
            window.alert = function() {{ return; }};
        """.format("true" if confirm else "false")).strip()
        self.browser.execute_script(script)

        # Execute the `with` block
        yield

    @unguarded
    def wait_for_ajax(self, timeout=30):
        """
        Wait for jQuery to be loaded and for all ajax requests to finish. Note
        that we have to wait for jQuery to load first because it is used to
        check that ajax requests are complete.

        Important: If you have an ajax requests that results in a page reload,
        you will need to use wait_for_page or some other method to confirm that
        the page has finished reloading after wait_for_ajax has returned.

        Example usage:

        .. code:: python

            self.q(css='input#email').fill("foo")
            self.wait_for_ajax()

        Keyword Args:
            timeout (int): The number of seconds to wait before timing out with
                a BrokenPromise exception.

        Returns:
            None

        Raises:
            BrokenPromise: The timeout is exceeded before (1) jQuery is defined
                and (2) all ajax requests are completed.
        """
        def _is_ajax_finished():
            """
            Check if all the ajax calls on the current page have completed.
            """
            # Wait for jQuery to be defined first, so that jQuery.active
            # doesn't raise an error that 'jQuery is not defined'. We have
            # seen this as a flaky pattern possibly related to pages reloading
            # while wait_for_ajax is being called.
            return self.browser.execute_script(
                "return typeof(jQuery)!='undefined' && jQuery.active==0")

        EmptyPromise(
            _is_ajax_finished,
            "Finished waiting for ajax requests.",
            timeout=timeout
        ).fulfill()

    @unguarded
    def wait_for(self, promise_check_func, description, result=False, timeout=60):
        """
        Calls the method provided as an argument until the Promise satisfied or BrokenPromise.
        Retries if a WebDriverException is encountered (until the timeout is reached).

        Arguments:
            promise_check_func (callable):
                * If `result` is False Then
                  Function that accepts no arguments and returns a boolean indicating whether the promise is fulfilled
                * If `result` is True Then
                  Function that accepts no arguments and returns a `(is_satisfied, result)` tuple,
                  where `is_satisfied` is a boolean indicating whether the promise was satisfied, and `result`
                  is a value to return from the fulfilled `Promise`
            description (str): Description of the Promise, used in log messages
            result (bool): Indicates whether we need result
            timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out

        Raises:
            BrokenPromise: the `Promise` was not satisfied
        """
        if result:
            return Promise(no_error(promise_check_func), description, timeout=timeout).fulfill()
        return EmptyPromise(no_selenium_errors(promise_check_func), description, timeout=timeout).fulfill()

    @unguarded
    def wait_for_element_presence(self, element_selector, description, timeout=60):
        """
        Waits for element specified by `element_selector` to be present in DOM.

        Example usage:

        .. code:: python

            self.wait_for_element_presence('.submit', 'Submit Button is Present')

        Arguments:
            element_selector (str): css selector of the element.
            description (str): Description of the Promise, used in log messages.
            timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
        """
        self.wait_for(lambda: self.q(css=element_selector).present, description=description, timeout=timeout)

    @unguarded
    def wait_for_element_absence(self, element_selector, description, timeout=60):
        """
        Waits for element specified by `element_selector` until it disappears from DOM.

        Example usage:

        .. code:: python

            self.wait_for_element_absence('.submit', 'Submit Button is not Present')

        Arguments:
            element_selector (str): css selector of the element.
            description (str): Description of the Promise, used in log messages.
            timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
        """
        self.wait_for(lambda: not self.q(css=element_selector).present, description=description, timeout=timeout)

    @unguarded
    def wait_for_element_visibility(self, element_selector, description, timeout=60):
        """
        Waits for element specified by `element_selector` until it is displayed on web page.

        Example usage:

        .. code:: python

            self.wait_for_element_visibility('.submit', 'Submit Button is Visible')

        Arguments:
            element_selector (str): css selector of the element.
            description (str): Description of the Promise, used in log messages.
            timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
        """
        self.wait_for(lambda: self.q(css=element_selector).visible, description=description, timeout=timeout)

    @unguarded
    def wait_for_element_invisibility(self, element_selector, description, timeout=60):
        """
        Waits for element specified by `element_selector` until it disappears from the web page.

        Example usage:

        .. code:: python

            self.wait_for_element_invisibility('.submit', 'Submit Button Disappeared')

        Arguments:
            element_selector (str): css selector of the element.
            description (str): Description of the Promise, used in log messages.
            timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
        """
        self.wait_for(lambda: self.q(css=element_selector).invisible, description=description, timeout=timeout)

    @unguarded
    def scroll_to_element(self, element_selector, timeout=60):
        """
        Scrolls the browser such that the element specified appears at the top. Before scrolling, waits for
        the element to be present.

        Example usage:

        .. code:: python

            self.scroll_to_element('.far-down', 'Scroll to far-down')

        Arguments:
            element_selector (str): css selector of the element.
            timeout (float): Maximum number of seconds to wait for the element to be present on the
                page before timing out.

        Raises: BrokenPromise if the element does not exist (and therefore scrolling to it is not possible)
        """
        # Ensure element exists
        msg = f"Element '{element_selector}' is present"
        self.wait_for(lambda: self.q(css=element_selector).present, msg, timeout=timeout)

        # Obtain coordinates and use those for JavaScript call
        loc = self.q(css=element_selector).first.results[0].location
        self.browser.execute_script("window.scrollTo({x},{y})".format(x=loc['x'], y=loc['y']))
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from eventlet import greenthread
from lxml import etree
from oslo_log import log as logging
from oslo_utils import importutils
from nova.cloudpipe import pipelib
import nova.conf
from nova.i18n import _LI
from nova.i18n import _LW
import nova.virt.firewall as base_firewall
from nova.virt import netutils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
libvirt = None
class NWFilterFirewall(base_firewall.FirewallDriver):
"""This class implements a network filtering mechanism by using
libvirt's nwfilter.
all instances get a filter ("nova-base") applied. This filter
provides some basic security such as protection against MAC
spoofing, IP spoofing, and ARP spoofing.
"""
def __init__(self, host, **kwargs):
    """Create an NWFilter firewall driver

    :param host: nova.virt.libvirt.host.Host instance
    :param kwargs: currently unused
    """
    # Lazily import libvirt into the module-level global so the driver can
    # be constructed (with a warning) even when libvirt is unavailable.
    global libvirt
    if libvirt is None:
        try:
            libvirt = importutils.import_module('libvirt')
        except ImportError:
            LOG.warning(_LW("Libvirt module could not be loaded. "
                            "NWFilterFirewall will not work correctly."))

    self._host = host
    self.static_filters_configured = False
    self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
    """No-op. Everything is done in prepare_instance_filter."""
    # Intentionally empty: this driver applies filters at prepare time.
def _get_connection(self):
    # Delegate to the host object so a single libvirt connection is shared.
    return self._host.get_connection()
# Expose the connection through a read-only property-style attribute.
_conn = property(_get_connection)
def nova_no_nd_reflection_filter(self):
    """This filter protects false positives on IPv6 Duplicate Address
    Detection(DAD).

    :returns: the 'nova-no-nd-reflection' nwfilter XML as a string.
    """
    # Renamed from ``uuid``: that local shadowed the module-level
    # ``import uuid``.
    filter_uuid = self._get_filter_uuid('nova-no-nd-reflection')
    return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
              <!-- no nd reflection -->
              <!-- drop if destination mac is v6 mcast mac addr and
                   we sent it. -->
              <uuid>%s</uuid>
              <rule action='drop' direction='in'>
                  <mac dstmacaddr='33:33:00:00:00:00'
                       dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
              </rule>
              </filter>''' % filter_uuid
def nova_dhcp_filter(self):
    """The standard allow-dhcp-server filter is an <ip> one, so it uses
    ebtables to allow traffic through. Without a corresponding rule in
    iptables, it'll get blocked anyway.

    :returns: the 'nova-allow-dhcp-server' nwfilter XML as a string.
    """
    # Renamed from ``uuid``: that local shadowed the module-level
    # ``import uuid``.
    filter_uuid = self._get_filter_uuid('nova-allow-dhcp-server')
    return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
              <uuid>%s</uuid>
              <rule action='accept' direction='out'
                    priority='100'>
                  <udp srcipaddr='0.0.0.0'
                       dstipaddr='255.255.255.255'
                       srcportstart='68'
                       dstportstart='67'/>
              </rule>
              <rule action='accept' direction='in'
                    priority='100'>
                  <udp srcipaddr='$DHCPSERVER'
                       srcportstart='67'
                       dstportstart='68'/>
              </rule>
              </filter>''' % filter_uuid
def setup_basic_filtering(self, instance, network_info):
    """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
    LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
             instance=instance)
    if self.handle_security_groups:
        # No point in setting up a filter set that we'll be overriding
        # anyway.
        return
    LOG.info(_LI('Ensuring static filters'), instance=instance)
    self._ensure_static_filters()
    # Pre-compute both base filter lists; per VIF we pick the DHCP-aware
    # one only when some subnet on that VIF has a dhcp_server.
    nodhcp_base_filter = self.get_base_filter_list(instance, False)
    dhcp_base_filter = self.get_base_filter_list(instance, True)
    for vif in network_info:
        _base_filter = nodhcp_base_filter
        for subnet in vif['network']['subnets']:
            if subnet.get_meta('dhcp_server'):
                _base_filter = dhcp_base_filter
                break
        self._define_filter(self._get_instance_filter_xml(instance,
                                                          _base_filter,
                                                          vif))
def _get_instance_filter_parameters(self, vif):
    """Build the <parameter> elements for one VIF's filterref.

    Emits IP/DHCPSERVER for IPv4 subnets, RASERVER for IPv6 gateways
    (when CONF.use_ipv6), and PROJNET/PROJMASK (plus v6 variants) when
    CONF.allow_same_net_traffic is set.
    """
    parameters = []

    def format_parameter(parameter, value):
        return ("<parameter name='%s' value='%s'/>" % (parameter, value))

    network = vif['network']
    if not vif['network'] or not vif['network']['subnets']:
        # Nothing to parameterize without network/subnet data.
        return parameters

    v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
    v6_subnets = [s for s in network['subnets'] if s['version'] == 6]

    for subnet in v4_subnets:
        for ip in subnet['ips']:
            parameters.append(format_parameter('IP', ip['address']))
        dhcp_server = subnet.get_meta('dhcp_server')
        if dhcp_server:
            parameters.append(format_parameter('DHCPSERVER', dhcp_server))
    if CONF.use_ipv6:
        for subnet in v6_subnets:
            gateway = subnet.get('gateway')
            if gateway:
                # Router-advertisement source is pinned to the gateway /128.
                ra_server = gateway['address'] + "/128"
                parameters.append(format_parameter('RASERVER', ra_server))
    if CONF.allow_same_net_traffic:
        for subnet in v4_subnets:
            ipv4_cidr = subnet['cidr']
            net, mask = netutils.get_net_and_mask(ipv4_cidr)
            parameters.append(format_parameter('PROJNET', net))
            parameters.append(format_parameter('PROJMASK', mask))
        if CONF.use_ipv6:
            for subnet in v6_subnets:
                ipv6_cidr = subnet['cidr']
                net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                parameters.append(format_parameter('PROJNET6', net))
                parameters.append(format_parameter('PROJMASK6', prefix))
    return parameters
def _get_instance_filter_xml(self, instance, filters, vif):
    """Build the root-chain filter XML for one instance/VIF pair.

    References each base filter in *filters* and embeds the per-VIF
    parameter elements inside every filterref.
    """
    mac_suffix = vif['address'].replace(':', '')
    filter_name = self._instance_filter_name(instance, mac_suffix)
    params = self._get_instance_filter_parameters(vif)
    filter_uuid = self._get_filter_uuid(filter_name)
    parts = ["<filter name='%s' chain='root'>" % filter_name,
             '<uuid>%s</uuid>' % filter_uuid]
    for base in filters:
        parts.append("<filterref filter='%s'>" % base)
        parts.extend(params)
        parts.append('</filterref>')
    parts.append('</filter>')
    return ''.join(parts)
def get_base_filter_list(self, instance, allow_dhcp):
    """Obtain a list of base filters to apply to an instance.

    The return value should be a list of strings, each
    specifying a filter name. Subclasses can override this
    function to add additional filters as needed. Additional
    filters added to the list must also be correctly defined
    within the subclass.
    """
    # VPN images always get the VPN filter, regardless of DHCP.
    if pipelib.is_vpn_image(instance.image_ref):
        return ['nova-vpn']
    if allow_dhcp:
        return ['nova-base']
    return ['nova-nodhcp']
def _ensure_static_filters(self):
    """Static filters are filters that have no need to be IP aware.

    There is no configuration or tuneability of these filters, so they
    can be set up once and forgotten about.
    """
    if self.static_filters_configured:
        return
    filter_set = ['no-mac-spoofing',
                  'no-ip-spoofing',
                  'no-arp-spoofing']
    self._define_filter(self.nova_no_nd_reflection_filter())
    filter_set.append('nova-no-nd-reflection')
    # Order matters: 'nova-nodhcp' is defined before 'allow-dhcp-server'
    # is appended, so only 'nova-base' references the DHCP filter.
    self._define_filter(self._filter_container('nova-nodhcp', filter_set))
    filter_set.append('allow-dhcp-server')
    self._define_filter(self._filter_container('nova-base', filter_set))
    self._define_filter(self._filter_container('nova-vpn',
                                               ['allow-dhcp-server']))
    self._define_filter(self.nova_dhcp_filter())
    self.static_filters_configured = True
def _filter_container(self, name, filters):
    """Return XML for a named filter that only references other filters."""
    # Reuse the existing UUID (if any) so redefining is an in-place update.
    uuid = self._get_filter_uuid(name)
    xml = '''<filter name='%s' chain='root'>
<uuid>%s</uuid>
%s
</filter>''' % (name, uuid,
                ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
    return xml
def _get_filter_uuid(self, name):
    """Return the UUID of an existing nwfilter, or a fresh random one.

    Keeping the current UUID lets an existing filter be redefined
    in place rather than duplicated.
    """
    try:
        flt = self._conn.nwfilterLookupByName(name)
        xml = flt.XMLDesc(0)
        doc = etree.fromstring(xml)
        u = doc.find("./uuid").text
    except Exception as e:
        # Broad on purpose: any lookup/parse failure simply means we
        # mint a new UUID instead of reusing one.
        LOG.debug(u"Cannot find UUID for filter '%(name)s': '%(e)s'",
                  {'name': name, 'e': e})
        u = uuid.uuid4().hex
    LOG.debug("UUID for filter '%s' is '%s'", name, u)
    return u
def _define_filter(self, xml):
    """Define an nwfilter; *xml* may be a string or a callable producing one."""
    payload = xml() if callable(xml) else xml
    self._conn.nwfilterDefineXML(payload)
def unfilter_instance(self, instance, network_info):
    """Clear out the nwfilter rules."""
    for vif in network_info:
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self._instance_filter_name(instance, nic_id)
        # nwfilters may be defined in a separate thread in the case
        # of libvirt non-blocking mode, so we wait for completion
        max_retry = CONF.live_migration_retry_count
        for cnt in range(max_retry):
            try:
                _nw = self._conn.nwfilterLookupByName(instance_filter_name)
                _nw.undefine()
                break
            except libvirt.libvirtError as e:
                if cnt == max_retry - 1:
                    # Out of retries: surface the last libvirt error.
                    raise
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # This happens when the instance filter is still in use
                    # (ie. when the instance has not terminated properly)
                    LOG.info(_LI('Failed to undefine network filter '
                                 '%(name)s. Try %(cnt)d of '
                                 '%(max_retry)d.'),
                             {'name': instance_filter_name,
                              'cnt': cnt + 1,
                              'max_retry': max_retry},
                             instance=instance)
                    greenthread.sleep(1)
                else:
                    # Any other error is treated as "already gone".
                    LOG.debug('The nwfilter(%s) is not found.',
                              instance_filter_name, instance=instance)
                    break
@staticmethod
def _instance_filter_name(instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance.name)
return 'nova-instance-%s-%s' % (instance.name, nic_id)
def instance_filter_exists(self, instance, network_info):
    """Check nova-instance-instance-xxx exists."""
    # A single missing per-NIC filter means the instance is unfiltered.
    for vif in network_info:
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self._instance_filter_name(instance, nic_id)
        try:
            self._conn.nwfilterLookupByName(instance_filter_name)
        except libvirt.libvirtError:
            name = instance.name
            LOG.debug('The nwfilter(%(instance_filter_name)s) for'
                      '%(name)s is not found.',
                      {'instance_filter_name': instance_filter_name,
                       'name': name},
                      instance=instance)
            return False
    return True
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
    """Iptables firewall that also maintains the basic nwfilter rules."""

    def __init__(self, execute=None, **kwargs):
        """Create an IP tables firewall driver instance

        :param execute: unused, pass None
        :param kwargs: extra arguments

        The @kwargs parameter must contain a key 'host' that
        maps to an instance of the nova.virt.libvirt.host.Host
        class.
        """
        super(IptablesFirewallDriver, self).__init__(**kwargs)
        # Companion driver providing MAC/IP/ARP anti-spoofing via nwfilter.
        self.nwfilter = NWFilterFirewall(kwargs['host'])

    def setup_basic_filtering(self, instance, network_info):
        """Set up basic NWFilter."""
        self.nwfilter.setup_basic_filtering(instance, network_info)

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter."""
        pass

    def unfilter_instance(self, instance, network_info):
        # NOTE(salvatore-orlando):
        # Overriding base class method for applying nwfilter operation
        # instance_info is presumably populated by the base class when the
        # instance was filtered; popping tells us whether work is needed.
        if self.instance_info.pop(instance.id, None):
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
            self.nwfilter.unfilter_instance(instance, network_info)
        else:
            LOG.info(_LI('Attempted to unfilter instance which is not '
                         'filtered'), instance=instance)

    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists."""
        return self.nwfilter.instance_filter_exists(instance, network_info)
| |
# Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import traceback
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# States usable in resetState action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)


def authorize(context, action_name):
    """Enforce the 'admin_actions:<action_name>' policy for this context."""
    action = 'admin_actions:%s' % action_name
    extensions.extension_authorizer('compute', action)(context)
class AdminActionsController(wsgi.Controller):
    """Controller exposing admin-only server actions.

    Each handler authorizes the per-action policy, looks the server up,
    delegates to compute.API and maps failures onto HTTP errors.
    """

    def __init__(self, *args, **kwargs):
        super(AdminActionsController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    # TODO(bcwaldon): These action names should be prefixed with 'os-'

    @wsgi.action('pause')
    def _pause(self, req, id, body):
        """Permit Admins to pause the server."""
        ctxt = req.environ['nova.context']
        authorize(ctxt, 'pause')
        try:
            server = self.compute_api.get(ctxt, id)
            self.compute_api.pause(ctxt, server)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'pause')
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::pause %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('unpause')
    def _unpause(self, req, id, body):
        """Permit Admins to unpause the server."""
        ctxt = req.environ['nova.context']
        authorize(ctxt, 'unpause')
        try:
            server = self.compute_api.get(ctxt, id)
            self.compute_api.unpause(ctxt, server)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'unpause')
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::unpause %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('suspend')
    def _suspend(self, req, id, body):
        """Permit admins to suspend the server."""
        context = req.environ['nova.context']
        authorize(context, 'suspend')
        try:
            server = self.compute_api.get(context, id)
            self.compute_api.suspend(context, server)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'suspend')
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("compute.api::suspend %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('resume')
    def _resume(self, req, id, body):
        """Permit admins to resume the server from suspend."""
        context = req.environ['nova.context']
        authorize(context, 'resume')
        try:
            server = self.compute_api.get(context, id)
            self.compute_api.resume(context, server)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'resume')
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("compute.api::resume %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('migrate')
    def _migrate(self, req, id, body):
        """Permit admins to migrate a server to a new host."""
        context = req.environ['nova.context']
        authorize(context, 'migrate')
        try:
            instance = self.compute_api.get(context, id)
            # Use the already-bound context rather than re-reading the
            # request environ a second time.
            self.compute_api.resize(context, instance)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'migrate')
        except Exception as e:
            # 'except Exception, e' was Python 2-only syntax and was also
            # inconsistent with the 'as' form used elsewhere in this file.
            LOG.exception(_("Error in migrate %s"), e)
            raise exc.HTTPBadRequest()
        return webob.Response(status_int=202)

    @wsgi.action('resetNetwork')
    def _reset_network(self, req, id, body):
        """Permit admins to reset networking on a server."""
        context = req.environ['nova.context']
        authorize(context, 'resetNetwork')
        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.reset_network(context, instance)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::reset_network %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('injectNetworkInfo')
    def _inject_network_info(self, req, id, body):
        """Permit admins to inject network info into a server."""
        context = req.environ['nova.context']
        authorize(context, 'injectNetworkInfo')
        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.inject_network_info(context, instance)
        except exception.InstanceNotFound:
            raise exc.HTTPNotFound(_("Server not found"))
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::inject_network_info %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('lock')
    def _lock(self, req, id, body):
        """Permit admins to lock a server."""
        context = req.environ['nova.context']
        authorize(context, 'lock')
        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.lock(context, instance)
        except exception.InstanceNotFound:
            raise exc.HTTPNotFound(_("Server not found"))
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::lock %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('unlock')
    def _unlock(self, req, id, body):
        """Permit admins to unlock a server."""
        context = req.environ['nova.context']
        authorize(context, 'unlock')
        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.unlock(context, instance)
        except exception.InstanceNotFound:
            raise exc.HTTPNotFound(_("Server not found"))
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::unlock %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('createBackup')
    def _create_backup(self, req, id, body):
        """Backup a server instance.

        Images now have an `image_type` associated with them, which can be
        'snapshot' or the backup type, like 'daily' or 'weekly'.

        If the image_type is backup-like, then the rotation factor can be
        included and that will cause the oldest backups that exceed the
        rotation factor to be deleted.
        """
        context = req.environ["nova.context"]
        authorize(context, 'createBackup')

        try:
            entity = body["createBackup"]
        except (KeyError, TypeError):
            raise exc.HTTPBadRequest(_("Malformed request body"))

        try:
            image_name = entity["name"]
            backup_type = entity["backup_type"]
            rotation = entity["rotation"]
        except KeyError as missing_key:
            msg = _("createBackup entity requires %s attribute") % missing_key
            raise exc.HTTPBadRequest(explanation=msg)
        except TypeError:
            msg = _("Malformed createBackup entity")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            rotation = int(rotation)
        except ValueError:
            msg = _("createBackup attribute 'rotation' must be an integer")
            raise exc.HTTPBadRequest(explanation=msg)
        if rotation < 0:
            msg = _("createBackup attribute 'rotation' must be greater "
                    "than or equal to zero")
            raise exc.HTTPBadRequest(explanation=msg)

        props = {}
        metadata = entity.get('metadata', {})
        common.check_img_metadata_properties_quota(context, metadata)
        try:
            props.update(metadata)
        except ValueError:
            msg = _("Invalid metadata")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound(_("Instance not found"))

        try:
            image = self.compute_api.backup(context, instance, image_name,
                    backup_type, rotation, extra_properties=props)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'createBackup')

        resp = webob.Response(status_int=202)
        # build location of newly-created image entity if rotation is not zero
        if rotation > 0:
            image_id = str(image['id'])
            image_ref = os.path.join(req.application_url, 'images', image_id)
            resp.headers['Location'] = image_ref
        return resp

    @wsgi.action('os-migrateLive')
    def _migrate_live(self, req, id, body):
        """Permit admins to (live) migrate a server to a new host."""
        context = req.environ["nova.context"]
        authorize(context, 'migrateLive')
        try:
            block_migration = body["os-migrateLive"]["block_migration"]
            disk_over_commit = body["os-migrateLive"]["disk_over_commit"]
            host = body["os-migrateLive"]["host"]
        except (TypeError, KeyError):
            msg = _("host and block_migration must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.live_migrate(context, instance, block_migration,
                                          disk_over_commit, host)
        except Exception:
            msg = _("Live migration of instance %(id)s to host %(host)s"
                    " failed") % locals()
            LOG.exception(msg)
            # Return messages from scheduler
            raise exc.HTTPBadRequest(explanation=msg)

        return webob.Response(status_int=202)

    @wsgi.action('os-resetState')
    def _reset_state(self, req, id, body):
        """Permit admins to reset the state of a server."""
        context = req.environ["nova.context"]
        authorize(context, 'resetState')

        # Identify the desired state from the body
        try:
            state = state_map[body["os-resetState"]["state"]]
        except (TypeError, KeyError):
            msg = _("Desired state must be specified. Valid states "
                    "are: %s") % ', '.join(sorted(state_map.keys()))
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.update_state(context, instance, state)
        except exception.InstanceNotFound:
            raise exc.HTTPNotFound(_("Server not found"))
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::resetState %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
    """Enable admin-only server actions

    Actions include: pause, unpause, suspend, resume, migrate,
    resetNetwork, injectNetworkInfo, lock, unlock, createBackup
    """

    # Extension metadata consumed by the extensions framework.
    name = "AdminActions"
    alias = "os-admin-actions"
    namespace = "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1"
    updated = "2011-09-20T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the admin-actions controller to the 'servers' resource.
        controller = AdminActionsController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
| |
#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for producing IPMI sample messages from notification events.
"""
import mock
from oslotest import base
from ceilometer.ipmi.notifications import ironic as ipmi
from ceilometer import sample
from ceilometer.tests.ipmi.notifications import ipmi_test_data
class TestNotifications(base.BaseTestCase):
    """Tests turning IPMI notification payloads into ceilometer samples."""

    def test_ipmi_temperature_notification(self):
        """Test IPMI Temperature sensor data.

        Based on the above ipmi_testdata the expected sample for a single
        temperature reading has::

        * a resource_id composed from the node_uuid Sensor ID
        * a name composed from 'hardware.ipmi.' and 'temperature'
        * a volume from the first chunk of the Sensor Reading
        * a unit from the last chunk of the Sensor Reading
        * some readings are skipped if the value is 'Disabled'
        * metadata with the node id
        """
        processor = ipmi.TemperatureSensorNotification(None)
        # dict comprehension instead of dict([(k, v) for ...]) (C404).
        counters = {counter.resource_id: counter
                    for counter in processor.process_notification(
                        ipmi_test_data.SENSOR_DATA)}

        self.assertEqual(10, len(counters),
                         'expected 10 temperature readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(26.0, test_counter.volume)
        self.assertEqual('C', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.temperature', test_counter.name)
        self.assertEqual('hardware.ipmi.metrics.update',
                         test_counter.resource_metadata['event_type'])
        self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad',
                         test_counter.resource_metadata['node'])

    def test_ipmi_current_notification(self):
        """Test IPMI Current sensor data.

        A single current reading is effectively the same as temperature,
        modulo "current".
        """
        processor = ipmi.CurrentSensorNotification(None)
        counters = {counter.resource_id: counter
                    for counter in processor.process_notification(
                        ipmi_test_data.SENSOR_DATA)}

        self.assertEqual(1, len(counters), 'expected 1 current reading')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(130.0, test_counter.volume)
        self.assertEqual('W', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.current', test_counter.name)

    def test_ipmi_fan_notification(self):
        """Test IPMI Fan sensor data.

        A single fan reading is effectively the same as temperature,
        modulo "fan".
        """
        processor = ipmi.FanSensorNotification(None)
        counters = {counter.resource_id: counter
                    for counter in processor.process_notification(
                        ipmi_test_data.SENSOR_DATA)}

        self.assertEqual(12, len(counters), 'expected 12 fan readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(6900.0, test_counter.volume)
        self.assertEqual('RPM', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.fan', test_counter.name)

    def test_ipmi_voltage_notification(self):
        """Test IPMI Voltage sensor data.

        A single voltage reading is effectively the same as temperature,
        modulo "voltage".
        """
        processor = ipmi.VoltageSensorNotification(None)
        counters = {counter.resource_id: counter
                    for counter in processor.process_notification(
                        ipmi_test_data.SENSOR_DATA)}

        # message typo fixed: 'volate' -> 'voltage'
        self.assertEqual(4, len(counters), 'expected 4 voltage readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(3.137, test_counter.volume)
        self.assertEqual('V', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.voltage', test_counter.name)

    # NOTE: method name typo ('disabed') kept so external test selectors
    # that reference it by name keep working.
    def test_disabed_skips_metric(self):
        """Test that a meter which a disabled volume is skipped."""
        processor = ipmi.TemperatureSensorNotification(None)
        counters = {counter.resource_id: counter
                    for counter in processor.process_notification(
                        ipmi_test_data.SENSOR_DATA)}

        self.assertEqual(10, len(counters),
                         'expected 10 temperature readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)'
        )
        self.assertNotIn(resource_id, counters)

    def test_empty_payload_no_metrics_success(self):
        processor = ipmi.TemperatureSensorNotification(None)
        counters = {counter.resource_id: counter
                    for counter in processor.process_notification(
                        ipmi_test_data.EMPTY_PAYLOAD)}

        self.assertEqual(0, len(counters), 'expected 0 readings')

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_missing_sensor_data(self, mylog):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warn = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.MISSING_SENSOR))

        self.assertEqual(
            'invalid sensor data for '
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
            "missing 'Sensor Reading' in payload",
            messages[0]
        )

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_sensor_data_malformed(self, mylog):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warn = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.BAD_SENSOR))

        self.assertEqual(
            'invalid sensor data for '
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
            'unable to parse sensor reading: some bad stuff',
            messages[0]
        )

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_missing_node_uuid(self, mylog):
        """Test for desired error message when 'node_uuid' missing.

        Presumably this will never happen given the way the data
        is created, but better defensive than dead.
        """
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warn = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.NO_NODE_ID))

        self.assertEqual(
            'invalid sensor data for missing id: missing key in payload: '
            "'node_uuid'",
            messages[0]
        )

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_missing_sensor_id(self, mylog):
        """Test for desired error message when 'Sensor ID' missing."""
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warn = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID))

        self.assertEqual(
            'invalid sensor data for missing id: missing key in payload: '
            "'Sensor ID'",
            messages[0]
        )
| |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
from os import path
import shutil
import sys
from nototools import unicode_data
"""Create aliases in target directory.
In addition to links/copies named with aliased sequences, this can also
create canonically named aliases/copies, if requested."""
DATA_ROOT = path.dirname(path.abspath(__file__))


def str_to_seq(seq_str):
    """Parse an underscore-separated hex codepoint string into a tuple,
    dropping any emoji variation selector (fe0f) and warning about it."""
    codepoints = tuple(int(part, 16) for part in seq_str.split('_'))
    if 0xfe0f in codepoints:
        print('0xfe0f in file name: %s' % seq_str)
        codepoints = tuple(cp for cp in codepoints if cp != 0xfe0f)
    return codepoints
def seq_to_str(seq):
    """Format codepoints as zero-padded lowercase hex joined by underscores."""
    return '_'.join(format(cp, '04x') for cp in seq)
def read_default_unknown_flag_aliases():
    """Read the bundled unknown-flag alias file next to this script."""
    unknown_flag_path = path.join(DATA_ROOT, 'unknown_flag_aliases.txt')
    return read_emoji_aliases(unknown_flag_path)
def read_default_emoji_aliases():
    """Read the bundled default emoji alias file next to this script."""
    alias_path = path.join(DATA_ROOT, 'emoji_aliases.txt')
    return read_emoji_aliases(alias_path)
def read_emoji_aliases(filename):
    """Parse an alias file into a dict of codepoint tuples.

    Each non-comment line has the form ``<alias>;<target>`` where each side
    is an underscore-separated list of hex codepoints.  Lines whose
    codepoints fail to parse are reported and skipped.

    :param filename: path of the alias file to read.
    :return: dict mapping alias tuples to target tuples.
    """
    result = {}
    with open(filename, 'r') as f:
        for line in f:
            # Strip trailing '#' comments and surrounding whitespace.
            ix = line.find('#')
            if (ix > -1):
                line = line[:ix]
            line = line.strip()
            if not line:
                continue
            als, trg = (s.strip() for s in line.split(';'))
            try:
                als_seq = tuple([int(x, 16) for x in als.split('_')])
                trg_seq = tuple([int(x, 16) for x in trg.split('_')])
            except ValueError:
                # Was a bare 'except:'; only malformed hex should be
                # skipped, not KeyboardInterrupt/SystemExit etc.
                print('cannot process alias %s -> %s' % (als, trg))
                continue
            result[als_seq] = trg_seq
    return result
def add_aliases(
        srcdir, dstdir, aliasfile, prefix, ext, replace=False, copy=False,
        canonical_names=False, dry_run=False):
    """Use aliasfile to create aliases of files in srcdir matching prefix/ext in
    dstdir. If dstdir is null, use srcdir as dstdir. If replace is false
    and a file already exists in dstdir, report and do nothing. If copy is false
    create a symlink, else create a copy.

    If canonical_names is true, check all source files and generate aliases/copies
    using the canonical name if different from the existing name.

    If dry_run is true, report what would be done. Dstdir will be created if
    necessary, even if dry_run is true."""
    if not path.isdir(srcdir):
        print('%s is not a directory' % srcdir, file=sys.stderr)
        return

    if not dstdir:
        dstdir = srcdir
    elif not path.isdir(dstdir):
        os.makedirs(dstdir)

    prefix_len = len(prefix)
    suffix_len = len(ext) + 1
    filenames = [path.basename(f)
                 for f in glob.glob(path.join(srcdir, '%s*.%s' % (prefix, ext)))]
    seq_to_file = {
        str_to_seq(name[prefix_len:-suffix_len]): name
        for name in filenames}

    aliases = read_emoji_aliases(aliasfile)
    aliases_to_create = {}
    aliases_to_replace = []
    alias_exists = False

    def check_alias_seq(seq):
        # BUG FIX: without 'nonlocal', 'alias_exists = True' below bound a
        # new local inside this closure, so the outer flag stayed False and
        # the "aborting, aliases exist." path could never trigger.
        nonlocal alias_exists
        alias_str = seq_to_str(seq)
        alias_name = '%s%s.%s' % (prefix, alias_str, ext)
        alias_path = path.join(dstdir, alias_name)
        if path.exists(alias_path):
            if replace:
                aliases_to_replace.append(alias_name)
            else:
                print('alias %s exists' % alias_str, file=sys.stderr)
                alias_exists = True
                return None
        return alias_name

    canonical_to_file = {}
    for als, trg in sorted(aliases.items()):
        if trg not in seq_to_file:
            print('target %s for %s does not exist' % (
                seq_to_str(trg), seq_to_str(als)), file=sys.stderr)
            continue
        alias_name = check_alias_seq(als)
        if alias_name:
            target_file = seq_to_file[trg]
            aliases_to_create[alias_name] = target_file
            if canonical_names:
                canonical_seq = unicode_data.get_canonical_emoji_sequence(als)
                if canonical_seq and canonical_seq != als:
                    canonical_alias_name = check_alias_seq(canonical_seq)
                    if canonical_alias_name:
                        canonical_to_file[canonical_alias_name] = target_file

    if canonical_names:
        print('adding %d canonical aliases' % len(canonical_to_file))
        # BUG FIX: dict.iteritems() is Python 2-only; the rest of this file
        # already uses the Python 3 compatible .items().
        for seq, f in seq_to_file.items():
            canonical_seq = unicode_data.get_canonical_emoji_sequence(seq)
            if canonical_seq and canonical_seq != seq:
                alias_name = check_alias_seq(canonical_seq)
                if alias_name:
                    canonical_to_file[alias_name] = f

        print('adding %d total canonical sequences' % len(canonical_to_file))
        aliases_to_create.update(canonical_to_file)

    if replace:
        if not dry_run:
            for k in sorted(aliases_to_replace):
                os.remove(path.join(dstdir, k))
        print('replacing %d files' % len(aliases_to_replace))
    elif alias_exists:
        print('aborting, aliases exist.', file=sys.stderr)
        return

    for k, v in sorted(aliases_to_create.items()):
        if dry_run:
            msg = 'replace ' if k in aliases_to_replace else ''
            print('%s%s -> %s' % (msg, k, v))
        else:
            try:
                if copy:
                    shutil.copy2(path.join(srcdir, v), path.join(dstdir, k))
                else:
                    # fix this to create relative symlinks
                    if srcdir == dstdir:
                        os.symlink(v, path.join(dstdir, k))
                    else:
                        raise Exception('can\'t create cross-directory symlinks yet')
            except Exception as e:
                print('failed to create %s -> %s' % (k, v), file=sys.stderr)
                raise Exception('oops, ' + str(e))
    print('created %d %s' % (
        len(aliases_to_create), 'copies' if copy else 'symlinks'))
def main():
    """Command-line entry point: parse arguments and create the aliases."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--srcdir', help='directory containing files to alias',
        required=True, metavar='dir')
    parser.add_argument(
        '-d', '--dstdir', help='directory to write aliases, default srcdir',
        metavar='dir')
    parser.add_argument(
        '-a', '--aliasfile', help='alias file (default emoji_aliases.txt)',
        metavar='file', default='emoji_aliases.txt')
    parser.add_argument(
        '-p', '--prefix', help='file name prefix (default emoji_u)',
        metavar='pfx', default='emoji_u')
    parser.add_argument(
        '-e', '--ext', help='file name extension (default png)',
        choices=['ai', 'png', 'svg'], default='png')
    parser.add_argument(
        '-r', '--replace', help='replace existing files/aliases',
        action='store_true')
    parser.add_argument(
        '-c', '--copy', help='create a copy of the file, not a symlink',
        action='store_true')
    parser.add_argument(
        '--canonical_names', help='include extra copies with canonical names '
        '(including fe0f emoji presentation character)', action='store_true');
    parser.add_argument(
        '-n', '--dry_run', help='print out aliases to create only',
        action='store_true')
    args = parser.parse_args()
    add_aliases(
        args.srcdir, args.dstdir, args.aliasfile, args.prefix, args.ext,
        args.replace, args.copy, args.canonical_names, args.dry_run)


if __name__ == '__main__':
    main()
| |
from lxml import etree
from django.db.models import Model as djangoModel
from django.core.exceptions import ObjectDoesNotExist
class FieldError(ValueError):
    """Base exception for errors while parsing or validating a field value."""
class ForeignKeyFieldError(FieldError):
    """Raised when a foreign-key lookup finds no matching model instance.

    Stores the model name and the looked-up value so callers can report
    exactly which lookup failed.
    """

    def __init__(self, msg, model, value):
        self.msg = msg
        self.model = model
        self.value = value
        super(ForeignKeyFieldError, self).__init__(msg)
class FieldValueMissing(FieldError):
    """Raised when the input row has no value for a declared field."""

    def __init__(self, field_name):
        message = "No value found for field %s" % field_name
        super(FieldValueMissing, self).__init__(message)
class Field(object):
    """Base class for one importable field.

    Recognized keyword options (all optional): row_num (explicit column
    position), match, transform (post-conversion callable), validator
    (class with a validate() method and a validation_message attribute),
    multiple, prepare (pre-conversion callable); ComposedKeyField also
    accepts keys.  Any other keyword raises ValueError.
    """

    # Class-level counter: fields that do not pass row_num are assigned
    # consecutive column positions in declaration order.
    position = 0

    def __init__(self, **kwargs):
        if 'row_num' in kwargs:
            self.position = kwargs.pop('row_num')
        else:
            self.position = Field.position
            Field.position += 1
        if 'match' in kwargs:
            self.match = kwargs.pop('match')
        if 'transform' in kwargs:
            self.transform = kwargs.pop('transform')
        if 'validator' in kwargs:
            self.validator = kwargs.pop('validator')
        if 'multiple' in kwargs:
            self.has_multiple = kwargs.pop('multiple')
        if 'prepare' in kwargs:
            self.prepare = kwargs.pop('prepare')
        # 'keys' is only meaningful for ComposedKeyField subclasses.
        if 'keys' in kwargs and isinstance(self, ComposedKeyField):
            self.keys = kwargs.pop('keys')
        # Anything left over is an unsupported option.
        if len(kwargs) > 0:
            raise ValueError("Arguments %s unexpected" % kwargs.keys())

    def get_prep_value(self, value):
        """Run the full pipeline: prepare -> to_python -> transform -> validate.

        Returns the converted value; raises FieldError on validation failure
        and ValueError when type conversion fails.
        """
        try:
            if hasattr(self, "prepare"):
                value = self.prepare(value)
            # to_python is supplied by the concrete subclass.
            value = self.to_python(value)
            if hasattr(self, "transform"):
                value = self.transform(value)
            if hasattr(self, "validator"):
                validator = self.validator()
                if not validator.validate(value):
                    raise FieldError(validator.__class__.validation_message)
            return value
        except FieldError, e:
            # Validation errors pass through unchanged.
            raise e
        except ValueError, e:
            # Conversion errors are re-raised with column context.
            raise ValueError("Value \'%s\' in columns %d does not match the expected type %s" %
                             (value, self.position + 1, self.__class__.field_name))
class IntegerField(Field):
    """Field whose value is converted with int()."""

    field_name = "Integer"

    def to_python(self, value):
        # int() raises ValueError on malformed input, which the base class
        # converts into a column-aware message.
        return int(value)
class BooleanField(Field):
    """Field converting text to bool via a pluggable truth test.

    Pass is_true=<callable> to override the default, which treats the
    case-insensitive string "true" as True.
    """

    field_name = "Boolean"

    def default_is_true_method(self, value):
        return value.lower() == "true"

    def __init__(self, *args, **kwargs):
        # pop with a default collapses the original if/else into one line.
        self.is_true_method = kwargs.pop('is_true', self.default_is_true_method)
        super(BooleanField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        return self.is_true_method(value)
class CharField(Field):
    """Field that keeps the raw string value unchanged."""

    field_name = "String"

    def to_python(self, value):
        return value
class FloatField(Field):
    """Field whose value is converted with float()."""

    field_name = "A float number"

    def to_python(self, value):
        return float(value)
class IgnoredField(Field):
    """Placeholder field for a column that should be skipped entirely."""

    field_name = "Ignore the value"
class ForeignKey(Field):
    """Field resolving a raw value to a Django model instance.

    First positional argument is the model class; the 'pk' keyword names
    the lookup field (defaults to 'pk').
    """

    field_name = "not defined"

    def __init__(self, *args, **kwargs):
        self.pk = kwargs.pop('pk', 'pk')
        if len(args) < 1:
            raise ValueError("You should provide a Model as the first argument.")
        self.model = args[0]
        try:
            # issubclass itself raises TypeError when given a non-class.
            if not issubclass(self.model, djangoModel):
                raise TypeError("The first argument should be a django model class.")
        except TypeError, e:
            raise TypeError("The first argument should be a django model class.")
        super(ForeignKey, self).__init__(**kwargs)

    def to_python(self, value):
        """Look up the instance by self.pk; raise ForeignKeyFieldError if absent."""
        try:
            return self.model.objects.get(**{self.pk: value})
        except ObjectDoesNotExist, e:
            raise ForeignKeyFieldError("No match found for %s" % self.model.__name__, self.model.__name__, value)
class ComposedKeyField(ForeignKey):
    """ForeignKey variant whose value is a dict of lookup kwargs (a composed key)."""

    def to_python(self, value):
        try:
            # value is expected to be a mapping of field name -> value.
            return self.model.objects.get(**value)
        except ObjectDoesNotExist, e:
            raise ForeignKeyFieldError("No match found for %s" % self.model.__name__, self.model.__name__, value)
class XMLField(Field):
    """Mixin adding XPath extraction on top of a concrete Field type.

    Subclasses combine this with a typed Field (e.g. XMLIntegerField is
    XMLField + IntegerField); the typed base performs the conversion while
    this class locates the value via the 'path' XPath expression.

    Keyword options: path (required XPath), root (pre-parsed element to
    search instead of parsing the raw value), null (allow a missing node),
    default (value used when the node is missing and null is set).
    """

    type_field_class = None

    def __init__(self, *args, **kwargs):
        self.path = kwargs.pop("path")
        self.root = kwargs.pop("root", None)
        self.null = kwargs.pop("null", False)
        # NOTE(review): a falsy default (0, "", False) slips past this check;
        # left as-is to preserve existing behavior.
        self.default = kwargs.pop("default", None)
        if self.default and not self.null:
            raise FieldError("You cannot provide a default without setting the field as nullable")
        self.type_class = self._get_type_field()
        if self.type_class:
            self.type_class.__init__(self, *args, **kwargs)

    def _get_type_field(self):
        """Return the non-XML Field base class that does the type conversion."""
        base_classes = self.__class__.__bases__
        for base_class in base_classes:
            if issubclass(base_class, Field) and not issubclass(base_class, XMLField):
                return base_class

    def get_prep_value(self, value):
        """Extract the node text via XPath, then delegate to the typed base.

        Returns None (or self.default) when the node is missing and the
        field is nullable.
        """
        element = self.root or etree.fromstring(value)
        values = element.xpath(self.path)
        if not values and self.null:
            if self.default is not None:
                parsed_value = self.default
            else:
                return None
        else:
            # Reuse the nodes already found instead of evaluating the same
            # XPath expression a second time.
            parsed_value = values[0].text
        return self.type_class.get_prep_value(self, parsed_value)

    def set_root(self, root):
        """Point this field at an already-parsed element subtree."""
        self.root = root
class XMLRootField(XMLField):
    """Field marking the XPath that splits a document into per-row elements.

    It carries no value of its own: get_prep_value/to_python are no-ops and
    get_root() returns the matched elements.
    """

    def __init__(self, *args, **kwargs):
        super(XMLRootField, self).__init__(*args, **kwargs)
        # NOTE(review): kwargs is mutated after super() has already consumed
        # it, so this assignment has no observable effect — confirm intent.
        kwargs['root'] = self

    def get_prep_value(self, value):
        pass

    def to_python(self, value):
        pass

    def get_root(self, value):
        """Return the list of elements matched by self.path in value (or self.root)."""
        element = self.root or etree.fromstring(value)
        return element.xpath(self.path)
class XMLEmbed(XMLRootField):
    """Field embedding one model per sub-element of the current row.

    embed_model is expected to expose get_root_field() (returning a
    (name, field) pair whose field has a .path) and to be constructible as
    embed_model(value, element=root) — presumably another importer model;
    verify against callers.
    """

    def __init__(self, embed_model):
        self.embed_model = embed_model
        super(XMLEmbed, self).__init__(path=self.embed_model.get_root_field()[1].path)

    def get_prep_value(self, value):
        """Instantiate embed_model once per element matched under self.root."""
        roots = self.get_root(self.root)
        objects = []
        for root in roots:
            objects.append(self.embed_model(value, element=root))
        return objects
class XMLCharField(XMLField, CharField):
    """String value extracted from an XML document via XPath."""
class XMLIntegerField(XMLField, IntegerField):
    """Integer value extracted from an XML document via XPath."""
class XMLFloatField(XMLField, FloatField):
    """Float value extracted from an XML document via XPath."""
class XMLForeignKey(XMLField, ForeignKey):
    """Foreign-key lookup whose raw value comes from an XPath expression.

    nomatch=True turns a failed lookup into None instead of an exception.
    """

    def __init__(self, *args, **kwargs):
        self.nomatch = kwargs.pop("nomatch", False)
        super(XMLForeignKey, self).__init__(*args, **kwargs)

    def get_prep_value(self, value):
        try:
            return super(XMLForeignKey, self).get_prep_value(value)
        except ForeignKeyFieldError, e:
            if self.nomatch:
                # Tolerate missing rows when configured to do so.
                return None
            else:
                raise e
class XMLBooleanField(XMLField, BooleanField):
    """Boolean value extracted from an XML document via XPath."""
| |
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from math import *
import math
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from direct.task import Task
from direct.distributed.ClockDelta import *
import BuildGeometry
from toontown.golf import GolfGlobals
import random, time
def scalp(vec, scal):
    """Return *vec* uniformly scaled by *scal* as a new Vec3.

    Bug fix: the original built the scaled Vec3 but only rebound the local
    name 'vec', so the result was silently discarded and every call was a
    no-op.  Returning the new vector is backward compatible — callers that
    ignored the old None return value are unaffected.
    """
    return Vec3(vec[0] * scal, vec[1] * scal, vec[2] * scal)
def length(vec):
    """Return the Euclidean magnitude of the first three components of vec."""
    x, y, z = vec[0], vec[1], vec[2]
    return sqrt(x * x + y * y + z * z)
class PhysicsWorldBase:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhysicsWorld')
def __init__(self, canRender = 0):
    """Create the ODE world, collision space, and all bookkeeping state.

    canRender: when truthy, physics bodies get paired render-side
    NodePaths and the root node is parented under render.
    """
    self.canRender = canRender
    self.world = OdeWorld()
    self.space = OdeSimpleSpace()
    self.contactgroup = OdeJointGroup()
    self.bodyList = []
    self.geomList = []
    self.massList = []
    self.rayList = []
    self.showContacts = 0
    self.jointMarkers = []
    self.jointMarkerCount = 64
    self.meshDataList = []
    self.geomDataList = []
    self.commonObjectInfoDict = {}
    # Tracks the largest simultaneous contact count seen (reported in delete).
    self.maxColCount = 0
    if self.canRender:
        # In render mode the body list doubles as the (NodePath, body) list.
        self.odePandaRelationList = self.bodyList
        self.root = render.attachNewNode('physics root node')
    else:
        self.root = NodePath('physics root node')
    # Helper nodes used to compute positions/orientations for new objects.
    self.placerNode = self.root.attachNewNode('Placer')
    self.subPlacerNode = self.placerNode.attachNewNode('Placer Sub Node')
    self.commonObjectDict = {}
    self.commonId = 0
    self.worldAttach = self.root.attachNewNode('physics geom attach point')
    # Repeating timing cycle used to keep moving obstacles in sync.
    self.timingCycleLength = 10.0
    self.timingCycleOffset = 0.0
    self.timingSimTime = 0.0
    # Fixed-step integration: FPS steps per second of wall time.
    self.FPS = 90.0
    self.refFPS = 60.0
    self.DTAStep = 1.0 / self.FPS
    self.refCon = 1.2
def delete(self):
    """Tear down the simulation: stop the task and destroy all ODE objects."""
    self.notify.debug('Max Collision Count was %s' % self.maxColCount)
    self.stopSim()
    self.commonObjectDict = None
    if self.canRender:
        # Render mode: remove the visual node and destroy its body together.
        for pair in self.odePandaRelationList:
            pair[0].remove()
            pair[1].destroy()
        self.odePandaRelationList = None
    else:
        for body in self.bodyList:
            body[1].destroy()
        self.bodyList = None
    for mass in self.massList:
        mass = None
    for geom in self.geomList:
        geom.destroy()
        geom = None
    for ray in self.rayList:
        ray.destroy()
        ray = None
    self.placerNode.remove()
    self.root.remove()
    for marker in self.jointMarkers:
        marker.remove()
    self.jointMarkers = None
    for data in self.geomDataList:
        data.destroy()
    for data in self.meshDataList:
        data.destroy()
    self.floor.destroy()
    self.floor = None
    self.contactgroup.empty()
    self.world.destroy()
    self.space.destroy()
    self.world = None
    self.space = None
    return
def setupSimulation(self):
    """Configure world integration parameters, the surface table, and the floor.

    Surface-table indices pair geom surface types; entries are
    (mu, bounce, bounce_vel, soft_erp, soft_cfm, slip, dampen) — dampen is
    scaled by self.refCon throughout.
    """
    self.world.setAutoDisableFlag(0)
    self.world.setAutoDisableLinearThreshold(0.15)
    self.world.setAutoDisableAngularThreshold(0.15)
    self.world.setAutoDisableSteps(2)
    self.world.setGravity(0, 0, -25)
    # Global error correction / constraint mixing for the solver.
    self.world.setErp(0.8)
    self.world.setCfm(1e-05)
    self.world.initSurfaceTable(5)
    self.world.setSurfaceEntry(0, 0, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(1, 1, 1500, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
    self.world.setSurfaceEntry(2, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(0, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(0, 3, 150, 0.0, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(1, 3, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 1.0 / self.refCon)
    self.world.setSurfaceEntry(2, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(3, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(4, 4, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
    self.world.setSurfaceEntry(1, 4, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
    self.world.setSurfaceEntry(pos1=0, pos2=1, mu=80, bounce=0.15, bounce_vel=0.1, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.35 / self.refCon)
    self.world.setSurfaceEntry(pos1=2, pos2=1, mu=1500, bounce=0.9, bounce_vel=0.01, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.001 / self.refCon)
    # Kill floor far below the course; collides with nothing directly.
    self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
    self.floor.setCollideBits(BitMask32(0))
    self.floor.setCategoryBits(BitMask32(3840))
    self.space.setAutoCollideWorld(self.world)
    self.space.setAutoCollideJointGroup(self.contactgroup)
    self.world.setQuickStepNumIterations(8)
    # DTA accumulates wall-clock time not yet consumed by fixed steps.
    self.DTA = 0.0
    self.frameCounter = 0
    if self.canRender:
        # Pre-create debug spheres used by postStep to show contact points.
        for count in range(self.jointMarkerCount):
            testMarker = render.attachNewNode('Joint Marker')
            ballmodel = loader.loadModel('phase_3/models/misc/sphere')
            ballmodel.reparentTo(testMarker)
            ballmodel.setScale(0.1)
            testMarker.setPos(0.0, 0.0, -100.0)
            self.jointMarkers.append(testMarker)
def setTimingCycleLength(self, time):
    """Set the length (seconds) of the repeating obstacle timing cycle."""
    self.timingCycleLength = time
def getTimingCycleLength(self):
    """Return the length (seconds) of the repeating obstacle timing cycle."""
    return self.timingCycleLength
def getCycleTime(self, doprint = 0):
    """Return real time (plus offset) folded into the timing cycle [0, length)."""
    cycleTime = (globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength
    if doprint:
        print 'Get Cycle Time %s' % cycleTime
    return cycleTime
def setTimeIntoCycle(self, time, doprint = 0):
    """Adjust the cycle offset so the local cycle clock reads *time*."""
    currentCycleTime = globalClock.getRealTime() % self.timingCycleLength
    self.timingCycleOffset = time - currentCycleTime
    if not doprint:
        return
    self.notify.debug('Set Cycle Time %s' % self.timingCycleOffset)
    self.notify.debug('SET cycle time %s' % ((globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength))
def getSimCycleTime(self):
    # NOTE(review): the bare return below short-circuits this accessor, so it
    # always yields None and the modulo expression is unreachable dead code —
    # confirm whether this was deliberately disabled before "fixing" it.
    return
    return self.timingSimTime % self.timingCycleLength
def startSim(self):
    """Begin running the fixed-step simulation task every frame."""
    taskMgr.add(self.__simulationTask, 'simulation task')
def stopSim(self):
    """Stop the simulation task started by startSim."""
    taskMgr.remove('simulation task')
def __simulationTask(self, task):
    """Per-frame task: consume accumulated wall time in fixed DTAStep slices.

    Each slice runs preStep/simulate/postStep; render-side nodes are synced
    once per frame afterwards.
    """
    self.DTA += globalClock.getDt()
    self.frameCounter += 1
    if self.frameCounter >= 10:
        self.frameCounter = 0
    startTime = globalClock.getRealTime()
    colCount = 0
    while self.DTA >= self.DTAStep:
        # Fixed-timestep loop: may run zero or several physics steps per frame.
        self.DTA -= self.DTAStep
        self.preStep()
        self.simulate()
        self.postStep()
    if self.canRender:
        self.placeBodies()
    if self.frameCounter == 0:
        # endTime is computed every 10th frame; currently unused (timing probe).
        endTime = globalClock.getRealTime() - startTime
    return task.cont
def simulate(self):
    """Run one physics step: collide, integrate, damp bodies, drive obstacles."""
    self.colCount = self.space.autoCollide()
    if self.maxColCount < self.colCount:
        self.maxColCount = self.colCount
        self.notify.debug('New Max Collision Count %s' % self.maxColCount)
    self.world.quickStep(self.DTAStep)
    # Apply per-body velocity dampening after integration.
    for bodyPair in self.bodyList:
        self.world.applyDampening(self.DTAStep, bodyPair[1])
    # Contact joints are recreated each step by autoCollide.
    self.contactgroup.empty()
    self.commonObjectControl()
    self.timingSimTime = self.timingSimTime + self.DTAStep
def placeBodies(self):
    """Sync each render-side NodePath to its ODE body's position and orientation.

    Cleanup: the original also computed an unused local
    (getRotation() * 180/pi) every iteration; that dead statement is
    removed — getRotation appears to be a pure accessor, so dropping the
    call should have no side effects.
    """
    for pair in self.odePandaRelationList:
        pandaNodePathGeom = pair[0]
        odeBody = pair[1]
        if pandaNodePathGeom:
            pandaNodePathGeom.setPos(odeBody.getPosition())
            # Fetch the quaternion once instead of four times.
            quat = odeBody.getQuaternion()
            pandaNodePathGeom.setQuat(Quat(quat[0], quat[1], quat[2], quat[3]))
def preStep(self):
    """Hook invoked before each physics step; subclasses may override."""
    pass
def postStep(self):
    """Hook after each step: position debug contact markers when enabled."""
    if self.showContacts and self.canRender:
        for count in range(self.jointMarkerCount):
            pandaNodePathGeom = self.jointMarkers[count]
            if count < self.colCount:
                # Contact data is packed as consecutive (x, y, z) triples.
                pandaNodePathGeom.setPos(self.space.getContactData(count * 3 + 0), self.space.getContactData(count * 3 + 1), self.space.getContactData(count * 3 + 2))
            else:
                # Park unused markers far below the course.
                pandaNodePathGeom.setPos(0.0, 0.0, -100.0)
def commonObjectControl(self):
    """Drive motorized common objects (types 2 and 4) from the cycle clock.

    Each entry's timeData holds ascending cycle times; forceData/eventData
    hold the motor velocity and event code that apply from that time on.
    """
    time = self.getCycleTime()
    for key in self.commonObjectDict:
        if key not in self.commonObjectInfoDict:
            self.commonObjectInfoDict[key] = None
        entry = self.commonObjectDict[key]
        if entry[1] in [2, 4]:
            type = entry[1]
            body = entry[2]
            motor = entry[3]
            timeData = entry[4]
            forceData = entry[5]
            eventData = entry[6]
            model = entry[7]
            force = 0.0
            for index in range(len(timeData)):
                # Precedence: (last entry AND started) OR (started AND next
                # entry not yet reached) — selects the active schedule slot.
                if index == len(timeData) - 1 and timeData[index] < time or timeData[index] < time and timeData[index + 1] > time:
                    force = forceData[index]
                    event = eventData[index]
                    if event != self.commonObjectInfoDict[key]:
                        # Fire the notification only on slot transitions.
                        self.commonObjectEvent(key, model, type, force, event)
                        self.commonObjectInfoDict[key] = event
            motor.setParamVel(force)
    return
def commonObjectEvent(self, key, model, type, force, event):
    """Hook called when a driven common object switches schedule slots."""
    self.notify.debug('commonObjectForceEvent %s %s %s %s %s' % (key, model, type, force, event))
def getCommonObjectData(self):
    """Serialize all common objects into a list of flat 15-tuples.

    The first tuple is a header carrying the current cycle time in slot 2.
    Each following tuple is (id, type, pos xyz, quat wxyz, angular vel xyz,
    linear vel xyz).  A sentinel entry with type 99 is appended when there
    are no objects so the stream is never just the header.
    """
    objectStream = [(0, 0, self.getCycleTime(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)]
    for key in self.commonObjectDict:
        objectPair = self.commonObjectDict[key]
        object = objectPair[2]
        pos3 = object.getPosition()
        quat4 = object.getQuaternion()
        anV3 = object.getAngularVel()
        lnV3 = object.getLinearVel()
        data = (objectPair[0], objectPair[1], pos3[0], pos3[1], pos3[2], quat4[0], quat4[1], quat4[2], quat4[3], anV3[0], anV3[1], anV3[2], lnV3[0], lnV3[1], lnV3[2])
        objectStream.append(data)
    if len(objectStream) <= 1:
        # No objects: append the type-99 sentinel entry.
        data = (0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        objectStream.append(data)
    return objectStream
def useCommonObjectData(self, objectData, enable = 1):
    """Apply a state stream from getCommonObjectData to local objects.

    Syncs the timing cycle from the header, then restores each object's
    position, orientation, and velocities; enable controls whether bodies
    are re-enabled or disabled afterwards.
    """
    if not objectData:
        return
    if objectData[1][1] == 99:
        # Sentinel-only stream: nothing to apply.
        return
    time = objectData[0]
    self.setTimeIntoCycle(time[2])
    if time[2] > self.timingCycleLength:
        pass
    for dataIndex in range(1, len(objectData)):
        data = objectData[dataIndex]
        commonObject = self.commonObjectDict[data[0]]
        commonObject[2].setPosition(data[2], data[3], data[4])
        commonObject[2].setQuaternion(Quat(data[5], data[6], data[7], data[8]))
        commonObject[2].setAngularVel(data[9], data[10], data[11])
        commonObject[2].setLinearVel(data[12], data[13], data[14])
        if enable:
            commonObject[2].enable()
        else:
            commonObject[2].disable()
def createCommonObject(self, type, commonId, pos, hpr, sizeX = 0, sizeY = 0, moveDistance = 0):
    """Create one shared course obstacle and register it in commonObjectDict.

    type: 0 static box, 1 spinning cross, 2 sliding box, 3 windmill,
    4 moving platform.  commonId of None allocates the next local id.
    Returns the spec list [type, id, pos, hpr, sizeX, sizeY, moveDistance]
    so the same object can be recreated on peers.
    """
    if commonId == None:
        commonId = self.commonId
        self.commonId += 1
    vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
    vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
    rHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
    self.placerNode.setHpr(vHpr)
    self.placerNode.setPos(vPos)
    if type == 0:
        # Static box obstacle.
        model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0)
        box.setPosition(vPos)
        self.placerNode.setHpr(vHpr)
        box.setQuaternion(self.placerNode.getQuat())
        self.commonObjectDict[commonId] = (commonId, type, box)
    elif type == 1:
        # Spinning cross driven by a constantly-powered hinge joint.
        model, cross = self.createCross(self.world, self.space, 1.0, 3.0, 12.0, 2.0, 2)
        motor = OdeHingeJoint(self.world)
        cross.setPosition(vPos)
        cross.setQuaternion(self.placerNode.getQuat())
        ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
        motor.setParamVel(1.5)
        motor.setParamFMax(500000000.0)
        boxsize = Vec3(1.0, 1.0, 1.0)
        motor.attach(0, cross)
        motor.setAnchor(vPos)
        motor.setAxis(ourAxis)
        self.cross = cross
        cross.enable()
        self.commonObjectDict[commonId] = (commonId, type, cross)
    elif type == 2:
        # Box oscillating on a slider joint; commonObjectControl flips the
        # motor velocity using the (timeData, forceData, eventData) schedule.
        ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
        model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0, 2)
        box.setPosition(vPos)
        box.setQuaternion(self.placerNode.getQuat())
        motor = OdeSliderJoint(self.world)
        motor.attach(box, 0)
        motor.setAxis(ourAxis)
        motor.setParamVel(3.0)
        motor.setParamFMax(5000000.0)
        motor.setParamHiStop(10.0)
        motor.setParamLoStop(-10.0)
        timeData = (0.0, 5.0)
        forceData = (3.0, -3.0)
        eventData = (1, 2)
        self.commonObjectDict[commonId] = (commonId, type, box, motor, timeData, forceData, eventData, model)
    elif type == 3:
        # Windmill: static base mesh plus a motorized pinwheel for the fan.
        vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
        vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
        self.placerNode.setHpr(vHpr)
        self.placerNode.setPos(vPos)
        self.subPlacerNode.setPos(0, 0, 0)
        if self.canRender:
            myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b')
        else:
            myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b.bam')
        myModel.reparentTo(self.root)
        myModel.setPos(vPos)
        myModel.setHpr(vHpr)
        millFan = myModel.find('**/windmillFan0')
        millBase = myModel.find('**/arm')
        rod = myModel.find('**/rod')
        rod.wrtReparentTo(millBase)
        self.windmillFanNodePath = millFan
        self.windmillBaseNodePath = millBase
        # Static trimesh collision for the windmill base.
        millData = OdeTriMeshData(millBase)
        millGeom = OdeTriMeshGeom(self.space, millData)
        self.meshDataList.append(millData)
        millGeom.setPosition(self.subPlacerNode.getPos(self.root))
        millGeom.setQuaternion(self.subPlacerNode.getQuat())
        millGeom.setCollideBits(BitMask32(251658240))
        millGeom.setCategoryBits(BitMask32(8388608))
        self.space.setCollideId(millGeom, 8)
        # Re-aim the placer at the fan hub, then build the spinning fan.
        vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]) + 5)
        vHpr = Vec3(float(hpr[0]), float(hpr[1] + 90), float(hpr[2]) - 90)
        self.placerNode.setHpr(vHpr)
        self.placerNode.setPos(vPos)
        self.subPlacerNode.setPos(-1, 0, 0.0)
        model, cross = self.createPinWheel(self.world, self.space, 10.0, 1.6, 4.0, 0.6, 5, 3.7, 1.2, 1, millFan, (0, 0, 90), (-4.6, -0.5, -0.25), 20)
        self.placerNode.setHpr(vHpr)
        self.placerNode.setPos(vPos)
        self.subPlacerNode.setPos(-1, 0, 0.0)
        motor = OdeHingeJoint(self.world)
        cross.setPosition(self.subPlacerNode.getPos(self.root))
        cross.setQuaternion(self.placerNode.getQuat())
        ourAxis = self.root.getRelativeVector(self.subPlacerNode, Vec3(0, 0, 1))
        motor.setParamVel(1.0)
        motor.setParamFMax(50000.0)
        boxsize = Vec3(1.0, 1.0, 1.0)
        motor.attach(0, cross)
        motor.setAnchor(self.subPlacerNode.getPos(self.root))
        motor.setAxis(ourAxis)
        self.cross = cross
        cross.enable()
        self.commonObjectDict[commonId] = (commonId, type, cross)
    elif type == 4:
        # Moving platform: slider along local Y with a 4-slot schedule.
        ourAxis = self.root.getRelativeVector(self.placerNode, Vec3(0, 1, 0))
        model, box = self.createBox(self.world, self.space, 50.0, sizeX, sizeY, 1.0, 2)
        box.setPosition(vPos)
        box.setQuaternion(self.placerNode.getQuat())
        motor = OdeSliderJoint(self.world)
        motor.attach(box, 0)
        motor.setAxis(ourAxis)
        motor.setParamVel(moveDistance / 4.0)
        motor.setParamFMax(25000.0)
        motor.setParamHiStop(moveDistance)
        motor.setParamLoStop(0)
        timeData = (0.0, 1.0, 5.0, 6.0)
        forceData = (-moveDistance / 4.0, moveDistance / 4.0, moveDistance / 4.0, -moveDistance / 4.0)
        eventData = (-1, 1, -2, 2)
        radius = moveDistance + sizeY * 0.5
        self.commonObjectDict[commonId] = (commonId, type, box, motor, timeData, forceData, eventData, model, radius)
    return [type, commonId, (pos[0], pos[1], pos[2]), (hpr[0], hpr[1], hpr[2]), sizeX, sizeY, moveDistance]
def createSphere(self, world, space, density, radius, ballIndex = None):
    """Create a golf-ball body/geom pair; returns (nodePath, body, geom).

    Cleanup: the original had four byte-identical elif branches for
    ballIndex 1-4 (same collide/category masks, debug text '1'..'4');
    they are collapsed into one membership test with identical output.
    The py2-only long-literal 'L' suffixes are dropped — plain int
    literals auto-promote to long in Python 2, so values are unchanged.
    """
    self.notify.debug('create sphere index %s' % ballIndex)
    body = OdeBody(world)
    M = OdeMass()
    M.setSphere(density, radius)
    body.setMass(M)
    body.setPosition(0, 0, -100)
    geom = OdeSphereGeom(space, radius)
    self.space.setSurfaceType(geom, 1)
    self.notify.debug('collide ID is %s' % self.space.setCollideId(geom, 42))
    self.massList.append(M)
    self.geomList.append(geom)
    if ballIndex in (1, 2, 3, 4):
        # Player balls all share the same masks.
        self.notify.debug(str(ballIndex))
        geom.setCollideBits(BitMask32(16777215))
        geom.setCategoryBits(BitMask32(4278190080))
    else:
        geom.setCollideBits(BitMask32(4294967295))
        geom.setCategoryBits(BitMask32(4294967295))
    geom.setBody(body)
    if self.notify.getDebug():
        self.notify.debug('golf ball geom id')
        geom.write()
        self.notify.debug(' -')
        self.notify.debug('Collide Bits %s' % geom.getCollideBits())
    if self.canRender:
        testball = render.attachNewNode('Ball Holder')
        ballmodel = loader.loadModel('phase_6/models/golf/golf_ball')
        ballmodel.reparentTo(testball)
        ballmodel.setColor(*GolfGlobals.PlayerColors[ballIndex - 1])
        testball.setPos(0, 0, -100)
        self.odePandaRelationList.append((testball, body))
    else:
        testball = None
        self.bodyList.append((None, body))
    return (testball, body, geom)
def createBox(self, world, space, density, lx, ly, lz, colOnlyBall = 0):
    """Create a box body/geom pair; returns (nodePath, body).

    colOnlyBall: 0 normal collision, 1 collide only with balls, 2 collide
    with nothing (motor-driven platforms).

    Bug fix: the original tested `if colOnlyBall:` before
    `elif colOnlyBall == 2:`, so the value 2 (truthy) always took the
    first branch and the collide-with-nothing masks were unreachable.
    Testing `== 1` matches the sibling createCross/createCross2/
    createPinWheel methods, which all distinguish 1 from 2 explicitly.
    """
    body = OdeBody(self.world)
    M = OdeMass()
    M.setSphere(density, 0.3 * (lx + ly + lz))
    body.setMass(M)
    boxsize = Vec3(lx, ly, lz)
    geom = OdeBoxGeom(space, boxsize)
    geom.setBody(body)
    self.space.setSurfaceType(geom, 0)
    self.space.setCollideId(geom, 7)
    self.massList.append(M)
    self.geomList.append(geom)
    if colOnlyBall == 1:
        # Collide with balls only.
        geom.setCollideBits(BitMask32(251658240))
        geom.setCategoryBits(BitMask32(0))
    elif colOnlyBall == 2:
        # Collide with nothing.
        geom.setCollideBits(BitMask32(0))
        geom.setCategoryBits(BitMask32(0))
    if self.canRender:
        color = random.choice([Vec4(1.0, 0.0, 0.5, 1.0), Vec4(0.5, 0.5, 1.0, 1.0), Vec4(0.5, 1.0, 0.5, 1.0)])
        boxsize = Vec3(lx, ly, lz)
        boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, color, 1)
        boxNodePathGeom.setPos(0, 0, -100)
        self.odePandaRelationList.append((boxNodePathGeom, body))
    else:
        boxNodePathGeom = None
        self.bodyList.append((None, body))
    return (boxNodePathGeom, body)
def createCross(self, world, space, density, lx, ly, lz, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
    """Create a two-geom cross (rotor) body; returns (nodePath, body).

    Bug fix: the original appended (boxNodePathGeom, body) to
    odePandaRelationList right after building the geoms — before
    boxNodePathGeom was ever assigned, which raises NameError at runtime
    and duplicates the append already done in the canRender branch below.
    That stray statement is removed.
    """
    body = OdeBody(self.world)
    M = OdeMass()
    M.setBox(density, lx, ly, lz)
    body.setMass(M)
    body.setFiniteRotationMode(1)
    boxsize = Vec3(lx, ly, lz)
    boxsize2 = Vec3(ly, lx, lz)
    geom = OdeBoxGeom(space, boxsize)
    geom.setBody(body)
    self.space.setSurfaceType(geom, 0)
    self.space.setCollideId(geom, 13)
    geom2 = OdeBoxGeom(space, boxsize2)
    geom2.setBody(body)
    self.space.setSurfaceType(geom2, 0)
    self.space.setCollideId(geom2, 26)
    self.massList.append(M)
    self.geomList.append(geom)
    self.geomList.append(geom2)
    if colOnlyBall == 1:
        # Collide with balls only.
        geom.setCollideBits(BitMask32(251658240))
        geom.setCategoryBits(BitMask32(0))
        geom2.setCollideBits(BitMask32(251658240))
        geom2.setCategoryBits(BitMask32(0))
    elif colOnlyBall == 2:
        # Collide with nothing.
        geom.setCollideBits(BitMask32(0))
        geom.setCategoryBits(BitMask32(0))
        geom2.setCollideBits(BitMask32(0))
        geom2.setCategoryBits(BitMask32(0))
    if self.canRender:
        boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
        boxNodePathGeom.setPos(0, 0, -100)
        boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(boxNodePathGeom, ly, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
        boxNodePathGeom2.setPos(0, 0, 0)
        if attachedGeo:
            attachedGeo.reparentTo(boxNodePathGeom)
            attachedGeo.setHpr(0, 0, 90)
            attachedGeo.setPos(-4.8, 0, -2.0)
        self.odePandaRelationList.append((boxNodePathGeom, body))
    else:
        boxNodePathGeom = None
        self.bodyList.append((None, body))
    return (boxNodePathGeom, body)
def createCross2(self, world, space, density, lx, ly, lz, latSlide, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
    """Create a four-geom offset cross body; returns (nodePath, body).

    latSlide shifts each half-arm laterally so the arms pinwheel around
    the hub instead of meeting at the center.
    """
    body = OdeBody(self.world)
    M = OdeMass()
    M.setBox(density, lx, ly, lz)
    body.setMass(M)
    body.setFiniteRotationMode(1)
    # Each arm is half-length; two orientations, two arms each.
    boxsize = Vec3(lx, ly * 0.5, lz)
    boxsize2 = Vec3(ly * 0.5, lx, lz)
    geom = OdeBoxGeom(space, boxsize)
    geom.setBody(body)
    geom.setOffsetPosition(-latSlide, ly * 0.25, 0)
    self.space.setSurfaceType(geom, 0)
    self.space.setCollideId(geom, 13)
    geom2 = OdeBoxGeom(space, boxsize2)
    geom2.setBody(body)
    geom2.setOffsetPosition(ly * 0.25, latSlide, 0)
    self.space.setSurfaceType(geom2, 0)
    self.space.setCollideId(geom2, 13)
    geom3 = OdeBoxGeom(space, boxsize)
    geom3.setBody(body)
    geom3.setOffsetPosition(latSlide, -ly * 0.25, 0)
    self.space.setSurfaceType(geom3, 0)
    self.space.setCollideId(geom3, 13)
    geom4 = OdeBoxGeom(space, boxsize2)
    geom4.setBody(body)
    geom4.setOffsetPosition(-ly * 0.25, -latSlide, 0)
    self.space.setSurfaceType(geom4, 0)
    self.space.setCollideId(geom4, 13)
    self.massList.append(M)
    self.geomList.append(geom)
    self.geomList.append(geom2)
    self.geomList.append(geom3)
    self.geomList.append(geom4)
    if colOnlyBall == 1:
        # Collide with balls only.
        geom.setCollideBits(BitMask32(251658240))
        geom.setCategoryBits(BitMask32(0))
        geom2.setCollideBits(BitMask32(251658240))
        geom2.setCategoryBits(BitMask32(0))
        geom3.setCollideBits(BitMask32(251658240))
        geom3.setCategoryBits(BitMask32(0))
        geom4.setCollideBits(BitMask32(251658240))
        geom4.setCategoryBits(BitMask32(0))
    elif colOnlyBall == 2:
        # Collide with nothing.
        geom.setCollideBits(BitMask32(0))
        geom.setCategoryBits(BitMask32(0))
        geom2.setCollideBits(BitMask32(0))
        geom2.setCategoryBits(BitMask32(0))
        geom3.setCollideBits(BitMask32(0))
        geom3.setCategoryBits(BitMask32(0))
        geom4.setCollideBits(BitMask32(0))
        geom4.setCategoryBits(BitMask32(0))
    if self.canRender:
        someNodePathGeom = render.attachNewNode('pinwheel')
        if attachedGeo:
            attachedGeo.reparentTo(someNodePathGeom)
            attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
            attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
        boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
        boxNodePathGeom.setPos(-latSlide, ly * 0.25, 0)
        boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
        boxNodePathGeom2.setPos(ly * 0.25, latSlide, 0)
        boxNodePathGeom3, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
        boxNodePathGeom3.setPos(latSlide, -ly * 0.25, 0)
        boxNodePathGeom4, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
        boxNodePathGeom4.setPos(-ly * 0.25, -latSlide, 0)
        self.odePandaRelationList.append((someNodePathGeom, body))
    else:
        someNodePathGeom = None
        self.bodyList.append((None, body))
    return (someNodePathGeom, body)
def createPinWheel(self, world, space, density, lx, ly, lz, numBoxes, disV, disH, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None, offRot = 0):
    """Create a pinwheel body of numBoxes vanes; returns (nodePath, body).

    Vanes are placed by spinning placerNode and sampling subPlacerNode at
    the (disH, disV) offset; offRot rotates the whole wheel.  attachedGeo,
    when given, replaces the per-vane debug boxes as the visual.
    """
    body = OdeBody(self.world)
    M = OdeMass()
    M.setBox(density, lx, ly, lz)
    body.setMass(M)
    body.setFiniteRotationMode(1)
    boxsize = Vec3(lx, ly * 0.5, lz)
    boxsize2 = Vec3(ly * 0.5, lx, lz)
    self.massList.append(M)
    self.placerNode.setPos(0, 0, 0)
    self.placerNode.setHpr(0, 0, 0)
    self.subPlacerNode.setHpr(0, 0, 0)
    self.subPlacerNode.setPos(disH, disV, 0)
    if self.canRender:
        someNodePathGeom = render.attachNewNode('pinwheel')
    else:
        someNodePathGeom = self.root.attachNewNode('pinwheel')
    for num in range(numBoxes):
        # Even angular spacing around the hub, plus the global offset.
        spin = 360.0 * float(num) / float(numBoxes) + float(offRot)
        self.placerNode.setH(spin)
        geom = OdeBoxGeom(space, boxsize)
        geom.setBody(body)
        geom.setOffsetPosition(self.subPlacerNode.getPos(self.root))
        geom.setOffsetQuaternion(self.subPlacerNode.getQuat(self.root))
        self.geomList.append(geom)
        self.space.setSurfaceType(geom, 0)
        self.space.setCollideId(geom, 13)
        if colOnlyBall == 1:
            # Collide with balls only.
            geom.setCollideBits(BitMask32(251658240))
            geom.setCategoryBits(BitMask32(0))
        elif colOnlyBall == 2:
            # Collide with nothing.
            geom.setCollideBits(BitMask32(0))
            geom.setCategoryBits(BitMask32(0))
        if not attachedGeo:
            # Debug visual: one box per vane.
            boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
            boxNodePathGeom.setPos(self.subPlacerNode.getPos(self.root))
            boxNodePathGeom.setHpr(self.subPlacerNode.getHpr(self.root))
    if attachedGeo and self.canRender:
        attachedGeo.reparentTo(someNodePathGeom)
        attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
        attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
    if self.canRender:
        self.odePandaRelationList.append((someNodePathGeom, body))
    else:
        someNodePathGeom = None
        self.bodyList.append((None, body))
    return (someNodePathGeom, body)
def attachMarker(self, body):
    """Attach a small debug sphere that tracks *body* when rendering is on."""
    if not self.canRender:
        return
    marker = render.attachNewNode('Joint Marker')
    sphere = loader.loadModel('models/misc/sphere')
    sphere.reparentTo(marker)
    sphere.setScale(0.25)
    marker.setPos(0.0, 0.0, -100.0)
    self.odePandaRelationList.append((marker, body))
| |
import collections
# History of board snapshots, recorded by assign_value for visualization.
assignments = []
# Row labels and column labels of the 9x9 sudoku board.
rows = 'ABCDEFGHI'
cols = '123456789'
def cross(A, B):
    """Cross product of elements in A and elements in B.

    Returns the list of concatenations a+b for every a in A and b in B,
    in A-major order.
    """
    return [a + b for a in A for b in B]
# All 81 box names, 'A1' .. 'I9'.
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
# Account for Diagonal Sudoku: main diagonal (A1..I9) and anti-diagonal
# (I1..A9), built by indexing row_units forwards and reversed.
diag_units = [[row_temp[i][i] for i in range(len(row_units))] for row_temp in (row_units, row_units[::-1])]
unitlist = row_units + column_units + square_units + diag_units
# units[s]: every unit containing box s; peers[s]: all other boxes sharing a unit with s.
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s], [])) - set([s])) for s in boxes)
def assign_value(values, box, value):
    """Assign *value* to *box* in *values*, recording solved-board snapshots.

    No-op when the box already holds the value; a snapshot is appended to
    the global assignments list only when the box becomes fully determined
    (single digit).
    """
    changed = values[box] != value
    if changed:
        values[box] = value
        if len(value) == 1:
            assignments.append(values.copy())
    return values
def naked_twins(values):
    """Eliminate values using the naked twins strategy.
    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    # Find all instances of naked twins
    # Eliminate the naked twins as possibilities for their peers
    for unit in unitlist:
        # Candidate twins: boxes in this unit with exactly two possibilities.
        twoValuesBox = [values[u] for u in unit if len(values[u]) == 2]
        # check if list is empty
        if not twoValuesBox:
            continue
        # A naked twin is a two-digit value appearing in exactly two boxes.
        nakedTwins = [val for val, count in collections.Counter(twoValuesBox).items() if count == 2]
        # check if list is empty
        if not nakedTwins:
            continue
        for u in unit:
            if len(values[u]) > 1:
                for val in nakedTwins:
                    # Skip the twin boxes themselves (exact value match).
                    if val == values[u]:
                        continue
                    # Remove each twin digit from the other unsolved boxes.
                    if val[0] in values[u]:
                        values[u] = values[u].replace(val[0], '')
                    if val[1] in values[u]:
                        values[u] = values[u].replace(val[1], '')
    return values
def grid_values(grid):
    """
    Convert grid into a dict of {square: char} with '123456789' for empties.
    Args:
        grid(string) - A grid in string form.
    Returns:
        A grid in dictionary form
            Keys: The boxes, e.g., 'A1'
            Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
    """
    digits = '123456789'
    # '.' expands to all candidates; digits pass through; anything else is dropped.
    chars = [digits if c == '.' else c for c in grid if c == '.' or c in digits]
    assert len(chars) == 81
    return dict(zip(boxes, chars))
def display(values):
    """
    Display the values as a 2-D grid.
    Args:
        values(dict): The sudoku in dictionary form
    """
    width = 1 + max(len(values[s]) for s in boxes)
    line = '+'.join(['-' * (width * 3)] * 3)
    for r in rows:
        cells = []
        for c in cols:
            cells.append(values[r + c].center(width))
            if c in '36':
                cells.append('|')
        print(''.join(cells))
        if r in 'CF':
            print(line)
def eliminate(values):
    """
    Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all its peers.
    Args:
        A sudoku in dictionary form.
    Returns:
        The resulting sudoku in dictionary form.
    """
    # Snapshot the solved boxes first; their digits are then stripped from
    # every peer's candidate string.
    solved = [b for b in values.keys() if len(values[b]) == 1]
    for b in solved:
        digit = values[b]
        for p in peers[b]:
            values[p] = values[p].replace(digit, '')
    return values
def only_choice(values):
    """
    Finalize digits that have a unique home within a unit.

    Whenever only one box in a unit can hold a given digit, assign that
    digit to the box.

    Args:
        A sudoku in dictionary form.
    Returns:
        The resulting sudoku in dictionary form (modified in place).
    """
    for unit in unitlist:
        for digit in '123456789':
            homes = [box for box in unit if digit in values[box]]
            if len(homes) == 1:
                # Exactly one box in this unit can take the digit.
                values[homes[0]] = digit
    return values
def reduce_puzzle(values):
    """
    Iterate eliminate(), only_choice() and naked_twins() until no progress.

    If at some point there is a box with no available values, return False.
    If the sudoku is solved, return the sudoku.
    If after an iteration of all strategies the sudoku remains the same,
    return the sudoku.

    Input: A sudoku in dictionary form.
    Output: The resulting sudoku in dictionary form, or False on contradiction.
    """
    # NOTE: the original computed an unused `solved_values` list here; the
    # dead local has been removed (progress is tracked inside the loop).
    stalled = False
    while not stalled:
        # Progress is measured by the count of solved (single-value) boxes.
        solved_values_before = len(
            [box for box in values.keys() if len(values[box]) == 1])
        values = eliminate(values)
        values = only_choice(values)
        values = naked_twins(values)
        solved_values_after = len(
            [box for box in values.keys() if len(values[box]) == 1])
        stalled = solved_values_before == solved_values_after
        # A box with zero candidates means the puzzle is contradictory.
        if len([box for box in values.keys() if len(values[box]) == 0]):
            return False
    return values
def search(values):
    """Using depth-first search and propagation, try all possible values.

    Returns the solved sudoku in dictionary form, or False when this branch
    leads to a contradiction.
    """
    # First, reduce the puzzle using constraint propagation.
    values = reduce_puzzle(values)
    if values is False:
        return False  # Failed earlier
    if all(len(values[s]) == 1 for s in boxes):
        return values  # Solved!
    # Choose one of the unfilled squares with the fewest possibilities.
    _, target = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
    # Recurse on each candidate; the first branch that solves the puzzle wins.
    for value in values[target]:
        new_sudoku = values.copy()
        new_sudoku[target] = value
        attempt = search(new_sudoku)
        if attempt:
            return attempt
    # Every candidate led to a contradiction: signal failure explicitly.
    # (The original fell off the end here and returned None.)
    return False
def solve(grid):
    """
    Find the solution to a Sudoku grid.
    Args:
        grid(string): a string representing a sudoku grid.
            Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    Returns:
        The dictionary representation of the final sudoku grid. False if no solution exists.
    """
    result = search(grid_values(grid))
    # search() signals failure with a falsy value (False, or None when every
    # branch is exhausted); normalise to False as documented. The original
    # compared with `== False` and could leak None to the caller.
    return result if result else False
if __name__ == '__main__':
    # Demo grid: '.' marks an empty box, digits are the given clues.
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    display(solve(diag_sudoku_grid))
    try:
        # Optional pygame-based visualization of the recorded assignments.
        # NOTE(review): `assignments` is presumably a module-level list kept
        # elsewhere in this file -- confirm it exists before relying on this.
        from visualize import visualize_assignments
        visualize_assignments(assignments)
    except SystemExit:
        pass
    except:
        # Visualization is best-effort; any failure is deliberately ignored.
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| |
from lxml.etree import Element, SubElement, tostring
from pyuntl import UNTL_XML_ORDER, HIGHWIRE_ORDER
# XML Schema-instance namespace URI, reused in DC_NAMESPACES below.
XSI = 'http://www.w3.org/2001/XMLSchema-instance'
# Namespaces for the DC XML.
DC_NAMESPACES = {
    'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'xsi': XSI,
}
class MetadataGeneratorException(Exception):
    """Base exception for Metadata Generation."""

    def __init__(self, value):
        # Forward to Exception so `args`, repr and pickling behave normally
        # (the original skipped the superclass initializer).
        super(MetadataGeneratorException, self).__init__(value)
        self.value = value

    def __str__(self):
        return '%s' % (self.value,)
def py2dict(elements):
    """Convert a Python object into a Python dictionary."""
    metadata_dict = {}
    # Walk the object's children, bucketing entries per element tag.
    for element in elements.children:
        bucket = metadata_dict.setdefault(element.tag, [])
        entry = {}
        qualifier = getattr(element, 'qualifier', None)
        if qualifier is not None:
            entry['qualifier'] = qualifier
        if element.children:
            # Nested elements become a {tag: content} mapping.
            entry['content'] = {
                child.tag: child.content
                for child in element.children
                if child.content is not None
            }
        elif element.content is not None and element.content.strip() != '':
            # Plain (non-whitespace) text content is stored directly.
            entry['content'] = element.content
        # Only keep entries that carry some truthy content; empty entries
        # still leave the tag key present with an empty list.
        if entry.get('content', False):
            bucket.append(entry)
    return metadata_dict
def pydict2xml(filename, metadata_dict, **kwargs):
    """Create an XML file.

    Takes a path to where the XML file should be created
    and a metadata dictionary.

    Raises:
        MetadataGeneratorException: if the XML cannot be generated or the
            file cannot be written.
    """
    try:
        # `with` guarantees the handle is closed even if the write fails
        # (the original leaked the handle on a write error).
        with open(filename, 'wb') as xml_file:
            xml_file.write(pydict2xmlstring(metadata_dict, **kwargs))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being converted into a metadata error.
        raise MetadataGeneratorException(
            'Failed to create an XML file. Filename: %s' % filename
        )
def pydict2xmlstring(metadata_dict, **kwargs):
    """Create an XML string from a metadata dictionary.

    Keyword Args:
        ordering: element ordering list (defaults to UNTL_XML_ORDER).
        root_label: tag name of the root element (defaults to 'metadata').
        root_namespace: namespace prefix for the root tag.
        elements_namespace: namespace prefix for child tags.
        namespace_map: nsmap passed through to lxml's Element().
        root_attributes: dict of attributes set on the root element.
    """
    ordering = kwargs.get('ordering', UNTL_XML_ORDER)
    root_label = kwargs.get('root_label', 'metadata')
    root_namespace = kwargs.get('root_namespace', None)
    elements_namespace = kwargs.get('elements_namespace', None)
    namespace_map = kwargs.get('namespace_map', None)
    root_attributes = kwargs.get('root_attributes', None)
    # Set any root namespace and namespace map.
    if root_namespace and namespace_map:
        root = Element(root_namespace + root_label, nsmap=namespace_map)
    elif namespace_map:
        root = Element(root_label, nsmap=namespace_map)
    else:
        root = Element(root_label)
    # Set any root element attributes.
    if root_attributes:
        for key, value in root_attributes.items():
            root.attrib[key] = value
    # Create an XML structure from the field list. At most one recognized
    # attribute is emitted per element, in priority order (qualifier, then
    # role, then scheme) -- this preserves the original branch ordering
    # while collapsing four duplicated call sites into one.
    for metadata_key in ordering:
        for element in metadata_dict.get(metadata_key, []):
            if 'content' not in element:
                continue
            attribs = None
            for attribute in ('qualifier', 'role', 'scheme'):
                if attribute in element:
                    attribs = {attribute: element[attribute]}
                    break
            create_dict_subelement(
                root,
                metadata_key,
                element['content'],
                attribs=attribs,
                namespace=elements_namespace,
            )
    # Serialize with an explicit XML declaration prepended.
    return b'<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
        root,
        encoding='UTF-8',
        xml_declaration=False,
        pretty_print=True
    )
def create_dict_subelement(root, subelement, content, **kwargs):
    """Create a XML subelement from a Python dictionary."""
    attribs = kwargs.get('attribs', None)
    namespace = kwargs.get('namespace', None)
    # Prefix the tag with its namespace when one is supplied.
    tag = namespace + subelement if namespace else subelement
    if attribs:
        node = SubElement(root, tag, attribs)
    else:
        node = SubElement(root, tag)
    if isinstance(content, dict):
        # Nested content becomes one child element per key/value pair.
        for descriptor, value in content.items():
            descriptor_node = SubElement(node, descriptor)
            descriptor_node.text = value
    else:
        node.text = content
def highwiredict2xmlstring(highwire_elements, ordering=HIGHWIRE_ORDER):
    """Create an XML string from the highwire data dictionary."""
    # Order the elements by their position in the ordering list.
    # NOTE: this sorts the caller's list in place, as before.
    highwire_elements.sort(key=lambda obj: ordering.index(obj.name))
    root = Element('metadata')
    for element in highwire_elements:
        SubElement(root, 'meta',
                   {'name': element.name, 'content': element.content})
    # Serialize with an explicit XML declaration prepended.
    xml_body = tostring(
        root,
        encoding='UTF-8',
        xml_declaration=False,
        pretty_print=True
    )
    return b'<?xml version="1.0" encoding="UTF-8"?>\n' + xml_body
def breakString(text, width=79, firstLineOffset=0):
    """Break up a string into multiple lines.

    Lines should each be of length no greater than width.
    If externally additional text will be added to the first line,
    such as an ANVL key, use firstLineOffset to reduce the allowed
    width we have available for the line.
    """
    originalWidth = width
    # Use firstLineOffset to adjust width allowed for this line.
    width = width - firstLineOffset
    if len(text) < width + 1:
        # String all fits on one line, so return it as is.
        return text
    index = width
    while index > 0:
        # Break only on a single space surrounded by non-space characters.
        # The `index + 1 < len(text)` guard fixes an IndexError the original
        # raised when the candidate break point was the final character
        # (e.g. a trailing space at exactly width + 1).
        if ' ' == text[index] and index + 1 < len(text):
            if not text[index + 1].isspace() and not text[index - 1].isspace():
                stringPart1 = text[0:index]
                stringPart2 = text[index:]
                # Do not pass firstLineOffset: only the first line is offset.
                return stringPart1 + '\n' + breakString(
                    stringPart2,
                    originalWidth
                )
        index = index - 1
    # There was insufficient whitespace to break the string in a way that
    # keeps all lines under the desired width. Exceed the width.
    return text
def writeANVLString(ANVLDict, ordering=UNTL_XML_ORDER):
    """Take a dictionary and write out the key/value pairs
    in ANVL format.
    """
    lines = []
    # Emit elements in canonical order; keys absent from the data are
    # simply skipped.
    for key in ordering:
        if key not in ANVLDict:
            continue
        # Account for the "key:" prefix when wrapping the value.
        offset = len(key) + 1
        for element in ANVLDict[key]:
            wrapped = breakString(element.get('content', ''), 79, offset)
            lines.append('%s: %s' % (key, wrapped))
    return '\n'.join(lines)
| |
import http
import xml.etree.ElementTree as ET
import pytest
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from sqlalchemy import text
from tests.conftest import TEST_PROXY_URL, TEST_RADIO_DNS_PORT
from tests.test_station_creation_with_overrides import check_station_override_values
from tests.utilities.utilities import compare_lists, sql_alchemy_result_to_list
# Fetches the override station row (id = 3) with every field the override
# edit form is expected to persist.
MYSQL_STATION_QUERY = "SELECT name, short_name, short_description, genres, radioepg_service, long_name, medium_name," \
                      "url_default, city, default_language, location_country, phone_number, postal_name, sms_body," \
                      "sms_description, sms_number, street, zipcode, email_address, email_description" \
                      " FROM station WHERE id = 3"
# Expected <tr> texts on the station details page for the default values
# after the edits performed in test_station_edition_with_overrides.
STATION_DETAILS_DEFAULT_TR = [
    'Short Name snom',
    'Medium Name snom',
    'Long Name Modified',
    'Short Description Modified',
    'Default Url Modified url',
    'Default Language it',
    'Postal Address Modified postal name\nModified street\nModified city, Modified zipcode',
    'Country Liechtenstein',
    'Phone Number Modified phone number',
    'SMS Modified description : Send Modified sms body to Modified sms number',
    'E-Mail Modified description : Modified email',
]
# Expected <tr> texts for the override tab after its values are modified.
STATION_DETAILS_OVERRIDES_TR = [
    'Short Name snomo',
    'Medium Name snomo',
    'Long Name long name overrideModified Override',
    'Short Description short description overrideModified Override',
    'Default Url https://github.com/ebuModified Override',
    'Default Language fr',
    'Postal Address postal name overrideModified Override\nstreet overrideModified Override\ncity overrideModified Override, 1110Modified Override',
    'Country Azores [Portugal]',
    'Phone Number 1111111111Modified Override',
    'SMS SMS description overrideModified Override : Send SMS body overrideModified Override to 222222222Modified Override',
    'E-Mail Email description overrideModified Override : AlexanderWexler@teleworm.us overrideModified Override'
]
# Expected database column values (order matches MYSQL_STATION_QUERY).
STATION_DATABASE_OVERRIDES_TR = [
    'station name overrideModified Override',
    'snomo',
    'short description overrideModified Override',
    '[{"href": "urn:tva:metadata:cs:ContentCS:2011:3.1.1.13", "name": "Weather forecasts"}, {"href": "urn:tva:metadata:cs:ContentCS:2011:3.1.1.16", "name": "Current Affairs"}, {"href": "urn:tva:metadata:cs:ContentCS:2011:3.6.4", "name": "Pop-rock"}]',
    None,
    'long name overrideModified Override',
    'snomo',
    'https://github.com/ebuModified Override',
    'city overrideModified Override',
    'fr',
    'pt',
    '1111111111Modified Override',
    'postal name overrideModified Override',
    'SMS body overrideModified Override',
    'SMS description overrideModified Override',
    '222222222Modified Override',
    'street overrideModified Override',
    '1110Modified Override',
    'AlexanderWexler@teleworm.us overrideModified Override',
    'Email description overrideModified Override',
]
@pytest.mark.run(order=7)
def test_station_edition_overrides_no_changes(stack_setup, browser_setup):
    """Open the station edit page and verify the override tab layout
    without making any change, then check the stored override values.

    Uses the `stack_setup` (database) and `browser_setup` (selenium
    driver) fixtures.
    """
    db = stack_setup
    driver = browser_setup
    driver.get(TEST_PROXY_URL + "stations/edit/2")
    # Three tabs total; only the third is flagged as an override tab.
    overrides_tabs = driver.find_elements_by_class_name("nav-item")
    assert len(overrides_tabs) == 3
    assert "override" not in overrides_tabs[1].get_attribute('class').split()
    assert "override" in overrides_tabs[2].get_attribute('class').split()
    assert len(driver.find_elements_by_css_selector(".btn.btn-danger.btn-close")) == 1
    # NOTE(review): the result of this lookup is unused -- presumably this
    # was meant to be asserted on; confirm the intent.
    overrides_tabs[2].find_elements_by_css_selector(".btn.btn-danger.btn-close")
    # Wait until the country <select> is populated before reading values.
    WebDriverWait(driver, 5).until(
        lambda x: x.find_element_by_id("location_country_0")
        .find_element_by_css_selector("option[value=hk]"))
    check_station_override_values(db, driver)
@pytest.mark.run(order=8)
def test_station_edition_with_overrides(stack_setup, browser_setup):
    """Edit station 2's default values and its override tab, then verify
    the result in the details page HTML, the database and the generated
    RadioDNS SI.xml.

    Flow:
      1. Modify every default field and rework the genre rows.
      2. Check the override indicator dots on tabs 1 and 2.
      3. Check the override tab kept its own values, then modify them.
      4. Save, then verify details page, DB row (id = 3) and SI.xml.
    """
    db = stack_setup
    driver = browser_setup
    driver.get(TEST_PROXY_URL + "stations/edit/2")
    # Modify inputs, remove the last genre.
    driver.find_element_by_id("station-name_0").send_keys("Modified")
    driver.find_element_by_id("short_name_0").clear()
    driver.find_element_by_id("short_name_0").send_keys("snom")
    driver.find_element_by_id("medium_name_0").clear()
    driver.find_element_by_id("medium_name_0").send_keys("snom")
    driver.find_element_by_id("long_name_0").clear()
    driver.find_element_by_id("long_name_0").send_keys("Modified")
    driver.find_element_by_id("short_description_0").clear()
    driver.find_element_by_id("short_description_0").send_keys("Modified")
    driver.find_element_by_id("default_language_0").find_element_by_css_selector("option[value='it']").click()
    driver.find_element_by_name("url_default_0").send_keys("Modified url")
    driver.find_element_by_id("postal_name_0").send_keys("Modified postal name")
    driver.find_element_by_id("street_0").send_keys("Modified street")
    driver.find_element_by_id("zipcode_0").send_keys("Modified zipcode")
    driver.find_element_by_id("city_0").send_keys("Modified city")
    driver.find_element_by_id("location_country_0").find_element_by_css_selector("option[value='li']").click()
    driver.find_element_by_id("phone_number_0").send_keys("Modified phone number")
    driver.find_element_by_id("sms_number_0").send_keys("Modified sms number")
    driver.find_element_by_id("sms_body_0").send_keys("Modified sms body")
    driver.find_element_by_id("sms_description_0").send_keys("Modified description")
    driver.find_element_by_id("email_address_0").send_keys("Modified email")
    driver.find_element_by_id("email_description_0").send_keys("Modified description")
    driver.find_element_by_id("genre_row_template_0-0").find_element_by_css_selector(
        "option[value='3.1.1']").click()  # News
    driver.find_element_by_id("add_gender_button_0").click()
    driver.find_element_by_id("genre_row_template_0-1").find_element_by_css_selector(
        "option[value='3.1.1.11']").click()  # Local/Regional
    driver.find_element_by_id("genre_row_template_0-2").find_element_by_css_selector(
        ".btn.btn-xs.btn-danger").click()
    driver.find_element_by_id("nav_tab_1").send_keys(Keys.RETURN)
    # Check overrides indicator visibility: tab 1 has no overrides, so all
    # its indicator dots stay at their default (empty) style.
    assert driver.find_element_by_id("dot_grp_1_name").get_attribute("style") == ""
    assert driver.find_element_by_id("dot_grp_1_language").get_attribute("style") == ""
    assert driver.find_element_by_id("dot_grp_1_links").get_attribute("style") == ""
    assert driver.find_element_by_id("dot_grp_1_address").get_attribute("style") == ""
    assert driver.find_element_by_id("dot_grp_1_contact").get_attribute("style") == ""
    assert driver.find_element_by_id("dot_grp_1_genres").get_attribute("style") == ""
    driver.find_element_by_id("nav_tab_2").send_keys(Keys.RETURN)
    # Check overrides indicator visibility: tab 2 overrides every group, so
    # each dot is shown (opacity forced to 1).
    assert driver.find_element_by_id("dot_grp_2_name").get_attribute("style") == "opacity: 1;"
    assert driver.find_element_by_id("dot_grp_2_language").get_attribute("style") == "opacity: 1;"
    assert driver.find_element_by_id("dot_grp_2_links").get_attribute("style") == "opacity: 1;"
    assert driver.find_element_by_id("dot_grp_2_address").get_attribute("style") == "opacity: 1;"
    assert driver.find_element_by_id("dot_grp_2_contact").get_attribute("style") == "opacity: 1;"
    assert driver.find_element_by_id("dot_grp_2_genres").get_attribute("style") == "opacity: 1;"
    # Assert that overridden inputs are not changed even if the defaults one have.
    assert driver.find_element_by_id("station-name_2").get_attribute("value") == "station name override"
    assert driver.find_element_by_id("short_name_2").get_attribute("value") == "sno"
    assert driver.find_element_by_id("medium_name_2").get_attribute("value") == "sno"
    assert driver.find_element_by_id("long_name_2").get_attribute("value") == "long name override"
    assert driver.find_element_by_id("short_description_2").get_attribute("value") == "short description override"
    assert Select(driver.find_element_by_id("default_language_2"))\
        .first_selected_option.get_attribute("value") == "fr"
    assert driver.find_element_by_name("url_default_2").get_attribute("value") == "https://github.com/ebu"
    assert driver.find_element_by_id("postal_name_2").get_attribute("value") == "postal name override"
    assert driver.find_element_by_id("street_2").get_attribute("value") == "street override"
    assert driver.find_element_by_id("zipcode_2").get_attribute("value") == "1110"
    assert driver.find_element_by_id("city_2").get_attribute("value") == "city override"
    assert Select(driver.find_element_by_id("location_country_2"))\
        .first_selected_option.get_attribute("value") == "pt"
    assert driver.find_element_by_id("phone_number_2").get_attribute("value") == "1111111111"
    assert driver.find_element_by_id("sms_number_2").get_attribute("value") == "222222222"
    assert driver.find_element_by_id("sms_body_2").get_attribute("value") == "SMS body override"
    assert driver.find_element_by_id("sms_description_2").get_attribute("value") == "SMS description override"
    assert driver.find_element_by_id("email_address_2").get_attribute("value") == "AlexanderWexler@teleworm.us override"
    assert driver.find_element_by_id("email_description_2").get_attribute("value") == "Email description override"
    assert Select(driver.find_element_by_id("genre_row_template_2-0").find_element_by_tag_name("select"))\
        .first_selected_option.get_attribute("value") == "3.1.1.12"
    assert Select(driver.find_element_by_id("genre_row_template_2-1").find_element_by_tag_name("select")) \
        .first_selected_option.get_attribute("value") == "3.3.5"
    assert Select(driver.find_element_by_id("genre_row_template_2-2").find_element_by_tag_name("select")) \
        .first_selected_option.get_attribute("value") == "3.6.4.14.2"
    # Change override's values
    driver.find_element_by_id("station-name_2").send_keys("Modified Override")
    driver.find_element_by_id("short_name_2").send_keys("mo")
    driver.find_element_by_id("medium_name_2").send_keys("mo")
    driver.find_element_by_id("long_name_2").send_keys("Modified Override")
    driver.find_element_by_id("short_description_2").send_keys("Modified Override")
    driver.find_element_by_id("default_language_2").find_element_by_css_selector("option[value='fr']").click()
    driver.find_element_by_name("url_default_2").send_keys("Modified Override")
    driver.find_element_by_id("postal_name_2").send_keys("Modified Override")
    driver.find_element_by_id("street_2").send_keys("Modified Override")
    driver.find_element_by_id("zipcode_2").send_keys("Modified Override")
    driver.find_element_by_id("city_2").send_keys("Modified Override")
    driver.find_element_by_id("location_country_2").find_element_by_css_selector("option[value='pt']").click()
    driver.find_element_by_id("phone_number_2").send_keys("Modified Override")
    driver.find_element_by_id("sms_number_2").send_keys("Modified Override")
    driver.find_element_by_id("sms_body_2").send_keys("Modified Override")
    driver.find_element_by_id("sms_description_2").send_keys("Modified Override")
    driver.find_element_by_id("email_address_2").send_keys("Modified Override")
    driver.find_element_by_id("email_description_2").send_keys("Modified Override")
    driver.find_element_by_id("genre_row_template_2-0").find_element_by_css_selector(
        "option[value='3.1.1.13']").click()  # Weather forecasts
    driver.find_element_by_id("genre_row_template_2-1").find_element_by_css_selector(
        "option[value='3.1.1.16']").click()  # Current Affairs
    driver.find_element_by_id("genre_row_template_2-2").find_element_by_css_selector(
        "option[value='3.6.4']").click()  # Pop-rock
    driver.find_element_by_css_selector("button[type=submit][value=Save]").click()
    # go to details
    driver.get(TEST_PROXY_URL + "stations/2")
    # Check html
    tables = driver.find_elements_by_class_name("table-responsive")
    assert len(tables) == 3
    station_tr = list(map(lambda x: x.text, tables[0].find_elements_by_css_selector("tr")))
    driver.find_element_by_id("nav_tab_2").send_keys(Keys.RETURN)
    overrides_tr = list(map(lambda x: x.text, tables[2].find_elements_by_css_selector("tr")))
    assert compare_lists(station_tr, STATION_DETAILS_DEFAULT_TR)
    assert compare_lists(overrides_tr, STATION_DETAILS_OVERRIDES_TR)
    # Check DB
    result = db.engine.execute(text(MYSQL_STATION_QUERY))
    assert result.rowcount == 1
    assert compare_lists(sql_alchemy_result_to_list(result)[0], STATION_DATABASE_OVERRIDES_TR, True)
    # Check XML served over RadioDNS for the override client identifier.
    conn = http.client.HTTPConnection("localhost", TEST_RADIO_DNS_PORT)
    conn.request('GET', '/radiodns/spi/3.1/SI.xml', headers={"Authorization": "ClientIdentifier TESTIDENTIFIERS2"})
    res = conn.getresponse()
    assert res.code == 200
    xml_root = ET.fromstring(res.read().decode())
    assert len(xml_root.findall(".//{http://www.worlddab.org/schemas/spi/31}service")) == 2
    # Select the station that is an override
    xml_root = xml_root.findall(".//{http://www.worlddab.org/schemas/spi/31}service")[1]
    assert xml_root.find(".//{http://www.worlddab.org/schemas/spi/31}shortName").text == "snomo"
    assert xml_root.find(".//{http://www.worlddab.org/schemas/spi/31}mediumName").text == "snomo"
    assert xml_root.find(".//{http://www.worlddab.org/schemas/spi/31}longName").text == "long name overrideModified Override"
    links = xml_root.findall(".//{http://www.worlddab.org/schemas/spi/31}link")
    assert links[0].attrib["url"] == "https://github.com/ebuModified Override"
    assert links[0].attrib["mimeValue"] == "text/html"
    assert links[1].attrib["uri"] == "postal:postal name overrideModified Override/street overrideModified Override/city overrideModified Override/1110Modified Override/Azores [Portugal]"
    assert links[2].attrib["uri"] == "tel:1111111111Modified Override"
    assert links[3].attrib["description"] == "SMS description overrideModified Override"
    assert links[3].attrib["uri"] == "sms:222222222Modified Override?body=SMS+body+overrideModified+Override"
    assert links[4].attrib["description"] == "Email description overrideModified Override"
    assert links[4].attrib["uri"] == "mailto:AlexanderWexler@teleworm.us overrideModified Override"
    assert xml_root.find(".//{http://www.worlddab.org/schemas/spi/31}radiodns").attrib[
        "fqdn"] == "stationnameoverridemodifiedoverride.standalone.radio.ebu.io"
    assert xml_root.find(".//{http://www.worlddab.org/schemas/spi/31}radiodns").attrib[
        "serviceIdentifier"] == "ebu2standalone"
    genres = xml_root.findall(".//{http://www.worlddab.org/schemas/spi/31}genre")
    assert len(genres) == 3
    assert genres[0].attrib["href"] == "urn:tva:metadata:cs:ContentCS:2011:3.1.1.13"
    assert genres[0].text == "Weather forecasts"
    assert genres[1].attrib["href"] == "urn:tva:metadata:cs:ContentCS:2011:3.1.1.16"
    assert genres[1].text == "Current Affairs"
    assert genres[2].attrib["href"] == "urn:tva:metadata:cs:ContentCS:2011:3.6.4"
    assert genres[2].text == "Pop-rock"
| |
# Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import urllib
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import assisted_volume_snapshots \
as assisted_snaps_v21
from nova.api.openstack.compute.legacy_v2.contrib import \
assisted_volume_snapshots as assisted_snaps_v2
from nova.api.openstack.compute.legacy_v2.contrib import volumes
from nova.api.openstack.compute import volumes as volumes_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
# Canned UUIDs shared by the fakes and tests below.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
def fake_get_instance(self, context, instance_id, want_objects=False,
                      expected_attrs=None):
    """Test stub returning a fake instance object with the given uuid."""
    return fake_instance.fake_instance_obj(context, uuid=instance_id)
def fake_get_volume(self, context, id):
    """Test stub returning a canned available, detached volume record."""
    volume = {
        'id': FAKE_UUID_A,
        'status': 'available',
        'attach_status': 'detached',
    }
    return volume
def fake_attach_volume(self, context, instance, volume_id, device):
    """No-op test stub for volume attach calls."""
    pass
def fake_detach_volume(self, context, instance, volume):
    """No-op test stub for volume detach calls."""
    pass
def fake_swap_volume(self, context, instance,
                     old_volume_id, new_volume_id):
    """No-op test stub for volume swap calls."""
    pass
def fake_create_snapshot(self, context, volume, name, description):
    """Test stub returning a canned snapshot record."""
    snapshot = {
        'id': 123,
        'volume_id': 'fakeVolId',
        'status': 'available',
        'volume_size': 123,
        'created_at': '2013-01-01 00:00:01',
        'display_name': 'myVolumeName',
        'display_description': 'myVolumeDescription',
    }
    return snapshot
def fake_delete_snapshot(self, context, snapshot_id):
    """No-op test stub for snapshot delete calls."""
    pass
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
                                        delete_info):
    """No-op test stub for the compute-assisted snapshot delete call."""
    pass
def fake_compute_volume_snapshot_create(self, context, volume_id,
                                        create_info):
    """No-op test stub for the compute-assisted snapshot create call."""
    pass
def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
    """Test stub returning two volume-backed block device mappings."""
    bdms = []
    # Two BDMs that differ only in id, device name and volume uuid.
    for bdm_id, device_name, volume_id in ((1, '/dev/fake0', FAKE_UUID_A),
                                           (2, '/dev/fake1', FAKE_UUID_B)):
        bdms.append(fake_block_device.FakeDbBlockDeviceDict(
            {'id': bdm_id,
             'instance_uuid': instance_uuid,
             'device_name': device_name,
             'delete_on_termination': 'False',
             'source_type': 'volume',
             'destination_type': 'volume',
             'snapshot_id': None,
             'volume_id': volume_id,
             'volume_size': 1}))
    return bdms
class BootFromVolumeTest(test.TestCase):
    """Tests booting servers from a volume via the os-volumes_boot API."""

    def setUp(self):
        super(BootFromVolumeTest, self).setUp()
        # Replace compute_api.API.create so the test can capture the
        # block-device-mapping kwargs that reach it.
        self.stubs.Set(compute_api.API, 'create',
                       self._get_fake_compute_api_create())
        fakes.stub_out_nw_api(self)
        # Populated by the fake create; inspected by the tests below.
        self._block_device_mapping_seen = None
        self._legacy_bdm_seen = True
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])

    def _get_fake_compute_api_create(self):
        """Build a fake compute API create() that records BDM kwargs."""
        def _fake_compute_api_create(cls, context, instance_type,
                                     image_href, **kwargs):
            # Record what the API layer passed down for later assertions.
            self._block_device_mapping_seen = kwargs.get(
                'block_device_mapping')
            self._legacy_bdm_seen = kwargs.get('legacy_bdm')
            inst_type = flavors.get_flavor_by_flavor_id(2)
            resv_id = None
            return ([{'id': 1,
                      'display_name': 'test_server',
                      'uuid': FAKE_UUID,
                      'instance_type': inst_type,
                      'access_ip_v4': '1.2.3.4',
                      'access_ip_v6': 'fead::1234',
                      'image_ref': IMAGE_UUID,
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
                      'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
                      'progress': 0,
                      'fixed_ips': []
                      }], resv_id)
        return _fake_compute_api_create

    def test_create_root_volume(self):
        """Boot from a legacy (v1) block_device_mapping root volume."""
        body = dict(server=dict(
            name='test_server', imageRef=IMAGE_UUID,
            flavorRef=2, min_count=1, max_count=1,
            block_device_mapping=[dict(
                volume_id='1',
                device_name='/dev/vda',
                virtual='root',
                delete_on_termination=False,
            )]
        ))
        req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(202, res.status_int)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        # The legacy mapping must be forwarded unchanged to compute.
        self.assertEqual(1, len(self._block_device_mapping_seen))
        self.assertTrue(self._legacy_bdm_seen)
        self.assertEqual('1', self._block_device_mapping_seen[0]['volume_id'])
        self.assertEqual('/dev/vda',
                         self._block_device_mapping_seen[0]['device_name'])

    def test_create_root_volume_bdm_v2(self):
        """Boot from a block_device_mapping_v2 root volume."""
        body = dict(server=dict(
            name='test_server', imageRef=IMAGE_UUID,
            flavorRef=2, min_count=1, max_count=1,
            block_device_mapping_v2=[dict(
                source_type='volume',
                uuid='1',
                device_name='/dev/vda',
                boot_index=0,
                delete_on_termination=False,
            )]
        ))
        req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(202, res.status_int)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        # v2 mappings must not be flagged as legacy.
        self.assertEqual(1, len(self._block_device_mapping_seen))
        self.assertFalse(self._legacy_bdm_seen)
        self.assertEqual('1', self._block_device_mapping_seen[0]['volume_id'])
        self.assertEqual(0, self._block_device_mapping_seen[0]['boot_index'])
        self.assertEqual('/dev/vda',
                         self._block_device_mapping_seen[0]['device_name'])
class VolumeApiTestV21(test.NoDBTestCase):
url_prefix = '/v2/fake'
    def setUp(self):
        """Stub networking, rate limiting and the cinder API, and enable
        only the Volumes extension."""
        super(VolumeApiTestV21, self).setUp()
        fakes.stub_out_networking(self)
        fakes.stub_out_rate_limiting(self.stubs)
        self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
        self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes'])
        self.context = context.get_admin_context()
    @property
    def app(self):
        """WSGI app exposing only the os-volumes and servers resources."""
        return fakes.wsgi_app_v21(init_only=('os-volumes', 'servers'))
    def test_volume_create(self):
        """POST /os-volumes echoes the created volume's fields back."""
        self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(self.app)
        self.assertEqual(200, resp.status_int)
        # Response uses camelCase keys while the request is snake_case.
        resp_dict = jsonutils.loads(resp.body)
        self.assertIn('volume', resp_dict)
        self.assertEqual(vol['size'], resp_dict['volume']['size'])
        self.assertEqual(vol['display_name'],
                         resp_dict['volume']['displayName'])
        self.assertEqual(vol['display_description'],
                         resp_dict['volume']['displayDescription'])
        self.assertEqual(vol['availability_zone'],
                         resp_dict['volume']['availabilityZone'])
    def _test_volume_create_bad(self, cinder_exc, api_exc):
        """Helper: cinder create raises `cinder_exc`; expect `api_exc`."""
        def fake_volume_create(self, context, size, name, description,
                               snapshot, **param):
            raise cinder_exc
        self.stubs.Set(cinder.API, "create", fake_volume_create)
        # Invalid size on purpose; the stub raises before validation anyway.
        vol = {"size": '#$?',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
        self.assertRaises(api_exc,
                          volumes.VolumeController().create, req, body=body)
@mock.patch.object(cinder.API, 'get_snapshot')
@mock.patch.object(cinder.API, 'create')
def test_volume_create_bad_snapshot_id(self, mock_create, mock_get):
vol = {"snapshot_id": '1'}
body = {"volume": vol}
mock_get.side_effect = exception.SnapshotNotFound(snapshot_id='1')
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(webob.exc.HTTPNotFound,
volumes.VolumeController().create, req, body=body)
def test_volume_create_bad_input(self):
self._test_volume_create_bad(exception.InvalidInput(reason='fake'),
webob.exc.HTTPBadRequest)
def test_volume_create_bad_quota(self):
self._test_volume_create_bad(exception.OverQuota(overs='fake'),
webob.exc.HTTPForbidden)
def test_volume_index(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
def test_volume_detail(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/detail')
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
def test_volume_show(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
def test_volume_show_no_volume(self):
self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
resp = req.get_response(self.app)
self.assertEqual(404, resp.status_int)
self.assertIn('Volume 456 could not be found.', resp.body)
def test_volume_delete(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(202, resp.status_int)
def test_volume_delete_no_volume(self):
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(404, resp.status_int)
self.assertIn('Volume 456 could not be found.', resp.body)
class VolumeApiTestV2(VolumeApiTestV21):
    """Run the volume API tests against the legacy v2 WSGI application."""

    @property
    def app(self):
        # Same extensions as the parent, but served by the v2 contrib stack.
        return fakes.wsgi_app(init_only=('os-volumes', 'servers'))
class VolumeAttachTestsV21(test.NoDBTestCase):
    """Tests for the v2.1 os-volume_attachments controller.

    The repeated fake-request boilerplate from every test is factored into
    :meth:`_make_request`; the v2 subclass reuses everything except the
    controller construction in :meth:`_set_up_controller`.
    """

    # The exception type raised for schema violations; the v2 subclass
    # overrides this with webob's HTTPBadRequest.
    validation_error = exception.ValidationError

    def setUp(self):
        super(VolumeAttachTestsV21, self).setUp()
        self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
                      fake_bdms_get_all_by_instance)
        self.stubs.Set(compute_api.API, 'get', fake_get_instance)
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.context = context.get_admin_context()
        self.expected_show = {'volumeAttachment':
            {'device': '/dev/fake0',
             'serverId': FAKE_UUID,
             'id': FAKE_UUID_A,
             'volumeId': FAKE_UUID_A
            }}
        self._set_up_controller()

    def _set_up_controller(self):
        # Overridden by the v2 subclass to build the legacy controller(s).
        self.attachments = volumes_v21.VolumeAttachmentController()

    def _make_request(self, url='/v2/servers/id/os-volume_attachments/uuid',
                      method='POST', body=None, version=None):
        """Return a fake HTTP request carrying the admin context.

        :param url: request path
        :param method: HTTP verb to set on the request
        :param body: JSON-serializable request body, or None to leave the
                     request body unset (used for DELETE requests)
        :param version: optional API microversion for the request
        """
        if version is None:
            req = fakes.HTTPRequest.blank(url)
        else:
            req = fakes.HTTPRequest.blank(url, version=version)
        req.method = method
        if body is not None:
            req.body = jsonutils.dump_as_bytes(body)
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        return req

    def test_show(self):
        req = self._make_request(body={})
        result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual(self.expected_show, result)

    @mock.patch.object(compute_api.API, 'get',
        side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
    def test_show_no_instance(self, mock_mr):
        req = self._make_request(body={})
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid', return_value=None)
    def test_show_no_bdms(self, mock_mr):
        req = self._make_request(body={})
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    def test_show_bdms_no_mountpoint(self):
        FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        req = self._make_request(body={})
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_NOTEXIST)

    def test_detach(self):
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume)
        req = self._make_request(method='DELETE')
        result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.attachments,
                      volumes_v21.VolumeAttachmentController):
            status_int = self.attachments.delete.wsgi_code
        else:
            status_int = result.status_int
        self.assertEqual(202, status_int)

    @mock.patch.object(common, 'get_instance')
    def test_detach_vol_shelved_not_supported(self, mock_get_instance):
        # Before microversion 2.20 detaching from a SHELVED instance is a
        # conflict.
        inst = fake_instance.fake_instance_obj(self.context,
                                               **{'uuid': FAKE_UUID})
        inst.vm_state = vm_states.SHELVED
        mock_get_instance.return_value = inst
        req = self._make_request(method='DELETE', version='2.19')
        self.assertRaises(webob.exc.HTTPConflict,
                          self.attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    @mock.patch.object(compute_api.API, 'detach_volume')
    @mock.patch.object(common, 'get_instance')
    def test_detach_vol_shelved_supported(self,
                                          mock_get_instance,
                                          mock_detach):
        # From microversion 2.20 onwards, SHELVED instances can detach.
        inst = fake_instance.fake_instance_obj(self.context,
                                               **{'uuid': FAKE_UUID})
        inst.vm_state = vm_states.SHELVED
        mock_get_instance.return_value = inst
        req = self._make_request(method='DELETE', version='2.20')
        self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
        self.assertTrue(mock_detach.called)

    def test_detach_vol_not_found(self):
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume)
        req = self._make_request(method='DELETE')
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_C)

    @mock.patch('nova.objects.BlockDeviceMapping.is_root',
                new_callable=mock.PropertyMock)
    def test_detach_vol_root(self, mock_isroot):
        # Detaching the root volume is forbidden.
        req = self._make_request(method='DELETE')
        mock_isroot.return_value = True
        self.assertRaises(exc.HTTPForbidden,
                          self.attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    def test_detach_volume_from_locked_server(self):
        def fake_detach_volume_from_locked_server(self, context,
                                                  instance, volume):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume_from_locked_server)
        req = self._make_request(method='DELETE')
        self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
                          req, FAKE_UUID, FAKE_UUID_A)

    def test_attach_volume(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments', body={})
        result = self.attachments.create(req, FAKE_UUID, body=body)
        self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
                         result['volumeAttachment']['id'])

    @mock.patch.object(common, 'get_instance')
    def test_attach_vol_shelved_not_supported(self, mock_get_instance):
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        inst = fake_instance.fake_instance_obj(self.context,
                                               **{'uuid': FAKE_UUID})
        inst.vm_state = vm_states.SHELVED
        mock_get_instance.return_value = inst
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments',
            body={}, version='2.19')
        self.assertRaises(webob.exc.HTTPConflict,
                          self.attachments.create,
                          req,
                          FAKE_UUID,
                          body=body)

    @mock.patch.object(compute_api.API, 'attach_volume',
                       return_value='/dev/myfake')
    @mock.patch.object(common, 'get_instance')
    def test_attach_vol_shelved_supported(self,
                                          mock_get_instance,
                                          mock_attach):
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        inst = fake_instance.fake_instance_obj(self.context,
                                               **{'uuid': FAKE_UUID})
        inst.vm_state = vm_states.SHELVED
        mock_get_instance.return_value = inst
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments',
            body={}, version='2.20')
        result = self.attachments.create(req, FAKE_UUID, body=body)
        self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
                         result['volumeAttachment']['id'])
        self.assertEqual('/dev/myfake', result['volumeAttachment']['device'])

    @mock.patch.object(compute_api.API, 'attach_volume',
                       return_value='/dev/myfake')
    def test_attach_volume_with_auto_device(self, mock_attach):
        # A null device asks nova to pick one; the stubbed value is echoed.
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': None}}
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments', body={})
        result = self.attachments.create(req, FAKE_UUID, body=body)
        self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
                         result['volumeAttachment']['id'])
        self.assertEqual('/dev/myfake', result['volumeAttachment']['device'])

    def test_attach_volume_to_locked_server(self):
        def fake_attach_volume_to_locked_server(self, context, instance,
                                                volume_id, device=None):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume_to_locked_server)
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments', body={})
        self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
                          req, FAKE_UUID, body=body)

    def test_attach_volume_bad_id(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)
        body = {
            'volumeAttachment': {
                'device': None,
                'volumeId': 'TESTVOLUME',
            }
        }
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments', body={})
        self.assertRaises(self.validation_error, self.attachments.create,
                          req, FAKE_UUID, body=body)

    def test_attach_volume_without_volumeId(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)
        body = {
            'volumeAttachment': {
                'device': None
            }
        }
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments', body={})
        self.assertRaises(self.validation_error, self.attachments.create,
                          req, FAKE_UUID, body=body)

    def test_attach_volume_with_extra_arg(self):
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake',
                                     'extra': 'extra_arg'}}
        req = self._make_request(
            url='/v2/servers/id/os-volume_attachments', body={})
        self.assertRaises(self.validation_error, self.attachments.create,
                          req, FAKE_UUID, body=body)

    def _test_swap(self, attachments, uuid=FAKE_UUID_A,
                   fake_func=None, body=None):
        """Issue a PUT (swap) against *attachments* and return the result."""
        fake_func = fake_func or fake_swap_volume
        self.stubs.Set(compute_api.API,
                       'swap_volume',
                       fake_func)
        body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
        req = self._make_request(method='PUT', body={})
        return attachments.update(req, FAKE_UUID, uuid, body=body)

    def test_swap_volume_for_locked_server(self):
        def fake_swap_volume_for_locked_server(self, context, instance,
                                               old_volume, new_volume):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
                          self.attachments,
                          fake_func=fake_swap_volume_for_locked_server)

    def test_swap_volume(self):
        result = self._test_swap(self.attachments)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.attachments,
                      volumes_v21.VolumeAttachmentController):
            status_int = self.attachments.update.wsgi_code
        else:
            status_int = result.status_int
        self.assertEqual(202, status_int)

    def test_swap_volume_no_attachment(self):
        self.assertRaises(exc.HTTPNotFound, self._test_swap,
                          self.attachments, FAKE_UUID_C)

    def test_swap_volume_without_volumeId(self):
        body = {'volumeAttachment': {'device': '/dev/fake'}}
        self.assertRaises(self.validation_error,
                          self._test_swap,
                          self.attachments,
                          body=body)

    def test_swap_volume_with_extra_arg(self):
        # 'device' is not part of the swap (update) schema, so it counts
        # as an extra argument under v2.1 strict validation.
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        self.assertRaises(self.validation_error,
                          self._test_swap,
                          self.attachments,
                          body=body)
class VolumeAttachTestsV2(VolumeAttachTestsV21):
    """Re-run the attachment tests against the legacy v2 controller."""

    validation_error = webob.exc.HTTPBadRequest

    def _set_up_controller(self):
        # A controller with the volume-attachment-update extension loaded...
        ext_mgr = extensions.ExtensionManager()
        ext_mgr.extensions = {'os-volume-attachment-update'}
        self.attachments = volumes.VolumeAttachmentController(ext_mgr)
        # ...and one without it, to exercise the "swap unsupported" path.
        ext_mgr_no_update = extensions.ExtensionManager()
        ext_mgr_no_update.extensions = {}
        self.attachments_no_update = volumes.VolumeAttachmentController(
            ext_mgr_no_update)

    def test_swap_volume_no_extension(self):
        # Without the update extension, swapping is rejected outright.
        self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap,
                          self.attachments_no_update)

    @mock.patch.object(compute_api.API, 'attach_volume',
                       return_value=[])
    def test_attach_volume_with_extra_arg(self, mock_attach):
        # NOTE(gmann): V2 does not perform strong input validation
        # so volume is attached successfully even with extra arg in
        # request body.
        payload = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                        'device': '/dev/fake',
                                        'extra': 'extra_arg'}}
        request = fakes.HTTPRequest.blank(
            '/v2/servers/id/os-volume_attachments')
        request.method = 'POST'
        request.body = jsonutils.dump_as_bytes({})
        request.headers['content-type'] = 'application/json'
        request.environ['nova.context'] = self.context
        result = self.attachments.create(request, FAKE_UUID, body=payload)
        self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
                         result['volumeAttachment']['id'])

    def test_swap_volume_with_extra_arg(self):
        # NOTE(gmann): V2 does not perform strong input validation.
        # Volume is swapped successfully even with extra arg in
        # request body. So 'pass' this test for V2.
        pass
class CommonBadRequestTestCase(object):
    """
    Tests of places we throw 400 Bad Request from
    """
    # NOTE: the docstring above was previously placed after the class
    # attributes, where it was a dead string statement rather than the
    # class __doc__; it now sits first, as required.

    # Subclasses fill these in for the resource under test.
    resource = None
    entity_name = None
    controller_cls = None
    kwargs = {}
    bad_request = exc.HTTPBadRequest

    def setUp(self):
        super(CommonBadRequestTestCase, self).setUp()
        self.controller = self.controller_cls()

    def _bad_request_create(self, body):
        """POST *body* and expect the subclass' bad_request exception."""
        req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
        req.method = 'POST'
        # Copy so the shared class-level kwargs dict is never mutated.
        kwargs = self.kwargs.copy()
        kwargs['body'] = body
        self.assertRaises(self.bad_request,
                          self.controller.create, req, **kwargs)

    def test_create_no_body(self):
        self._bad_request_create(body=None)

    def test_create_missing_volume(self):
        body = {'foo': {'a': 'b'}}
        self._bad_request_create(body=body)

    def test_create_malformed_entity(self):
        body = {self.entity_name: 'string'}
        self._bad_request_create(body=body)
class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
                                  test.NoDBTestCase):
    """Bad-request checks for the v2.1 os-volumes resource."""

    controller_cls = volumes_v21.VolumeController
    resource = 'os-volumes'
    entity_name = 'volume'
    # v2.1 strict validation raises ValidationError rather than 400.
    bad_request = exception.ValidationError
class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
    """Same bad-request checks against the legacy v2 controller."""

    bad_request = exc.HTTPBadRequest
    controller_cls = volumes.VolumeController
class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
                                   test.NoDBTestCase):
    """Bad-request checks for the os-volume_attachments sub-resource."""

    controller_cls = volumes.VolumeAttachmentController
    resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
    entity_name = 'volumeAttachment'
    # create() also needs the parent server id.
    kwargs = {'server_id': FAKE_UUID}
class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
                                    test.NoDBTestCase):
    """Bad-request checks for the v2.1 os-snapshots resource."""

    controller_cls = volumes_v21.SnapshotController
    resource = 'os-snapshots'
    entity_name = 'snapshot'
    # v2.1 strict validation raises ValidationError rather than 400.
    bad_request = exception.ValidationError
class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
    """Same bad-request checks against the legacy v2 controller."""

    bad_request = exc.HTTPBadRequest
    controller_cls = volumes.SnapshotController
class AssistedSnapshotCreateTestCaseV21(test.NoDBTestCase):
    """Creation tests for the os-assisted-volume-snapshots API (v2.1)."""

    assisted_snaps = assisted_snaps_v21
    bad_request = exception.ValidationError

    def setUp(self):
        super(AssistedSnapshotCreateTestCaseV21, self).setUp()
        self.controller = \
            self.assisted_snaps.AssistedVolumeSnapshotsController()
        self.stubs.Set(compute_api.API, 'volume_snapshot_create',
                       fake_compute_volume_snapshot_create)

    def test_assisted_create(self):
        """A complete snapshot payload is accepted."""
        request = fakes.HTTPRequest.blank(
            '/v2/fake/os-assisted-volume-snapshots')
        request.method = 'POST'
        create_info = {'type': 'qcow2',
                       'new_file': 'new_file',
                       'snapshot_id': 'snapshot_id'}
        payload = {'snapshot': {'volume_id': '1',
                                'create_info': create_info}}
        self.controller.create(request, body=payload)

    def test_assisted_create_missing_create_info(self):
        """Omitting create_info triggers the version's bad-request error."""
        request = fakes.HTTPRequest.blank(
            '/v2/fake/os-assisted-volume-snapshots')
        request.method = 'POST'
        payload = {'snapshot': {'volume_id': '1'}}
        self.assertRaises(self.bad_request, self.controller.create,
                          request, body=payload)
class AssistedSnapshotCreateTestCaseV2(AssistedSnapshotCreateTestCaseV21):
    """Same creation tests against the legacy v2 plugin."""

    bad_request = webob.exc.HTTPBadRequest
    assisted_snaps = assisted_snaps_v2
class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
    """Deletion tests for the os-assisted-volume-snapshots API (v2.1)."""

    assisted_snaps = assisted_snaps_v21

    def _check_status(self, expected_status, res, controller_method):
        # v2.1 stores the success code on the controller method itself.
        self.assertEqual(expected_status, controller_method.wsgi_code)

    def setUp(self):
        super(AssistedSnapshotDeleteTestCaseV21, self).setUp()
        self.controller = \
            self.assisted_snaps.AssistedVolumeSnapshotsController()
        self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
                       fake_compute_volume_snapshot_delete)

    def test_assisted_delete(self):
        """A delete_info query parameter yields a 204 deletion."""
        query = urllib.parse.urlencode(
            {'delete_info': jsonutils.dumps({'volume_id': '1'})})
        request = fakes.HTTPRequest.blank(
            '/v2/fake/os-assisted-volume-snapshots?%s' % query)
        request.method = 'DELETE'
        result = self.controller.delete(request, '5')
        self._check_status(204, result, self.controller.delete)

    def test_assisted_delete_missing_delete_info(self):
        """Omitting delete_info is a 400 in every API version."""
        request = fakes.HTTPRequest.blank(
            '/v2/fake/os-assisted-volume-snapshots')
        request.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          request, '5')
class AssistedSnapshotDeleteTestCaseV2(AssistedSnapshotDeleteTestCaseV21):
    """Same deletion tests against the legacy v2 plugin."""

    assisted_snaps = assisted_snaps_v2

    def _check_status(self, expected_status, res, controller_method):
        # v2 returns a plain response object carrying status_int.
        self.assertEqual(expected_status, res.status_int)
class TestAssistedVolumeSnapshotsPolicyEnforcementV21(test.NoDBTestCase):
    """Policy enforcement tests for os-assisted-volume-snapshots (v2.1)."""

    def setUp(self):
        super(TestAssistedVolumeSnapshotsPolicyEnforcementV21, self).setUp()
        self.controller = (
            assisted_snaps_v21.AssistedVolumeSnapshotsController())
        self.req = fakes.HTTPRequest.blank('')

    def _common_policy_check(self, rule_name, func, *arg, **kwarg):
        """Deny *rule_name* for this project and verify *func* is rejected.

        Factors out the deny-and-assert pattern shared by both tests,
        mirroring the helper in TestVolumeAttachPolicyEnforcementV21.
        """
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_create_assisted_volumes_snapshots_policy_failed(self):
        rule_name = "os_compute_api:os-assisted-volume-snapshots:create"
        body = {'snapshot':
                   {'volume_id': '1',
                    'create_info': {'type': 'qcow2',
                                    'new_file': 'new_file',
                                    'snapshot_id': 'snapshot_id'}}}
        self._common_policy_check(rule_name, self.controller.create,
                                  self.req, body=body)

    def test_delete_assisted_volumes_snapshots_policy_failed(self):
        rule_name = "os_compute_api:os-assisted-volume-snapshots:delete"
        self._common_policy_check(rule_name, self.controller.delete,
                                  self.req, '5')
class TestVolumeAttachPolicyEnforcementV21(test.NoDBTestCase):
    """Policy enforcement checks for the v2.1 volume-attachments API."""

    def setUp(self):
        super(TestVolumeAttachPolicyEnforcementV21, self).setUp()
        self.controller = volumes_v21.VolumeAttachmentController()
        self.req = fakes.HTTPRequest.blank('')

    def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
        """Install *rules*, call *func*, and expect *rule_name* to deny it."""
        self.policy.set_rules(rules)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_volume_attach_policy_failed(self):
        rule_name = "os_compute_api:os-volumes-attachments:index"
        rules = {rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name,
                                  self.controller.index, self.req, FAKE_UUID)

    def test_show_volume_attach_policy_failed(self):
        # Each action is guarded by two rules: the generic os-volumes rule
        # and the action-specific one. Deny them one at a time, allowing
        # the other with "@" (always-allowed), to prove each is enforced.
        rule_name = "os_compute_api:os-volumes"
        rules = {"os_compute_api:os-volumes-attachments:show": "@",
                 rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name, self.controller.show,
                                  self.req, FAKE_UUID, FAKE_UUID_A)
        rule_name = "os_compute_api:os-volumes-attachments:show"
        rules = {"os_compute_api:os-volumes": "@",
                 rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name, self.controller.show,
                                  self.req, FAKE_UUID, FAKE_UUID_A)

    def test_create_volume_attach_policy_failed(self):
        # Same two-rule scheme as show, exercised for create.
        rule_name = "os_compute_api:os-volumes"
        rules = {"os_compute_api:os-volumes-attachments:create": "@",
                 rule_name: "project:non_fake"}
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        self._common_policy_check(rules, rule_name, self.controller.create,
                                  self.req, FAKE_UUID, body=body)
        rule_name = "os_compute_api:os-volumes-attachments:create"
        rules = {"os_compute_api:os-volumes": "@",
                 rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name, self.controller.create,
                                  self.req, FAKE_UUID, body=body)

    def test_update_volume_attach_policy_failed(self):
        # Same two-rule scheme as show, exercised for update (swap).
        rule_name = "os_compute_api:os-volumes"
        rules = {"os_compute_api:os-volumes-attachments:update": "@",
                 rule_name: "project:non_fake"}
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
        self._common_policy_check(rules, rule_name, self.controller.update,
                                  self.req, FAKE_UUID, FAKE_UUID_A, body=body)
        rule_name = "os_compute_api:os-volumes-attachments:update"
        rules = {"os_compute_api:os-volumes": "@",
                 rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name, self.controller.update,
                                  self.req, FAKE_UUID, FAKE_UUID_A, body=body)

    def test_delete_volume_attach_policy_failed(self):
        # Same two-rule scheme as show, exercised for delete (detach).
        rule_name = "os_compute_api:os-volumes"
        rules = {"os_compute_api:os-volumes-attachments:delete": "@",
                 rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name, self.controller.delete,
                                  self.req, FAKE_UUID, FAKE_UUID_A)
        rule_name = "os_compute_api:os-volumes-attachments:delete"
        rules = {"os_compute_api:os-volumes": "@",
                 rule_name: "project:non_fake"}
        self._common_policy_check(rules, rule_name, self.controller.delete,
                                  self.req, FAKE_UUID, FAKE_UUID_A)
| |
# -*- coding: utf-8 -*-
from eve.tests import TestBase
from eve.utils import api_prefix
from eve.tests.test_settings import MONGO_DBNAME
import simplejson as json
class TestRenders(TestBase):
def test_default_render(self):
r = self.test_client.get('/')
self.assertEqual(r.content_type, 'application/json')
def test_json_render(self):
r = self.test_client.get('/', headers=[('Accept', 'application/json')])
self.assertEqual(r.content_type, 'application/json')
def test_xml_render(self):
r = self.test_client.get('/', headers=[('Accept', 'application/xml')])
self.assertTrue('application/xml' in r.content_type)
def test_xml_url_escaping(self):
r = self.test_client.get('%s?max_results=1' % self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assertTrue(b'&' in r.get_data())
def test_xml_leaf_escaping(self):
# test that even xml leaves content is being properly escaped
# We need to assign a `person` to our test invoice
_db = self.connection[MONGO_DBNAME]
fake_contact = self.random_contacts(1)
fake_contact[0]['ref'] = "12345 & 67890"
fake_contact_id = _db.contacts.insert(fake_contact)[0]
r = self.test_client.get('%s/%s' %
(self.known_resource_url, fake_contact_id),
headers=[('Accept', 'application/xml')])
self.assertTrue(b'12345 & 6789' in r.get_data())
def test_xml_ordered_nodes(self):
""" Test that xml nodes are ordered and #441 is addressed.
"""
r = self.test_client.get('%s?max_results=1' % self.known_resource_url,
headers=[('Accept', 'application/xml')])
data = r.get_data()
idx1 = data.index(b'_created')
idx2 = data.index(b'_etag')
idx3 = data.index(b'_id')
idx4 = data.index(b'_updated')
self.assertTrue(idx1 < idx2 < idx3 < idx4)
idx1 = data.index(b'max_results')
idx2 = data.index(b'page')
idx3 = data.index(b'total')
self.assertTrue(idx1 < idx2 < idx3)
idx1 = data.index(b'last')
idx2 = data.index(b'next')
idx3 = data.index(b'parent')
self.assertTrue(idx1 < idx2 < idx3)
def test_unknown_render(self):
r = self.test_client.get('/', headers=[('Accept', 'application/html')])
self.assertEqual(r.content_type, 'application/json')
def test_json_xml_disabled(self):
self.app.config['JSON'] = False
self.app.config['XML'] = False
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assert500(r.status_code)
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assert500(r.status_code)
r = self.test_client.get(self.known_resource_url)
self.assert500(r.status_code)
def test_json_disabled(self):
self.app.config['JSON'] = False
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assertTrue('application/xml' in r.content_type)
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assertTrue('application/xml' in r.content_type)
r = self.test_client.get(self.known_resource_url)
self.assertTrue('application/xml' in r.content_type)
def test_xml_disabled(self):
self.app.config['XML'] = False
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assertEqual(r.content_type, 'application/json')
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assertEqual(r.content_type, 'application/json')
r = self.test_client.get(self.known_resource_url)
self.assertEqual(r.content_type, 'application/json')
def test_json_keys_sorted(self):
self.app.config['JSON_SORT_KEYS'] = True
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assertEqual(
json.dumps(json.loads(r.get_data()), sort_keys=True).encode(),
r.get_data()
)
def test_jsonp_enabled(self):
arg = "callback"
self.app.config['JSONP_ARGUMENT'] = arg
val = "JSON_CALLBACK"
r = self.test_client.get('/?%s=%s' % (arg, val))
self.assertTrue(r.get_data().decode('utf-8').startswith(val))
    def test_CORS(self):
        """Exercise the CORS response headers driven by the X_DOMAINS and
        X_ALLOW_CREDENTIALS settings.
        """
        # no CORS headers if Origin is not provided with the request.
        r = self.test_client.get('/')
        self.assertFalse('Access-Control-Allow-Origin' in r.headers)
        self.assertFalse('Access-Control-Allow-Methods' in r.headers)
        self.assertFalse('Access-Control-Allow-Max-Age' in r.headers)
        self.assertFalse('Access-Control-Expose-Headers' in r.headers)
        self.assertFalse('Access-Control-Allow-Credentials' in r.headers)
        self.assert200(r.status_code)
        # test that if X_DOMAINS is set to '*', then any Origin value is
        # allowed. Also test that only the Origin header included with the
        # request will be returned back to the client.
        self.app.config['X_DOMAINS'] = '*'
        r = self.test_client.get('/', headers=[('Origin',
                                                'http://example.com')])
        self.assert200(r.status_code)
        self.assertEqual(r.headers['Access-Control-Allow-Origin'],
                         'http://example.com')
        self.assertEqual(r.headers['Vary'], 'Origin')
        # Given that CORS is activated with X_DOMAINS = '*'
        # test that if X_ALLOW_CREDENTIALS is set to True
        # then the relevant header is included in the response
        self.app.config['X_ALLOW_CREDENTIALS'] = True
        r = self.test_client.get('/', headers=[('Origin',
                                                'http://example.com')])
        self.assert200(r.status_code)
        self.assertEqual(r.headers['Access-Control-Allow-Credentials'], 'true')
        # with any other non-True value, it is missing
        self.app.config['X_ALLOW_CREDENTIALS'] = False
        r = self.test_client.get('/', headers=[('Origin',
                                                'http://example.com')])
        self.assert200(r.status_code)
        self.assertFalse('Access-Control-Allow-Credentials' in r.headers)
        # test that if a list is set for X_DOMAINS, then:
        # 1. only list values are accepted;
        # 2. only the value included with the request is returned back.
        self.app.config['X_DOMAINS'] = ['http://1of2.com', 'http://2of2.com']
        r = self.test_client.get('/', headers=[('Origin', 'http://1of2.com')])
        self.assert200(r.status_code)
        self.assertEqual(r.headers['Access-Control-Allow-Origin'],
                         'http://1of2.com')
        r = self.test_client.get('/', headers=[('Origin', 'http://2of2.com')])
        self.assert200(r.status_code)
        self.assertEqual(r.headers['Access-Control-Allow-Origin'],
                         'http://2of2.com')
        # an Origin outside the list comes back with an empty Allow-Origin.
        r = self.test_client.get('/', headers=[('Origin',
                                                'http://notreally.com')])
        self.assert200(r.status_code)
        self.assertEqual(r.headers['Access-Control-Allow-Origin'], '')
        # other Access-Control-Allow- headers are included.
        self.assertTrue('Access-Control-Allow-Headers' in r.headers)
        self.assertTrue('Access-Control-Allow-Methods' in r.headers)
        self.assertTrue('Access-Control-Allow-Max-Age' in r.headers)
        self.assertTrue('Access-Control-Expose-Headers' in r.headers)
def test_CORS_MAX_AGE(self):
self.app.config['X_DOMAINS'] = '*'
r = self.test_client.get('/', headers=[('Origin',
'http://example.com')])
self.assertEqual(r.headers['Access-Control-Allow-Max-Age'],
'21600')
self.app.config['X_MAX_AGE'] = 2000
r = self.test_client.get('/', headers=[('Origin',
'http://example.com')])
self.assertEqual(r.headers['Access-Control-Allow-Max-Age'],
'2000')
def test_CORS_OPTIONS(self, url='/', methods=None):
    """Exercise CORS behaviour for OPTIONS (preflight) requests.

    Also reused as a helper by the resource- and item-level tests, which
    pass the endpoint *url* and the list of *methods* expected to appear
    in the Access-Control-Allow-Methods response header.
    """
    if methods is None:
        methods = []

    # With CORS disabled (no X_DOMAINS) the request succeeds but no
    # Access-Control-* headers are emitted.
    r = self.test_client.open(url, method='OPTIONS')
    self.assertFalse('Access-Control-Allow-Origin' in r.headers)
    self.assertFalse('Access-Control-Allow-Methods' in r.headers)
    self.assertFalse('Access-Control-Allow-Max-Age' in r.headers)
    self.assertFalse('Access-Control-Expose-Headers' in r.headers)
    self.assertFalse('Access-Control-Allow-Credentials' in r.headers)
    self.assert200(r.status_code)

    # test that if X_DOMAINS is set to '*', then any Origin value is
    # allowed. Also test that only the Origin header included with the
    # request will be returned back to the client.
    self.app.config['X_DOMAINS'] = '*'
    r = self.test_client.open(url, method='OPTIONS',
                              headers=[('Origin', 'http://example.com')])
    self.assert200(r.status_code)
    self.assertEqual(r.headers['Access-Control-Allow-Origin'],
                     'http://example.com')
    self.assertEqual(r.headers['Vary'], 'Origin')
    for m in methods:
        self.assertTrue(m in r.headers['Access-Control-Allow-Methods'])

    # Given that CORS is activated with X_DOMAINS = '*'
    # test that if X_ALLOW_CREDENTIALS is set to True
    # then the relevant header is included in the response
    self.app.config['X_ALLOW_CREDENTIALS'] = True
    r = self.test_client.open(url, method='OPTIONS',
                              headers=[('Origin', 'http://example.com')])
    self.assert200(r.status_code)
    self.assertEqual(r.headers['Access-Control-Allow-Credentials'], 'true')

    # with any other non-True value, it is missing
    self.app.config['X_ALLOW_CREDENTIALS'] = False
    r = self.test_client.open(url, method='OPTIONS',
                              headers=[('Origin', 'http://example.com')])
    self.assert200(r.status_code)
    self.assertFalse('Access-Control-Allow-Credentials' in r.headers)

    # With an explicit whitelist, only the matching Origin is echoed back.
    self.app.config['X_DOMAINS'] = ['http://1of2.com', 'http://2of2.com']
    r = self.test_client.open(url, method='OPTIONS',
                              headers=[('Origin', 'http://1of2.com')])
    self.assert200(r.status_code)
    self.assertEqual(r.headers['Access-Control-Allow-Origin'],
                     'http://1of2.com')
    r = self.test_client.open(url, method='OPTIONS',
                              headers=[('Origin', 'http://2of2.com')])
    self.assert200(r.status_code)
    self.assertEqual(r.headers['Access-Control-Allow-Origin'],
                     'http://2of2.com')
    for m in methods:
        self.assertTrue(m in r.headers['Access-Control-Allow-Methods'])
    self.assertTrue('Access-Control-Allow-Origin' in r.headers)
    self.assertTrue('Access-Control-Allow-Max-Age' in r.headers)
    self.assertTrue('Access-Control-Expose-Headers' in r.headers)

    # A non-whitelisted Origin gets an empty Allow-Origin value.
    # NOTE(review): this step uses GET rather than OPTIONS — presumably
    # intentional, to check disallowed-origin handling on a plain request;
    # confirm against the other branches above.
    r = self.test_client.get(url, headers=[('Origin',
                                            'http://not_an_example.com')])
    self.assert200(r.status_code)
    self.assertEqual(r.headers['Access-Control-Allow-Origin'], '')
    for m in methods:
        self.assertTrue(m in r.headers['Access-Control-Allow-Methods'])
def test_CORS_OPTIONS_resources(self):
    """Run the CORS OPTIONS checks against every resource endpoint."""
    prefix = api_prefix(self.app.config['URL_PREFIX'],
                        self.app.config['API_VERSION'])

    # Drop the auxiliary pseudo-resources before walking the domain.
    for resource in ('peopleinvoices', 'peoplerequiredinvoices',
                     'peoplesearches', 'internal_transactions'):
        del self.domain[resource]

    for settings in self.app.config['DOMAIN'].values():
        # resource endpoint
        endpoint = '%s/%s/' % (prefix, settings['url'])
        allowed = settings['resource_methods'] + ['OPTIONS']
        self.test_CORS_OPTIONS(endpoint, allowed)
def test_CORS_OPTIONS_item(self):
    """Run the CORS OPTIONS checks against item endpoints (id and ref)."""
    prefix = api_prefix(self.app.config['URL_PREFIX'],
                        self.app.config['API_VERSION'])

    # Lookup by item id inherits the resource-level methods.
    id_url = '%s%s' % (prefix, self.item_id_url)
    self.test_CORS_OPTIONS(
        id_url,
        self.domain[self.known_resource]['resource_methods'] + ['OPTIONS'])

    # Additional (ref-based) lookup only supports read access.
    ref_url = '%s%s/%s' % (prefix, self.known_resource_url, self.item_ref)
    self.test_CORS_OPTIONS(ref_url, ['GET', 'OPTIONS'])
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._diagnostics_operations import build_execute_site_analysis_request, build_execute_site_analysis_slot_request, build_execute_site_detector_request, build_execute_site_detector_slot_request, build_get_hosting_environment_detector_response_request, build_get_site_analysis_request, build_get_site_analysis_slot_request, build_get_site_detector_request, build_get_site_detector_response_request, build_get_site_detector_response_slot_request, build_get_site_detector_slot_request, build_get_site_diagnostic_category_request, build_get_site_diagnostic_category_slot_request, build_list_hosting_environment_detector_responses_request, build_list_site_analyses_request, build_list_site_analyses_slot_request, build_list_site_detector_responses_request, build_list_site_detector_responses_slot_request, build_list_site_detectors_request, build_list_site_detectors_slot_request, build_list_site_diagnostic_categories_request, build_list_site_diagnostic_categories_slot_request
# Generic payload type returned by each operation.
T = TypeVar('T')
# Signature of the optional `cls` response hook accepted by every operation:
# (pipeline response, deserialized body, response headers) -> custom result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DiagnosticsOperations:
"""DiagnosticsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the shared pipeline client, configuration and (de)serializers."""
    self._config = config
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
@distributed_trace
def list_hosting_environment_detector_responses(
    self,
    resource_group_name: str,
    name: str,
    **kwargs: Any
) -> AsyncIterable["_models.DetectorResponseCollection"]:
    """List Hosting Environment Detector Responses.

    Description for List Hosting Environment Detector Responses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param name: Site Name.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DetectorResponseCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DetectorResponseCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponseCollection"]
    # Default HTTP-status -> exception mapping; callers may extend it via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's URL template; later pages reuse
        # the service-provided next_link as the template instead.
        if not next_link:
            request = build_list_hosting_environment_detector_responses_request(
                resource_group_name=resource_group_name,
                name=name,
                subscription_id=self._config.subscription_id,
                template_url=self.list_hosting_environment_detector_responses.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_hosting_environment_detector_responses_request(
                resource_group_name=resource_group_name,
                name=name,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Pull the current page's items and the link to the next page.
        deserialized = self._deserialize("DetectorResponseCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_hosting_environment_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors'}  # type: ignore
@distributed_trace_async
async def get_hosting_environment_detector_response(
    self,
    resource_group_name: str,
    name: str,
    detector_name: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DetectorResponse":
    """Get Hosting Environment Detector Response.

    Description for Get Hosting Environment Detector Response.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param name: App Service Environment Name.
    :type name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponse"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request from the URL template, normalise it for the async
    # pipeline, then expand the client's base-URL parameters.
    request = build_get_hosting_environment_detector_response_request(
        resource_group_name=resource_group_name,
        name=name,
        detector_name=detector_name,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.get_hosting_environment_detector_response.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Raise the mapped exception if known, else an ARM-formatted error.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DetectorResponse', pipeline_response)

    if cls:
        # Custom response hook takes precedence over the default return.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_hosting_environment_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors/{detectorName}'}  # type: ignore
@distributed_trace
def list_site_detector_responses(
    self,
    resource_group_name: str,
    site_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.DetectorResponseCollection"]:
    """List Site Detector Responses.

    Description for List Site Detector Responses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DetectorResponseCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DetectorResponseCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponseCollection"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's URL template; later pages reuse
        # the service-provided next_link.
        if not next_link:
            request = build_list_site_detector_responses_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_detector_responses.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_detector_responses_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Current page's items plus the link to the following page.
        deserialized = self._deserialize("DetectorResponseCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors'}  # type: ignore
@distributed_trace_async
async def get_site_detector_response(
    self,
    resource_group_name: str,
    site_name: str,
    detector_name: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DetectorResponse":
    """Get site detector response.

    Description for Get site detector response.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponse"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request, normalise it for the async pipeline, and expand
    # the client's base-URL parameters.
    request = build_get_site_detector_response_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        detector_name=detector_name,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.get_site_detector_response.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DetectorResponse', pipeline_response)

    if cls:
        # Custom response hook takes precedence over the default return.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_site_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors/{detectorName}'}  # type: ignore
@distributed_trace
def list_site_diagnostic_categories(
    self,
    resource_group_name: str,
    site_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.DiagnosticCategoryCollection"]:
    """Get Diagnostics Categories.

    Description for Get Diagnostics Categories.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticCategoryCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DiagnosticCategoryCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategoryCollection"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's URL template; later pages reuse
        # the service-provided next_link.
        if not next_link:
            request = build_list_site_diagnostic_categories_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_diagnostic_categories.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_diagnostic_categories_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Current page's items plus the link to the following page.
        deserialized = self._deserialize("DiagnosticCategoryCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_diagnostic_categories.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics'}  # type: ignore
@distributed_trace_async
async def get_site_diagnostic_category(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    **kwargs: Any
) -> "_models.DiagnosticCategory":
    """Get Diagnostics Category.

    Description for Get Diagnostics Category.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticCategory, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DiagnosticCategory
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategory"]
    # Translate common HTTP failures into richer azure-core exceptions;
    # callers may extend or override the mapping via `error_map`.
    error_map = {401: ClientAuthenticationError,
                 404: ResourceNotFoundError,
                 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build and normalise the HTTP request for this operation.
    req = build_get_site_diagnostic_category_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        subscription_id=self._config.subscription_id,
        template_url=self.get_site_diagnostic_category.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)

    pipe_rsp = await self._client._pipeline.run(req, stream=False, **kwargs)
    http_rsp = pipe_rsp.http_response

    # Any status other than 200 is an error for this operation.
    if http_rsp.status_code != 200:
        map_error(status_code=http_rsp.status_code, response=http_rsp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipe_rsp)
        raise HttpResponseError(response=http_rsp, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticCategory', pipe_rsp)
    # Custom response hook takes precedence over the default return.
    return cls(pipe_rsp, deserialized, {}) if cls else deserialized
get_site_diagnostic_category.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}'}  # type: ignore
@distributed_trace
def list_site_analyses(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    **kwargs: Any
) -> AsyncIterable["_models.DiagnosticAnalysisCollection"]:
    """Get Site Analyses.

    Description for Get Site Analyses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticAnalysisCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DiagnosticAnalysisCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysisCollection"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's URL template; later pages reuse
        # the service-provided next_link.
        if not next_link:
            request = build_list_site_analyses_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_analyses.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_analyses_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Current page's items plus the link to the following page.
        deserialized = self._deserialize("DiagnosticAnalysisCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_analyses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses'}  # type: ignore
@distributed_trace_async
async def get_site_analysis(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    analysis_name: str,
    **kwargs: Any
) -> "_models.AnalysisDefinition":
    """Get Site Analysis.

    Description for Get Site Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param analysis_name: Analysis Name.
    :type analysis_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AnalysisDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.AnalysisDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AnalysisDefinition"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request, normalise it for the async pipeline, and expand
    # the client's base-URL parameters.
    request = build_get_site_analysis_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        analysis_name=analysis_name,
        subscription_id=self._config.subscription_id,
        template_url=self.get_site_analysis.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('AnalysisDefinition', pipeline_response)

    if cls:
        # Custom response hook takes precedence over the default return.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}  # type: ignore
@distributed_trace_async
async def execute_site_analysis(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    analysis_name: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DiagnosticAnalysis":
    """Execute Analysis.

    Description for Execute Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param analysis_name: Analysis Resource Name.
    :type analysis_name: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticAnalysis, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DiagnosticAnalysis
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysis"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request, normalise it for the async pipeline, and expand
    # the client's base-URL parameters.
    request = build_execute_site_analysis_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        analysis_name=analysis_name,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.execute_site_analysis.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticAnalysis', pipeline_response)

    if cls:
        # Custom response hook takes precedence over the default return.
        return cls(pipeline_response, deserialized, {})

    return deserialized
execute_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}  # type: ignore
@distributed_trace
def list_site_detectors(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    **kwargs: Any
) -> AsyncIterable["_models.DiagnosticDetectorCollection"]:
    """Get Detectors.

    Description for Get Detectors.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticDetectorCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DiagnosticDetectorCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorCollection"]
    # Default HTTP-status -> exception mapping; extendable via `error_map`.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's URL template; later pages reuse
        # the service-provided next_link.
        if not next_link:
            request = build_list_site_detectors_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_detectors.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_detectors_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Current page's items plus the link to the following page.
        deserialized = self._deserialize("DiagnosticDetectorCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is raised as an error.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_detectors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors'}  # type: ignore
@distributed_trace_async
async def get_site_detector(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    detector_name: str,
    **kwargs: Any
) -> "_models.DetectorDefinition":
    """Get Detector.

    Description for Get Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param detector_name: Detector Name.
    :type detector_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DetectorDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorDefinition"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the GET request from the operation's URL template and normalize it.
    request = build_get_site_detector_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        detector_name=detector_name,
        subscription_id=self._config.subscription_id,
        template_url=self.get_site_detector.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DetectorDefinition', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

get_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}  # type: ignore
@distributed_trace_async
async def execute_site_detector(
    self,
    resource_group_name: str,
    site_name: str,
    detector_name: str,
    diagnostic_category: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DiagnosticDetectorResponse":
    """Execute Detector.

    Description for Execute Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticDetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DiagnosticDetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorResponse"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request for the '/execute' endpoint, including the optional
    # time-window query parameters, then normalize the URL.
    request = build_execute_site_detector_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        detector_name=detector_name,
        diagnostic_category=diagnostic_category,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.execute_site_detector.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DiagnosticDetectorResponse', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

execute_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}  # type: ignore
@distributed_trace
def list_site_detector_responses_slot(
    self,
    resource_group_name: str,
    site_name: str,
    slot: str,
    **kwargs: Any
) -> AsyncIterable["_models.DetectorResponseCollection"]:
    """List Site Detector Responses.

    Description for List Site Detector Responses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DetectorResponseCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DetectorResponseCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponseCollection"]
    # Default ARM status-code -> exception mapping; extendable via the
    # 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: build from the operation's URL template. Later pages:
        # reuse the service-provided next_link verbatim, forced to GET.
        if not next_link:
            request = build_list_site_detector_responses_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_detector_responses_slot.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_detector_responses_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, page items).
        deserialized = self._deserialize("DetectorResponseCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; any non-200 status is raised as an ARM-formatted error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_detector_responses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors'}  # type: ignore
@distributed_trace_async
async def get_site_detector_response_slot(
    self,
    resource_group_name: str,
    site_name: str,
    detector_name: str,
    slot: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DetectorResponse":
    """Get site detector response.

    Description for Get site detector response.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param slot: Slot Name.
    :type slot: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponse"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the slot-scoped detector request, including the optional
    # time-window query parameters, then normalize the URL.
    request = build_get_site_detector_response_slot_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        detector_name=detector_name,
        slot=slot,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.get_site_detector_response_slot.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DetectorResponse', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

get_site_detector_response_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors/{detectorName}'}  # type: ignore
@distributed_trace
def list_site_diagnostic_categories_slot(
    self,
    resource_group_name: str,
    site_name: str,
    slot: str,
    **kwargs: Any
) -> AsyncIterable["_models.DiagnosticCategoryCollection"]:
    """Get Diagnostics Categories.

    Description for Get Diagnostics Categories.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticCategoryCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DiagnosticCategoryCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategoryCollection"]
    # Default ARM status-code -> exception mapping; extendable via the
    # 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: build from the operation's URL template. Later pages:
        # reuse the service-provided next_link verbatim, forced to GET.
        if not next_link:
            request = build_list_site_diagnostic_categories_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_diagnostic_categories_slot.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_diagnostic_categories_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, page items).
        deserialized = self._deserialize("DiagnosticCategoryCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; any non-200 status is raised as an ARM-formatted error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_diagnostic_categories_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics'}  # type: ignore
@distributed_trace_async
async def get_site_diagnostic_category_slot(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    slot: str,
    **kwargs: Any
) -> "_models.DiagnosticCategory":
    """Get Diagnostics Category.

    Description for Get Diagnostics Category.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticCategory, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DiagnosticCategory
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategory"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the GET request from the operation's URL template and normalize it.
    request = build_get_site_diagnostic_category_slot_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        slot=slot,
        subscription_id=self._config.subscription_id,
        template_url=self.get_site_diagnostic_category_slot.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DiagnosticCategory', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

get_site_diagnostic_category_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}'}  # type: ignore
@distributed_trace
def list_site_analyses_slot(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    slot: str,
    **kwargs: Any
) -> AsyncIterable["_models.DiagnosticAnalysisCollection"]:
    """Get Site Analyses.

    Description for Get Site Analyses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticAnalysisCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DiagnosticAnalysisCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysisCollection"]
    # Default ARM status-code -> exception mapping; extendable via the
    # 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: build from the operation's URL template. Later pages:
        # reuse the service-provided next_link verbatim, forced to GET.
        if not next_link:
            request = build_list_site_analyses_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_analyses_slot.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_analyses_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, page items).
        deserialized = self._deserialize("DiagnosticAnalysisCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; any non-200 status is raised as an ARM-formatted error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_analyses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses'}  # type: ignore
@distributed_trace_async
async def get_site_analysis_slot(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    analysis_name: str,
    slot: str,
    **kwargs: Any
) -> "_models.AnalysisDefinition":
    """Get Site Analysis.

    Description for Get Site Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param analysis_name: Analysis Name.
    :type analysis_name: str
    :param slot: Slot - optional.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AnalysisDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.AnalysisDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AnalysisDefinition"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the GET request from the operation's URL template and normalize it.
    request = build_get_site_analysis_slot_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        analysis_name=analysis_name,
        slot=slot,
        subscription_id=self._config.subscription_id,
        template_url=self.get_site_analysis_slot.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('AnalysisDefinition', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

get_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}  # type: ignore
@distributed_trace_async
async def execute_site_analysis_slot(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    analysis_name: str,
    slot: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DiagnosticAnalysis":
    """Execute Analysis.

    Description for Execute Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param analysis_name: Analysis Resource Name.
    :type analysis_name: str
    :param slot: Slot Name.
    :type slot: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticAnalysis, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DiagnosticAnalysis
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysis"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request for the '/execute' endpoint, including the optional
    # time-window query parameters, then normalize the URL.
    request = build_execute_site_analysis_slot_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        analysis_name=analysis_name,
        slot=slot,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.execute_site_analysis_slot.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DiagnosticAnalysis', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

execute_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}  # type: ignore
@distributed_trace
def list_site_detectors_slot(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    slot: str,
    **kwargs: Any
) -> AsyncIterable["_models.DiagnosticDetectorCollection"]:
    """Get Detectors.

    Description for Get Detectors.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticDetectorCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DiagnosticDetectorCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorCollection"]
    # Default ARM status-code -> exception mapping; extendable via the
    # 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: build from the operation's URL template. Later pages:
        # reuse the service-provided next_link verbatim, forced to GET.
        if not next_link:
            request = build_list_site_detectors_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=self.list_site_detectors_slot.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_site_detectors_slot_request(
                resource_group_name=resource_group_name,
                site_name=site_name,
                diagnostic_category=diagnostic_category,
                slot=slot,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, page items).
        deserialized = self._deserialize("DiagnosticDetectorCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; any non-200 status is raised as an ARM-formatted error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_site_detectors_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors'}  # type: ignore
@distributed_trace_async
async def get_site_detector_slot(
    self,
    resource_group_name: str,
    site_name: str,
    diagnostic_category: str,
    detector_name: str,
    slot: str,
    **kwargs: Any
) -> "_models.DetectorDefinition":
    """Get Detector.

    Description for Get Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param detector_name: Detector Name.
    :type detector_name: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DetectorDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorDefinition"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the GET request from the operation's URL template and normalize it.
    request = build_get_site_detector_slot_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        diagnostic_category=diagnostic_category,
        detector_name=detector_name,
        slot=slot,
        subscription_id=self._config.subscription_id,
        template_url=self.get_site_detector_slot.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DetectorDefinition', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

get_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}  # type: ignore
@distributed_trace_async
async def execute_site_detector_slot(
    self,
    resource_group_name: str,
    site_name: str,
    detector_name: str,
    diagnostic_category: str,
    slot: str,
    start_time: Optional[datetime.datetime] = None,
    end_time: Optional[datetime.datetime] = None,
    time_grain: Optional[str] = None,
    **kwargs: Any
) -> "_models.DiagnosticDetectorResponse":
    """Execute Detector.

    Description for Execute Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticDetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_06_01.models.DiagnosticDetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorResponse"]
    # Standard ARM status-code mapping; callers may extend it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Build the request for the '/execute' endpoint, including the optional
    # time-window query parameters, then normalize the URL.
    request = build_execute_site_detector_slot_request(
        resource_group_name=resource_group_name,
        site_name=site_name,
        detector_name=detector_name,
        diagnostic_category=diagnostic_category,
        slot=slot,
        subscription_id=self._config.subscription_id,
        start_time=start_time,
        end_time=end_time,
        time_grain=time_grain,
        template_url=self.execute_site_detector_slot.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known codes to typed exceptions, then raise ARM-formatted.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('DiagnosticDetectorResponse', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

execute_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}  # type: ignore
| |
import json
import pytest
import demistomock as demisto
from CommonServerPython import DemistoException
# Canned integration instance configuration returned by the mocked
# ``demisto.params()`` (see the autouse ``set_params`` fixture below).
integration_params = {
    'port': '443',
    'vsys': 'vsys1',
    'server': 'https://1.1.1.1',
    'key': 'thisisabogusAPIKEY!',
}
# Canned command arguments returned by the mocked ``demisto.args()``.
mock_demisto_args = {
    'threat_id': "11111",
    'vulnerability_profile': "mock_vuln_profile"
}
@pytest.fixture(autouse=True)
def set_params(mocker):
    """Patch demisto.params()/args() with the canned instance data for every test."""
    mocker.patch.object(demisto, 'args', return_value=mock_demisto_args)
    mocker.patch.object(demisto, 'params', return_value=integration_params)
@pytest.fixture
def patched_requests_mocker(requests_mock):
    """Register canned PAN-OS API responses so commands can run against a fake server."""
    api_root = "{}:{}/api/".format(integration_params['server'], integration_params['port'])

    # Canned version response used by the version-detection GET request.
    version_xml = """
    <response status = "success">
        <result>
            <sw-version>9.0.6</sw-version>
            <multi-vsys>off</multi-vsys>
            <model>Panorama</model>
            <serial>FAKESERIALNUMBER</serial>
        </result>
    </response>
    """
    version_url = "{}{}{}".format(api_root, "?type=version&key=", integration_params['key'])
    requests_mock.get(version_url, text=version_xml, status_code=200)

    # Generic "command succeeded" response for every other POST.
    success_xml = """
    <response status="success" code="20">
        <msg>command succeeded</msg>
    </response>
    """
    requests_mock.post(api_root, text=success_xml, status_code=200)
    return requests_mock
def test_panorama_get_os_version(patched_requests_mocker):
    """The PAN-OS version should be parsed out of the mocked version response."""
    import Panorama
    from Panorama import get_pan_os_version

    Panorama.API_KEY = 'thisisabogusAPIKEY!'
    Panorama.URL = 'https://1.1.1.1:443/api/'
    assert get_pan_os_version() == '9.0.6'
def test_panorama_override_vulnerability(patched_requests_mocker):
    """Overriding a vulnerability against the mocked API should report success."""
    import Panorama
    from Panorama import panorama_override_vulnerability

    Panorama.URL = 'https://1.1.1.1:443/api/'
    result = panorama_override_vulnerability(
        mock_demisto_args['threat_id'], mock_demisto_args['vulnerability_profile'], 'reset-both')
    assert result['response']['@status'] == 'success'
def test_add_argument_list():
from Panorama import add_argument_list
list_argument = ["foo", "bar"]
response_with_member = add_argument_list(list_argument, "test", True)
expected_with_member = '<test><member>foo</member><member>bar</member></test>'
assert response_with_member == expected_with_member
response_with_member_field_name = add_argument_list(list_argument, "member", True)
expected_with_member_field_name = '<member>foo</member><member>bar</member>'
assert response_with_member_field_name == expected_with_member_field_name
def test_add_argument():
from Panorama import add_argument
argument = "foo"
response_with_member = add_argument(argument, "test", True)
expected_with_member = '<test><member>foo</member></test>'
assert response_with_member == expected_with_member
response_without_member = add_argument(argument, "test", False)
expected_without_member = '<test>foo</test>'
assert response_without_member == expected_without_member
def test_add_argument_yes_no():
from Panorama import add_argument_yes_no
arg = 'No'
field = 'test'
option = True
response_option_true = add_argument_yes_no(arg, field, option)
expected_option_true = '<option><test>no</test></option>'
assert response_option_true == expected_option_true
option = False
response_option_false = add_argument_yes_no(arg, field, option)
expected_option_false = '<test>no</test>'
assert response_option_false == expected_option_false
def test_add_argument_target():
    """A target serial renders as a devices entry nested under the given field."""
    from Panorama import add_argument_target

    assert add_argument_target('foo', 'bar') == '<bar><devices><entry name="foo"/></devices></bar>'
def test_prettify_addresses_arr():
    """Address entries are prettified into readable keys; None tags are dropped."""
    from Panorama import prettify_addresses_arr

    raw = [{'@name': 'my_name', 'fqdn': 'a.com'},
           {'@name': 'my_name2', 'fqdn': 'b.com'},
           {'@name': 'test', 'ip-netmask': '1.1.1.1', 'tag': None}]
    assert prettify_addresses_arr(raw) == [
        {'Name': 'my_name', 'FQDN': 'a.com'},
        {'Name': 'my_name2', 'FQDN': 'b.com'},
        {'Name': 'test', 'IP_Netmask': '1.1.1.1'},
    ]
def test_prettify_address():
    """Address keys are capitalized; ip-netmask becomes IP_Netmask."""
    from Panorama import prettify_address

    raw = {'@name': 'my_name', 'ip-netmask': '1.1.1.1', 'description': 'lala'}
    assert prettify_address(raw) == {'Name': 'my_name', 'IP_Netmask': '1.1.1.1', 'Description': 'lala'}
def test_prettify_address_tag_none():
    """A None tag must be omitted from the prettified address."""
    from Panorama import prettify_address

    raw = {'@name': 'test', 'ip-netmask': '1.1.1.1', 'tag': None}
    assert prettify_address(raw) == {'Name': 'test', 'IP_Netmask': '1.1.1.1'}
def test_prettify_address_group():
    """Static groups expose Addresses; dynamic groups expose Match; None tags are dropped."""
    from Panorama import prettify_address_group

    static_group = {'@name': 'foo', 'static': {'member': 'address object'}}
    assert prettify_address_group(static_group) == {'Name': 'foo', 'Type': 'static',
                                                    'Addresses': 'address object'}

    dynamic_group = {'@name': 'foo', 'dynamic': {'filter': '1.1.1.1 and 2.2.2.2'}}
    assert prettify_address_group(dynamic_group) == {'Name': 'foo', 'Type': 'dynamic',
                                                     'Match': '1.1.1.1 and 2.2.2.2'}

    dynamic_with_none_tag = {'@name': 'foo', 'dynamic': {'filter': '1.1.1.1 or 2.2.2.2'}, 'tag': None}
    assert prettify_address_group(dynamic_with_none_tag) == {'Name': 'foo', 'Type': 'dynamic',
                                                             'Match': '1.1.1.1 or 2.2.2.2'}
def test_prettify_service():
    """Protocol name and destination port are lifted out of the nested protocol dict."""
    from Panorama import prettify_service

    raw = {'@name': 'service_name', 'description': 'foo', 'protocol': {'tcp': {'port': '443'}}}
    assert prettify_service(raw) == {'Name': 'service_name', 'Description': 'foo', 'Protocol': 'tcp',
                                     'DestinationPort': '443'}
def test_prettify_service_tag_none():
    """A None tag must be omitted from the prettified service."""
    from Panorama import prettify_service

    raw = {'@name': 'service_name', 'description': 'foo', 'protocol': {'tcp': {'port': '443'}}, 'tag': None}
    assert prettify_service(raw) == {'Name': 'service_name', 'Description': 'foo', 'Protocol': 'tcp',
                                     'DestinationPort': '443'}
def test_prettify_service_group():
    """Group members are exposed under the Services key."""
    from Panorama import prettify_service_group

    raw = {'@name': 'sg', 'members': {'member': ['service1', 'service2']}}
    assert prettify_service_group(raw) == {'Name': 'sg', 'Services': ['service1', 'service2']}
def test_prettify_service_group_tag_none():
    """A None tag must be omitted from the prettified service group."""
    from Panorama import prettify_service_group

    raw = {'@name': 'sg_group', 'members': {'member': ['service1', 'service2']}, 'tag': None}
    assert prettify_service_group(raw) == {'Name': 'sg_group', 'Services': ['service1', 'service2']}
def test_prettify_custom_url_category():
    """Category list members are exposed under the Sites key."""
    from Panorama import prettify_custom_url_category

    raw = {'@name': 'foo', 'list': {'member': ['a', 'b', 'c']}}
    assert prettify_custom_url_category(raw) == {'Name': 'foo', 'Sites': ['a', 'b', 'c']}
def test_panorama_create_custom_url_category_8_x(mocker):
    """
    Given: a 9.x-only argument (type) for custom url category creation
    When: running panorama_create_custom_url_category with PAN-OS mocked to 8.x
    Then: a descriptive DemistoException is raised
    """
    from Panorama import panorama_create_custom_url_category
    mocker.patch('Panorama.get_pan_os_major_version', return_value=8)

    with pytest.raises(DemistoException,
                       match='The type and categories arguments are only relevant for PAN-OS 9.x versions.'):
        panorama_create_custom_url_category('name', type_='URL List', description='test_desc')
def test_panorama_create_custom_url_category_9_x(mocker):
    """
    Given: invalid argument combinations for custom url category creation
    When: running panorama_create_custom_url_category with PAN-OS mocked to 9.x
    Then: a descriptive DemistoException is raised for each combination
    """
    from Panorama import panorama_create_custom_url_category
    mocker.patch('Panorama.get_pan_os_major_version', return_value=9)

    name = 'name'
    # Missing type argument.
    with pytest.raises(DemistoException,
                       match='The type argument is mandatory for PAN-OS 9.x versions.'):
        panorama_create_custom_url_category(name, sites='a.com', description='test_desc')
    # Both sites and categories supplied.
    with pytest.raises(DemistoException,
                       match='Exactly one of the sites and categories arguments should be defined.'):
        panorama_create_custom_url_category(name, type_='URL List', sites='a.com', categories='phishing')
    # Type/content mismatch.
    with pytest.raises(DemistoException,
                       match='URL List type is only for sites, Category Match is only for categories.'):
        panorama_create_custom_url_category(name, type_='URL List', categories='phishing')
def test_create_url_filter_params_8_x(mocker):
    """
    Given: valid url filter creation arguments
    When: running create_url_filter_params with PAN-OS mocked to 8.x
    Then: the generated element contains the per-category <action>block</action> tag
    """
    from Panorama import create_url_filter_params
    mocker.patch('Panorama.get_pan_os_major_version', return_value=8)

    params = create_url_filter_params('name', 'alert', url_category_list='adult',
                                      description='test_desc')
    # str.find returns -1 only when the substring is absent.
    assert params['element'].find('<action>block</action>') != -1
def test_create_url_filter_params_9_x(mocker):
    """
    Given: valid url filter creation arguments
    When: running create_url_filter_params with PAN-OS mocked to 9.x
    Then: the generated element must NOT contain the per-category <action>block</action> tag
    """
    from Panorama import create_url_filter_params
    mocker.patch('Panorama.get_pan_os_major_version', return_value=9)

    params = create_url_filter_params('name', 'alert', url_category_list='adult',
                                      description='test_desc')
    # str.find returns -1 only when the substring is absent.
    assert params['element'].find('<action>block</action>') == -1
def test_edit_url_filter_non_valid_args_8_x(mocker):
    """
    Given:
        - an element_to_change (allow_categories) that is not editable on PAN-OS 8.x
    When:
        - running panorama_edit_url_filter with the PAN-OS version mocked to 8
    Then:
        - a DemistoException listing the editable 8.x properties is raised
    """
    from Panorama import panorama_edit_url_filter
    # URL filter object shaped as PAN-OS 8.x returns it: category allow/block
    # lists plus override allow-list/block-list site entries.
    url_filter_object = {
        "@name": "fw_test_pb_dont_delete",
        "action": "block",
        "allow": {
            "member": [
                "Demisto- block sites",
                "test3"
            ]
        },
        "allow-list": {
            "member": "www.thepill2.com"
        },
        "block": {
            "member": [
                "abortion",
                "abused-drugs"
            ]
        },
        "block-list": {
            "member": "www.thepill.com"
        },
        "credential-enforcement": {
            "allow": {
                "member": [
                    "Demisto- block sites",
                    "test3"
                ]
            },
            "block": {
                "member": [
                    "abortion",
                    "abused-drugs"
                ]
            },
            "log-severity": "medium",
        },
        "description": "gogo"
    }
    mocker.patch('Panorama.get_pan_os_major_version', return_value=8)
    mocker.patch('Panorama.panorama_get_url_filter', return_value=url_filter_object)
    url_filter_name = 'fw_test_pb_dont_delete'
    # 'allow_categories' is only editable on PAN-OS 9.x and later.
    element_to_change = 'allow_categories'
    element_value = 'gambling'
    add_remove_element = 'remove'
    err_msg = 'Only the override_allow_list, override_block_list, description properties can be' \
              ' changed in PAN-OS 8.x or earlier versions.'
    with pytest.raises(DemistoException, match=err_msg):
        panorama_edit_url_filter(url_filter_name, element_to_change, element_value, add_remove_element)
def test_edit_url_filter_non_valid_args_9_x(mocker):
    """
    Given:
        - an element_to_change (override_block_list) that is not editable on PAN-OS 9.x
    When:
        - running panorama_edit_url_filter with the PAN-OS version mocked to 9
    Then:
        - a DemistoException listing the editable 9.x properties is raised
    """
    from Panorama import panorama_edit_url_filter
    # URL filter object shaped as PAN-OS 9.x returns it (no override lists).
    url_filter_object = {
        "@name": "fw_test_pb_dont_delete",
        "allow": {
            "member": "Test_pb_custom_url_DONT_DELETE"
        },
        "credential-enforcement": {
            "block": {
                "member": [
                    "gambling",
                    "abortion"
                ]
            },
            "log-severity": "medium",
        },
        "description": "wowo"
    }
    mocker.patch('Panorama.get_pan_os_major_version', return_value=9)
    mocker.patch('Panorama.panorama_get_url_filter', return_value=url_filter_object)
    url_filter_name = 'fw_test_pb_dont_delete'
    # 'override_block_list' was removed in PAN-OS 9.x.
    element_to_change = 'override_block_list'
    element_value = 'gambling'
    add_remove_element = 'remove'
    err_msg = 'Only the allow_categories, block_categories, description properties can be changed in PAN-OS 9.x or' \
              ' later versions.'
    with pytest.raises(DemistoException, match=err_msg):
        panorama_edit_url_filter(url_filter_name, element_to_change, element_value, add_remove_element)
def http_mock(url: str, method: str, body: dict = None):
    """Side-effect stand-in for ``Panorama.http_request``: echoes the request body.

    Args:
        url: Request URL (ignored).
        method: HTTP method (ignored).
        body: Request body; returned unchanged as the fake API response.

    Returns:
        dict: the given body, or an empty dict when no body was passed.
    """
    # The original used a mutable default argument (body: dict = {}), a classic
    # Python pitfall (the dict is shared across calls). Use None as the default
    # and return a fresh dict instead — behavior for callers is unchanged.
    return {} if body is None else body
@pytest.mark.parametrize('category_name, items', [('category_name', ['www.good.com'],)])
def test_remove_from_custom_url_category(category_name, items, mocker):
    """
    Given: a valid category name and items to remove
    When: running panorama_custom_url_category_remove_items
    Then: the remaining member stays in the element sent to the API
    """
    import Panorama
    from Panorama import panorama_custom_url_category_remove_items

    mocker.patch('Panorama.get_pan_os_major_version', return_value=9)
    mocker.patch('Panorama.http_request', side_effect=http_mock)
    mocker.patch('Panorama.panorama_get_custom_url_category',
                 return_value={'description': 'description',
                               'list': {'member': "www.test.com"}
                               })
    results_mock = mocker.patch.object(Panorama, 'return_results')

    panorama_custom_url_category_remove_items(category_name, items, "URL List")

    contents = results_mock.call_args.args[0]['Contents']
    assert "www.test.com" in contents['element']
def test_prettify_edl():
    """EDL entries expose Name/Type/URL/Description from the nested type dict."""
    from Panorama import prettify_edl

    raw_edl = {'@name': 'edl_name', 'type': {'my_type': {'url': 'abc.com', 'description': 'my_desc'}}}
    assert prettify_edl(raw_edl) == {'Name': 'edl_name', 'Type': 'my_type', 'URL': 'abc.com',
                                     'Description': 'my_desc'}
def test_build_traffic_logs_query():
    """
    Given: source, application, destination port and action filters
    When: running build_traffic_logs_query
    Then: the filters are joined with 'and' in the expected order
    """
    from Panorama import build_traffic_logs_query

    query = build_traffic_logs_query('192.168.1.222', None, None, 'netbios-dg', '138', 'allow')
    assert query == '(addr.src in 192.168.1.222) and (app eq netbios-dg) and (port.dst eq 138) and (action eq allow)'
def test_prettify_traffic_logs():
    """Traffic log keys are capitalized in the prettified output."""
    from Panorama import prettify_traffic_logs

    logs = [{'action': 'my_action1', 'category': 'my_category1', 'rule': 'my_rule1'},
            {'action': 'my_action2', 'category': 'my_category2', 'rule': 'my_rule2'}]
    assert prettify_traffic_logs(logs) == [
        {'Action': 'my_action1', 'Category': 'my_category1', 'Rule': 'my_rule1'},
        {'Action': 'my_action2', 'Category': 'my_category2', 'Rule': 'my_rule2'},
    ]
def test_build_logs_query():
    """
    Given: a comma-separated list of URLs
    When: running build_logs_query
    Then: the query ORs together one "url contains" clause per URL
    """
    from Panorama import build_logs_query

    query = build_logs_query(None, None, None, None, None, None, None, None, None,
                             "demisto.com, paloaltonetworks.com", None)
    assert query == "((url contains 'demisto.com') or (url contains 'paloaltonetworks.com'))"
def test_prettify_logs():
    """Log entries are mapped to readable keys, including NAT port and byte counters."""
    from Panorama import prettify_logs

    logs = [{'action': 'my_action1', 'category': 'my_category1', 'rule': 'my_rule1', 'natdport': '100',
             'bytes': '12'},
            {'action': 'my_action2', 'category': 'my_category2', 'rule': 'my_rule2', 'natdport': '101',
             'bytes_sent': '11'}]
    assert prettify_logs(logs) == [
        {'Action': 'my_action1', 'CategoryOrVerdict': 'my_category1', 'Rule': 'my_rule1',
         'NATDestinationPort': '100', 'Bytes': '12'},
        {'Action': 'my_action2', 'CategoryOrVerdict': 'my_category2', 'Rule': 'my_rule2',
         'NATDestinationPort': '101', 'BytesSent': '11'},
    ]
# (where, dst) pairs that must be rejected: a 'dst' rule reference is only
# valid when where is 'before' or 'after'.
prepare_security_rule_inputs = [
    ('top', 'test_rule_name'),
    ('bottom', 'test_rule_name'),
]
@pytest.mark.parametrize('where, dst', prepare_security_rule_inputs)
def test_prepare_security_rule_params(where, dst):
    """
    Given: a dst argument together with where='top'/'bottom'
    When: running prepare_security_rule_params
    Then: a DemistoException is raised
    """
    from Panorama import prepare_security_rule_params

    with pytest.raises(DemistoException,
                       match='Please provide a dst rule only when the where argument is before or after.'):
        prepare_security_rule_params(api_action='set', action='drop', destination=['any'], source=['any'],
                                     rulename='test', where=where, dst=dst)
def test_build_policy_match_query():
"""
Given:
- a valid arguments for policy match query generation
When:
- running the build_policy_match_query utility function
Then:
- a proper xml is generated
"""
from Panorama import build_policy_match_query
source = '1.1.1.1'
destination = '6.7.8.9'
protocol = '1'
application = 'gmail-base'
response = build_policy_match_query(application, None, destination, None, None, None, protocol, source)
expected = '<test><security-policy-match><source>1.1.1.1</source><destination>6.7.8.9</destination>' \
'<protocol>1</protocol><application>gmail-base</application></security-policy-match></test>'
assert response == expected
def test_panorama_register_ip_tag_command_wrongful_args(mocker):
    """
    Given: a timeout combined with persistent=true, or a timeout on PAN-OS 8.x
    When: running panorama_register_ip_tag_command
    Then: a descriptive DemistoException is raised in both cases
    """
    from Panorama import panorama_register_ip_tag_command

    command_args = {'IPs': '1.1.1.1', 'tag': 'test_tag', 'persistent': 'true', 'timeout': '5'}
    mocker.patch('Panorama.get_pan_os_major_version', return_value=9)
    with pytest.raises(DemistoException,
                       match='When the persistent argument is true, you can not use the timeout argument.'):
        panorama_register_ip_tag_command(command_args)

    command_args['persistent'] = 'false'
    mocker.patch('Panorama.get_pan_os_major_version', return_value=8)
    with pytest.raises(DemistoException,
                       match='The timeout argument is only applicable on 9.x PAN-OS versions or higher.'):
        panorama_register_ip_tag_command(command_args)
def test_prettify_matching_rule():
    """Matching-rule keys are capitalized and @name becomes Name."""
    from Panorama import prettify_matching_rule

    rule = {'action': 'my_action1', '@name': 'very_important_rule', 'source': '6.7.8.9', 'destination': 'any'}
    assert prettify_matching_rule(rule) == {'Action': 'my_action1', 'Name': 'very_important_rule',
                                            'Source': '6.7.8.9', 'Destination': 'any'}
def test_prettify_static_route():
    """Static routes are flattened (metric to int, nexthop unwrapped) and tagged with the router name."""
    from Panorama import prettify_static_route

    route = {'@name': 'name1', 'destination': '1.2.3.4', 'metric': '10', 'nexthop': {'fqdn': 'demisto.com'}}
    assert prettify_static_route(route, 'my_virtual_router') == {
        'Name': 'name1', 'Destination': '1.2.3.4', 'Metric': 10,
        'NextHop': 'demisto.com', 'VirtualRouter': 'my_virtual_router'}
def test_validate_search_time():
    """validate_search_time accepts YYYY/MM/DD[ HH:MM:SS] and rejects malformed dates.

    Fix: the original placed two statements inside a single ``pytest.raises``
    block, so the second invalid input ('219/10/35') was never executed — the
    first statement raised and exited the block. Each invalid input now gets
    its own ``pytest.raises`` block so both are actually exercised.
    """
    from Panorama import validate_search_time
    assert validate_search_time('2019/12/26')
    assert validate_search_time('2019/12/26 00:00:00')
    with pytest.raises(Exception):
        validate_search_time('219/12/26 00:00:00')
    with pytest.raises(Exception):
        validate_search_time('219/10/35')
def test_show_user_id_interface_config_command():
    """
    Given: neither template nor template_stack arguments
    When: running show_user_id_interface_config_command
    Then: a descriptive DemistoException is raised
    """
    from Panorama import show_user_id_interface_config_command

    expected_error = 'In order to show the User Interface configuration in your Panorama, ' \
                     'supply either the template or the template_stack arguments.'
    with pytest.raises(DemistoException, match=expected_error):
        show_user_id_interface_config_command({})
def test_prettify_user_interface_config():
    """Interfaces are listed by member name and zone; a missing user-id flag defaults to 'no'."""
    from Panorama import prettify_user_interface_config

    raw_response = [{'@name': 'internal', 'network': {'layer3': {'member': 'ethernet1/2'},
                                                      'log-setting': 'ToLoggingService'},
                     'enable-user-identification': 'yes'},
                    {'@name': 'External', 'network': {'tap': {'member': 'ethernet1/1'},
                                                      'log-setting': 'ToLoggingService'}}]
    assert prettify_user_interface_config(raw_response) == [
        {'Name': 'ethernet1/2', 'Zone': 'internal', 'EnableUserIdentification': 'yes'},
        {'Name': 'ethernet1/1', 'Zone': 'External', 'EnableUserIdentification': 'no'},
    ]
def test_list_configured_user_id_agents_command(mocker):
    """
    Given: neither template nor template_stack arguments
    When: running list_configured_user_id_agents_command on PAN-OS 9.x
    Then: a descriptive DemistoException is raised
    """
    from Panorama import list_configured_user_id_agents_command

    mocker.patch('Panorama.get_pan_os_major_version', return_value=9)
    # NOTE: the doubled "the the" matches the actual error text raised by Panorama.
    expected_error = 'In order to show the the User ID Agents in your Panorama, ' \
                     'supply either the template or the template_stack arguments.'
    with pytest.raises(DemistoException, match=expected_error):
        list_configured_user_id_agents_command({})
def test_prettify_configured_user_id_agents__multi_result():
    """A list of agent entries is prettified; absent fields become None or 'no'."""
    from Panorama import prettify_configured_user_id_agents
    raw_response = [{'@name': 'testing2', 'serial-number': 'panorama2'},
                    {'@name': 'fullinfo', 'host-port': {'port': '67', 'ntlm-auth': 'yes',
                                                        'ldap-proxy': 'yes', 'collectorname': 'demisto',
                                                        'secret': 'secret', 'host': 'what'}, 'ip-user-mappings': 'yes'}]
    response = prettify_configured_user_id_agents(raw_response)
    expected = [{'Name': 'testing2', 'Host': None, 'Port': None, 'NtlmAuth': 'no', 'LdapProxy': 'no',
                 'CollectorName': None, 'Secret': None, 'EnableHipCollection': 'no', 'SerialNumber': 'panorama2',
                 'IpUserMapping': 'no', 'Disabled': 'no'},
                {'Name': 'fullinfo', 'Host': 'what', 'Port': '67', 'NtlmAuth': 'yes', 'LdapProxy': 'yes',
                 'CollectorName': 'demisto', 'Secret': 'secret', 'EnableHipCollection': 'no', 'SerialNumber': None,
                 'IpUserMapping': 'yes', 'Disabled': 'no'}]
    assert response == expected
def test_prettify_configured_user_id_agents__single_result():
    """A single (non-list) agent entry is prettified the same way as a list element."""
    from Panorama import prettify_configured_user_id_agents
    raw_response = {'@name': 'fullinfo', 'host-port': {'port': '67', 'ntlm-auth': 'yes',
                                                       'ldap-proxy': 'yes', 'collectorname': 'demisto',
                                                       'secret': 'secret', 'host': 'what'}, 'ip-user-mappings': 'yes'}
    response = prettify_configured_user_id_agents(raw_response)
    expected = {'Name': 'fullinfo', 'Host': 'what', 'Port': '67', 'NtlmAuth': 'yes', 'LdapProxy': 'yes',
                'CollectorName': 'demisto', 'Secret': 'secret', 'EnableHipCollection': 'no', 'SerialNumber': None,
                'IpUserMapping': 'yes', 'Disabled': 'no'}
    assert response == expected
def test_prettify_rule():
    """prettify_rule output should match the stored expected fixture.

    Fix: the original assigned the result to a local named ``prettify_rule``,
    shadowing the just-imported function. Harmless here, but confusing and a
    bug waiting to happen if a second call were added — renamed the local.
    """
    from Panorama import prettify_rule
    with open("test_data/rule.json") as f:
        rule = json.load(f)
    with open("test_data/prettify_rule.json") as f:
        expected_prettify_rule = json.load(f)
    prettified = prettify_rule(rule)
    assert prettified == expected_prettify_rule
class TestPanoramaEditRuleCommand:
    """Tests for panorama_edit_rule_command."""

    # Canonical successful 'edit' API response, shared fixture data.
    EDIT_SUCCESS_RESPONSE = {'response': {'@status': 'success', '@code': '20', 'msg': 'command succeeded'}}

    @staticmethod
    def test_sanity(mocker):
        """Adding members to a committed rule completes without raising."""
        import Panorama
        args = {
            'rulename': 'TestRule',
            'element_to_change': 'source',
            'element_value': '2.3.4.5,3.3.3.3',
            'behaviour': 'add',
        }
        # API 'get' response for a rule whose changes are already committed:
        # members are a plain list with no @dirtyId markers.
        commited_rule_item = {
            'response': {
                '@status': 'success',
                '@code': '19',
                'result': {
                    '@total-count': '1',
                    '@count': '1',
                    'source': {
                        'member': ['1.1.1.1', '3.3.3.3', '2.3.4.5'],
                    }
                }
            }
        }
        mocker.patch('Panorama.http_request', return_value=commited_rule_item)
        Panorama.panorama_edit_rule_command(args)

    @staticmethod
    def test_add_to_element_on_uncommited_rule(mocker):
        """Editing a rule with uncommitted changes (@dirtyId markers present) must raise."""
        import Panorama
        args = {
            'rulename': 'TestRule',
            'element_to_change': 'source',
            'element_value': '2.3.4.5',
            'behaviour': 'add',
        }
        # API 'get' response for a rule with pending (uncommitted) edits:
        # PAN-OS annotates the element with @admin/@dirtyId/@time attributes.
        uncommited_rule_item = {
            'response': {
                '@status': 'success',
                '@code': '19',
                'result': {
                    '@total-count': '1',
                    '@count': '1',
                    'source': {
                        '@admin': 'admin',
                        '@dirtyId': '1616',
                        '@time': '2021/11/27 10:55:18',
                        'member': {
                            '@admin': 'admin',
                            '@dirtyId': '1616',
                            '@time': '2021/11/27 10:55:18',
                            '#text': '3.3.3.3',
                        }
                    }
                }
            }
        }
        mocker.patch('Panorama.http_request', return_value=uncommited_rule_item)
        with pytest.raises(DemistoException):
            Panorama.panorama_edit_rule_command(args)
class MockedResponse:
    """Minimal stand-in for requests.Response carrying only the fields the code reads."""

    def __init__(self, text, status_code, reason):
        self.reason = reason
        self.text = text
        self.status_code = status_code
# Each case: (command args, expected request body, mocked HTTP response,
# expected entry context) for the panorama-commit command.
@pytest.mark.parametrize('args, expected_request_params, request_result, expected_demisto_result',
                         [pytest.param({'device-group': 'some_device', 'admin_name': 'some_admin_name'},
                                       {'action': 'partial',
                                        'cmd': '<commit><device-group><entry '
                                               'name="some_device"/></device-group><partial><admin>'
                                               '<member>some_admin_name</member></admin></partial></commit>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Commit(val.JobID == obj.JobID)': {'Description': None,
                                                                                    'JobID': '19420',
                                                                                    'Status': 'Pending'}},
                                       id='only admin changes commit'),
                          pytest.param({'device-group': 'some_device', 'force_commit': 'true'},
                                       {'cmd': '<commit><device-group><entry name="some_device"/></device-group><force>'
                                               '</force></commit>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Commit(val.JobID == obj.JobID)': {'Description': None,
                                                                                    'JobID': '19420',
                                                                                    'Status': 'Pending'}},
                                       id="force commit"),
                          pytest.param({'device-group': 'some_device', 'exclude_device_network_configuration': 'true'},
                                       {'action': 'partial',
                                        'cmd': '<commit><device-group><entry name="some_device"/></device-group>'
                                               '<partial><device-and-network>excluded</device-and-network></partial>'
                                               '</commit>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Commit(val.JobID == obj.JobID)': {'Description': None,
                                                                                    'JobID': '19420',
                                                                                    'Status': 'Pending'}},
                                       id="device and network excluded"),
                          pytest.param({'device-group': 'some_device', 'exclude_shared_objects': 'true'},
                                       {'action': 'partial',
                                        'cmd': '<commit><device-group><entry name="some_device"/></device-group>'
                                               '<partial><shared-object>excluded</shared-object></partial></commit>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Commit(val.JobID == obj.JobID)': {'Description': None,
                                                                                    'JobID': '19420',
                                                                                    'Status': 'Pending'}},
                                       id="exclude shared objects"),
                          pytest.param({'device-group': 'some_device'},
                                       {'cmd': '<commit><device-group><entry name="some_device"/></device-group>'
                                               '</commit>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Commit(val.JobID == obj.JobID)': {'Description': None,
                                                                                    'JobID': '19420',
                                                                                    'Status': 'Pending'}},
                                       id="no args")
                          ])
def test_panorama_commit_command(mocker, args, expected_request_params, request_result, expected_demisto_result):
    """
    Given:
        - command args
        - request result
    When:
        - Running panorama-commit command
    Then:
        - Assert the request url is as expected
        - Assert demisto results contain the relevant result information
    """
    import Panorama
    import requests
    from Panorama import panorama_commit_command
    Panorama.API_KEY = 'thisisabogusAPIKEY!'
    return_results_mock = mocker.patch.object(Panorama, 'return_results')
    request_mock = mocker.patch.object(requests, 'request', return_value=request_result)
    panorama_commit_command(args)
    called_request_params = request_mock.call_args.kwargs['data']  # The body part of the request
    assert called_request_params == expected_request_params
    demisto_result_got = return_results_mock.call_args.args[0]['EntryContext']
    assert demisto_result_got == expected_demisto_result
# Each case: (command args, expected request body, mocked HTTP response,
# expected entry context) for the panorama-push-to-device-group command.
@pytest.mark.parametrize('args, expected_request_params, request_result, expected_demisto_result',
                         [pytest.param({},
                                       {'action': 'all',
                                        'cmd': '<commit-all><shared-policy><device-group><entry name="some_device"/>'
                                               '</device-group></shared-policy></commit-all>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Push(val.JobID == obj.JobID)': {'DeviceGroup': 'some_device',
                                                                                  'JobID': '19420',
                                                                                  'Status': 'Pending'}},
                                       id='no args'),
                          pytest.param({'serial_number': '1337'},
                                       {'action': 'all',
                                        'cmd': '<commit-all><shared-policy><device-group><entry name="some_device">'
                                               '<devices><entry name="1337"/></devices></entry></device-group>'
                                               '</shared-policy></commit-all>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Push(val.JobID == obj.JobID)': {'DeviceGroup': 'some_device',
                                                                                  'JobID': '19420',
                                                                                  'Status': 'Pending'}},
                                       id='serial number'),
                          pytest.param({'include-template': 'false'},
                                       {'action': 'all',
                                        'cmd': '<commit-all><shared-policy><device-group><entry name="some_device"/>'
                                               '</device-group><include-template>no</include-template></shared-policy>'
                                               '</commit-all>',
                                        'key': 'thisisabogusAPIKEY!',
                                        'type': 'commit'},
                                       MockedResponse(text='<response status="success" code="19"><result><msg>'
                                                           '<line>Commit job enqueued with jobid 19420</line></msg>'
                                                           '<job>19420</job></result></response>', status_code=200,
                                                      reason=''),
                                       {'Panorama.Push(val.JobID == obj.JobID)': {'DeviceGroup': 'some_device',
                                                                                  'JobID': '19420',
                                                                                  'Status': 'Pending'}},
                                       id='do not include template')
                          ])
def test_panorama_push_to_device_group_command(mocker, args, expected_request_params, request_result,
                                               expected_demisto_result):
    """
    Given:
        - command args
        - request result
    When:
        - Running panorama-push-to-device-group command
    Then:
        - Assert the request url is as expected
        - Assert demisto results contain the relevant result information
    """
    import Panorama
    import requests
    from Panorama import panorama_push_to_device_group_command
    return_results_mock = mocker.patch.object(Panorama, 'return_results')
    request_mock = mocker.patch.object(requests, 'request', return_value=request_result)
    Panorama.DEVICE_GROUP = 'some_device'
    Panorama.API_KEY = 'thisisabogusAPIKEY!'
    panorama_push_to_device_group_command(args)
    called_request_params = request_mock.call_args.kwargs['data']  # The body part of the request
    assert called_request_params == expected_request_params
    demisto_result_got = return_results_mock.call_args.args[0]['EntryContext']
    assert demisto_result_got == expected_demisto_result
def test_get_url_category__url_length_gt_1278(mocker):
    """
    Given:
        - An API error response indicating the queried URL exceeds the
          allowed length (1278 characters)
    When:
        - Running the get-url-category command
    Then:
        - A CommandResults with a detailed readable output is returned
    """
    # prepare
    import Panorama
    import requests
    from Panorama import panorama_get_url_category_command

    Panorama.DEVICE_GROUP = ''
    error_body = {
        'response': {
            '@status': 'error',
            '@code': '20',
            'msg': {'line': 'test -> url Node can be at most 1278 characters, but current length: 1288'}
        }}
    response = requests.Response()
    response.status_code = 200
    response._content = json.dumps(error_body).encode('utf-8')

    mocker.patch.object(requests, 'request', return_value=response)
    mocker.patch.object(Panorama, 'xml2json', return_value=response._content)
    results_mock = mocker.patch.object(Panorama, 'return_results')

    # run
    panorama_get_url_category_command(url_cmd='url', url='test_url', additional_suspicious=[], additional_malicious=[])

    # validate
    readable = results_mock.call_args[0][0][1].readable_output
    assert readable == 'URL Node can be at most 1278 characters.'
class TestDevices:
    """Tests for Panorama.devices() on firewall and Panorama instances."""

    def test_with_fw(self):
        # A firewall instance (VSYS set) yields a single (None, None) pair.
        import Panorama
        Panorama.VSYS = 'this is a FW instance'
        assert list(Panorama.devices()) == [(None, None)]

    def test_with_specific_target_and_vsys(self):
        # Explicit targets and vsys lists are combined pairwise.
        import Panorama
        Panorama.VSYS = None  # this a Panorama instance
        expected = [('target', 'vsys1'), ('target', 'vsys2')]
        assert list(Panorama.devices(targets=['target'], vsys_s=['vsys1', 'vsys2'])) == expected

    def test_with_specific_target_only(self, requests_mock):
        # Only the requested target's vsys entries are returned.
        import Panorama
        with open('test_data/devices_list.xml', 'r') as f:
            requests_mock.get(Panorama.URL, text=f.read())
        Panorama.VSYS = None  # this a Panorama instance
        expected = [('target1', 'vsys1'), ('target1', 'vsys2')]
        assert list(Panorama.devices(targets=['target1'])) == expected

    def test_without_specify(self, requests_mock):
        # With no filters, every device/vsys pair from the API is returned.
        import Panorama
        with open('test_data/devices_list.xml', 'r') as f:
            requests_mock.get(Panorama.URL, text=f.read())
        Panorama.VSYS = None  # this a Panorama instance
        expected = [('target1', 'vsys1'), ('target1', 'vsys2'), ('target2', None)]
        assert list(Panorama.devices()) == expected
# ---------------------------------------------------------------------------
import pytest
from .utils import MockedTime
from .sample_data import sample_data_holder_1, sample_data_holder_2
from hystrix.rolling_percentile import RollingPercentile, PercentileSnapshot
def test_rolling():
    """Values roll into new buckets as mocked time advances; the percentile
    snapshot lags one bucket behind the most recently added values."""
    time = MockedTime()
    percentile = RollingPercentile(time, 60000, 12, 1000, True)
    percentile.add_value(1000)
    percentile.add_value(1000)
    percentile.add_value(1000)
    percentile.add_value(2000)
    assert percentile.buckets.size == 1
    # No bucket turnover yet so percentile not yet generated
    assert percentile.percentile(50) == 0
    time.increment(6000)
    # Still only 1 bucket until we touch it again
    assert percentile.buckets.size == 1
    # A bucket has been created so we have a new percentile
    assert percentile.percentile(50) == 1000
    # Now 2 buckets since getting a percentile causes bucket retrieval
    assert percentile.buckets.size == 2
    percentile.add_value(1000)
    percentile.add_value(500)
    assert percentile.buckets.size == 2
    percentile.add_value(200)
    percentile.add_value(200)
    percentile.add_value(1600)
    percentile.add_value(200)
    percentile.add_value(1600)
    percentile.add_value(1600)
    # We haven't progressed to a new bucket so the percentile should be the
    # same and ignore the most recent bucket
    assert percentile.percentile(50) == 1000
    # Increment to another bucket so we include all of the above in the
    # PercentileSnapshot
    time.increment(6000)
    # The rolling version should have the same data as creating a snapshot
    # like this
    snapshot = PercentileSnapshot(1000, 1000, 1000, 2000, 1000, 500,
                                  200, 200, 1600, 200, 1600, 1600)
    assert snapshot.percentile(0.15) == percentile.percentile(0.15)
    assert snapshot.percentile(0.50) == percentile.percentile(0.50)
    assert snapshot.percentile(0.90) == percentile.percentile(0.90)
    assert snapshot.percentile(0.995) == percentile.percentile(0.995)
    # mean = (1000+1000+1000+2000+1000+500+200+200+1600+200+1600+1600) / 12
    assert snapshot.mean() == 991
def test_value_is_zero_after_rolling_window_passes_and_no_traffic():
    """After a full rolling window with no new values, percentiles reset to 0."""
    clock = MockedTime()
    rolling = RollingPercentile(clock, 60000, 12, 1000, True)
    for value in (1000, 1000, 1000, 2000, 4000):
        rolling.add_value(value)
    assert rolling.buckets.size == 1
    # No bucket turnover yet, so no percentile has been generated.
    assert rolling.percentile(50) == 0
    clock.increment(6000)
    # Still a single bucket until the structure is touched again.
    assert rolling.buckets.size == 1
    # Touching it rolls a bucket, so a percentile snapshot now exists.
    assert rolling.percentile(50) == 1500
    # Let a full minute (the entire window) pass with no traffic.
    clock.increment(60000)
    # Every bucket is now stale/empty, so the percentile drops back to 0.
    assert rolling.percentile(50) == 0
def test_sample_data_over_time_1():
    """Replay recorded latency samples and sanity-check the percentiles."""
    clock = MockedTime()
    rolling = RollingPercentile(clock, 60000, 12, 1000, True)
    prev_ms = 0
    for at_ms, latency in sample_data_holder_1:
        clock.increment(at_ms - prev_ms)
        prev_ms = at_ms
        rolling.add_value(latency)
    for label, pct in (('0.01', 0.01), ('Median', 50), ('90th', 90),
                       ('99th', 99), ('99.5th', 99.5), ('99.99', 99.99),
                       ('Median', 50), ('Median', 50), ('Median', 50)):
        print(label, rolling.percentile(pct))
    # In a loop as a use case was found where very different values were
    # calculated in subsequent requests.
    for _ in range(10):
        median = rolling.percentile(50)
        if median > 5:
            pytest.fail('We expect around 2 but got: {}'.format(median))
        p995 = rolling.percentile(99.5)
        if p995 < 20:
            msg = 'We expect to see some high values over 20 but got: {}'
            pytest.fail(msg.format(p995))
def test_sample_data_over_time_2():
    """Second recorded dataset; median and tail must land in expected ranges."""
    clock = MockedTime()
    rolling = RollingPercentile(clock, 60000, 12, 1000, True)
    prev_ms = 0
    for at_ms, latency in sample_data_holder_2:
        clock.increment(at_ms - prev_ms)
        prev_ms = at_ms
        rolling.add_value(latency)
    for label, pct in (('0.01', 0.01), ('Median', 50), ('90th', 90),
                       ('99th', 99), ('99.5th', 99.5), ('99.99', 99.99)):
        print(label, rolling.percentile(pct))
    median = rolling.percentile(50)
    if median > 90 or median < 50:
        pytest.fail('We expect around 60-70 but got: {}'.format(median))
    p99 = rolling.percentile(99)
    if p99 < 400:
        msg = 'We expect to see some high values over 400 but got: {}'
        pytest.fail(msg.format(p99))
def test_percentile_algorithm_media1():
    """Median of a small skewed distribution."""
    values = (100, 100, 100, 100, 200, 200, 200, 300, 300, 300, 300)
    snapshot = PercentileSnapshot(*values)
    assert snapshot.percentile(50) == 200
def test_percentile_algorithm_media2():
    """A single large outlier must not move the median."""
    values = (100,) * 10 + (500,)
    snapshot = PercentileSnapshot(*values)
    assert snapshot.percentile(50) == 100
def test_percentile_algorithm_media3():
    """Median interpolates between the two middle values of sorted input."""
    values = (50, 75, 100, 125, 160, 170, 180, 200, 210, 300, 500)
    snapshot = PercentileSnapshot(*values)
    assert snapshot.percentile(50) == 175
def test_percentile_algorithm_media4():
    """The snapshot must sort unsorted input before computing the median."""
    unsorted_values = (300, 75, 125, 500, 100, 160, 180, 200, 210, 50, 170)
    snapshot = PercentileSnapshot(*unsorted_values)
    assert snapshot.percentile(50) == 175
def test_percentile_algorithm_extremes():
    """Mostly-constant data with a few huge outliers.

    Input is unsorted, so the snapshot is expected to sort it. Low/median
    percentiles must stay at the dominant value (2) while high percentiles
    surface the outliers.
    """
    snapshot = PercentileSnapshot(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                  800, 768, 657, 700, 867)
    print('0.01', snapshot.percentile(0.01))
    print('10th', snapshot.percentile(10))
    print('Median', snapshot.percentile(50))
    print('75th', snapshot.percentile(75))
    print('90th', snapshot.percentile(90))
    print('99th', snapshot.percentile(99))
    print('99.5th', snapshot.percentile(99.5))
    print('99.99', snapshot.percentile(99.99))
    assert snapshot.percentile(50) == 2
    assert snapshot.percentile(10) == 2
    assert snapshot.percentile(75) == 2
    if snapshot.percentile(95) < 600:
        # BUG FIX: the message said "90th" while the check is on the 95th
        # percentile.
        msg = 'We expect 95th to be over 600 to show the extremes but got: {}'
        pytest.fail(msg.format(snapshot.percentile(95)))
    if snapshot.percentile(99) < 600:
        msg = 'We expect 99th to be over 600 to show the extremes but got: {}'
        pytest.fail(msg.format(snapshot.percentile(99)))
def percentile_for_values(*values):
    """Build a PercentileSnapshot directly from the given latency values."""
    return PercentileSnapshot(*values)
def test_percentile_algorithm_high_percentile():
    """On a tiny sample, high percentiles resolve to the top value."""
    snapshot = percentile_for_values(1, 2, 3)
    for pct, expected in ((50, 2), (75, 3)):
        assert snapshot.percentile(pct) == expected
def test_percentile_algorithm_low_percentile():
    """On a two-value sample, low/high percentiles pick min/max."""
    snapshot = percentile_for_values(1, 2)
    for pct, expected in ((25, 1), (75, 2)):
        assert snapshot.percentile(pct) == expected
def test_percentile_algorithm_percentiles():
    """Interpolation, plus clamping of out-of-range percentile requests."""
    snapshot = percentile_for_values(10, 30, 20, 40)
    checks = ((30, 22), (25, 20), (75, 40), (50, 30),
              (-1, 10),    # below 0 clamps to the minimum
              (101, 40))   # above 100 clamps to the maximum
    for pct, expected in checks:
        assert snapshot.percentile(pct) == expected
def test_percentile_algorithm_NIST_example():
    """Worked percentile example from the NIST/SEMATECH e-Handbook of
    Statistical Methods (percentile section)."""
    snapshot = percentile_for_values(951772, 951567, 951937, 951959, 951442,
                                     950610, 951591, 951195, 951772, 950925,
                                     951990, 951682)
    # 90th percentile interpolates between ranked values; 100th is the max.
    assert snapshot.percentile(90) == 951983
    assert snapshot.percentile(100) == 951990
def test_does_nothing_when_disabled():
    """With the enabled flag off, every statistic reports the sentinel -1."""
    clock = MockedTime()
    rolling = RollingPercentile(clock, 60000, 12, 1000, False)
    prev_ms = 0
    for at_ms, latency in sample_data_holder_2:
        clock.increment(at_ms - prev_ms)
        prev_ms = at_ms
        rolling.add_value(latency)
    for pct in (50, 75):
        assert rolling.percentile(pct) == -1
    assert rolling.mean() == -1
# ---------------------------------------------------------------------------
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
    """SGDClassifier that converts every input to CSR format.

    Lets the shared test suite exercise the sparse code paths with the
    same dense fixtures.
    """

    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)

    def decision_function(self, X):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).decision_function(X)

    def predict_proba(self, X):
        # NOTE(review): on the base class predict_proba appears to be exposed
        # through a loss-dependent property guard, so super() (not an unbound
        # class attribute access) must be used here — confirm against the
        # sklearn version in use.
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
    """SGDRegressor that converts every input to CSR format so the shared
    tests run through the sparse code paths."""

    def fit(self, X, y, *args, **kw):
        return SGDRegressor.fit(self, sp.csr_matrix(X), y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        return SGDRegressor.partial_fit(self, sp.csr_matrix(X), y, *args, **kw)

    def decision_function(self, X, *args, **kw):
        return SGDRegressor.decision_function(self, sp.csr_matrix(X), *args, **kw)
# Test Data

# test sample 1: two linearly-separable classes in 2-D
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]

# test sample 2; string class labels (three classes, three points each)
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
               [1, 1], [0.75, 0.5], [1.5, 1.5],
               [-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]

# test sample 3: disjoint binary feature blocks, two classes
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
               [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
               [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
               [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

iris = datasets.load_iris()

# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
    """Tests shared by the dense and sparse SGD variants.

    Mixed into concrete ``unittest.TestCase`` subclasses that define
    ``factory_class`` (the estimator class under test).
    """

    def factory(self, **kwargs):
        # A fixed random_state keeps the shared tests deterministic.
        if "random_state" not in kwargs:
            kwargs["random_state"] = 42
        return self.factory_class(**kwargs)

    # a simple implementation of ASGD to use for testing
    # uses squared loss to find the gradient
    def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
        """Reference averaged-SGD (squared loss).

        Returns the running averages of the weight vector and intercept,
        for comparison against the estimator's ``average=True`` results.
        """
        if weight_init is None:
            weights = np.zeros(X.shape[1])
        else:
            weights = weight_init
        average_weights = np.zeros(X.shape[1])
        intercept = intercept_init
        average_intercept = 0.0
        decay = 1.0

        # sparse data has a fixed decay of .01
        if (isinstance(self, SparseSGDClassifierTestCase) or
                isinstance(self, SparseSGDRegressorTestCase)):
            decay = .01

        for i, entry in enumerate(X):
            # plain SGD step on the squared loss ...
            p = np.dot(entry, weights)
            p += intercept
            gradient = p - y[i]
            weights *= 1.0 - (eta * alpha)
            weights += -(eta * gradient * entry)
            intercept += -(eta * gradient) * decay

            # ... followed by an incremental update of the running averages
            average_weights *= i
            average_weights += weights
            average_weights /= i + 1.0

            average_intercept *= i
            average_intercept += intercept
            average_intercept /= i + 1.0

        return average_weights, average_intercept

    def _test_warm_start(self, X, Y, lr):
        """Explicit and implicit (warm_start=True) restarts must agree."""
        # Test that explicit warm restart...
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                           learning_rate=lr)
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
                            learning_rate=lr)
        clf2.fit(X, Y,
                 coef_init=clf.coef_.copy(),
                 intercept_init=clf.intercept_.copy())

        # ... and implicit warm restart are equivalent.
        clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                            warm_start=True, learning_rate=lr)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf.t_)
        assert_array_almost_equal(clf3.coef_, clf.coef_)

        clf3.set_params(alpha=0.001)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf2.t_)
        assert_array_almost_equal(clf3.coef_, clf2.coef_)

    def test_warm_start_constant(self):
        """Warm start with the 'constant' learning-rate schedule."""
        self._test_warm_start(X, Y, "constant")

    def test_warm_start_invscaling(self):
        """Warm start with the 'invscaling' learning-rate schedule."""
        self._test_warm_start(X, Y, "invscaling")

    def test_warm_start_optimal(self):
        """Warm start with the 'optimal' learning-rate schedule."""
        self._test_warm_start(X, Y, "optimal")

    def test_input_format(self):
        # Input format tests.
        clf = self.factory(alpha=0.01, n_iter=5,
                           shuffle=False)
        clf.fit(X, Y)
        Y_ = np.array(Y)[:, np.newaxis]

        # A two-column target is not a valid shape and must be rejected.
        Y_ = np.c_[Y_, Y_]
        assert_raises(ValueError, clf.fit, X, Y_)

    def test_clone(self):
        # Test whether clone works ok.
        clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
        clf = clone(clf)
        clf.set_params(penalty='l2')
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
        clf2.fit(X, Y)

        # the cloned-then-reconfigured model must match a fresh l2 model
        assert_array_equal(clf.coef_, clf2.coef_)

    def test_plain_has_no_average_attr(self):
        """average_*/standard_* attributes exist only when averaging is on."""
        clf = self.factory(average=True, eta0=.01)
        clf.fit(X, Y)

        assert_true(hasattr(clf, 'average_coef_'))
        assert_true(hasattr(clf, 'average_intercept_'))
        assert_true(hasattr(clf, 'standard_intercept_'))
        assert_true(hasattr(clf, 'standard_coef_'))

        clf = self.factory()
        clf.fit(X, Y)

        assert_false(hasattr(clf, 'average_coef_'))
        assert_false(hasattr(clf, 'average_intercept_'))
        assert_false(hasattr(clf, 'standard_intercept_'))
        assert_false(hasattr(clf, 'standard_coef_'))

    def test_late_onset_averaging_not_reached(self):
        """Before the averaging threshold is hit, results match no-averaging."""
        clf1 = self.factory(average=600)
        clf2 = self.factory()
        for _ in range(100):
            if isinstance(clf1, SGDClassifier):
                clf1.partial_fit(X, Y, classes=np.unique(Y))
                clf2.partial_fit(X, Y, classes=np.unique(Y))
            else:
                clf1.partial_fit(X, Y)
                clf2.partial_fit(X, Y)

        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
        assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)

    def test_late_onset_averaging_reached(self):
        """Once averaging kicks in, coefficients match the reference asgd()."""
        eta0 = .001
        alpha = .0001
        Y_encode = np.array(Y)
        Y_encode[Y_encode == 1] = -1.0
        Y_encode[Y_encode == 2] = 1.0

        clf1 = self.factory(average=7, learning_rate="constant",
                            loss='squared_loss', eta0=eta0,
                            alpha=alpha, n_iter=2, shuffle=False)
        clf2 = self.factory(average=0, learning_rate="constant",
                            loss='squared_loss', eta0=eta0,
                            alpha=alpha, n_iter=1, shuffle=False)

        clf1.fit(X, Y_encode)
        clf2.fit(X, Y_encode)

        # clf2's single pass provides the starting point for the second
        # pass, during which clf1's averaging (average=7) is active.
        average_weights, average_intercept = \
            self.asgd(X, Y_encode, eta0, alpha,
                      weight_init=clf2.coef_.ravel(),
                      intercept_init=clf2.intercept_)

        assert_array_almost_equal(clf1.coef_.ravel(),
                                  average_weights.ravel(),
                                  decimal=16)
        assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
    def test_sgd(self):
        """Smoke test: every classification loss fits and predicts sample 1."""
        # Check that SGD gives any results :-)
        for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
            clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
                               loss=loss, n_iter=10, shuffle=True)
            clf.fit(X, Y)
            # assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
            assert_array_equal(clf.predict(T), true_result)
    @raises(ValueError)
    def test_sgd_bad_l1_ratio(self):
        """l1_ratio > 1 is invalid and must raise ValueError."""
        # Check whether expected ValueError on bad l1_ratio
        self.factory(l1_ratio=1.1)
    @raises(ValueError)
    def test_sgd_bad_learning_rate_schedule(self):
        """An unknown learning_rate schedule must raise ValueError."""
        # Check whether expected ValueError on bad learning_rate
        self.factory(learning_rate="<unknown>")
    @raises(ValueError)
    def test_sgd_bad_eta0(self):
        """eta0=0 with the 'constant' schedule must raise ValueError."""
        # Check whether expected ValueError on bad eta0
        self.factory(eta0=0, learning_rate="constant")
    @raises(ValueError)
    def test_sgd_bad_alpha(self):
        """A negative alpha must raise ValueError."""
        # Check whether expected ValueError on bad alpha
        self.factory(alpha=-.1)
    @raises(ValueError)
    def test_sgd_bad_penalty(self):
        """An unknown penalty name must raise ValueError."""
        # Check whether expected ValueError on bad penalty
        self.factory(penalty='foobar', l1_ratio=0.85)
    @raises(ValueError)
    def test_sgd_bad_loss(self):
        """An unknown loss name must raise ValueError."""
        # Check whether expected ValueError on bad loss
        self.factory(loss="foobar")
    @raises(ValueError)
    def test_sgd_n_iter_param(self):
        """A negative n_iter must raise ValueError."""
        # Test parameter validity check
        self.factory(n_iter=-10000)
    @raises(ValueError)
    def test_sgd_shuffle_param(self):
        """A non-boolean shuffle value must raise ValueError."""
        # Test parameter validity check
        self.factory(shuffle="false")
    @raises(TypeError)
    def test_argument_coef(self):
        """coef_init is a fit() argument, not a constructor argument."""
        # Checks coef_init not allowed as model argument (only fit)
        # Provided coef_ does not match dataset.
        self.factory(coef_init=np.zeros((3,))).fit(X, Y)
    @raises(ValueError)
    def test_provide_coef(self):
        """A coef_init whose shape doesn't match the data must raise."""
        # Checks coef_init shape for the warm starts
        # Provided coef_ does not match dataset.
        self.factory().fit(X, Y, coef_init=np.zeros((3,)))
    @raises(ValueError)
    def test_set_intercept(self):
        """An intercept_init whose shape doesn't match the data must raise."""
        # Checks intercept_ shape for the warm starts
        # Provided intercept_ does not match dataset.
        self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
    def test_set_intercept_binary(self):
        """A scalar intercept_init is accepted in the binary case."""
        # Checks intercept_ shape for the warm starts in binary case
        self.factory().fit(X5, Y5, intercept_init=0)
    def test_average_binary_computed_correctly(self):
        """average=True weights must match the reference asgd() implementation."""
        # Checks the SGDClassifier correctly computes the average weights
        eta = .1
        alpha = 2.
        n_samples = 20
        n_features = 10
        rng = np.random.RandomState(0)
        X = rng.normal(size=(n_samples, n_features))
        w = rng.normal(size=n_features)

        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        # simple linear function without noise
        y = np.dot(X, w)
        y = np.sign(y)

        clf.fit(X, y)

        average_weights, average_intercept = self.asgd(X, y, eta, alpha)
        average_weights = average_weights.reshape(1, -1)
        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=14)
        assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
    def test_set_intercept_to_intercept(self):
        """A fitted intercept_ can be fed back as intercept_init."""
        # Checks intercept_ shape consistency for the warm starts
        # Inconsistent intercept_ shape.
        clf = self.factory().fit(X5, Y5)
        self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
        clf = self.factory().fit(X, Y)
        self.factory().fit(X, Y, intercept_init=clf.intercept_)
    @raises(ValueError)
    def test_sgd_at_least_two_labels(self):
        """Fitting with a single class label must raise ValueError."""
        # Target must have at least two labels
        self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
    def test_partial_fit_weight_class_balanced(self):
        """partial_fit rejects class_weight='balanced' with a helpful message."""
        # partial_fit with class_weight='balanced' not supported
        # NOTE(review): the regex mirrors the library's error message
        # verbatim, including the "you can us" typo — do not "fix" it here.
        assert_raises_regexp(ValueError,
                             "class_weight 'balanced' is not supported for "
                             "partial_fit. In order to use 'balanced' weights, "
                             "use compute_class_weight\('balanced', classes, y\). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.",
                             self.factory(class_weight='balanced').partial_fit,
                             X, Y, classes=np.unique(Y))
    def test_sgd_multiclass(self):
        """Multi-class fit: coef/intercept shapes and predictions."""
        # Multi-class test case
        clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
        assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
        assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
        pred = clf.predict(T2)
        assert_array_equal(pred, true_result2)
    def test_sgd_multiclass_average(self):
        """Per-class averaged weights match the one-vs-all reference asgd()."""
        eta = .001
        alpha = .01
        # Multi-class average test case
        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        np_Y2 = np.array(Y2)
        clf.fit(X2, np_Y2)
        classes = np.unique(np_Y2)

        for i, cl in enumerate(classes):
            # build the one-vs-all +/-1 target for this class
            y_i = np.ones(np_Y2.shape[0])
            y_i[np_Y2 != cl] = -1
            average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
            assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
            assert_almost_equal(average_intercept,
                                clf.intercept_[i],
                                decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
    def test_sgd_multiclass_njobs(self):
        """Multi-class fit with n_jobs=2 behaves like the serial version."""
        # Multi-class test case with multi-core support
        clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
        assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
        assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
        pred = clf.predict(T2)
        assert_array_equal(pred, true_result2)
    def test_set_coef_multiclass(self):
        """coef_init/intercept_init shape validation for multi-class fits."""
        # Checks coef_init and intercept_init shape for multi-class
        # problems
        # Provided coef_ does not match dataset
        clf = self.factory()
        assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))

        # Provided coef_ does match dataset
        clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))

        # Provided intercept_ does not match dataset
        clf = self.factory()
        assert_raises(ValueError, clf.fit, X2, Y2,
                      intercept_init=np.zeros((1,)))

        # Provided intercept_ does match dataset.
        clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
    def test_sgd_l1(self):
        """L1 zeroes the mid coefficients; sparsify()/pickle keep predictions."""
        # Test L1 regularization
        n = len(X4)
        rng = np.random.RandomState(13)
        idx = np.arange(n)
        rng.shuffle(idx)

        X = X4[idx, :]
        Y = Y4[idx]

        clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
                           n_iter=2000, shuffle=False)
        clf.fit(X, Y)
        # the redundant middle features should be driven exactly to zero
        assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
        pred = clf.predict(X)
        assert_array_equal(pred, Y)

        # test sparsify with dense inputs
        clf.sparsify()
        assert_true(sp.issparse(clf.coef_))
        pred = clf.predict(X)
        assert_array_equal(pred, Y)

        # pickle and unpickle with sparse coef_
        clf = pickle.loads(pickle.dumps(clf))
        assert_true(sp.issparse(clf.coef_))
        pred = clf.predict(X)
        assert_array_equal(pred, Y)
    def test_class_weights(self):
        """An extreme class weight rotates the decision boundary."""
        # Test class weights.
        X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                      [1.0, 1.0], [1.0, 0.0]])
        y = [1, 1, 1, -1, -1]

        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
                           class_weight=None)
        clf.fit(X, y)
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

        # we give a small weights to class 1
        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
                           class_weight={1: 0.001})
        clf.fit(X, y)

        # now the hyperplane should rotate clock-wise and
        # the prediction on this point should shift
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
    def test_equal_class_weight(self):
        """Uniform explicit class weights approximate the unweighted model."""
        # Test if equal class weights approx. equals no class weights.
        X = [[1, 0], [1, 0], [0, 1], [0, 1]]
        y = [0, 0, 1, 1]
        clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
        clf.fit(X, y)

        X = [[1, 0], [0, 1]]
        y = [0, 1]
        clf_weighted = self.factory(alpha=0.1, n_iter=1000,
                                    class_weight={0: 0.5, 1: 0.5})
        clf_weighted.fit(X, y)

        # should be similar up to some epsilon due to learning rate schedule
        assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
    @raises(ValueError)
    def test_wrong_class_weight_label(self):
        """class_weight keyed on a non-existent label must raise ValueError."""
        # ValueError due to not existing class label.
        clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
        clf.fit(X, Y)
    @raises(ValueError)
    def test_wrong_class_weight_format(self):
        """class_weight of the wrong type (a list) must raise ValueError."""
        # ValueError due to wrong class_weight argument type.
        clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
        clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
    def test_balanced_weight(self):
        """class_weight='balanced' compensates for artificial class imbalance."""
        # Test class weights for imbalanced data
        # compute reference metrics on iris dataset that is quite balanced by
        # default
        X, y = iris.data, iris.target
        X = scale(X)
        idx = np.arange(X.shape[0])
        rng = np.random.RandomState(6)
        rng.shuffle(idx)
        X = X[idx]
        y = y[idx]
        clf = self.factory(alpha=0.0001, n_iter=1000,
                           class_weight=None, shuffle=False).fit(X, y)
        assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
                            decimal=1)

        # make the same prediction using balanced class_weight
        clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
                                    class_weight="balanced",
                                    shuffle=False).fit(X, y)
        assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
                            decimal=1)

        # Make sure that in the balanced case it does not change anything
        # to use "balanced"
        assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)

        # build an very very imbalanced dataset out of iris data
        X_0 = X[y == 0, :]
        y_0 = y[y == 0]

        X_imbalanced = np.vstack([X] + [X_0] * 10)
        y_imbalanced = np.concatenate([y] + [y_0] * 10)

        # fit a model on the imbalanced data without class weight info
        clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
        clf.fit(X_imbalanced, y_imbalanced)
        y_pred = clf.predict(X)
        assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)

        # fit a model with balanced class_weight enabled
        clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
        clf.fit(X_imbalanced, y_imbalanced)
        y_pred = clf.predict(X)
        assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)

        # fit another using a fit parameter override
        # NOTE(review): this repeats the previous fit verbatim; the "fit
        # parameter override" it once exercised appears to be gone — confirm.
        clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
        clf.fit(X_imbalanced, y_imbalanced)
        y_pred = clf.predict(X)
        assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
    def test_sample_weights(self):
        """Down-weighting samples shifts the decision boundary."""
        # Test weights on individual samples
        X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                      [1.0, 1.0], [1.0, 0.0]])
        y = [1, 1, 1, -1, -1]

        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
        clf.fit(X, y)
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

        # we give a small weights to class 1
        clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)

        # now the hyperplane should rotate clock-wise and
        # the prediction on this point should shift
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
    @raises(ValueError)
    def test_wrong_sample_weights(self):
        """sample_weight of the wrong length must raise ValueError."""
        # Test if ValueError is raised if sample_weight has wrong shape
        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
        # provided sample_weight too long
        clf.fit(X, Y, sample_weight=np.arange(7))
    @raises(ValueError)
    def test_partial_fit_exception(self):
        """First partial_fit call without classes= must raise ValueError."""
        clf = self.factory(alpha=0.01)
        # classes was not specified
        clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
    # Partial_fit should work after initial fit in the multiclass case.
    # Non-regression test for #2496; fit would previously produce a
    # Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
    clf = self.factory()
    clf.fit(X2, Y2)
    clf.partial_fit(X2, Y2)  # no exception here
def _test_partial_fit_equal_fit(self, lr):
    """Two partial_fit passes (shuffle=False) must match one fit with
    n_iter=2 for the given learning-rate schedule *lr*."""
    for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
        # Reference: a single fit with two epochs.
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
                           learning_rate=lr, shuffle=False)
        clf.fit(X_, Y_)
        y_pred = clf.decision_function(T_)
        t = clf.t_
        classes = np.unique(Y_)
        # Same schedule driven by two explicit partial_fit epochs.
        clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
                           shuffle=False)
        for i in range(2):
            clf.partial_fit(X_, Y_, classes=classes)
        y_pred2 = clf.decision_function(T_)
        # Same sample counter and (approximately) same scores.
        assert_equal(clf.t_, t)
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
    # partial_fit must match fit for the 'constant' schedule.
    self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
    # partial_fit must match fit for the 'optimal' schedule.
    self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
    # partial_fit must match fit for the 'invscaling' schedule.
    self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
    """Each regression loss must separate the (linearly separable)
    X/Y fixture perfectly."""
    clf = self.factory(alpha=0.01, learning_rate="constant",
                       eta0=0.1, loss="epsilon_insensitive")
    clf.fit(X, Y)
    assert_equal(1.0, np.mean(clf.predict(X) == Y))
    clf = self.factory(alpha=0.01, learning_rate="constant",
                       eta0=0.1, loss="squared_epsilon_insensitive")
    clf.fit(X, Y)
    assert_equal(1.0, np.mean(clf.predict(X) == Y))
    clf = self.factory(alpha=0.01, loss="huber")
    clf.fit(X, Y)
    assert_equal(1.0, np.mean(clf.predict(X) == Y))
    clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
                       loss="squared_loss")
    clf.fit(X, Y)
    assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
    # Reuse the shared warm-start check (defined in CommonTest) on the
    # multiclass fixture.
    self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
    # Test multiple calls of fit w/ different shaped inputs.
    clf = self.factory(alpha=0.01, n_iter=5,
                       shuffle=False)
    clf.fit(X, Y)
    assert_true(hasattr(clf, "coef_"))
    # Non-regression test: try fitting with a different label set.
    y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
    # Also drops one feature column, so coef_ must be rebuilt.
    clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
    """Run exactly the same tests using the sparse representation variant"""
    # Only the factory changes; every inherited test runs against it.
    factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
    """Test suite for the dense representation variant of SGD"""
    # Factory hook used by CommonTest helpers; overridden by the sparse
    # subclass below.
    factory_class = SGDRegressor

    def test_sgd(self):
        # Check that SGD gives any results.
        clf = self.factory(alpha=0.1, n_iter=2,
                           fit_intercept=False)
        clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
        # Symmetric data: both feature weights must coincide.
        assert_equal(clf.coef_[0], clf.coef_[1])

    @raises(ValueError)
    def test_sgd_bad_penalty(self):
        # Check whether expected ValueError on bad penalty
        self.factory(penalty='foobar', l1_ratio=0.85)

    @raises(ValueError)
    def test_sgd_bad_loss(self):
        # Check whether expected ValueError on bad loss
        self.factory(loss="foobar")

    def test_sgd_averaged_computed_correctly(self):
        # Tests the average regressor matches the naive implementation
        # (self.asgd is the reference ASGD written out in CommonTest).
        eta = .001
        alpha = .01
        n_samples = 20
        n_features = 10
        rng = np.random.RandomState(0)
        X = rng.normal(size=(n_samples, n_features))
        w = rng.normal(size=n_features)
        # simple linear function without noise
        y = np.dot(X, w)
        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)
        clf.fit(X, y)
        average_weights, average_intercept = self.asgd(X, y, eta, alpha)
        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=16)
        assert_almost_equal(clf.intercept_, average_intercept, decimal=16)

    def test_sgd_averaged_partial_fit(self):
        # Tests whether the partial fit yields the same average as the fit
        eta = .001
        alpha = .01
        n_samples = 20
        n_features = 10
        rng = np.random.RandomState(0)
        X = rng.normal(size=(n_samples, n_features))
        w = rng.normal(size=n_features)
        # simple linear function without noise
        y = np.dot(X, w)
        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)
        # Two halves fed in order must reproduce one full averaged pass.
        clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
        clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
        average_weights, average_intercept = self.asgd(X, y, eta, alpha)
        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=16)
        assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)

    def test_average_sparse(self):
        # Checks the average weights on data with 0s
        eta = .001
        alpha = .01
        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)
        n_samples = Y3.shape[0]
        clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
        clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
        average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=16)
        assert_almost_equal(clf.intercept_, average_intercept, decimal=16)

    def test_sgd_least_squares_fit(self):
        xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
        # simple linear function without noise
        y = 0.5 * X.ravel()
        clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)
        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
        clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_sgd_epsilon_insensitive(self):
        xmin, xmax = -5, 5
        n_samples = 100
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
        # simple linear function without noise
        y = 0.5 * X.ravel()
        clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                           alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_true(score > 0.99)
        # simple linear function with noise
        # NOTE(review): uses the *unseeded* global np.random here (other
        # tests in this class use a seeded RandomState) — potential
        # flakiness source; confirm before tightening the 0.5 threshold.
        y = 0.5 * X.ravel() \
            + np.random.randn(n_samples, 1).ravel()
        clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                           alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_true(score > 0.5)

    def test_sgd_huber_fit(self):
        xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
        # simple linear function without noise
        y = 0.5 * X.ravel()
        clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)
        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
        clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_elasticnet_convergence(self):
        # Check that the SGD output is consistent with coordinate descent
        n_samples, n_features = 1000, 5
        rng = np.random.RandomState(0)
        # NOTE(review): X is drawn from the unseeded global np.random while
        # the seeded `rng` is only used for the coefficients — likely an
        # oversight (rng.randn was probably intended).
        X = np.random.randn(n_samples, n_features)
        # ground_truth linear model that generate y from X and to which the
        # models should converge if the regularizer would be set to 0.0
        ground_truth_coef = rng.randn(n_features)
        y = np.dot(X, ground_truth_coef)
        # XXX: alpha = 0.1 seems to cause convergence problems
        for alpha in [0.01, 0.001]:
            for l1_ratio in [0.5, 0.8, 1.0]:
                cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
                                             fit_intercept=False)
                cd.fit(X, y)
                sgd = self.factory(penalty='elasticnet', n_iter=50,
                                   alpha=alpha, l1_ratio=l1_ratio,
                                   fit_intercept=False)
                sgd.fit(X, y)
                err_msg = ("cd and sgd did not converge to comparable "
                           "results for alpha=%f and l1_ratio=%f"
                           % (alpha, l1_ratio))
                assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
                                    err_msg=err_msg)

    def test_partial_fit(self):
        third = X.shape[0] // 3
        clf = self.factory(alpha=0.01)
        clf.partial_fit(X[:third], Y[:third])
        assert_equal(clf.coef_.shape, (X.shape[1], ))
        assert_equal(clf.intercept_.shape, (1,))
        assert_equal(clf.decision_function([0, 0]).shape, (1, ))
        id1 = id(clf.coef_.data)
        clf.partial_fit(X[third:], Y[third:])
        id2 = id(clf.coef_.data)
        # check that coef_ haven't been re-allocated
        # NOTE(review): assert_true(id1, id2) treats id2 as the failure
        # *message*, so this never actually compares the two ids;
        # assert_equal(id1, id2) was almost certainly intended.
        assert_true(id1, id2)

    def _test_partial_fit_equal_fit(self, lr):
        """Two partial_fit passes (shuffle=False) must match one fit with
        n_iter=2 for learning-rate schedule *lr*."""
        clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
                           learning_rate=lr, shuffle=False)
        clf.fit(X, Y)
        y_pred = clf.predict(T)
        t = clf.t_
        clf = self.factory(alpha=0.01, eta0=0.01,
                           learning_rate=lr, shuffle=False)
        for i in range(2):
            clf.partial_fit(X, Y)
        y_pred2 = clf.predict(T)
        assert_equal(clf.t_, t)
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)

    def test_partial_fit_equal_fit_constant(self):
        self._test_partial_fit_equal_fit("constant")

    def test_partial_fit_equal_fit_optimal(self):
        self._test_partial_fit_equal_fit("optimal")

    def test_partial_fit_equal_fit_invscaling(self):
        self._test_partial_fit_equal_fit("invscaling")

    def test_loss_function_epsilon(self):
        # set_params must propagate epsilon into the huber loss object.
        clf = self.factory(epsilon=0.9)
        clf.set_params(epsilon=0.1)
        assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
    # Run exactly the same tests using the sparse representation variant
    factory_class = SparseSGDRegressor
def test_l1_ratio():
    # Test if l1 ratio extremes match L1 and L2 penalty settings.
    # (Exact 0/1 are not used; values infinitesimally close to the
    # extremes should already be indistinguishable from pure L1/L2.)
    X, y = datasets.make_classification(n_samples=1000,
                                        n_features=100, n_informative=20,
                                        random_state=1234)
    # test if elasticnet with l1_ratio near 1 gives same result as pure l1
    est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
                           l1_ratio=0.9999999999, random_state=42).fit(X, y)
    est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
    assert_array_almost_equal(est_en.coef_, est_l1.coef_)
    # test if elasticnet with l1_ratio near 0 gives same result as pure l2
    est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
                           l1_ratio=0.0000000001, random_state=42).fit(X, y)
    est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
    assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
    # NOTE(review): "overlow" is a typo for "overflow", but renaming would
    # change the public test name, so it is only flagged here.
    with np.errstate(all='raise'):
        # Generate some weird data with hugely unscaled features
        rng = np.random.RandomState(0)
        n_samples = 100
        n_features = 10
        X = rng.normal(size=(n_samples, n_features))
        X[:, :2] *= 1e300
        assert_true(np.isfinite(X).all())
        # Use MinMaxScaler to scale the data without introducing a numerical
        # instability (computing the standard deviation naively is not possible
        # on this data)
        X_scaled = MinMaxScaler().fit_transform(X)
        assert_true(np.isfinite(X_scaled).all())
        # Define a ground truth on the scaled data
        ground_truth = rng.normal(size=n_features)
        y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
        assert_array_equal(np.unique(y), [0, 1])
        model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
        # smoke test: model is stable on scaled data
        model.fit(X_scaled, y)
        assert_true(np.isfinite(model.coef_).all())
        # model is numerically unstable on unscaled data
        msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
                     " Scaling input data with StandardScaler or MinMaxScaler"
                     " might help.")
        assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
    # Non regression test case for numerical stability on scaled problems
    # where the gradient can still explode with some losses
    model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
                          penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
                          eta0=0.001, random_state=0)
    # errstate(all='raise') turns any FP warning into an exception, so a
    # clean fit proves no overflow/underflow happened.
    with np.errstate(all='raise'):
        model.fit(iris.data, iris.target)
    assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
    # Non regression tests for numerical stability issues caused by large
    # regularization parameters
    for penalty in ['l2', 'l1', 'elasticnet']:
        model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
                              n_iter=5, penalty=penalty, shuffle=False)
        with np.errstate(all='raise'):
            model.fit(iris.data, iris.target)
        # Huge alpha must shrink every weight all the way to zero.
        assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from collections import namedtuple, OrderedDict
from tyr_error import TyrSyntaxError as LexerError
from tyr_ttt import iteritems
class Token(namedtuple('Token', ['name', 'value', 'lineno', 'column'])):
    """Immutable lexer token: (name, value, lineno, column)."""

    def __repr__(self):
        # Render exactly like the underlying 4-tuple.
        return repr(tuple(self))
def decode_str(s):
    """Strip the surrounding quotes from *s* and expand the recognized
    backslash escapes (\\r, \\n, \\t, \\\\, \\', \\")."""
    escapes = {
        'r': '\r',
        'n': '\n',
        't': '\t',
        '\\': '\\',
        '"': '"',
        "'": "'",
    }
    pattern = re.compile(r'\\(r|n|t|\\|\'|")')

    def expand(match):
        key = match.group(1)[0]
        if key in escapes:
            return escapes[key]
        # Unreachable in practice: the pattern only matches known escapes.
        raise Exception('Unknown escape character {}'.format(key))

    # s[1:-1] drops the opening and closing quote characters.
    return pattern.sub(expand, s[1:-1])
def decode_num(s):
    """Parse a numeric literal: an int when possible, a float otherwise."""
    try:
        value = int(s)
    except ValueError:
        # Not a valid integer literal; it must be a float like '1.5'.
        value = float(s)
    return value
class Lexer(object):
    """Regex-driven line tokenizer producing Token tuples, with
    INDENT/DEDENT tokens synthesized from leading whitespace."""

    # Ordered (name, pattern) pairs; earlier alternatives win on ties, so
    # multi-char operators must precede their single-char prefixes.
    rules = [
        ('COMMENT', r'#.*'),
        ('STRING', r'"((\\"|[^"])*)"'),
        ('STRING', r"'((\\'|[^'])*)'"),
        ('NUMBER', r'\d+\.\d+'),
        ('NUMBER', r'\d+'),
        ('NAME', r'[a-zA-Z_]\w*'),
        ('WHITESPACE', '[ \t]+'),
        ('NEWLINE', r'\n+'),
        ('OPERATOR', r'[\+\-\*\/%]'),
        ('OPERATOR', r'<=|>=|==|!=|<|>'),
        ('OPERATOR', r'\|\||&&'),
        ('OPERATOR', r'\.\.\.|\.\.'),
        ('OPERATOR', '!'),
        ('ASSIGN', '='),
        ('LPARAM', r'\('),
        ('RPARAM', r'\)'),
        ('LBRACK', r'\['),
        ('RBRACK', r'\]'),
        ('LCBRACK', r'{'),
        ('RCBRACK', r'}'),
        ('COLON', ':'),
        ('COMMA', ','),
    ]
    # NAME tokens matching these words are re-tagged as keyword tokens.
    keywords = {
        'func': 'FUNCTION',
        'return': 'RETURN',
        'else': 'ELSE',
        'elif': 'ELIF',
        'if': 'IF',
        'while': 'WHILE',
        'break': 'BREAK',
        'continue': 'CONTINUE',
        'for': 'FOR',
        'in': 'IN',
        'match': 'MATCH',
        'when': 'WHEN',
    }
    # Matched but never emitted.
    ignore_tokens = ['WHITESPACE', 'COMMENT']
    # Converters applied to the raw lexeme before emitting the token.
    decodes = {'STRING': decode_str, 'NUMBER': decode_num}

    def __init__(self):
        self.source_lines = []
        self.regex = self.compile_rules(self.rules)

    def compile_rules(self, rules):
        """Compile all rules into one alternation regex."""
        return re.compile('|'.join(self.convert_rules(rules)))

    def convert_rules(self, rules):
        """Yield one named group '(?P<NAME>pat1|pat2|...)' per rule name,
        merging rules that share a name (group names must be unique)."""
        grouped_rules = OrderedDict()
        for name, pattern in rules:
            grouped_rules.setdefault(name, [])
            grouped_rules[name].append(pattern)
        for name, patterns in iteritems(grouped_rules):
            joined_patterns = '|'.join(['({})'.format(p) for p in patterns])
            yield '(?P<{}>{})'.format(name, joined_patterns)

    def tokenize_line(self, line, lineno):
        """Yield the tokens of a single (already dedented) line.

        Raises LexerError on the first character no rule matches.
        """
        pos = 0
        while pos < len(line):
            matches = self.regex.match(line, pos)
            if matches is not None:
                # lastgroup names the rule that matched.
                name = matches.lastgroup
                pos = matches.end(name)
                if name not in self.ignore_tokens:
                    value = matches.group(name)
                    if name in self.decodes:
                        value = self.decodes[name](value)
                    elif name == 'NAME' and value in self.keywords:
                        # Keywords carry no value, only their tag.
                        name = self.keywords[value]
                        value = None
                    # Column is 1-based, relative to the dedented line.
                    yield Token(name, value, lineno, matches.start() + 1)
            else:
                # NOTE(review): "Unexcepted" is a typo for "Unexpected" in
                # this user-facing message (left unchanged here).
                raise LexerError('Unexcepted character {}'.format(line[pos]), lineno, pos + 1)

    def count_leading_characters(self, line, char):
        """Return how many copies of *char* prefix *line*."""
        count = 0
        for c in line:
            if c != char:
                break
            count += 1
        return count

    def detect_indent(self, line):
        """Return the leading whitespace run of an indented line, or None
        for a line that starts with a non-whitespace character."""
        if line[0] in [' ', '\t']:
            return line[0] * self.count_leading_characters(line, line[0])

    def tokenize(self, s):
        """Tokenize a whole source string, inserting INDENT/DEDENT tokens
        on indentation changes and NEWLINE after each non-blank line."""
        # The first indented line fixes the indent unit for the whole file.
        indent_symbol = None
        tokens = []
        last_indent_level = 0
        lineno = 0
        for lineno, line in enumerate(s.splitlines()):
            lineno += 1
            line = line.rstrip()
            if not line:
                # Keep blank lines in source_lines so linenos stay aligned.
                self.source_lines.append('')
                continue
            if indent_symbol is None:
                indent_symbol = self.detect_indent(line)
            if indent_symbol is not None:
                # NOTE(review): count() scans the whole line, so a string
                # containing the indent unit could inflate the level —
                # confirm inputs never hit this.
                indent_level = line.count(indent_symbol)
                line = line[indent_level * len(indent_symbol):]
            else:
                indent_level = 0
            self.source_lines.append(line)
            line_tokens = list(self.tokenize_line(line, lineno))
            if line_tokens:
                if indent_level != last_indent_level:
                    if indent_level > last_indent_level:
                        tokens.extend([Token('INDENT', None, lineno, 0)] * (indent_level - last_indent_level))
                    elif indent_level < last_indent_level:
                        tokens.extend([Token('DEDENT', None, lineno, 0)] * (last_indent_level - indent_level))
                    last_indent_level = indent_level
                tokens.extend(line_tokens)
                tokens.append(Token('NEWLINE', None, lineno, len(line) + 1))
        # Close any indentation still open at EOF.
        if last_indent_level > 0:
            tokens.extend([Token('DEDENT', None, lineno, 0)] * last_indent_level)
        return tokens
class TokenStream(object):
    """Sequential cursor over the token list produced by Lexer.tokenize."""

    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def consume_expected(self, *args):
        """Consume one token per expected name, in order.

        Returns the last consumed token; raises LexerError on the first
        token whose name does not match.
        """
        token = None
        for expected_name in args:
            token = self.consume()
            if token.name != expected_name:
                raise LexerError('Expected {}, got {}'.format(expected_name, token.name), token.lineno, token.column)
        return token

    def consume(self):
        """Return the current token and advance the cursor."""
        token = self.current()
        self.pos += 1
        return token

    def current(self):
        """Return the token at the cursor without advancing.

        Raises LexerError when the cursor has run past the end of input.
        """
        # BUG FIX: the original used a bare ``except:``, which also caught
        # KeyboardInterrupt/SystemExit and masked unrelated errors; only an
        # IndexError here means "out of tokens".
        try:
            return self.tokens[self.pos]
        except IndexError:
            if self.tokens:
                last_token = self.tokens[-1]
                raise LexerError('Unexpected end of input', last_token.lineno, last_token.column)
            # BUG FIX: an empty stream used to crash with a secondary
            # IndexError from self.tokens[-1] inside the handler.
            raise LexerError('Unexpected end of input', 0, 0)

    def expect_end(self):
        """Raise LexerError unless every token has been consumed."""
        if self.pos != len(self.tokens):
            token = self.current()
            # "End expected" fixes the original "End excepted" typo.
            raise LexerError('End expected', token.lineno, token.column)

    def is_end(self):
        """Return True when the cursor has consumed all tokens."""
        return self.pos == len(self.tokens)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StructuredTensorSpec."""
from absl.testing import parameterized
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import row_partition
from tensorflow.python.ops.structured import structured_tensor
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor
from tensorflow.python.ops.structured.structured_tensor import StructuredTensorSpec
from tensorflow.python.platform import googletest
# TypeSpecs consts for fields types.
# Naming encodes the shape: T_<dims> is a dense TensorSpec, R_<dims> is a
# RaggedTensorSpec (N marks a ragged/unknown dimension).
T_3 = tensor_spec.TensorSpec([3])
T_1_2 = tensor_spec.TensorSpec([1, 2])
T_1_2_8 = tensor_spec.TensorSpec([1, 2, 8])
T_1_2_3_4 = tensor_spec.TensorSpec([1, 2, 3, 4])
T_2_3 = tensor_spec.TensorSpec([2, 3])
R_1_N = ragged_tensor.RaggedTensorSpec([1, None])
R_1_N_N = ragged_tensor.RaggedTensorSpec([1, None, None])
R_2_1_N = ragged_tensor.RaggedTensorSpec([2, 1, None])
# TensorSpecs for nrows & row_splits in the _to_components encoding.
NROWS_SPEC = tensor_spec.TensorSpec([], dtypes.int64)
PARTITION_SPEC = row_partition.RowPartitionSpec()
# pylint: disable=g-long-lambda
@test_util.run_all_in_graph_and_eager_modes
class StructuredTensorSpecTest(test_util.TensorFlowTestCase,
                               parameterized.TestCase):
  """Tests StructuredTensorSpec: construction, serialization, component
  encoding, and batching/unbatching."""

  # TODO(edloper): Add a subclass of TensorFlowTestCase that overrides
  # assertAllEqual etc to work with StructuredTensors.
  def assertAllEqual(self, a, b, msg=None):
    # Structural equality for StructuredTensors: same shape repr, same
    # field names, and (recursively) equal field values.
    if not (isinstance(a, structured_tensor.StructuredTensor) or
            isinstance(b, structured_tensor.StructuredTensor)):
      return super(StructuredTensorSpecTest, self).assertAllEqual(a, b, msg)
    if not (isinstance(a, structured_tensor.StructuredTensor) and
            isinstance(b, structured_tensor.StructuredTensor)):
      # TODO(edloper) Add support for this once structured_factory_ops is added.
      raise ValueError('Not supported yet')
    self.assertEqual(repr(a.shape), repr(b.shape))
    self.assertEqual(set(a.field_names()), set(b.field_names()))
    for field in a.field_names():
      self.assertAllEqual(a.field_value(field), b.field_value(field))

  def assertAllTensorsEqual(self, x, y):
    # Compare two dicts of tensors key-by-key.
    assert isinstance(x, dict) and isinstance(y, dict)
    self.assertEqual(set(x), set(y))
    for key in x:
      self.assertAllEqual(x[key], y[key])

  def testConstruction(self):
    spec1_fields = dict(a=T_1_2_3_4)
    spec1 = StructuredTensorSpec([1, 2, 3], spec1_fields)
    self.assertEqual(spec1._shape, (1, 2, 3))
    self.assertEqual(spec1._field_specs, spec1_fields)
    # Specs may nest: field 's' is itself a StructuredTensorSpec.
    spec2_fields = dict(a=T_1_2, b=T_1_2_8, c=R_1_N, d=R_1_N_N, s=spec1)
    spec2 = StructuredTensorSpec([1, 2], spec2_fields)
    self.assertEqual(spec2._shape, (1, 2))
    self.assertEqual(spec2._field_specs, spec2_fields)

  @parameterized.parameters([
      (None, {}, r"StructuredTensor's shape must have known rank\."),
      ([], None, r'field_specs must be a dictionary\.'),
      ([], {1: tensor_spec.TensorSpec(None)},
       r'field_specs must be a dictionary with string keys\.'),
      ([], {'x': 0},
       r'field_specs must be a dictionary with TypeSpec values\.'),
  ])
  def testConstructionErrors(self, shape, field_specs, error):
    # Each invalid (shape, field_specs) pair must raise TypeError with the
    # given message pattern.
    with self.assertRaisesRegex(TypeError, error):
      structured_tensor.StructuredTensorSpec(shape, field_specs)

  def testValueType(self):
    spec1 = StructuredTensorSpec([1, 2, 3], dict(a=T_1_2))
    self.assertEqual(spec1.value_type, StructuredTensor)

  @parameterized.parameters([
      (StructuredTensorSpec([1, 2, 3], {}),
       (tensor_shape.TensorShape([1, 2, 3]), {})),
      (StructuredTensorSpec([], {'a': T_1_2}),
       (tensor_shape.TensorShape([]), {'a': T_1_2})),
      (StructuredTensorSpec([1, 2], {'a': T_1_2, 'b': R_1_N}),
       (tensor_shape.TensorShape([1, 2]), {'a': T_1_2, 'b': R_1_N})),
      (StructuredTensorSpec([], {'a': T_1_2}),
       (tensor_shape.TensorShape([]), {'a': T_1_2})),
  ])  # pyformat: disable
  def testSerialize(self, spec, expected):
    serialization = spec._serialize()
    # Note that we can only use assertEqual because none of our cases include
    # a None dimension. A TensorShape with a None dimension is never equal
    # to another TensorShape.
    self.assertEqual(serialization, expected)

  @parameterized.parameters([
      (StructuredTensorSpec([1, 2, 3], {}),
       ({}, NROWS_SPEC, (PARTITION_SPEC, PARTITION_SPEC))),
      (StructuredTensorSpec([], {'a': T_1_2}),
       ({'a': T_1_2}, (), ())),
      (StructuredTensorSpec([1, 2], {'a': T_1_2, 'b': R_1_N}),
       ({'a': T_1_2, 'b': R_1_N}, NROWS_SPEC, (PARTITION_SPEC,))),
      (StructuredTensorSpec([], {'a': T_1_2}),
       ({'a': T_1_2}, (), ())),
  ])  # pyformat: disable
  def testComponentSpecs(self, spec, expected):
    # Components are (field_specs, nrows_spec, row_partition_specs);
    # scalar specs have empty nrows/partitions.
    self.assertEqual(spec._component_specs, expected)

  @parameterized.parameters([
      {
          'shape': [],
          'fields': dict(x=[[1.0, 2.0]]),
          'field_specs': dict(x=T_1_2),
      },
      {
          'shape': [2],
          'fields': dict(
              a=ragged_factory_ops.constant_value([[1.0], [2.0, 3.0]]),
              b=[[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]),
          'field_specs': dict(a=R_1_N, b=T_2_3),
      },
  ])  # pyformat: disable
  def testToFromComponents(self, shape, fields, field_specs):
    # Round-trip: value -> components -> value must be lossless.
    struct = StructuredTensor.from_fields(fields, shape)
    spec = StructuredTensorSpec(shape, field_specs)
    actual_components = spec._to_components(struct)
    self.assertLen(actual_components, 3)
    self.assertAllTensorsEqual(actual_components[0], fields)
    rt_reconstructed = spec._from_components(actual_components)
    self.assertAllEqual(struct, rt_reconstructed)

  def testToFromComponentsEmptyScalar(self):
    # A scalar StructuredTensor with no fields encodes as ({}, (), ()).
    struct = StructuredTensor.from_fields(fields={}, shape=[])
    spec = struct._type_spec
    components = spec._to_components(struct)
    rt_reconstructed = spec._from_components(components)
    self.assertAllEqual(struct, rt_reconstructed)
    self.assertEqual(components, ({}, (), ()))

  def testToFromComponentsEmptyTensor(self):
    # A rank-3 fieldless StructuredTensor still carries its shape through
    # nrows plus one RowPartition per extra dimension.
    struct = StructuredTensor.from_fields(fields={}, shape=[1, 2, 3])
    spec = struct._type_spec
    components = spec._to_components(struct)
    rt_reconstructed = spec._from_components(components)
    self.assertAllEqual(struct, rt_reconstructed)
    self.assertLen(components, 3)
    fields, nrows, row_partitions = components
    self.assertEmpty(fields)
    self.assertAllEqual(nrows, 1)
    self.assertLen(row_partitions, 2)
    self.assertIsInstance(row_partitions[0], row_partition.RowPartition)
    self.assertIsInstance(row_partitions[1], row_partition.RowPartition)
    self.assertAllEqual(row_partitions[0].row_splits(), [0, 2])
    self.assertAllEqual(row_partitions[1].row_splits(), [0, 3, 6])

  @parameterized.parameters([
      {
          'unbatched': StructuredTensorSpec([], {}),
          'batch_size': 5,
          'batched': StructuredTensorSpec([5], {}),
      },
      {
          'unbatched': StructuredTensorSpec([1, 2], {}),
          'batch_size': 5,
          'batched': StructuredTensorSpec([5, 1, 2], {}),
      },
      {
          'unbatched': StructuredTensorSpec([], dict(a=T_3, b=R_1_N)),
          'batch_size': 2,
          'batched': StructuredTensorSpec([2], dict(a=T_2_3, b=R_2_1_N)),
      }
  ])  # pyformat: disable
  def testBatchUnbatch(self, unbatched, batch_size, batched):
    # _batch/_unbatch on specs must be exact inverses for these cases.
    self.assertEqual(unbatched._batch(batch_size), batched)
    self.assertEqual(batched._unbatch(), unbatched)

  @parameterized.parameters([
      {
          'unbatched': lambda: [
              StructuredTensor.from_fields({'a': 1, 'b': [5, 6]}),
              StructuredTensor.from_fields({'a': 2, 'b': [7, 8]})],
          'batch_size': 2,
          'batched': lambda: StructuredTensor.from_fields(shape=[2], fields={
              'a': [1, 2],
              'b': [[5, 6], [7, 8]]}),
      },
      {
          'unbatched': lambda: [
              StructuredTensor.from_fields(shape=[3], fields={
                  'a': [1, 2, 3],
                  'b': [[5, 6], [6, 7], [7, 8]]}),
              StructuredTensor.from_fields(shape=[3], fields={
                  'a': [2, 3, 4],
                  'b': [[2, 2], [3, 3], [4, 4]]})],
          'batch_size': 2,
          'batched': lambda: StructuredTensor.from_fields(shape=[2, 3], fields={
              'a': [[1, 2, 3], [2, 3, 4]],
              'b': [[[5, 6], [6, 7], [7, 8]],
                    [[2, 2], [3, 3], [4, 4]]]}),
      },
      {
          'unbatched': lambda: [
              StructuredTensor.from_fields(shape=[], fields={
                  'a': 1,
                  'b': StructuredTensor.from_fields({'x': [5]})}),
              StructuredTensor.from_fields(shape=[], fields={
                  'a': 2,
                  'b': StructuredTensor.from_fields({'x': [6]})})],
          'batch_size': 2,
          'batched': lambda: StructuredTensor.from_fields(shape=[2], fields={
              'a': [1, 2],
              'b': StructuredTensor.from_fields(shape=[2], fields={
                  'x': [[5], [6]]})}),
      },
      {
          'unbatched': lambda: [
              StructuredTensor.from_fields(shape=[], fields={
                  'Ragged3d': ragged_factory_ops.constant_value([[1, 2], [3]]),
                  'Ragged2d': ragged_factory_ops.constant_value([1]),
              }),
              StructuredTensor.from_fields(shape=[], fields={
                  'Ragged3d': ragged_factory_ops.constant_value([[1]]),
                  'Ragged2d': ragged_factory_ops.constant_value([2, 3]),
              })],
          'batch_size': 2,
          'batched': lambda: StructuredTensor.from_fields(shape=[2], fields={
              'Ragged3d': ragged_factory_ops.constant_value(
                  [[[1, 2], [3]], [[1]]]),
              'Ragged2d': ragged_factory_ops.constant_value([[1], [2, 3]]),
          }),
          'use_only_batched_spec': True,
      },
  ])  # pyformat: disable
  def testBatchUnbatchValues(self, unbatched, batch_size, batched,
                             use_only_batched_spec=False):
    batched = batched()  # Deferred init because it creates tensors.
    unbatched = unbatched()  # Deferred init because it creates tensors.
    # Test batching.
    if use_only_batched_spec:
      unbatched_spec = type_spec.type_spec_from_value(batched)._unbatch()
    else:
      unbatched_spec = type_spec.type_spec_from_value(unbatched[0])
    unbatched_tensor_lists = [unbatched_spec._to_tensor_list(st)
                              for st in unbatched]
    # Stack each component across examples, then decode as one batch.
    batched_tensor_list = [array_ops.stack(tensors)
                           for tensors in zip(*unbatched_tensor_lists)]
    actual_batched = unbatched_spec._batch(batch_size)._from_tensor_list(
        batched_tensor_list)
    self.assertTrue(
        unbatched_spec._batch(batch_size).is_compatible_with(actual_batched))
    self.assertAllEqual(actual_batched, batched)
    # Test unbatching
    batched_spec = type_spec.type_spec_from_value(batched)
    batched_tensor_list = batched_spec._to_batched_tensor_list(batched)
    unbatched_tensor_lists = zip(
        *[array_ops.unstack(tensor) for tensor in batched_tensor_list])
    actual_unbatched = [
        batched_spec._unbatch()._from_tensor_list(tensor_list)
        for tensor_list in unbatched_tensor_lists]
    self.assertLen(actual_unbatched, len(unbatched))
    for st in actual_unbatched:
      self.assertTrue(batched_spec._unbatch().is_compatible_with(st))
    for (actual, expected) in zip(actual_unbatched, unbatched):
      self.assertAllEqual(actual, expected)
# Run the test suite when executed directly.
if __name__ == '__main__':
  googletest.main()
| |
import collections
import collections.abc

import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import learning_curve
def draw_learning_curve(estimator, X, y, ylim=None, cv=None, scoring=None,
                        train_sizes=np.linspace(.2, 1.0, 5),
                        train_axis='n_examples', estimator_titles=None,
                        n_jobs=1):
    """
    Create a learning curve to help us determine if we are overfitting or
    underfitting. This is a wrapper over sklearn's excellent learning_curve
    function that adds useful capabilities like multiple estimators and
    automatically creates a plot, hopefully reducing boilerplate code. Returns
    a matplotlib.pyplot object. Don't forget to call plt.show() if needed.
    Parameters
    ----------
    estimator: sklearn estimator object type that implements "fit" and "predict"
        as expected from sklearn, or array-like of estimator objects
        Use an array-like if you want to pass multiple estimators to the
        same plot. If passing multiple estimators, scoring must be
        specified.
    X: array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y: array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim: tuple, shape (ymin, ymax), optional
        Defines min and max y-values plotted
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
    scoring: string, indicating sklearn scoring nickname from sklearn.metrics
        This will be the name of the y-axis on the returned plot if specified
    train_sizes: array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    train_axis: string, either 'n_examples' or 'per_examples'
        Indicates what should be used on the x-axis of the returned plot.
    estimator_titles: array_like shape (n_estimators)
        Indicates the title for each estimator to be used in plotting and added
        to the legend. This is useful to distinguish between multiple models on
        the same learning curve. This should always be specified when using
        multiple estimators on the same curve; otherwise, the plot will be hard
        to read. (default: None)
    n_jobs: integer, optional
        Number of jobs to run in parallel. (default: 1)
    Returns
    -------
    plt: matplotlib.pyplot object for further editing by user. Don't forget to
        use plt.show() or matplotlib inline if necessary.
    Raises
    ------
    TypeError
        If scoring is not given, or if multiple estimators are passed
        without matching estimator_titles.
    """
    # TODO: test cases / error checking
    plt.figure()
    if ylim is not None:
        plt.ylim(*ylim)
    # copy sizes in case we need them later (learning_curve reassigns
    # train_sizes below)
    per_examples = np.copy(train_sizes)
    if train_axis == 'n_examples':
        plt.xlabel('Number of training examples used')
    elif train_axis == 'per_examples':
        plt.xlabel('Percent of training examples used')
    if scoring is not None:
        plt.ylabel(scoring.capitalize())
    # we don't want users unintentionally using default scoring
    else:
        # BUG FIX: the original message was built with a backslash
        # continuation inside the literal, embedding a run of indentation
        # spaces in the user-visible text.
        raise TypeError('scoring argument must be specified with a string '
                        'indicating the name of the scoring function, '
                        'such as accuracy')
    # if multiple estimators passed
    # BUG FIX: collections.Sequence was removed in Python 3.10; the ABC
    # lives in collections.abc (available since Python 3.3).
    sequence_types = (collections.abc.Sequence, np.ndarray)
    if isinstance(estimator, sequence_types):
        if not isinstance(estimator_titles, sequence_types):
            # BUG FIX: same continuation-in-literal problem, plus the
            # missing space after the comma ("estimators,you").
            raise TypeError('When giving an array of estimators, you must '
                            'specify names for each of the estimators '
                            'with estimator_titles')
        # get number estimators for color setting
        N = len(estimator)
        color = plt.cm.rainbow(np.linspace(0, 1, N))
        for ind, est in enumerate(estimator):
            train_sizes, train_scores, test_scores = learning_curve(
                est, X, y, cv=cv, n_jobs=n_jobs, scoring=scoring,
                train_sizes=train_sizes)
            _plot_lc(train_sizes, train_scores, test_scores, train_axis,
                     scoring, per_examples, color=color[ind],
                     est_title=estimator_titles[ind])
    # if only 1 estimator
    else:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=cv, n_jobs=n_jobs, scoring=scoring,
            train_sizes=train_sizes)
        _plot_lc(train_sizes, train_scores, test_scores, train_axis, scoring,
                 per_examples)
    # add grid and legend
    plt.grid()
    plt.legend(loc="best")
    return plt
def _plot_lc(train_sizes, train_scores, test_scores, train_axis, scoring,
             per_examples, color=None, est_title=None, std_bands=True):
    """
    Add one learning curve (training + cross-validation score) to the
    current matplotlib figure.

    Helper for the module's public learning-curve drawing function, which
    creates the figure, axes labels and legend; this function only draws
    the score curves (and optional std-deviation bands).

    Parameters
    ----------
    train_sizes: array-like, shape (n_ticks,), dtype float or int
        Numbers of training examples for each curve point, as returned by
        sklearn's ``learning_curve``.
    train_scores: array-like
        Scores on the training set, from sklearn's ``learning_curve``.
    test_scores: array-like
        Scores on the test (cross-validation) set, from sklearn's
        ``learning_curve``.
    train_axis: string, either 'n_examples' or 'per_examples'
        Selects absolute counts or percentages for the x-axis values.
    scoring: string, sklearn scoring nickname
        Known regression losses are negated so the plot shows positive
        values.
    per_examples: array-like
        Percentages matching ``train_sizes``; used as x-values when
        ``train_axis == 'per_examples'``.
    color: cm.rainbow color entry, optional
        Curve colour; only used when plotting multiple estimators.
    est_title: string, optional
        Estimator name prefixed to the legend labels; only used when
        plotting multiple estimators.
    std_bands: boolean
        Whether to draw +/- one standard deviation bands. (default: True)

    Returns
    -------
    plt: the module-level matplotlib.pyplot object with curves added
    """
    # account for percentage train size
    # this is dirty but works
    if train_axis == 'per_examples':
        train_sizes = per_examples
    # sklearn reports these losses as negative values; flip the sign so
    # the plot shows positive error magnitudes
    if scoring in ['mean_absolute_error', 'mean_squared_error',
                   'median_absolute_error']:
        train_scores, test_scores = train_scores * -1.0, test_scores * -1.0
    # mean and spread across CV folds, one value per training size
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    if est_title is None:
        # single-estimator mode: fixed red (train) / green (CV) scheme
        plt.plot(train_sizes, train_scores_mean, 'o--', color="r",
                 label="Training score")
        plt.plot(train_sizes, test_scores_mean, 'x-', color="g",
                 label="Cross-validation score")
        if std_bands:
            plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                             train_scores_mean + train_scores_std, alpha=0.1,
                             color='r')
            plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                             test_scores_mean + test_scores_std, alpha=0.1,
                             color='g')
    else:
        # multi-estimator mode: caller supplies the colour; the CV curve
        # uses a slightly scaled copy (color * 0.99) so the two curves of
        # one estimator remain distinguishable
        plt.plot(train_sizes, train_scores_mean, 'o--',
                 label=est_title + " Training score", color=color)
        plt.plot(train_sizes, test_scores_mean, 'x-',
                 label=est_title + " Cross-validation score", color=color*0.99)
        if std_bands:
            plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                             train_scores_mean + train_scores_std, alpha=0.1,
                             color=color)
            plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                             test_scores_mean + test_scores_std, alpha=0.1,
                             color=color*0.99)
    return plt
| |
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stack object."""
from oslo_log import log as logging
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base
from heat.objects import fields as heat_fields
from heat.objects import raw_template
from heat.objects import stack_tag
LOG = logging.getLogger(__name__)
class Stack(
    heat_base.HeatObject,
    base.VersionedObjectDictCompat,
    base.ComparableVersionedObject,
):
    """Versioned object for a Heat stack database record.

    Wraps a row of the stacks table with oslo.versionedobjects field
    handling (dict-style access) and delegates all persistence to
    heat.db.sqlalchemy.api.
    """

    fields = {
        'id': fields.StringField(),
        'name': fields.StringField(),
        'raw_template_id': fields.IntegerField(),
        'backup': fields.BooleanField(),
        'created_at': fields.DateTimeField(read_only=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'disable_rollback': fields.BooleanField(),
        'nested_depth': fields.IntegerField(),
        'owner_id': fields.StringField(nullable=True),
        'stack_user_project_id': fields.StringField(nullable=True),
        'tenant': fields.StringField(nullable=True),
        'timeout': fields.IntegerField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'user_creds_id': fields.StringField(nullable=True),
        'username': fields.StringField(nullable=True),
        'action': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'raw_template_obj': fields.ObjectField('RawTemplate'),
        'convergence': fields.BooleanField(),
        'current_traversal': fields.StringField(),
        'current_deps': heat_fields.JsonField(),
        'prev_raw_template_id': fields.IntegerField(),
        'prev_raw_template': fields.ObjectField('RawTemplate'),
        'parent_resource_name': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, stack, db_stack):
        """Populate ``stack`` fields from the SQLAlchemy row ``db_stack``."""
        for field in stack.fields:
            if field == 'raw_template_obj':
                # Only wrap the template when the DB layer eagerly loaded
                # it; otherwise the lazy raw_template property fetches it.
                raw_template_obj = db_stack.__dict__.get('raw_template')
                if raw_template_obj is not None:
                    # Object is already lazy loaded
                    raw_template_obj = (
                        raw_template.RawTemplate.from_db_object(
                            context,
                            raw_template.RawTemplate(),
                            raw_template_obj))
                    stack._raw_template = raw_template_obj
            else:
                stack[field] = db_stack.__dict__.get(field)
        stack._context = context
        stack.obj_reset_changes()
        return stack

    @property
    def raw_template(self):
        """The stack's RawTemplate; fetched lazily if not eagerly loaded."""
        if hasattr(self, '_raw_template'):
            return self._raw_template
        # Fix: pass the argument to the logger instead of eagerly
        # interpolating with '%', so formatting is deferred until (and
        # unless) the record is actually emitted.
        LOG.warning('Loading a raw_template that should have been '
                    'eagerly loaded for stack id %s', self.id)
        self._raw_template = raw_template.RawTemplate.get_by_id(
            self._context,
            self['raw_template_id'])
        return self._raw_template

    @raw_template.setter
    def raw_template(self, value):
        # Keep the versioned field and the local cache in sync.
        self['raw_template_obj'] = value
        self._raw_template = value

    @classmethod
    def get_root_id(cls, context, stack_id):
        """Return the id of the root stack of the given (nested) stack."""
        return db_api.stack_get_root_id(context, stack_id)

    @classmethod
    def get_by_id(cls, context, stack_id, **kwargs):
        """Return the Stack with the given id, or None if not found."""
        db_stack = db_api.stack_get(context, stack_id, **kwargs)
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_by_name_and_owner_id(cls, context, stack_name, owner_id):
        """Return the Stack with the given name and owner id, or None."""
        db_stack = db_api.stack_get_by_name_and_owner_id(
            context,
            str(stack_name),
            owner_id
        )
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_by_name(cls, context, stack_name):
        """Return the Stack with the given name, or None if not found."""
        db_stack = db_api.stack_get_by_name(context, str(stack_name))
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_all(cls, context, limit=None, sort_keys=None, marker=None,
                sort_dir=None, filters=None,
                show_deleted=False, show_nested=False, show_hidden=False,
                tags=None, tags_any=None, not_tags=None,
                not_tags_any=None, eager_load=False):
        """Yield Stacks matching the given filter/sort/paging parameters."""
        db_stacks = db_api.stack_get_all(
            context,
            limit=limit,
            sort_keys=sort_keys,
            marker=marker,
            sort_dir=sort_dir,
            filters=filters,
            show_deleted=show_deleted,
            show_nested=show_nested,
            show_hidden=show_hidden,
            tags=tags,
            tags_any=tags_any,
            not_tags=not_tags,
            not_tags_any=not_tags_any,
            eager_load=eager_load)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                # A stack removed after the query ran is simply skipped.
                pass

    @classmethod
    def get_all_by_owner_id(cls, context, owner_id):
        """Yield all Stacks directly owned by the given stack id."""
        db_stacks = db_api.stack_get_all_by_owner_id(context, owner_id)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                pass

    @classmethod
    def get_all_by_root_owner_id(cls, context, root_owner_id):
        """Yield all Stacks anywhere under the given root stack id."""
        db_stacks = db_api.stack_get_all_by_root_owner_id(context,
                                                          root_owner_id)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                pass

    @classmethod
    def count_all(cls, context, **kwargs):
        """Return the number of stacks matching the given filters."""
        return db_api.stack_count_all(context, **kwargs)

    @classmethod
    def count_total_resources(cls, context, stack_id):
        """Return the total count of resources in the given stack."""
        return db_api.stack_count_total_resources(context, stack_id)

    @classmethod
    def create(cls, context, values):
        """Create a stack row from ``values`` and return it as a Stack."""
        return cls._from_db_object(context, cls(context),
                                   db_api.stack_create(context, values))

    @classmethod
    def update_by_id(cls, context, stack_id, values):
        """Update and return (boolean) if it was updated.

        Note: the underlying stack_update filters by current_traversal
        and stack_id.
        """
        return db_api.stack_update(context, stack_id, values)

    @classmethod
    def select_and_update(cls, context, stack_id, values, exp_trvsl=None):
        """Update the stack by selecting on traversal ID.

        Uses UPDATE ... WHERE (compare and swap) to catch any concurrent
        update problem.

        If the stack is found with given traversal, it is updated.
        If there occurs a race while updating, only one will succeed and
        other will get return value of False.
        """
        return db_api.stack_update(context, stack_id, values,
                                   exp_trvsl=exp_trvsl)

    @classmethod
    def persist_state_and_release_lock(cls, context, stack_id,
                                       engine_id, values):
        """Persist stack state and release the engine lock atomically."""
        return db_api.persist_state_and_release_lock(context, stack_id,
                                                     engine_id, values)

    @classmethod
    def delete(cls, context, stack_id):
        """Delete the stack with the given id."""
        db_api.stack_delete(context, stack_id)

    def update_and_save(self, values):
        """Update this stack's row; raise NotFound if nothing matched."""
        has_updated = self.__class__.update_by_id(self._context,
                                                  self.id, values)
        if not has_updated:
            raise exception.NotFound(_('Attempt to update a stack with id: '
                                       '%(id)s %(traversal)s %(msg)s') % {
                'id': self.id,
                'traversal': self.current_traversal,
                'msg': 'that does not exist'})

    def __eq__(self, another):
        self.refresh()  # to make test object comparison work well
        return super(Stack, self).__eq__(another)

    def __ne__(self, other):
        return not self.__eq__(other)

    def refresh(self):
        """Re-read this stack from the DB (including soft-deleted rows)."""
        db_stack = db_api.stack_get(
            self._context, self.id, show_deleted=True)
        if db_stack is None:
            message = _('No stack exists with id "%s"') % str(self.id)
            raise exception.NotFound(message)
        return self.__class__._from_db_object(
            self._context,
            self,
            db_stack
        )

    @classmethod
    def encrypt_hidden_parameters(cls, tmpl):
        """Encrypt hidden parameters of the given template in place."""
        raw_template.RawTemplate.encrypt_hidden_parameters(tmpl)

    @classmethod
    def get_status(cls, context, stack_id):
        """Return action and status for the given stack."""
        return db_api.stack_get_status(context, stack_id)

    def identifier(self):
        """Return an identifier for this stack."""
        return identifier.HeatIdentifier(self.tenant, self.name, self.id)

    @property
    def tags(self):
        """StackTagList for this stack, fetched from the DB on access."""
        return stack_tag.StackTagList.get(self._context, self.id)
| |
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# from theano.tensor.shared_randomstreams import RandomStreams
from .. import init
from .. import nonlinearities
from .. import utils
from ..theano_extensions import conv
from ..theano_extensions import padding
_srng = RandomStreams()
## Helper methods
def get_all_layers(layer):
    """
    Collect the given layer and every layer below it in the network.

    Performs a breadth-first traversal following ``input_layers`` /
    ``input_layer`` attributes. Layers already collected are not expanded
    again; the given layer is always first in the returned list.
    """
    collected = [layer]
    frontier = [layer]
    while frontier:
        node = frontier.pop(0)
        if hasattr(node, 'input_layers'):
            below = node.input_layers
        elif hasattr(node, 'input_layer'):
            below = [node.input_layer]
        else:
            below = []
        # skip anything we have already collected
        fresh = [child for child in below if child not in collected]
        frontier.extend(fresh)
        collected.extend(fresh)
    return collected
def get_all_params(layer):
    """Collect the deduplicated parameters of every layer below `layer`."""
    params = []
    for lyr in get_all_layers(layer):
        params.extend(lyr.get_params())
    return utils.unique(params)
def get_all_bias_params(layer):
    """Collect the deduplicated bias parameters of every layer below `layer`."""
    biases = []
    for lyr in get_all_layers(layer):
        biases.extend(lyr.get_bias_params())
    return utils.unique(biases)
def get_all_non_bias_params(layer):
    """Return every parameter below `layer` that is not a bias parameter."""
    biases = get_all_bias_params(layer)
    return [p for p in get_all_params(layer) if p not in biases]
def count_params(layer):
    """Total number of scalar entries across all parameters below `layer`."""
    total = 0
    for p in get_all_params(layer):
        # product of the dimensions of the parameter's current value
        total += np.prod(p.get_value().shape)
    return total
## Layer base class
class Layer(object):
    """
    Base class for layers with a single layer below them.

    Subclasses implement get_output_for() (and usually
    get_output_shape_for()); propagation through the network is handled
    here.
    """

    def __init__(self, input_layer):
        self.input_layer = input_layer

    def get_params(self):
        """
        Get all Theano variables that parameterize the layer.
        """
        return []

    def get_bias_params(self):
        """
        Get all Theano variables that are bias parameters for the layer.
        """
        return []

    def get_output_shape(self):
        """Shape of this layer's output, derived from the layer below."""
        below = self.input_layer.get_output_shape()
        return self.get_output_shape_for(below)

    def get_output(self, input=None, *args, **kwargs):
        """
        input can be None, a Theano expression, or a dictionary mapping
        layer instances to Theano expressions.
        """
        if isinstance(input, dict) and self in input:
            # this layer is mapped directly to an expression
            return input[self]
        # otherwise propagate the network input through the layer below
        layer_input = self.input_layer.get_output(input, *args, **kwargs)
        return self.get_output_for(layer_input, *args, **kwargs)

    def get_output_shape_for(self, input_shape):
        # By default the shape is assumed to be preserved, so layers that
        # perform elementwise or otherwise shape-preserving operations only
        # need to implement get_output_for().
        return input_shape

    def get_output_for(self, input, *args, **kwargs):
        raise NotImplementedError

    @staticmethod
    def create_param(param, shape):
        """
        Helper method to create Theano shared variables for
        Layer parameters and to initialize them.

        param: one of three things:
            - a numpy array with the initial parameter values
            - a Theano shared variable
            - a function or callable that takes the desired
              shape of the parameter array as its single argument.
        shape: the desired shape of the parameter array.
        """
        if isinstance(param, np.ndarray):
            if param.shape != shape:
                raise RuntimeError("parameter array has shape %s, should be %s" % (param.shape, shape))
            return theano.shared(param)
        elif isinstance(param, theano.compile.SharedVariable):
            # cannot check shape here, the shared variable might not be initialized correctly yet.
            return param
        elif hasattr(param, '__call__'):
            arr = param(shape)
            if not isinstance(arr, np.ndarray):
                raise RuntimeError("cannot initialize parameters: the provided callable did not return a numpy array")
            return theano.shared(utils.floatX(arr))
        else:
            raise RuntimeError("cannot initialize parameters: 'param' is not a numpy array, a Theano shared variable, or a callable")
class MultipleInputsLayer(Layer):
    """
    Base class for layers that combine the outputs of several layers.

    Subclasses implement get_output_shape_for() and get_output_for(),
    which receive lists (one entry per input layer).
    """

    def __init__(self, input_layers):
        self.input_layers = input_layers

    def get_output_shape(self):
        shapes = [lyr.get_output_shape() for lyr in self.input_layers]
        return self.get_output_shape_for(shapes)

    def get_output(self, input=None, *args, **kwargs):
        """
        input can be None, a Theano expression, or a dictionary mapping
        layer instances to Theano expressions.
        """
        if isinstance(input, dict) and self in input:
            # this layer is mapped directly to an expression
            return input[self]
        # otherwise gather the outputs of all layers below
        gathered = [lyr.get_output(input, *args, **kwargs)
                    for lyr in self.input_layers]
        return self.get_output_for(gathered, *args, **kwargs)

    def get_output_shape_for(self, input_shapes):
        raise NotImplementedError

    def get_output_for(self, inputs, *args, **kwargs):
        raise NotImplementedError
class InputLayer(Layer):
    """
    Entry point of a network: holds the symbolic input variable and the
    fixed shape of the data that will be fed in.
    """

    def __init__(self, shape):
        self.shape = shape
        ndim = len(shape)
        # create the right TensorType for the given number of dimensions
        input_var_type = T.TensorType(theano.config.floatX, [False] * ndim)
        self.input_var = input_var_type("input")

    def get_output_shape(self):
        """Return the shape this layer was constructed with."""
        return self.shape

    def get_output(self, input=None, *args, **kwargs):
        """
        Resolve the network input at this layer.

        input: None (use this layer's own input variable), a Theano
        expression (used directly), or a dict mapping layer instances to
        Theano expressions.

        Raises TypeError for any other input type. (Previously this fell
        through and silently returned None, producing confusing errors
        far away from the actual cause.)
        """
        if input is None:
            return self.input_var
        elif isinstance(input, theano.gof.Variable):
            return input
        elif isinstance(input, dict):
            return input[self]
        else:
            raise TypeError("input must be None, a Theano expression, or a "
                            "dict mapping layers to expressions; got %r"
                            % type(input))
## Layer implementations
class DenseLayer(Layer):
    """Fully connected layer: nonlinearity(input . W + b)."""

    def __init__(self, input_layer, num_units, W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify):
        super(DenseLayer, self).__init__(input_layer)
        # fall back to the identity when no nonlinearity is requested
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)
        self.num_units = num_units
        below_shape = self.input_layer.get_output_shape()
        # everything after the batch axis is flattened into features
        num_inputs = int(np.prod(below_shape[1:]))
        self.W = self.create_param(W, (num_inputs, num_units))
        self.b = self.create_param(b, (num_units,))

    def get_params(self):
        return [self.W, self.b]

    def get_bias_params(self):
        return [self.b]

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], self.num_units)

    def get_output_for(self, input, *args, **kwargs):
        if input.ndim > 2:
            # flatten anything beyond the batch axis into feature vectors
            input = input.flatten(2)
        activation = T.dot(input, self.W) + self.b.dimshuffle('x', 0)
        return self.nonlinearity(activation)
class DropoutLayer(Layer):
    """Randomly zeroes units with probability p while training."""

    def __init__(self, input_layer, p=0.5, rescale=True):
        super(DropoutLayer, self).__init__(input_layer)
        self.p = p              # probability of dropping a unit
        self.rescale = rescale  # scale retained units by 1/(1-p)

    def get_output_for(self, input, deterministic=False, *args, **kwargs):
        # at test time (or with p == 0) dropout is a no-op
        if deterministic or self.p == 0:
            return input
        retain_prob = 1 - self.p
        if self.rescale:
            # scale retained units so the expected activation is unchanged
            input /= retain_prob
        mask = utils.floatX(_srng.binomial(input.shape, p=retain_prob, dtype='int32'))
        return input * mask
dropout = DropoutLayer  # shortcut alias for DropoutLayer
class GaussianNoiseLayer(Layer):
    """Adds zero-mean Gaussian noise to its input while training."""

    def __init__(self, input_layer, sigma=0.1):
        super(GaussianNoiseLayer, self).__init__(input_layer)
        self.sigma = sigma  # standard deviation of the noise

    def get_output_for(self, input, deterministic=False, *args, **kwargs):
        # no noise at test time or when sigma is zero
        if deterministic or self.sigma == 0:
            return input
        noise = _srng.normal(input.shape, avg=0.0, std=self.sigma)
        return input + noise
## Convolutions
class Conv1DLayer(Layer):
    """
    1D convolutional layer.

    Expects input of rank 3; channels are taken from axis 1 and the
    convolved axis is axis 2 (see get_W_shape / get_output_shape_for).
    """
    def __init__(self, input_layer, num_filters, filter_length, stride=1, border_mode="valid", untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 convolution=conv.conv1d_mc0):
        super(Conv1DLayer, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_filters = num_filters
        self.filter_length = filter_length
        self.stride = stride
        self.border_mode = border_mode
        self.untie_biases = untie_biases
        self.convolution = convolution
        self.W = self.create_param(W, self.get_W_shape())
        if self.untie_biases:
            # one bias per filter per output position
            output_shape = self.get_output_shape()
            self.b = self.create_param(b, (num_filters, output_shape[2]))
        else:
            # one bias per filter, shared across all positions
            self.b = self.create_param(b, (num_filters,))

    def get_W_shape(self):
        """Filter tensor shape: (num_filters, input channels, length)."""
        num_input_channels = self.input_layer.get_output_shape()[1]
        return (self.num_filters, num_input_channels, self.filter_length)

    def get_params(self):
        return [self.W, self.b]

    def get_bias_params(self):
        return [self.b]

    def get_output_shape_for(self, input_shape):
        """Output shape depending on the border mode and stride."""
        if self.border_mode == 'valid':
            output_length = (input_shape[2] - self.filter_length) // self.stride + 1
        elif self.border_mode == 'full':
            output_length = (input_shape[2] + self.filter_length) // self.stride - 1
        elif self.border_mode == 'same':
            output_length = input_shape[2] // self.stride
        else:
            raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
        return (input_shape[0], self.num_filters, output_length)

    def get_output_for(self, input, input_shape=None, *args, **kwargs):
        # the optional input_shape argument is for when get_output_for is called
        # directly with a different shape than the output_shape of self.input_layer.
        if input_shape is None:
            input_shape = self.input_layer.get_output_shape()
        filter_shape = self.get_W_shape()
        if self.border_mode in ['valid', 'full']:
            conved = self.convolution(input, self.W, subsample=(self.stride,), image_shape=input_shape,
                                      filter_shape=filter_shape, border_mode=self.border_mode)
        elif self.border_mode == 'same':
            # emulate 'same' with a 'full' convolution plus a centered crop
            conved = self.convolution(input, self.W, subsample=(self.stride,), image_shape=input_shape,
                                      filter_shape=filter_shape, border_mode='full')
            shift = (self.filter_length - 1) // 2
            conved = conved[:, :, shift:input_shape[2] + shift]
        else:
            raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
        if self.untie_biases:
            b_shuffled = self.b.dimshuffle('x', 0, 1)
        else:
            # broadcast the shared bias over the spatial axis
            b_shuffled = self.b.dimshuffle('x', 0, 'x')
        return self.nonlinearity(conved + b_shuffled)
class Conv2DLayer(Layer):
    """
    2D convolutional layer.

    Expects input of rank 4; channels are taken from axis 1 and the
    convolved axes are 2 and 3. NOTE(review): axis 2 is called "width"
    and axis 3 "height" below — confirm against callers before relying
    on the names.
    """
    def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode="valid", untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d):
        super(Conv2DLayer, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_filters = num_filters
        self.filter_size = filter_size
        self.strides = strides
        self.border_mode = border_mode
        self.untie_biases = untie_biases
        self.convolution = convolution
        self.W = self.create_param(W, self.get_W_shape())
        if self.untie_biases:
            # one bias per filter per spatial position
            output_shape = self.get_output_shape()
            self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
        else:
            # one bias per filter, shared over all spatial positions
            self.b = self.create_param(b, (num_filters,))

    def get_W_shape(self):
        """Filter tensor shape: (num_filters, input channels, rows, cols)."""
        num_input_channels = self.input_layer.get_output_shape()[1]
        return (self.num_filters, num_input_channels, self.filter_size[0], self.filter_size[1])

    def get_params(self):
        return [self.W, self.b]

    def get_bias_params(self):
        return [self.b]

    def get_output_shape_for(self, input_shape):
        """Output shape depending on the border mode and strides."""
        if self.border_mode == 'valid':
            output_width = (input_shape[2] - self.filter_size[0]) // self.strides[0] + 1
            output_height = (input_shape[3] - self.filter_size[1]) // self.strides[1] + 1
        elif self.border_mode == 'full':
            output_width = (input_shape[2] + self.filter_size[0]) // self.strides[0] - 1
            output_height = (input_shape[3] + self.filter_size[1]) // self.strides[1] - 1
        elif self.border_mode == 'same':
            output_width = input_shape[2] // self.strides[0]
            output_height = input_shape[3] // self.strides[1]
        else:
            raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
        return (input_shape[0], self.num_filters, output_width, output_height)

    def get_output_for(self, input, input_shape=None, *args, **kwargs):
        # the optional input_shape argument is for when get_output_for is called
        # directly with a different shape than the output_shape of self.input_layer.
        if input_shape is None:
            input_shape = self.input_layer.get_output_shape()
        filter_shape = self.get_W_shape()
        if self.border_mode in ['valid', 'full']:
            conved = self.convolution(input, self.W, subsample=self.strides, image_shape=input_shape,
                                      filter_shape=filter_shape, border_mode=self.border_mode)
        elif self.border_mode == 'same':
            # emulate 'same' with a 'full' convolution plus a centered crop
            conved = self.convolution(input, self.W, subsample=self.strides, image_shape=input_shape,
                                      filter_shape=filter_shape, border_mode='full')
            shift_x = (self.filter_size[0] - 1) // 2
            shift_y = (self.filter_size[1] - 1) // 2
            conved = conved[:, :, shift_x:input_shape[2] + shift_x, shift_y:input_shape[3] + shift_y]
        else:
            raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
        if self.untie_biases:
            b_shuffled = self.b.dimshuffle('x', 0, 1, 2)
        else:
            # broadcast the shared bias over both spatial axes
            b_shuffled = self.b.dimshuffle('x', 0, 'x', 'x')
        return self.nonlinearity(conved + b_shuffled)
# TODO: add Conv3DLayer
## Pooling
class MaxPool2DLayer(Layer):
    """2D max pooling over the two trailing dimensions (axes 2 and 3)."""

    def __init__(self, input_layer, ds, ignore_border=False):
        super(MaxPool2DLayer, self).__init__(input_layer)
        self.ds = ds  # pool size, a tuple
        self.ignore_border = ignore_border

    def get_output_shape_for(self, input_shape):
        # with ignore_border, partial pooling regions at the edge are
        # dropped (floor); otherwise they are kept (ceil)
        rounding = np.floor if self.ignore_border else np.ceil
        shape = list(input_shape)
        shape[2] = int(rounding(float(shape[2]) / self.ds[0]))
        shape[3] = int(rounding(float(shape[3]) / self.ds[1]))
        return tuple(shape)

    def get_output_for(self, input, *args, **kwargs):
        return downsample.max_pool_2d(input, self.ds, self.ignore_border)
# TODO: add reshape-based implementation to MaxPool2DLayer
# TODO: add MaxPool1DLayer
# TODO: add MaxPool3DLayer
class FeaturePoolLayer(Layer):
    """
    Pooling across feature maps. This can be used to implement maxout.
    IMPORTANT: this layer requires that the number of feature maps is
    a multiple of the pool size.
    """
    def __init__(self, input_layer, ds, axis=1, pool_function=T.max):
        """
        ds: the number of feature maps to be pooled together
        axis: the axis along which to pool. The default value of 1 works
            for DenseLayer and Conv*DLayers
        pool_function: the pooling function to use
        """
        super(FeaturePoolLayer, self).__init__(input_layer)
        self.ds = ds
        self.axis = axis
        self.pool_function = pool_function
        num_feature_maps = self.input_layer.get_output_shape()[self.axis]
        if num_feature_maps % self.ds != 0:
            raise RuntimeError("Number of input feature maps (%d) is not a multiple of the pool size (ds=%d)" %
                               (num_feature_maps, self.ds))

    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)  # make a mutable copy
        # pooling divides the pooled axis by the group size
        output_shape[self.axis] = output_shape[self.axis] // self.ds
        return tuple(output_shape)

    def get_output_for(self, input, *args, **kwargs):
        num_feature_maps = input.shape[self.axis]
        num_feature_maps_out = num_feature_maps // self.ds
        # build a shape that splits the pooled axis into (groups, group size)
        pool_shape = ()
        for k in range(self.axis):
            pool_shape += (input.shape[k],)
        pool_shape += (num_feature_maps_out, self.ds)
        for k in range(self.axis + 1, input.ndim):
            pool_shape += (input.shape[k],)
        input_reshaped = input.reshape(pool_shape)
        # pool over the group-size axis, which sits right after self.axis
        return self.pool_function(input_reshaped, axis=self.axis + 1)
class FeatureWTALayer(Layer):
    """
    Perform 'Winner Take All' across feature maps: zero out all but
    the maximal activation value within a group of features.
    IMPORTANT: this layer requires that the number of feature maps is
    a multiple of the pool size.
    """
    def __init__(self, input_layer, ds, axis=1):
        """
        ds: the number of feature maps per group. This is called 'ds'
            for consistency with the pooling layers, even though this
            layer does not actually perform a downsampling operation.
        axis: the axis along which the groups are formed.
        """
        super(FeatureWTALayer, self).__init__(input_layer)
        self.ds = ds
        self.axis = axis
        num_feature_maps = self.input_layer.get_output_shape()[self.axis]
        if num_feature_maps % self.ds != 0:
            raise RuntimeError("Number of input feature maps (%d) is not a multiple of the group size (ds=%d)" %
                               (num_feature_maps, self.ds))

    def get_output_for(self, input, *args, **kwargs):
        num_feature_maps = input.shape[self.axis]
        num_pools = num_feature_maps // self.ds
        # pool_shape splits the grouped axis into (num_pools, ds);
        # arange_shuffle_pattern broadcasts a ds-length arange over every
        # other axis so it can be compared against the argmax indices.
        pool_shape = ()
        arange_shuffle_pattern = ()
        for k in range(self.axis):
            pool_shape += (input.shape[k],)
            arange_shuffle_pattern += ('x',)
        pool_shape += (num_pools, self.ds)
        arange_shuffle_pattern += ('x', 0)
        for k in range(self.axis + 1, input.ndim):
            pool_shape += (input.shape[k],)
            arange_shuffle_pattern += ('x',)
        input_reshaped = input.reshape(pool_shape)
        # position of the winner within each group
        max_indices = T.argmax(input_reshaped, axis=self.axis + 1, keepdims=True)
        arange = T.arange(self.ds).dimshuffle(*arange_shuffle_pattern)
        # mask is 1 exactly at each group's winning position, 0 elsewhere
        mask = T.eq(max_indices, arange).reshape(input.shape)
        return input * mask
## Network in network
class NINLayer(Layer):
    """
    Like DenseLayer, but broadcasting across all trailing dimensions beyond the 2nd.
    This results in a convolution operation with filter size 1 on all trailing dimensions.
    Any number of trailing dimensions is supported, so NINLayer can be used to implement
    1D, 2D, 3D, ... convolutions.
    """
    def __init__(self, input_layer, num_units, untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify):
        super(NINLayer, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_units = num_units
        self.untie_biases = untie_biases
        output_shape = self.input_layer.get_output_shape()
        # input channels come from the 2nd dimension of the layer below
        num_input_channels = output_shape[1]
        self.W = self.create_param(W, (num_input_channels, num_units))
        if self.untie_biases:
            # separate bias for every trailing position
            output_shape = self.get_output_shape()
            self.b = self.create_param(b, (num_units,) + output_shape[2:])
        else:
            self.b = self.create_param(b, (num_units,))

    def get_params(self):
        return [self.W, self.b]

    def get_bias_params(self):
        return [self.b]

    def get_output_shape_for(self, input_shape):
        # channel axis is replaced by num_units; trailing dims unchanged
        return (input_shape[0], self.num_units) + input_shape[2:]

    def get_output_for(self, input, *args, **kwargs):
        out_r = T.tensordot(self.W, input, axes=[[0], [1]])  # cf * bc01... = fb01...
        remaining_dims = range(2, input.ndim)  # input dims to broadcast over
        out = out_r.dimshuffle(1, 0, *remaining_dims)  # fb01... -> bf01...
        if self.untie_biases:
            remaining_dims_biases = range(1, input.ndim - 1)  # no broadcast
        else:
            remaining_dims_biases = ['x'] * (input.ndim - 2)  # broadcast
        b_shuffled = self.b.dimshuffle('x', 0, *remaining_dims_biases)
        return self.nonlinearity(out + b_shuffled)
class GlobalPoolLayer(Layer):
    """
    Pools globally over every dimension after the second, leaving a
    (batch, channels) output.
    """

    def __init__(self, input_layer, pool_function=T.mean):
        super(GlobalPoolLayer, self).__init__(input_layer)
        self.pool_function = pool_function

    def get_output_shape_for(self, input_shape):
        # only the batch and channel dimensions survive
        return input_shape[:2]

    def get_output_for(self, input, *args, **kwargs):
        # collapse all trailing dims into one axis, then pool over it
        flattened = input.flatten(3)
        return self.pool_function(flattened, axis=2)
## Shape modification
class FlattenLayer(Layer):
    """Flattens all trailing dimensions into a single feature axis."""

    def get_output_shape_for(self, input_shape):
        num_features = int(np.prod(input_shape[1:]))
        return (input_shape[0], num_features)

    def get_output_for(self, input, *args, **kwargs):
        return input.flatten(2)
flatten = FlattenLayer  # shortcut alias for FlattenLayer
class ConcatLayer(MultipleInputsLayer):
    """Concatenates the outputs of its input layers along `axis`."""
    def __init__(self, input_layers, axis=1):
        super(ConcatLayer, self).__init__(input_layers)
        self.axis = axis

    def get_output_shape_for(self, input_shapes):
        # the concat axis is the sum of the input sizes; all other
        # dimensions are taken from the first input shape
        sizes = [input_shape[self.axis] for input_shape in input_shapes]
        output_shape = list(input_shapes[0])  # make a mutable copy
        output_shape[self.axis] = sum(sizes)
        return tuple(output_shape)

    def get_output_for(self, inputs, *args, **kwargs):
        # unfortunately the gradient of T.concatenate has no GPU
        # implementation, so we have to do this differently.
        # Else, we could just do:
        # return T.concatenate(inputs, axis=self.axis)
        concat_size = sum(input.shape[self.axis] for input in inputs)
        # symbolic output shape: like the first input, with the concat
        # axis enlarged to hold all inputs
        output_shape = ()
        for k in range(self.axis):
            output_shape += (inputs[0].shape[k],)
        output_shape += (concat_size,)
        for k in range(self.axis + 1, inputs[0].ndim):
            output_shape += (inputs[0].shape[k],)
        out = T.zeros(output_shape)
        # copy each input into its slice of the output tensor
        offset = 0
        for input in inputs:
            indices = ()
            for k in range(self.axis):
                indices += (slice(None),)
            indices += (slice(offset, offset + input.shape[self.axis]),)
            for k in range(self.axis + 1, inputs[0].ndim):
                indices += (slice(None),)
            out = T.set_subtensor(out[indices], input)
            offset += input.shape[self.axis]
        return out
concat = ConcatLayer # shortcut
class PadLayer(Layer):
    """Pad every non-batch dimension with `width` entries of `val` on each side."""
    def __init__(self, input_layer, width, val=0, batch_ndim=2):
        super(PadLayer, self).__init__(input_layer)
        self.width = width
        self.val = val
        self.batch_ndim = batch_ndim

    def get_output_shape_for(self, input_shape):
        # Leading `batch_ndim` dims are untouched; the rest grow by 2*width.
        grow = 2 * self.width
        return tuple(size if k < self.batch_ndim else size + grow
                     for k, size in enumerate(input_shape))

    def get_output_for(self, input, *args, **kwargs):
        return padding.pad(input, self.width, self.val, self.batch_ndim)

pad = PadLayer  # shortcut
| |
import socket
import ctds
from .base import TestExternalDatabase
from .compat import long_, PY3, unicode_
class TestTdsConnection(TestExternalDatabase):
    """Integration tests for `ctds.connect`: docstring stability, argument
    validation, error reporting, and connection-level options against a live
    SQL Server instance (connection parameters come from TestExternalDatabase).
    """

    def test__doc__(self):
        # The docstring is part of the public API surface; compare it verbatim.
        self.assertEqual(
            ctds.connect.__doc__,
            # pylint: disable=line-too-long
            '''\
connect(server, port=1433, instance=None, user='', password='', database=None, \
appname='ctds', hostname=None, login_timeout=5, timeout=5, tds_version=None, \
autocommit=False, ansi_defaults=True, enable_bcp=True, paramstyle=None, \
read_only=False, ntlmv2=False)

Connect to a database.

.. note::

    :py:meth:`ctds.Connection.close` should be called when the returned
    connection object is no longer required.

:pep:`0249#connect`

.. versionadded:: 1.6
    `paramstyle`

.. versionadded:: 1.6
    `read_only`

.. versionadded:: 1.8
    `hostname`

.. versionadded:: 1.8
    `ntlmv2`

:param str server: The database server host.
:param int port: The database server port. This value is ignored if
    `instance` is provided.
:param str instance: An optional database instance to connect to.
:param str user: The database server username.
:param str password: The database server password.
:param str database: An optional database to initially connect to.
:param str appname: An optional application name to associate with
    the connection.
:param str hostname: An optional client host name to associate with
    the connection instead of the local device hostname.
:param int login_timeout: An optional login timeout, in seconds.
:param int timeout: An optional timeout for database requests, in
    seconds.
:param str tds_version: The TDS protocol version to use. If
    :py:data:`None` is specified, the highest version supported by
    FreeTDS will be used.
:param bool autocommit: Autocommit transactions on the connection.
:param bool ansi_defaults: Set `ANSI_DEFAULTS` and related settings to
    mimic ODBC drivers.
:param bool enable_bcp: Enable bulk copy support on the connection. This
    is required for :py:meth:`.bulk_insert` to function.
:param str paramstyle: Override the default :py:data:`ctds.paramstyle` value for
    this connection. Supported values: `numeric`, `named`.
:param bool read_only: Indicate 'read-only' application intent.
:param bool ntlmv2: Enable NTLMv2 authentication.

:return: A new `Connection` object connected to the database.
:rtype: Connection
'''
        )

    def test_arguments(self):
        # Wrongly-typed values for basic keyword arguments must raise TypeError.
        for kwargs in (
                {'port': ''},
                {'port': '1234'},
                {'port': None},
                {'instance': 123},
                {'user': 123},
                {'password': 123},
                {'database': 123},
                {'appname': 123},
                {'login_timeout': ''},
                {'login_timeout': '1234'},
                {'login_timeout': None},
                {'timeout': ''},
                {'timeout': '1234'},
                {'timeout': None},
                {'tds_version': 123},
        ):
            self.assertRaises(
                TypeError,
                ctds.connect,
                '127.0.0.1',  # use an IP to avoid DNS lookup timeouts
                **kwargs
            )

    def test_typeerror(self):
        # Exhaustive TypeError checks for every keyword argument, grouped by
        # the parameter's expected type.
        def string_case(name):
            # Cases for parameters that must be a (unicode) string.
            cases = [
                (('127.0.0.1',), {name: 1234}),
                (('127.0.0.1',), {name: object()}),
            ]
            if PY3:  # pragma: nocover
                # bytes are not accepted as strings on Python 3.
                cases.append((('127.0.0.1',), {name: b'1234'}))
            return cases

        def uint_case(name):
            # Cases for parameters that must be an unsigned integer.
            return [
                (('127.0.0.1',), {name: '1234'}),
                (('127.0.0.1',), {name: unicode_('1234')}),
                (('127.0.0.1',), {name: b'1234'}),
                (('127.0.0.1',), {name: None}),
                (('127.0.0.1',), {name: object()}),
            ]

        def bool_case(name):
            # Cases for parameters that must be a real bool (not merely truthy).
            return [
                (('127.0.0.1',), {name: 'False'}),
                (('127.0.0.1',), {name: 0}),
                (('127.0.0.1',), {name: 1}),
                (('127.0.0.1',), {name: None}),
            ]

        cases = (
            [
                ((None,), {}),
                ((1,), {},),
            ] +
            uint_case('port') +
            string_case('instance') +
            string_case('user') +
            string_case('password') +
            string_case('database') +
            string_case('appname') +
            string_case('hostname') +
            uint_case('login_timeout') +
            uint_case('timeout') +
            string_case('tds_version') +
            bool_case('autocommit') +
            bool_case('ansi_defaults') +
            bool_case('enable_bcp') +
            string_case('paramstyle') +
            bool_case('read_only') +
            bool_case('ntlmv2')
        )

        for args, kwargs in cases:
            try:
                connection = ctds.connect(*args, **kwargs)
                connection.close()  # pragma: nocover
            except TypeError:
                pass
            else:
                self.fail('.connect() did not fail as expected')  # pragma: nocover

    def test_tds_version(self):
        # Unknown protocol versions are rejected with InterfaceError.
        for tds_version in (
                '7',
                '7.13',
                '7.30'
        ):
            try:
                connection = ctds.connect('hostname', tds_version=tds_version)
                connection.close()  # pragma: nocover
            except ctds.InterfaceError as ex:
                self.assertEqual(str(ex), 'unsupported TDS version "{0}"'.format(tds_version))
            else:
                self.fail('.connect() did not fail as expected')  # pragma: nocover

    def test_paramstyle(self):
        # paramstyle is case-sensitive; only `numeric` and `named` are valid.
        for paramstyle in (
                'qmark',
                'NUMERIC',
                'nAmed',
                'unknown'
        ):
            try:
                connection = ctds.connect('hostname', paramstyle=paramstyle)
                connection.close()  # pragma: nocover
            except ctds.InterfaceError as ex:
                self.assertEqual(str(ex), 'unsupported paramstyle "{0}"'.format(paramstyle))
            else:
                self.fail('.connect() did not fail as expected')  # pragma: nocover

    def test_interfaceerror(self):
        # Overlong string arguments are rejected, with the value echoed back.
        for kwargs in (
                {'user': '*' * 256},
                {'password': '*' * 256},
                {'appname': '*' * 256},
                {'hostname': '*' * 256},
        ):
            try:
                connection = ctds.connect('hostname', **kwargs)
                connection.close()  # pragma: nocover
            except ctds.InterfaceError as ex:
                self.assertEqual(str(ex), next(iter(kwargs.values())))
            else:
                self.fail('.connect() did not fail as expected')  # pragma: nocover

    def test_error_unavailable(self):
        # Connecting to a port nothing listens on raises OperationalError
        # with the FreeTDS "server unavailable" diagnostics attached.
        host = '127.0.0.1'  # use an IP to avoid DNS lookup timeouts
        try:
            ctds.connect(
                host,
                login_timeout=1,
                tds_version='7.1',
                port=self.get_option('port', int) + 1000
            )
        except ctds.OperationalError as ex:
            # FreeTDS version 0.95+ adds a (<host>:<port) to this error.
            self.assertTrue(
                'Unable to connect: Adaptive Server is unavailable or does not exist' in str(ex)
            )
            self.assertEqual(ex.severity, 9)
            self.assertEqual(ex.db_error['number'], 20009)
            self.assertTrue(
                'Unable to connect: Adaptive Server is unavailable or does not exist' in ex.db_error['description']
            )
            # Specific errors vary by platform and FreeTDS version.
            self.assertTrue(isinstance(ex.os_error['description'], unicode_))
            self.assertTrue(isinstance(ex.os_error['number'], long_))
            self.assertEqual(ex.last_message, None)
        else:
            self.fail('.connect() did not fail as expected')  # pragma: nocover

    def test_error_login(self):
        # Bad credentials surface the server's "Login failed" message (18456).
        for username, password in (
                (self.get_option('user'), self.get_option('password') + 'invalid'),
                (self.get_option('user') + 'invalid', self.get_option('password')),
        ):
            try:
                ctds.connect(
                    self.get_option('server'),
                    port=self.get_option('port', type_=int),
                    instance=self.get_option('instance'),
                    user=username,
                    password=password,
                    tds_version='7.1'
                )
            except ctds.OperationalError as ex:
                msg = "Login failed for user '{0}'.".format(username)
                self.assertEqual(
                    str(ex),
                    msg
                )
                self.assertEqual(ex.severity, 9)
                self.assertEqual(ex.db_error['number'], 20002)
                # FreeTDS version 0.95+ adds a (<host>:<port) to this error.
                self.assertTrue(
                    'Adaptive Server connection failed' in ex.db_error['description']
                )
                self.assertEqual(ex.os_error, None)
                self.assertTrue(self.server_name_and_instance in ex.last_message.pop('server'))
                self.assertEqual(ex.last_message, {
                    'description': msg,
                    'line': 1,
                    'number': 18456,
                    'proc': '',
                    'severity': 14,
                    'state': 1
                })
            else:
                self.fail('.connect() did not fail as expected')  # pragma: nocover

    def test_appname(self):
        # The appname passed at connect time is visible via APP_NAME().
        with self.connect(appname='test_appname') as connection:
            with connection.cursor() as cursor:
                cursor.execute(
                    '''
                    SELECT APP_NAME();
                    '''
                )
                self.assertEqual('test_appname', cursor.fetchone()[0])

    def test_autocommit(self):
        # autocommit=True: no implicit transactions; autocommit=False: the
        # first statement opens a transaction (IMPLICIT_TRANSACTIONS ON).
        with self.connect(autocommit=True) as connection:
            self.assertEqual(connection.autocommit, True)
            with connection.cursor() as cursor:
                cursor.execute('SELECT 1 FROM sys.objects')
                cursor.execute('SELECT @@TRANCOUNT')
                self.assertEqual(0, cursor.fetchone()[0])
                # Verify the IMPLICIT_TRANSACTIONS setting is OFF.
                cursor.execute('SELECT @@OPTIONS')
                self.assertFalse(self.IMPLICIT_TRANSACTIONS & cursor.fetchone()[0])

        with self.connect(autocommit=False) as connection:
            self.assertEqual(connection.autocommit, False)
            with connection.cursor() as cursor:
                cursor.execute('SELECT 1 FROM sys.objects')
                cursor.execute('SELECT @@TRANCOUNT')
                self.assertEqual(1, cursor.fetchone()[0])
                # Verify the IMPLICIT_TRANSACTIONS setting is ON.
                cursor.execute('SELECT @@OPTIONS')
                self.assertTrue(self.IMPLICIT_TRANSACTIONS & cursor.fetchone()[0])
                connection.rollback()
                cursor.execute('SELECT @@TRANCOUNT')
                self.assertEqual(0, cursor.fetchone()[0])
                # Verify the IMPLICIT_TRANSACTIONS setting is ON.
                cursor.execute('SELECT @@OPTIONS')
                self.assertTrue(self.IMPLICIT_TRANSACTIONS & cursor.fetchone()[0])

    def test_ansi_defaults(self):
        # ansi_defaults toggles the whole group of ANSI session options.
        for autocommit in (True, False):
            with self.connect(ansi_defaults=True, autocommit=autocommit) as connection:
                with connection.cursor() as cursor:
                    cursor.execute('SELECT @@OPTIONS')
                    options = cursor.fetchone()[0]
                    self.assertEqual(bool(self.IMPLICIT_TRANSACTIONS & options), not autocommit)
                    self.assertTrue(self.CURSOR_CLOSE_ON_COMMIT & options)
                    self.assertTrue(self.ANSI_PADDING & options)
                    self.assertTrue(self.ANSI_NULLS & options)
                    self.assertTrue(self.ARITHABORT & options)
                    self.assertFalse(self.ARITHIGNORE & options)
                    self.assertTrue(self.QUOTED_IDENTIFIER & options)
                    self.assertTrue(self.ANSI_NULL_DFLT_ON & options)
                    self.assertFalse(self.ANSI_NULL_DFLT_OFF & options)

            with self.connect(ansi_defaults=False, autocommit=autocommit) as connection:
                with connection.cursor() as cursor:
                    cursor.execute('SELECT @@OPTIONS')
                    options = cursor.fetchone()[0]
                    self.assertEqual(bool(self.IMPLICIT_TRANSACTIONS & options), not autocommit)
                    self.assertFalse(self.CURSOR_CLOSE_ON_COMMIT & options)
                    self.assertFalse(self.ANSI_PADDING & options)
                    self.assertFalse(self.ANSI_NULLS & options)
                    self.assertFalse(self.ARITHABORT & options)
                    self.assertFalse(self.ARITHIGNORE & options)
                    self.assertFalse(self.QUOTED_IDENTIFIER & options)
                    self.assertFalse(self.ANSI_NULL_DFLT_ON & options)
                    self.assertFalse(self.ANSI_NULL_DFLT_OFF & options)

    def test_hostname(self):
        # Default hostname is the local machine's; it can be overridden.
        with self.connect() as connection:
            with connection.cursor() as cursor:
                cursor.execute(
                    '''
                    SELECT hostname
                    FROM master..sysprocesses
                    WHERE spid = @@SPID;
                    '''
                )
                self.assertEqual(
                    socket.gethostname(),
                    cursor.fetchone()[0].rstrip()  # SQL Server pads the column
                )

        with self.connect(hostname='test_hostname') as connection:
            with connection.cursor() as cursor:
                cursor.execute(
                    '''
                    SELECT hostname
                    FROM master..sysprocesses
                    WHERE spid = @@SPID;
                    '''
                )
                self.assertEqual(
                    'test_hostname',
                    cursor.fetchone()[0].rstrip()  # SQL Server pads the column
                )

    def test_read_only(self):
        # read_only either works or raises NotImplementedError, depending on
        # the FreeTDS build; the class advertises which to expect.
        try:
            with self.connect(read_only=True):
                pass
        except NotImplementedError:
            self.assertFalse(self.read_only_intent_supported)
        else:
            self.assertTrue(self.read_only_intent_supported)

    def test_ntlmv2(self):
        # Same pattern as test_read_only, for NTLMv2 authentication support.
        try:
            with self.connect(ntlmv2=True):
                pass
        except NotImplementedError:
            self.assertFalse(self.ntlmv2_supported)
        else:
            self.assertTrue(self.ntlmv2_supported)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os.path
import tornado.ioloop
import tornado.web
import tornado.wsgi
import HTMLParser
import requests
import json
import base64
import re
import time
from random import random
from urlparse import urlparse
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
# DEPRECATED in favor of match1()
def r1(pattern, text):
    """Return the first capture group of `pattern` found in `text`, or None.

    DEPRECATED in favor of match1().
    """
    match = re.search(pattern, text)
    return match.group(1) if match else None
def get_html(url):
    """Fetch `url` over HTTP and return the response body as text."""
    response = requests.get(url)
    return response.text

# Alias kept for call sites that expect a "decoded" fetch; requests already
# decodes the body, so no extra work is needed.
get_decoded_html = get_html
class SohuExtractor:
    """Extract playable clip URLs and/or metadata for Sohu TV video pages."""

    def __init__(self, url):
        self.url = url

    def real_url(self, host, vid, tvid, new, clipURL, ck):
        """Resolve one clip to its final CDN URL via Sohu's redirect API.

        `tvid` is accepted for interface compatibility but is not part of the
        redirect query string.
        """
        url = ('http://' + host + '/?prot=9&prod=flash&pt=1&file=' + clipURL +
               '&new=' + new + '&key=' + ck + '&vid=' + str(vid) +
               '&uid=' + str(int(time.time() * 1000)) +
               '&t=' + str(random()) + '&rb=1')
        return json.loads(get_html(url))['url']

    def extract(self, meta, quality="highVid"):
        """Extract video information from the page at ``self.url``.

        :param meta: when truthy, return a list of per-quality metadata dicts
            instead of resolving clip URLs.
        :param quality: quality key to resolve URLs for (one of oriVid,
            superVid, highVid, norVid, relativeId).
        :return: a dict with ``urls`` and metadata for the matched quality,
            or (when `meta` is truthy / nothing matched) the metadata list.
        """
        if re.match(r'http://share.vrs.sohu.com', self.url):
            # Bug fix: previously referenced the undefined name `url`
            # (NameError); the video id is parsed from self.url.
            vid = r1(r'id=(\d+)', self.url)
        else:
            html = get_html(self.url)
            vid = r1(r'\Wvid\s*[\:=]\s*[\'"]?(\d+)[\'"]?', html)
        assert vid

        infos = []
        if re.match(r'http://tv.sohu.com/', self.url):
            info = json.loads(get_decoded_html(
                'http://hot.vrs.sohu.com/vrs_flash.action?vid=%s' % vid))
            for qtyp in ["oriVid", "superVid", "highVid", "norVid", "relativeId"]:
                hqvid = info['data'][qtyp]
                # NOTE(review): `vid` is a string while `hqvid` is numeric, so
                # `hqvid != vid` is always true on Python 2; kept as-is to
                # preserve existing behavior.
                if hqvid != 0 and hqvid != vid:
                    info = json.loads(get_decoded_html(
                        'http://hot.vrs.sohu.com/vrs_flash.action?vid=%s' % hqvid))
                host = info['allot']
                tvid = info['tvid']
                urls = []
                data = info['data']
                # Sanity check: clip lists must be parallel arrays.
                assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
                if meta:
                    infos.append({
                        "quality": qtyp,
                        "totalDuration": data["totalDuration"],
                        "totalBytes": data["totalBytes"],
                        "clipsBytes": data["clipsBytes"],
                        "clipsDuration": data["clipsDuration"]
                    })
                    continue
                if quality == qtyp:
                    for new, clip, ck in zip(data['su'], data['clipsURL'], data['ck']):
                        clipURL = urlparse(clip).path
                        urls.append(self.real_url(host, hqvid, tvid, new, clipURL, ck))
                    return {
                        "urls": urls,
                        "quality": qtyp,
                        "totalDuration": data["totalDuration"],
                        "totalBytes": data["totalBytes"],
                        "clipsBytes": data["clipsBytes"],
                        "clipsDuration": data["clipsDuration"]
                    }
        return infos
class DetailInfoHtmlParser(HTMLParser.HTMLParser):
    """Scrape a soku.com detail page into a dict: name, pub info, summary,
    thumbnail, actors, rating, and per-site play sources with episode links.

    NOTE(review): relies on Python 2 semantics -- `filter` must return a list
    (len()/indexing are applied directly to its result).
    """

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        # Which page section we are inside, and how many tags deep below that
        # section's opening tag the parser currently is.
        self.current_key = {"name": "", "deep": 0}
        self.detail = {}             # accumulated result; see output()
        self.urls = []               # play-page links collected from the linkpanel
        self.attr_url_name = False   # next data node is the current link's display name
        self.current_url = {}        # link being assembled ({"url", "site", "name"})
        self.current_source = {}     # video-site source being assembled
        self.is_end = False          # set at the "intro" div; parsing stops there

    def output(self):
        """Return the scraped detail dict."""
        return self.detail

    def is_tag_start(self, tag, attrs, d_tag, d_variable, d_attr):
        # True when `tag` equals `d_tag` and carries exactly one
        # d_variable="d_attr" attribute.
        return tag == d_tag and len(filter(lambda va: len(va) >= 2 and va[0] == d_variable and va[1] == d_attr, attrs)) == 1

    def get_attr_by(self, tag, attrs, d_tag, d_variable):
        # Value of attribute `d_variable` when `tag` equals `d_tag`, else "".
        if tag == d_tag:
            ret = filter(lambda va: len(va) >= 2 and va[0] == d_variable, attrs)
            if len(ret) == 1:
                return ret[0][1]
        return ""

    def add_sources(self, source, urls):
        # Record one video site: pair it with its collected play-page links,
        # exposing them as "episodes" under detail["sources"].
        u = filter(lambda url: url["site"] == source["site"], urls)
        if not self.detail.get("sources", None):
            self.detail["sources"] = []
        # First source seen also provides the overall status.
        self.detail.setdefault("status", source["status"])
        s = {"site": source["site"], "name": source["title"], "icon": source["icon"], "status": source["status"]}
        s["episodes"] = []
        for i in u:
            s["episodes"].append({"url": i["url"], "title": i["name"]})
        self.detail["sources"].append(s)

    def handle_starttag(self, tag, attrs):
        if self.is_end:
            return
        # Section openers: remember which section we entered (depth 1).
        if self.is_tag_start(tag, attrs, "li", "class", "base_name"):
            self.current_key["name"] = "base_name"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "base_pub"):
            self.current_key["name"] = "base_pub"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "base_what"):
            self.current_key["name"] = "base_what"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "p_thumb"):
            self.current_key["name"] = "p_thumb"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "long"):
            # Actor list; only enter once.
            if not self.detail.get("actors", None):
                self.current_key["name"] = "long"
                self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "ul", "class", "linkpanel"):
            self.current_key["name"] = "linkpanel"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "ul", "class", "other"):
            self.current_key["name"] = "source"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "div", "class", "source source_one"):
            self.current_key["name"] = "source_one"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "div", "class", "rating"):
            self.current_key["name"] = "rating"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "div", "class", "intro"):
            # Everything of interest precedes the intro section.
            self.is_end = True
            return
        else:
            # Nested tag inside the current section: extract per-section data.
            if self.current_key["deep"] > 0:
                self.current_key["deep"] += 1
            if self.current_key["name"] == "p_thumb" and self.current_key["deep"] > 1:
                img = self.get_attr_by(tag, attrs, "img", "src")
                if img != "":
                    self.detail["thumb"] = img
            elif self.current_key["name"] == "linkpanel" and self.current_key["deep"] == 3:
                href = self.get_attr_by(tag, attrs, "a", "href")
                site = self.get_attr_by(tag, attrs, "a", "site")
                if href != "" and site != "":
                    # The anchor's text (its display name) arrives via handle_data.
                    self.attr_url_name = True
                    self.current_url = {"url": href, "site": site}
            elif self.current_key["name"] == "source_one" and self.current_key["deep"] > 1:
                # Single-source layout: fields arrive on different tags.
                site = self.get_attr_by(tag, attrs, "div", "name")
                title = self.get_attr_by(tag, attrs, "div", "title")
                src = self.get_attr_by(tag, attrs, "label", "_src")
                status = self.get_attr_by(tag, attrs, "input", "title")
                if site != "":
                    self.current_source["site"] = site
                if title != "":
                    self.current_source["title"] = title
                if status != "":
                    self.current_source["status"] = status
                if src != "":
                    # The icon is the last field; the source is complete.
                    self.current_source["icon"] = src
                    self.add_sources(self.current_source, self.urls)
                    self.current_source = {}
            elif self.current_key["name"] == "source" and self.current_key["deep"] > 1:
                # Multi-source layout ("other" list): different attribute map.
                site = self.get_attr_by(tag, attrs, "li", "name")
                title = self.get_attr_by(tag, attrs, "img", "title")
                src = self.get_attr_by(tag, attrs, "img", "src")
                status = self.get_attr_by(tag, attrs, "label", "title")
                if site != "":
                    self.current_source["site"] = site
                if title != "":
                    self.current_source["title"] = title
                if src != "":
                    self.current_source["icon"] = src
                if status != "":
                    # The status is the last field; the source is complete.
                    self.current_source["status"] = status
                    self.add_sources(self.current_source, self.urls)
                    self.current_source = {}

    def handle_endtag(self, tag):
        if self.is_end:
            return
        if self.current_key["deep"] > 0:
            self.current_key["deep"] -= 1

    def handle_data(self, data):
        if self.is_end:
            return
        if self.current_key["deep"] > 1 and self.current_key["name"] == "base_name":
            self.detail["name"] = data.strip()
        elif self.current_key["deep"] >= 1 and self.current_key["name"] == "base_pub":
            self.detail["pub"] = data.strip()
        elif self.current_key["deep"] >= 1 and self.current_key["name"] == "base_what":
            self.detail["sum"] = data.strip()
        elif self.current_key["deep"] > 1 and self.current_key["name"] == "long":
            # Actor names are separated by "/" text nodes; skip separators.
            if data.strip() == "/" or data.strip() == "":
                return
            if self.detail.get("actors", None):
                self.detail["actors"].append(data)
            else:
                self.detail["actors"] = [data,]
        elif self.current_key["deep"] > 1 and self.current_key["name"] == "rating":
            # Only the first rating value is kept.
            self.detail.setdefault("rating", data)
        elif self.attr_url_name:
            # Text node following a linkpanel anchor: the link's display name.
            self.current_url["name"] = data
            self.urls.append(self.current_url)
            self.attr_url_name = False
            self.current_url = {}
class TeleListHtmlParser(HTMLParser.HTMLParser):
    """Scrape a soku.com channel listing page into a list of show dicts
    (title, thumb, actors, rating, detail link, status).

    NOTE(review): relies on Python 2 semantics -- `filter` must return a list
    (len()/indexing are applied directly to its result).
    """

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.tele_list = []          # completed show dicts; see output()
        self.current_tele = {}       # show currently being assembled
        # Which listing section we are inside and how deep below its opening
        # tag the parser currently is.
        self.current_key = {"name": "", "deep": 0}

    def output(self):
        """Return the list of scraped shows."""
        return self.tele_list

    def is_tag_start(self, tag, attrs, d_tag, d_variable, d_attr):
        # True when `tag` equals `d_tag` and carries exactly one
        # d_variable="d_attr" attribute.
        return tag == d_tag and len(filter(lambda va: len(va) >= 2 and va[0] == d_variable and va[1] == d_attr, attrs)) == 1

    def get_attr_by(self, tag, attrs, d_tag, d_variable):
        # Value of attribute `d_variable` when `tag` equals `d_tag`, else "".
        if tag == d_tag:
            ret = filter(lambda va: len(va) >= 2 and va[0] == d_variable, attrs)
            if len(ret) == 1:
                return ret[0][1]
        return ""

    def handle_starttag(self, tag, attrs):
        # Section openers: remember which per-show section we entered.
        if self.is_tag_start(tag, attrs, "li", "class", "p_title"):
            self.current_key["name"] = "p_title"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "p_thumb"):
            self.current_key["name"] = "p_thumb"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "p_actor"):
            self.current_key["name"] = "p_actor"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "p_rating"):
            self.current_key["name"] = "p_rating"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "li", "class", "p_link"):
            self.current_key["name"] = "p_link"
            self.current_key["deep"] = 1
        elif self.is_tag_start(tag, attrs, "div", "class", "source source_one"):
            self.current_key["name"] = "source source_one"
            self.current_key["deep"] = 1
        else:
            # Nested tag inside the current section: extract per-section data.
            if self.current_key["deep"] > 0:
                self.current_key["deep"] += 1
            if self.current_key["name"] == "p_thumb" and self.current_key["deep"] > 1:
                # Thumbnails are lazy-loaded; the real URL is in "original".
                img = self.get_attr_by(tag, attrs, "img", "original")
                if img != "":
                    self.current_tele["thumb"] = img
            elif self.current_key["name"] == "p_link" and self.current_key["deep"] > 1:
                link = self.get_attr_by(tag, attrs, "a", "href")
                if link != "":
                    self.current_tele["detail"] = link
            elif self.current_key["name"] == "source source_one" and self.current_key["deep"] > 1:
                if self.current_tele.get("status", None):
                    # Status already captured: this show is complete; flush it.
                    if self.current_tele.get("title", None):
                        self.tele_list.append(self.current_tele)
                        self.current_tele = {}
                else:
                    status = self.get_attr_by(tag, attrs, "a", "status")
                    if status != "" and self.current_tele.get("title", None):
                        self.current_tele["status"] = status

    def handle_endtag(self, tag):
        if self.current_key["deep"] > 0:
            self.current_key["deep"] -= 1

    def handle_data(self, data):
        if self.current_key["deep"] > 1 and self.current_key["name"] == "p_title":
            self.current_tele["title"] = data
        elif self.current_key["deep"] > 1 and self.current_key["name"] == "p_actor":
            if self.current_tele.get("actors", None):
                self.current_tele["actors"].append(data)
            else:
                self.current_tele["actors"] = [data,]
        elif self.current_key["deep"] >= 1 and self.current_key["name"] == "p_rating":
            # The rating may be split across several text nodes; concatenate.
            if self.current_tele.get("rating", None):
                self.current_tele["rating"] += data
            else:
                self.current_tele["rating"] = data
class MainHandler(tornado.web.RequestHandler):
    """Serve the landing page."""

    def get(self):
        self.render("index.html")
class APIHandler(tornado.web.RequestHandler):
    """Placeholder API root endpoint."""

    def get(self):
        self.write("Hello, world api")
class SearchHandler(tornado.web.RequestHandler):
    """Search endpoint backed by http://www.soku.com (not yet implemented)."""

    def get(self):
        self.write("Hello, world Search")
class TeleplayListHandler(tornado.web.RequestHandler):
    """List teleplays scraped from soku.com, one page per request (?page=N)."""

    def get(self):
        page = str(self.get_query_argument("page", 1))
        resp = requests.get(
            "http://www.soku.com/channel/teleplaylist_0_0_0_1_" + page + ".html")
        html_parser = TeleListHtmlParser()
        html_parser.feed(resp.text)
        html_parser.close()
        body = json.dumps({"err": 0, "msg": "", "data": html_parser.output()})
        self.set_header('Content-Type', 'application/json')
        self.set_header("Content-Length", str(len(body)))
        self.write(body)
class AnimeListHandler(tornado.web.RequestHandler):
    """List anime scraped from soku.com, one page per request (?page=N)."""

    def get(self):
        page = str(self.get_query_argument("page", 1))
        resp = requests.get(
            "http://www.soku.com/channel/animelist_0_0_0_1_" + page + ".html")
        html_parser = TeleListHtmlParser()
        html_parser.feed(resp.text)
        html_parser.close()
        body = json.dumps({"err": 0, "msg": "", "data": html_parser.output()})
        self.set_header('Content-Type', 'application/json')
        self.set_header("Content-Length", str(len(body)))
        self.write(body)
class DetailHandler(tornado.web.RequestHandler):
    """Return detail info for one show, scraped from soku.com's detail page."""

    def get(self, detail):
        resp = requests.get("http://www.soku.com/detail/show/" + detail)
        html_parser = DetailInfoHtmlParser()
        html_parser.feed(resp.text)
        html_parser.close()
        body = json.dumps({"err": 0, "msg": "", "data": html_parser.output()})
        self.set_header('Content-Type', 'application/json')
        self.set_header("Content-Length", str(len(body)))
        self.write(body)
class VideosHandler(tornado.web.RequestHandler):
    """Video-extraction API.

    GET /api/v1/videos/{base64-encoded page url}?meta=0&quality=...
    example:
    https://youplay.avosapps.com/api/v1/videos/aHR0cDovL3R2LnNvaHUuY29tLzIwMTUxMTAzL240MjUxNTgwMDQuc2h0bWw=
    """

    def get(self, url):
        page_url = base64.b64decode(url.split('/')[0])
        want_meta = int(self.get_query_argument("meta", "0"))
        quality = self.get_query_argument("quality", None)
        extractor = SohuExtractor(page_url)
        body = json.dumps({"err": 0, "msg": "", "data": extractor.extract(want_meta, quality)})
        self.set_header('Content-Type', 'application/json')
        self.set_header("Content-Length", str(len(body)))
        self.write(body)
# Tornado application settings.
settings = {
    "static_path": os.path.join(os.path.dirname(__file__), "public"),
    "template_path": os.path.join(os.path.dirname(__file__), "views"),
    "gzip": True,   # compress responses
    "debug": True   # NOTE(review): debug/autoreload enabled; disable for production
}

# URL routing table.
application = tornado.web.Application([
    (r"/", MainHandler),
    (r"/api/v1", APIHandler),
    (r"/api/v1/videos/(.*)", VideosHandler),
    (r"/api/v1/search", SearchHandler),
    (r"/api/v1/detail/show/(.*)", DetailHandler),
    (r"/api/v1/channel/teleplaylist", TeleplayListHandler),
    (r"/api/v1/channel/animelist", AnimeListHandler)
], **settings)

# WSGI adapter so the app can also be hosted by a WSGI container.
app = tornado.wsgi.WSGIAdapter(application)
def main():
    """Run the Tornado HTTP server on port 8888 (blocks forever)."""
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()


if __name__ == '__main__':
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for supervisor.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
import uuid
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import supervisor
def _summary_iterator(test_dir):
    """Read events from the newest events file in `test_dir`.

    Args:
      test_dir: Name of the test directory.

    Returns:
      A summary_iterator over the lexicographically-last "event*" file.
    """
    pattern = os.path.join(test_dir, "event*")
    newest_event_file = sorted(glob.glob(pattern))[-1]
    return summary_iterator.summary_iterator(newest_event_file)
class SupervisorTest(test.TestCase):
def _test_dir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):
"""Wait for a checkpoint file to appear.
Args:
pattern: A string.
timeout_secs: How long to wait for in seconds.
for_checkpoint: whether we're globbing for checkpoints.
"""
end_time = time.time() + timeout_secs
while time.time() < end_time:
if for_checkpoint:
if checkpoint_management.checkpoint_exists(pattern):
return
else:
if len(gfile.Glob(pattern)) >= 1:
return
time.sleep(0.05)
self.assertFalse(True, "Glob never matched any file: %s" % pattern)
# This test does not test much.
  def testBasics(self):
    """Supervisor creates a usable session in a fresh logdir."""
    logdir = self._test_dir("basics")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      sess = sv.prepare_or_wait_for_session("")
      for _ in xrange(10):
        sess.run(my_op)
      sess.close()
      sv.stop()
  def testManagedSession(self):
    """managed_session runs ops and stops the supervisor when the block exits."""
    logdir = self._test_dir("managed_session")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      with sv.managed_session("") as sess:
        for _ in xrange(10):
          sess.run(my_op)
      # Supervisor has been stopped.
      self.assertTrue(sv.should_stop())
  def testManagedSessionUserError(self):
    """A user exception propagates out of managed_session and stops the supervisor."""
    logdir = self._test_dir("managed_user_error")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      last_step = None
      with self.assertRaisesRegexp(RuntimeError, "failing here"):
        with sv.managed_session("") as sess:
          for step in xrange(10):
            last_step = step
            if step == 1:
              raise RuntimeError("failing here")
            else:
              sess.run(my_op)
      # Supervisor has been stopped.
      self.assertTrue(sv.should_stop())
      self.assertEqual(1, last_step)
  def testManagedSessionIgnoreOutOfRangeError(self):
    """OutOfRangeError (end of input) is swallowed by managed_session."""
    logdir = self._test_dir("managed_out_of_range")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      last_step = None
      with sv.managed_session("") as sess:
        for step in xrange(10):
          last_step = step
          if step == 3:
            raise errors_impl.OutOfRangeError(my_op.op.node_def, my_op.op,
                                              "all done")
          else:
            sess.run(my_op)
      # Supervisor has been stopped. OutOfRangeError was not thrown.
      self.assertTrue(sv.should_stop())
      self.assertEqual(3, last_step)
  def testManagedSessionDoNotKeepSummaryWriter(self):
    """With close_summary_writer=True each session writes its own event file."""
    logdir = self._test_dir("managed_not_keep_summary_writer")
    with ops.Graph().as_default():
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sv = supervisor.Supervisor(logdir=logdir, summary_op=None)
      with sv.managed_session(
          "", close_summary_writer=True, start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      # Sleep 1.2s to make sure that the next event file has a different name
      # than the current one.
      time.sleep(1.2)
      with sv.managed_session(
          "", close_summary_writer=True, start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      event_paths = sorted(glob.glob(os.path.join(logdir, "event*")))
      self.assertEquals(2, len(event_paths))
      # The two event files should have the same contents.
      for path in event_paths:
        # The summary iterator should report the summary once as we closed the
        # summary writer across the 2 sessions.
        rr = summary_iterator.summary_iterator(path)
        # The first event should list the file_version.
        ev = next(rr)
        self.assertEquals("brain.Event:2", ev.file_version)
        # The next one has the graph and metagraph.
        ev = next(rr)
        self.assertTrue(ev.graph_def)
        ev = next(rr)
        self.assertTrue(ev.meta_graph_def)
        # The next one should have the values from the summary.
        # But only once.
        ev = next(rr)
        self.assertProtoEquals("""
          value { tag: 'c1' simple_value: 1.0 }
          value { tag: 'c2' simple_value: 2.0 }
          value { tag: 'c3' simple_value: 3.0 }
          """, ev.summary)
        # The next one should be a stop message if we closed cleanly.
        ev = next(rr)
        self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
        # We should be done.
        with self.assertRaises(StopIteration):
          next(rr)
def testManagedSessionKeepSummaryWriter(self):
logdir = self._test_dir("managed_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Now close the summary writer to flush the events.
sv.summary_writer.close()
# The summary iterator should report the summary twice as we reused
# the same summary writer across the 2 sessions.
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should also have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
def testManagedEndOfInputOneQueue(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from a single queue.
logdir = self._test_dir("managed_end_of_input_one_queue")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(rec)
def testManagedEndOfInputTwoQueues(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from two queues, the second
# one producing a batch from the first one.
logdir = self._test_dir("managed_end_of_input_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(shuff_rec)
def testManagedMainErrorTwoQueues(self):
# Tests that the supervisor correctly raises a main loop
# error even when using multiple queues for input.
logdir = self._test_dir("managed_main_error_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for step in range(9):
if sv.should_stop():
break
elif step == 3:
raise RuntimeError("fail at step 3")
else:
sess.run(shuff_rec)
def testSessionConfig(self):
logdir = self._test_dir("session_config")
with ops.Graph().as_default():
with ops.device("/cpu:1"):
my_op = constant_op.constant([1.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session(
"", config=config_pb2.ConfigProto(device_count={"CPU": 2}))
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
def testChiefCanWriteEvents(self):
logdir = self._test_dir("can_write")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNonChiefCannotWriteEvents(self):
def _summary_computed():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summ = summary.merge_all()
sv.summary_computed(sess, sess.run(summ))
def _start_standard_services():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
sv.start_standard_services(sess)
self.assertRaises(RuntimeError, _summary_computed)
self.assertRaises(RuntimeError, _start_standard_services)
def testNoLogdirButWantSummary(self):
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testLogdirButExplicitlyNoSummaryWriter(self):
logdir = self._test_dir("explicit_no_summary_writer")
with ops.Graph().as_default():
variables.Variable([1.0], name="foo")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_writer=None)
sess = sv.prepare_or_wait_for_session("")
# Check that a checkpoint is still be generated.
self._wait_for_glob(sv.save_path, 3.0)
# Check that we cannot write a summary
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testNoLogdirButExplicitSummaryWriter(self):
logdir = self._test_dir("explicit_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sw = writer.FileWriter(logdir)
sv = supervisor.Supervisor(logdir="", summary_op=None, summary_writer=sw)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# Check the summary was written to 'logdir'
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNoLogdirSucceeds(self):
with ops.Graph().as_default():
variables.Variable([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
sess.close()
sv.stop()
def testUseSessionManager(self):
with ops.Graph().as_default():
variables.Variable([1.0, 2.0, 3.0])
sm = session_manager_lib.SessionManager()
# Pass in session_manager. The additional init_op is ignored.
sv = supervisor.Supervisor(logdir="", session_manager=sm)
sv.prepare_or_wait_for_session("")
def testInitOp(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitFn(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0])
def _init_fn(sess):
sess.run(v.initializer)
sv = supervisor.Supervisor(logdir=logdir, init_op=None, init_fn=_init_fn)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpWithFeedDict(self):
logdir = self._test_dir("feed_dict_init_op")
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.Variable(p, name="v")
sv = supervisor.Supervisor(
logdir=logdir,
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testReadyForLocalInitOp(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_ready_for_local_init_op")
uid = uuid.uuid4().hex
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:local"):
v = variables.Variable(
1, name="default_ready_for_local_init_op_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="default_ready_for_local_init_op_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
init_op=v.initializer,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(1, sess0.run(w0))
self.assertEqual(2, sess1.run(vadd1))
self.assertEqual(1, sess1.run(w1))
self.assertEqual(2, sess0.run(v0))
sv0.stop()
sv1.stop()
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("ready_for_local_init_op_restore")
uid = uuid.uuid4().hex
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.Variable(
10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
sv = supervisor.Supervisor(logdir=logdir)
sv.prepare_or_wait_for_session(server.target)
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:local"):
v = variables.Variable(
1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="ready_for_local_init_op_restore_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(10, sess0.run(w0))
self.assertEqual(11, sess1.run(vadd1))
self.assertEqual(10, sess1.run(w1))
self.assertEqual(11, sess0.run(v0))
sv0.stop()
sv1.stop()
def testLocalInitOp(self):
logdir = self._test_dir("default_local_init_op")
with ops.Graph().as_default():
# A local variable.
v = variables.Variable(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# An entity which is initialized through a TABLE_INITIALIZER.
w = variables.Variable([4, 5, 6], trainable=False, collections=[])
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, w.initializer)
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
self.assertAllClose([4, 5, 6], sess.run(w))
sv.stop()
def testLocalInitOpForNonChief(self):
logdir = self._test_dir("default_local_init_op_non_chief")
with ops.Graph().as_default():
with ops.device("/job:localhost"):
# A local variable.
v = variables.Variable(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None, is_chief=False)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpFails(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails")
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
variables.Variable([4.0, 5.0, 6.0], name="w")
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails_for_local_variable")
with ops.Graph().as_default():
v = variables.Variable(
[1.0, 2.0, 3.0],
name="v",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
variables.Variable(
[1.0, 2.0, 3.0],
name="w",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, local_init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testSetupFail(self):
logdir = self._test_dir("setup_fail")
with ops.Graph().as_default():
variables.Variable([1.0, 2.0, 3.0], name="v")
with self.assertRaisesRegexp(ValueError, "must have their device set"):
supervisor.Supervisor(logdir=logdir, is_chief=False)
with ops.Graph().as_default(), ops.device("/job:ps"):
variables.Variable([1.0, 2.0, 3.0], name="v")
supervisor.Supervisor(logdir=logdir, is_chief=False)
def testDefaultGlobalStep(self):
logdir = self._test_dir("default_global_step")
with ops.Graph().as_default():
variables.Variable(287, name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertEquals(287, sess.run(sv.global_step))
sv.stop()
def testRestoreFromMetaGraph(self):
logdir = self._test_dir("restore_from_meta_graph")
with ops.Graph().as_default():
variables.Variable(1, name="v0")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
filename = sv.saver.save(sess, sv.save_path)
sv.stop()
# Create a new Graph and Supervisor and recover.
with ops.Graph().as_default():
new_saver = saver_lib.import_meta_graph(".".join([filename, "meta"]))
self.assertIsNotNone(new_saver)
sv2 = supervisor.Supervisor(logdir=logdir, saver=new_saver)
sess = sv2.prepare_or_wait_for_session("")
self.assertEquals(1, sess.run("v0:0"))
sv2.saver.save(sess, sv2.save_path)
sv2.stop()
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
def testStandardServicesWithoutGlobalStep(self):
logdir = self._test_dir("standard_services_without_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.Variable([1.0], name="foo")
summary.scalar("v", v[0])
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([10.10], name="foo")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(1.0, v.eval()[0])
# Same as testStandardServicesNoGlobalStep but with a global step.
# We should get a summary about the step time.
def testStandardServicesWithGlobalStep(self):
logdir = self._test_dir("standard_services_with_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.Variable([123], name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
# This is where the checkpoint will appear, with step number 123.
save_path = "%s-123" % sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
# It is actually undeterministic whether SessionLog.START gets written
# before the summary or the checkpoint, but this works when run 10000 times.
self.assertEquals(123, ev.step)
self.assertEquals(event_pb2.SessionLog.START, ev.session_log.status)
first = next(rr)
second = next(rr)
# It is undeterministic whether the value gets written before the checkpoint
# since they are on separate threads, so we check for both conditions.
if first.HasField("summary"):
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", first.summary)
self.assertEquals(123, second.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
second.session_log.status)
else:
self.assertEquals(123, first.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
first.session_log.status)
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", second.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([-12], name="global_step")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(123, v.eval()[0])
def testNoQueueRunners(self):
with ops.Graph().as_default(), self.cached_session() as sess:
sv = supervisor.Supervisor(logdir=self._test_dir("no_queue_runners"))
self.assertEqual(0, len(sv.start_queue_runners(sess)))
sv.stop()
def testPrepareSessionAfterStopForChief(self):
logdir = self._test_dir("prepare_after_stop_chief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=True)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
logdir = self._test_dir("prepare_after_stop_nonchief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=False)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
# Standard test-module entry point: run every test in this file.
if __name__ == "__main__":
  test.main()
| |
from datetime import datetime
from StringIO import StringIO
import requests
from dateutil import parser
from granoclient.loader import Loader
# Accepted (lower-cased) spellings of boolean "true" in source data.
BOOL_TRUISH = ['t', 'true', 'yes', 'y', '1']
def is_empty(value):
    """True when ``value`` is None or contains only whitespace."""
    return value is None or not value.strip()
class RowException(Exception):
    """Raised when a single source row cannot be mapped, e.g. a required
    column value is missing or invalid."""
    pass
class MappingException(Exception):
    """Raised when the mapping model itself is invalid, e.g. a malformed
    date 'format' specification."""
    pass
class ObjectMapper(object):
    """Base mapper: converts one row of tabular source data into typed
    property values on a grano object, driven by a mapping ``model``.

    ``model`` is a dict holding a ``columns`` list; each column spec may
    carry ``column``, ``property``, ``type``, ``format``, ``default``,
    ``required``, ``skip_empty``, ``unique`` and source-URL settings.
    """

    def __init__(self, name, model):
        self.name = name
        self.model = model

    def convert_type(self, value, spec):
        """ Some well-educated format guessing. """
        data_type = spec.get('type', 'string').lower().strip()
        if data_type in ['bool', 'boolean']:
            return value.lower() in BOOL_TRUISH
        elif data_type in ['int', 'integer']:
            try:
                return int(value)
            except (ValueError, TypeError):
                return None
        elif data_type in ['float', 'decimal', 'real']:
            try:
                return float(value)
            except (ValueError, TypeError):
                return None
        elif data_type in ['date', 'datetime', 'timestamp']:
            if 'format' in spec:
                format_list = self._get_date_format_list(spec.get('format'))
                if format_list is None:
                    raise MappingException(
                        '%s format mapping is not valid: %r' %
                        (spec.get('column'), spec.get('format'))
                    )
                # Try candidate formats in order (most precise first);
                # the first one that parses wins.
                # Fix: loop variable renamed from ``format`` to avoid
                # shadowing the builtin.
                for date_format, precision in format_list:
                    try:
                        return {'value': datetime.strptime(value, date_format),
                                'value_precision': precision}
                    except (ValueError, TypeError):
                        pass
                return None
            else:
                try:
                    return parser.parse(value)
                except (ValueError, TypeError):
                    return None
        elif data_type == 'file':
            # Fix: the previous ``try: ... except: raise`` wrapper was a
            # no-op with a bare except; download errors propagate anyway.
            return self._get_file(value)
        return value

    def _get_date_format_list(self, format_value, precision=None):
        """Normalize a 'format' spec (string, list, or precision dict)
        into a list of (format, precision) tuples; None if unrecognized."""
        if isinstance(format_value, basestring):
            return [(format_value, precision)]
        elif isinstance(format_value, list):
            return [(fv, precision) for fv in format_value]
        elif isinstance(format_value, dict):
            format_list = []
            # try the most precise format first
            for key in ('time', 'day', 'month', 'year'):
                if key not in format_value:
                    continue
                format_list.extend(self._get_date_format_list(
                    format_value[key],
                    precision=key
                ))
            return format_list
        return None

    @property
    def columns(self):
        """Yield the model's column specs; a column with a default or
        with ``skip_empty`` set can never be 'required'."""
        for column in self.model.get('columns'):
            if 'default' in column:
                column['required'] = False
            if column.get('skip_empty'):
                column['required'] = False
            yield self._patch_column(column)

    def _patch_column(self, column):
        """Hook for subclasses to adjust a column spec before use."""
        return column

    def _get_file(self, url):
        """Download ``url`` and return its body as a named file-like
        object (the name is used by the upload API)."""
        response = requests.get(url)
        file_like_obj = StringIO(response.content)
        file_like_obj.name = url
        return file_like_obj

    def get_value(self, spec, row):
        """ Returns the value or a dict with a 'value' entry plus extra fields. """
        column = spec.get('column')
        default = spec.get('default')
        if column is None:
            if default is not None:
                return self.convert_type(default, spec)
            # Fix: explicit ``return None`` (was a bare ``return``).
            return None
        value = row.get(column)
        if is_empty(value):
            if default is not None:
                return self.convert_type(default, spec)
            return None
        return self.convert_type(value, spec)

    def get_source(self, spec, row):
        """ Sources can be specified as plain strings or as a reference to a column. """
        value = self.get_value({'column': spec.get('source_url_column')}, row)
        if value is not None:
            return value
        return spec.get('source_url')

    def load_properties(self, obj, row):
        """Set every mapped column of ``row`` as a property on ``obj``.

        Raises RowException when a required column has no usable value;
        columns flagged ``skip_empty`` are silently skipped when empty.
        """
        source_url = self.get_source(self.model, row)
        for column in self.columns:
            # Per-column source URL overrides the row-level one.
            col_source_url = self.get_source(column, row) or source_url
            value = self.get_value(column, row)
            extra_fields = {}
            if isinstance(value, dict):
                # Typed converters (dates) return {'value': ..., extras}.
                extra_fields = value
                value = extra_fields.pop('value')
            if value is None and column.get('required', True):
                raise RowException('%s is not valid: %s' % (
                    column.get('column'), row.get(column.get('column'))))
            if value is None and column.get('skip_empty', False):
                continue
            # Fix: pass the per-column source URL; it was previously
            # computed but never used (row-level URL was always sent).
            obj.set(column.get('property'), value,
                    source_url=col_source_url, **extra_fields)
            if column.get('unique', False):
                obj.unique(column.get('property'),
                           only_active=column.get('unique_active', True))
class EntityMapper(ObjectMapper):
    """Maps a row onto a grano entity."""

    def load(self, loader, row):
        """Create and save an entity for ``row``; returns None when the
        mapped row yields no 'name' property."""
        source_url = self.get_source(self.model, row)
        entity = loader.make_entity(self.model.get('schema'),
                                    source_url=source_url)
        self.load_properties(entity, row)
        # Entities without a name are considered unusable and dropped.
        if entity.properties.get('name', None) is None:
            return None
        entity.save()
        return entity

    def _patch_column(self, column):
        """Force 'name' columns to be unique, matching inactive values."""
        if column.get('property') == 'name':
            column.update(unique=True, unique_active=False)
        return column
class RelationMapper(ObjectMapper):
    """Maps a row onto a grano relation between two loaded entities."""

    def load(self, loader, row, objs):
        """Create and save a relation; returns None when either endpoint
        entity is missing from ``objs``."""
        source_url = self.get_source(self.model, row)
        source = objs.get(self.model.get('source'))
        target = objs.get(self.model.get('target'))
        if source is None or target is None:
            return None
        relation = loader.make_relation(self.model.get('schema'),
                                        source, target,
                                        source_url=source_url)
        self.load_properties(relation, row)
        relation.save()
        return relation
class MappingLoader(object):
    """Drives a grano Loader from a declarative mapping ``model``."""

    def __init__(self, grano, model):
        self.grano = grano
        self.model = model
        self.loader = Loader(grano, source_url=model.get('source_url'))

    @property
    def entities(self):
        """Yield an EntityMapper for each configured entity."""
        for name, spec in self.model.get('entities', {}).items():
            yield EntityMapper(name, spec)

    @property
    def relations(self):
        """Yield a RelationMapper for each configured relation."""
        for name, spec in self.model.get('relations', {}).items():
            yield RelationMapper(name, spec)

    def load(self, data):
        """ Load a single row of data and convert it into entities and
        relations. """
        objs = {}
        # Entities first: relations reference them by mapper name.
        for mapper in self.entities:
            objs[mapper.name] = mapper.load(self.loader, data)
        for mapper in self.relations:
            objs[mapper.name] = mapper.load(self.loader, data, objs)
| |
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translate the Ubuntu image on a GCE VM.
Parameters (retrieved from instance metadata):
ubuntu_release: The nickname of the distro (eg: trusty).
install_gce_packages: True if GCE agent and SDK should be installed
"""
from difflib import Differ
import logging
import re
import guestfs
import utils
from utils.apt import Apt
import utils.diskutils as diskutils
from utils.guestfsprocess import run
# Google Cloud SDK
#
# The official images provide the Google Cloud SDK.
#
# Starting at 18, it's installed using snap. Since guestfs
# issues commands via a chroot, we don't have access to the
# snapd daemon. Therefore we schedule the SDK to be installed
# using cloud-init on the first boot.
#
# Prior to 18, the official images installed the cloud SDK
# using a partner apt repo.
cloud_init_cloud_sdk = '''
snap:
commands:
00: snap install google-cloud-sdk --classic
'''
# Ubuntu standard path, from https://wiki.ubuntu.com/PATH.
std_path = (
'/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin')
# systemd directive to include /snap/bin on the path for a systemd service.
# This path was included in the Python guest agent's unit files, but
# was removed in the NGA's unit files.
snap_env_directive = '\n'.join([
'[Service]',
'Environment=PATH=' + std_path
])
apt_cloud_sdk = '''
# Enabled for Google Cloud SDK
deb http://archive.canonical.com/ubuntu {ubuntu_release} partner
'''
# Cloud init config
#
# This provides cloud-init with the configurations from the official
# GCP images.
cloud_init_config = '''
## The following config files are the official GCP Ubuntu image,
## ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20211118
## Source:
## /etc/cloud/cloud.cfg.d/91-gce-system.cfg
#############################################
# CLOUD_IMG: This file was created/modified by the Cloud Image build process
system_info:
package_mirrors:
- arches: [i386, amd64]
failsafe:
primary: http://archive.ubuntu.com/ubuntu
security: http://security.ubuntu.com/ubuntu
search:
primary:
- http://%(region)s.gce.archive.ubuntu.com/ubuntu/
- http://%(availability_zone)s.gce.clouds.archive.ubuntu.com/ubuntu/
- http://gce.clouds.archive.ubuntu.com/ubuntu/
security: []
- arches: [armhf, armel, default]
failsafe:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ntp:
enabled: true
ntp_client: chrony
servers:
- metadata.google.internal
## Source:
## /etc/cloud/cloud.cfg.d/91-gce.cfg
######################################
# Use the GCE data source for cloud-init
datasource_list: [ GCE ]
## Source:
## /etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg
#############################################################
# Disable network activation to prevent `cloud-init` from making network
# changes that conflict with `google-guest-agent`.
# See: https://github.com/canonical/cloud-init/pull/1048
disable_network_activation: true
'''
# Network configs
#
# cloud-init will overwrite these after performing its
# network detection. They're required, however, so that
# cloud init can reach the metadata server to determine
# that it's running on GCE.
#
# https://cloudinit.readthedocs.io/en/latest/topics/boot.html
#
# netplan is default starting at 18.
# https://ubuntu.com/blog/ubuntu-bionic-netplan
network_trusty = '''
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto eth0
iface eth0 inet dhcp
source /etc/network/interfaces.d/*.cfg
'''
network_xenial = '''
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto ens4
iface ens4 inet dhcp
source /etc/network/interfaces.d/*.cfg
'''
network_netplan = '''
network:
version: 2
renderer: networkd
ethernets:
ens4:
dhcp4: true
'''
def install_cloud_sdk(g: guestfs.GuestFS, ubuntu_release: str) -> None:
  """ Installs Google Cloud SDK, supporting apt and snap.

  Args:
    g: A mounted GuestFS instance.
    ubuntu_release: The release nickname (eg: trusty).
  """
  # Skip everything if a gcloud binary is already on the guest.
  try:
    run(g, 'gcloud --version')
    logging.info('Found gcloud. Skipping installation of Google Cloud SDK.')
    return
  except RuntimeError:
    logging.info('Did not find previous install of gcloud.')
  # NOTE(review): lexicographic string comparison of the major version; this
  # holds for the two-digit Ubuntu majors handled here ('14'..'20') but not
  # for single-digit strings -- confirm the attribute's domain.
  if g.gcp_image_major < '18':
    # Pre-18.04: the SDK comes from the Canonical partner apt repository.
    g.write('/etc/apt/sources.list.d/partner.list',
            apt_cloud_sdk.format(ubuntu_release=ubuntu_release))
    utils.update_apt(g)
    utils.install_apt_packages(g, 'google-cloud-sdk')
    logging.info('Installed Google Cloud SDK with apt.')
    return
  # Starting at 18.04, Canonical installs the sdk using snap.
  # Running `snap install` directly is not an option here since it
  # requires the snapd daemon to be running on the guest.
  g.write('/etc/cloud/cloud.cfg.d/91-google-cloud-sdk.cfg',
          cloud_init_cloud_sdk)
  logging.info(
      'Google Cloud SDK will be installed using snap with cloud-init.')
  # Include /snap/bin in the PATH for startup and shutdown scripts.
  # This was present in the old guest agent, but lost in the new guest
  # agent.
  for p in ['/lib/systemd/system/google-shutdown-scripts.service',
            '/lib/systemd/system/google-startup-scripts.service']:
    logging.debug('[%s] Checking whether /bin/snap is on PATH.', p)
    if not g.exists(p):
      logging.debug('[%s] Skipping: Unit not found.', p)
      continue
    original_unit = g.cat(p)
    # Check whether the PATH is already set; if so, skip patching to avoid
    # overwriting existing directive.
    match = re.search('Environment=[\'"]?PATH.*', original_unit,
                      flags=re.IGNORECASE)
    if match:
      logging.debug('[%s] Skipping: PATH already defined in unit file: %s.', p,
                    match.group())
      continue
    # Add Environment directive to unit file, and show diff in debug log.
    patched_unit = original_unit.replace('[Service]', snap_env_directive)
    g.write(p, patched_unit)
    diff = '\n'.join(Differ().compare(original_unit.splitlines(),
                                      patched_unit.splitlines()))
    logging.debug('[%s] PATH not defined. Added:\n%s', p, diff)
def install_osconfig_agent(g: guestfs.GuestFS):
  """Best-effort install of the OS Config agent via apt.

  A failed install is logged (with manual-install pointers) rather than
  raised, since the agent is not mandatory for translation to succeed.
  """
  manual_install_hint = (
      'Failed to install the OS Config agent. '
      'For manual install instructions, see '
      'https://cloud.google.com/compute/docs/manage-os#agent-install .')
  try:
    utils.install_apt_packages(g, 'google-osconfig-agent')
  except RuntimeError:
    logging.info(manual_install_hint)
def setup_cloud_init(g: guestfs.GuestFS):
  """ Install cloud-init if not present, and configure to the cloud provider.

  Args:
    g: A mounted GuestFS instance.
  """
  a = Apt(run)
  curr_version = a.get_package_version(g, 'cloud-init')
  available_versions = a.list_available_versions(g, 'cloud-init')
  # Try to avoid installing 21.3-1, which conflicts with the guest agent.
  # On first boot, systemd reaches a deadlock and doesn't start
  # its unit. If a version other than 21.3-1 isn't explicitly found, *and*
  # cloud-init isn't currently installed, then this allows apt to pick the
  # version to install.
  version_to_install = Apt.determine_version_to_install(
      curr_version, available_versions, {'21.3-1'})
  pkg_to_install = ''
  if version_to_install:
    pkg_to_install = 'cloud-init=' + version_to_install
  elif curr_version == '':
    pkg_to_install = 'cloud-init'
  # If this block doesn't execute, it means that cloud-init was found
  # on the system, but there wasn't an upgrade candidate. Therefore
  # leave the version that's currently installed.
  if pkg_to_install:
    logging.info(pkg_to_install)
    utils.install_apt_packages(g, pkg_to_install)
  # Ubuntu 14.04's version of cloud-init doesn't have `clean`.
  # NOTE(review): string comparison of the major version -- see
  # install_cloud_sdk for the same caveat.
  if g.gcp_image_major > '14':
    run(g, 'cloud-init clean')
  # Remove cloud-init configs that may conflict with GCE's.
  #
  # - subiquity disables automatic network configuration
  #   https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1871975
  for cfg in [
      'azure', 'curtin', 'waagent', 'walinuxagent', 'aws', 'amazon',
      'subiquity'
  ]:
    run(g, 'rm -f /etc/cloud/cloud.cfg.d/*%s*' % cfg)
  # Finally, lay down the GCE system config defined at module scope.
  g.write('/etc/cloud/cloud.cfg.d/91-gce-system.cfg', cloud_init_config)
def DistroSpecific(g):
  """Performs the Ubuntu-specific translation steps.

  Args:
    g: A mounted GuestFS instance.
  """
  ubuntu_release = utils.GetMetadataAttribute('ubuntu_release')
  install_gce = utils.GetMetadataAttribute('install_gce_packages')
  # If present, remove any hard coded DNS settings in resolvconf.
  # This is a common workaround to include permanent changes:
  # https://askubuntu.com/questions/157154
  if g.exists('/etc/resolvconf/resolv.conf.d/base'):
    logging.info('Resetting resolvconf base.')
    run(g, 'echo "" > /etc/resolvconf/resolv.conf.d/base')
  # Reset the network to DHCP, picking the config style that matches the
  # release (ifupdown for trusty/xenial, netplan for bionic and later).
  if ubuntu_release == 'trusty':
    g.write('/etc/network/interfaces', network_trusty)
  elif ubuntu_release == 'xenial':
    g.write('/etc/network/interfaces', network_xenial)
  elif g.is_dir('/etc/netplan'):
    run(g, 'rm -f /etc/netplan/*.yaml')
    g.write('/etc/netplan/config.yaml', network_netplan)
    run(g, 'netplan apply')
  if install_gce == 'true':
    utils.update_apt(g)
    setup_cloud_init(g)
    remove_azure_agents(g)
    if g.gcp_image_major > '14':
      install_osconfig_agent(g)
    utils.install_apt_packages(g, 'gce-compute-image-packages')
    install_cloud_sdk(g, ubuntu_release)
  # Update grub config to log to console.
  run(g, [
      'sed', '-i',
      r's#^\(GRUB_CMDLINE_LINUX=".*\)"$#\1 console=ttyS0,38400n8"#',
      '/etc/default/grub'
  ])
  run(g, ['update-grub2'])
def remove_azure_agents(g):
  """Removes the Azure guest agents from the image, best effort.

  Either package may be absent; a failed removal is only logged at debug
  level so translation continues.
  """
  for azure_pkg in ('walinuxagent', 'waagent'):
    try:
      run(g, ['apt-get', 'remove', '-y', '-f', azure_pkg])
    except Exception as e:
      logging.debug(str(e))
def main():
  # Mount the disk under translation, apply the Ubuntu-specific and common
  # translation routines, then unmount so the disk detaches cleanly.
  g = diskutils.MountDisk('/dev/sdb')
  DistroSpecific(g)
  utils.CommonRoutines(g)
  diskutils.UnmountDisk(g)


if __name__ == '__main__':
  utils.RunTranslate(main, run_with_tracing=False)
| |
from . import collision, entity
class Map:
    """
    Map which houses the current game information/metadata.

    :ivar my_id: Current player id associated with the map
    :ivar width: Map width
    :ivar height: Map height
    """

    def __init__(self, my_id, width, height):
        """
        :param my_id: User's id (tag)
        :param width: Map width
        :param height: Map height
        """
        self.my_id = my_id
        self.width = width
        self.height = height
        self._players = {}
        self._planets = {}

    def get_me(self):
        """
        :return: The user's player (None until the map has been parsed)
        :rtype: Player
        """
        return self._players.get(self.my_id)

    def get_player(self, player_id):
        """
        :param int player_id: The id of the desired player
        :return: The player associated with player_id, or None if unknown
        :rtype: Player
        """
        return self._players.get(player_id)

    def all_players(self):
        """
        :return: List of all players
        :rtype: list[Player]
        """
        return list(self._players.values())

    def get_planet(self, planet_id):
        """
        :param int planet_id:
        :return: The planet associated with planet_id, or None if unknown
        :rtype: entity.Planet
        """
        return self._planets.get(planet_id)

    def all_planets(self):
        """
        :return: List of all planets
        :rtype: list[entity.Planet]
        """
        return list(self._planets.values())

    def nearby_entities_by_distance(self, entity):
        """
        :param entity: The source entity to find distances from
        :return: Dict mapping each distance to the list of entities at
                 that distance from the source
        :rtype: dict
        """
        result = {}
        for foreign_entity in self._all_ships() + self.all_planets():
            if entity == foreign_entity:
                continue
            # Key by distance, as the name and docstring promise. The old
            # code keyed by calculate_angle_between, which both mislabeled
            # the result and collapsed distinct entities sharing a bearing.
            result.setdefault(
                entity.calculate_distance_between(foreign_entity),
                []).append(foreign_entity)
        return result

    def _link(self):
        """
        Updates all the entities with the correct ship and planet objects

        :return:
        """
        for celestial_object in self.all_planets() + self._all_ships():
            celestial_object._link(self._players, self._planets)

    def _parse(self, map_string):
        """
        Parse the map description from the game.

        :param map_string: The string which the Halite engine outputs
        :return: nothing
        """
        tokens = map_string.split()

        self._players, tokens = Player._parse(tokens)
        self._planets, tokens = entity.Planet._parse(tokens)

        # There should be no remaining tokens at this point
        assert(len(tokens) == 0)
        self._link()

    def _all_ships(self):
        """
        Helper function to extract all ships from all players

        :return: List of ships
        :rtype: List[Ship]
        """
        all_ships = []
        for player in self.all_players():
            all_ships.extend(player.all_ships())
        return all_ships

    def _intersects_entity(self, target):
        """
        Check if the specified entity (x, y, r) intersects any planets. Entity is assumed to not be a planet.

        :param entity.Entity target: The entity to check intersections with.
        :return: The colliding entity if so, else None.
        :rtype: entity.Entity
        """
        for celestial_object in self._all_ships() + self.all_planets():
            if celestial_object is target:
                continue
            d = celestial_object.calculate_distance_between(target)
            # 0.1 fudge factor keeps the check conservative near contact.
            if d <= celestial_object.radius + target.radius + 0.1:
                return celestial_object
        return None

    def obstacles_between(self, ship, target):
        """
        Check whether there is a straight-line path to the given point, without planetary obstacles in between.

        :param entity.Ship ship: Source entity
        :param entity.Entity target: Target entity
        :return: The list of obstacles between the ship and target
        :rtype: list[entity.Entity]
        """
        obstacles = []
        for foreign_entity in self.all_planets() + self._all_ships():
            if foreign_entity == ship or foreign_entity == target:
                continue
            if collision.intersect_segment_circle(
                    ship, target, foreign_entity, fudge=ship.radius + 0.1):
                obstacles.append(foreign_entity)
        return obstacles
class Player:
    """
    :ivar id: The player's unique id
    """

    def __init__(self, player_id, ships=None):
        """
        :param player_id: User's id
        :param ships: Dict of ships the user controls (optional)
        """
        self.id = player_id
        # A fresh dict per instance. The previous `ships={}` default was a
        # single mutable dict shared by every Player constructed without an
        # explicit ships argument.
        self._ships = {} if ships is None else ships

    def all_ships(self):
        """
        :return: A list of all ships which belong to the user
        :rtype: list[entity.Ship]
        """
        return list(self._ships.values())

    def get_ship(self, ship_id):
        """
        :param int ship_id: The ship id of the desired ship.
        :return: The ship designated by ship_id belonging to this user.
        :rtype: entity.Ship
        """
        return self._ships.get(ship_id)

    @staticmethod
    def _parse_single(tokens):
        """
        Parse one user given an input string from the Halite engine.

        :param list[str] tokens: The input string as a list of str from the Halite engine.
        :return: The parsed player id, player object, and remaining tokens
        :rtype: (int, Player, list[str])
        """
        player_id, *remainder = tokens
        player_id = int(player_id)
        ships, remainder = entity.Ship._parse(player_id, remainder)
        player = Player(player_id, ships)
        return player_id, player, remainder

    @staticmethod
    def _parse(tokens):
        """
        Parse an entire user input string from the Halite engine for all users.

        :param list[str] tokens: The input string as a list of str from the Halite engine.
        :return: The parsed players in the form of player dict, and remaining tokens
        :rtype: (dict, list[str])
        """
        num_players, *remainder = tokens
        num_players = int(num_players)
        players = {}

        for _ in range(num_players):
            # Unpack explicitly instead of the old one-liner
            # `player, players[player], remainder = ...`, which relied on
            # left-to-right assignment-target evaluation and read as a bug.
            player_id, player, remainder = Player._parse_single(remainder)
            players[player_id] = player

        return players, remainder

    def __str__(self):
        return "Player {} with ships {}".format(self.id, self.all_ships())

    def __repr__(self):
        return self.__str__()
| |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
News Crawler
"""
# pylint: disable=line-too-long, bare-except
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
from datetime import date
import StringIO
import sqlite3
import urllib2
import tldextract
import feedparser
from bs4 import BeautifulSoup
from twisted.protocols.ftp import FileNotFoundError
import gflags
__author__ = 'Dongjoon Hyun (dongjoon@apache.org)'
__copyright__ = 'Copyright (c) 2015-2016'
__license__ = 'Apache License'
__version__ = '0.1'

# Command-line flags (gflags); main() dispatches on whichever of
# --crawl / --parse / --feed are supplied.
FLAGS = gflags.FLAGS
gflags.DEFINE_string('crawl', None, 'Feed-list file name.', short_name='c')
gflags.DEFINE_boolean('parse', None, 'Parse.', short_name='p')
gflags.DEFINE_string('feed', None, 'Day and domain.', short_name='f')
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
class News(object):
    """
    News crawler/parser (Python 2).

    Crawls RSS/Atom feeds into per-day SQLite databases, then re-fetches
    stored article URLs and extracts title/body via OpenGraph metadata and
    site-specific selectors.
    """

    def __init__(self):
        pass

    @staticmethod
    def crawl(feed_list_file):
        """
        Read feed list file and call crawl_feed for each feed
        """
        with open(feed_list_file, 'r') as feeds:
            for feed in feeds.readlines():
                # The feed URL is the last whitespace-separated token.
                url = feed.split()[-1]
                News.crawl_feed(url)
                time.sleep(1)

    @staticmethod
    def crawl_feed(url):
        """
        Crawl a single feed

        Stores (url, flattened description text) rows into
        <ISO-date>/<domain>.db, ignoring duplicate URLs.
        """
        domain = tldextract.extract(url).domain
        day = date.today().isoformat()
        try:
            os.mkdir(day)
        except OSError:
            # Directory already exists from an earlier feed crawled today.
            print "Path exists: " + day
        try:
            print url
            data = feedparser.parse(url)
        except:
            print 'Error', url
            return
        print day, domain
        with sqlite3.connect("%(day)s/%(domain)s.db" % locals()) as conn:
            cur = conn.cursor()
            cur.execute('CREATE TABLE IF NOT EXISTS news ('
                        'ID INTEGER PRIMARY KEY AUTOINCREMENT, '
                        'url TEXT UNIQUE, '
                        'content TEXT)')
            try:
                # Strip HTML from each entry description and collapse
                # whitespace; the UNIQUE url column makes re-crawls
                # idempotent via INSERT OR IGNORE.
                rows = [(u'%s' % e.link,
                         u'%s' % unicode(' '.join(BeautifulSoup(e.description, "html.parser")
                                                  .get_text().split())).encode('utf8'))
                        for e in data.entries]
                cur.executemany("INSERT OR IGNORE INTO news(url,content) VALUES(?,?)", rows)
            except:
                # Best effort: a malformed entry aborts this feed's batch.
                pass
            conn.commit()

    def parse_feed(self, day_and_domain):
        """
        Parse feed.

        Re-fetches every article URL stored by crawl_feed for the given
        '<day>_<domain>' key and runs parse() on each page.
        """
        try:
            # ex) day_and_domain = '2015-01-23_etnews'
            day = day_and_domain[:10]
            domain = day_and_domain[11:]
            db_path = "%s/%s.db" % (day, domain)
            if not os.path.isfile(db_path):
                raise FileNotFoundError
        except:
            print 'Invalid day and domain: ', db_path
            return
        print day, domain
        rows = []
        with sqlite3.connect("%(day)s/%(domain)s.db" % locals()) as conn:
            cur = conn.cursor()
            cur.execute('SELECT url FROM news')
            rows = cur.fetchall()
        for index, row in enumerate(rows, start=1):
            try:
                url = row[0]
                print '%s : (%s/%s)' % (url, index, len(rows))
                response = urllib2.urlopen(url)
                html = response.read()
                self.parse(StringIO.StringIO(html))
                time.sleep(1)
            except Exception as ex:
                # Keep going on per-page failures; report at the end.
                print ex
        print "TOTAL: %s pages" % len(rows)

    def parse(self, ins=sys.stdin):
        """
        Parse news article with opengraph protocol

        Reads a full HTML document from `ins` and stores
        (url, title, article) into <domain>.db in the working directory.
        """
        html = ''.join(ins.readlines())
        soup = BeautifulSoup(html, "html.parser")
        url = self.get_url(soup)
        domain = tldextract.extract(url).domain
        title = self.get_title(soup)
        article = self.get_article(soup)
        if FLAGS.debug:
            print domain, url
            print title
            print article
        with sqlite3.connect("%(domain)s.db" % locals()) as conn:
            cur = conn.cursor()
            cur.execute('CREATE TABLE IF NOT EXISTS news (' +
                        'ID INTEGER PRIMARY KEY AUTOINCREMENT, ' +
                        'url TEXT UNIQUE, ' +
                        'title TEXT, ' +
                        'article TEXT)')
            cur.execute('INSERT OR IGNORE INTO news(url,title,article) VALUES(?,?,?)',
                        (url, title, article))
            conn.commit()

    @staticmethod
    def get_url(soup):
        """Get open-graph permanent url."""
        # Raises AttributeError when the page has no og:url meta tag.
        url = soup.find('meta', {'property': 'og:url'}).get('content')
        return url

    @staticmethod
    def get_title(soup):
        """Get title of article"""
        # Falls back to the <title> tag when the og:title tag carries no
        # content attribute.
        title = soup.find('meta', {'property': 'og:title'}).get('content')
        if title is None:
            title = soup.title.get_text()
        return title

    @staticmethod
    def is_empty(articles):
        """Check if the set of article is empty."""
        return len(articles) == 0 or len(''.join([' '.join(a.get_text().split())
                                                  for a in articles
                                                  if len(a.text.strip()) > 0])) == 0

    def get_article(self, soup):
        """Get article.

        Tries a cascade of site-specific container selectors until one
        yields non-empty text.
        """
        # Drop non-content nodes before text extraction.
        [s.extract() for s in soup(['script', 'iframe', 'style'])]
        articles = soup.findAll('div', {'class': 'article'})
        if self.is_empty(articles):  # joins
            articles = soup.findAll('div', {'class': 'article_content'})
        if self.is_empty(articles):  # hani
            articles = soup.findAll('div', {'class': 'article-contents'})
        if self.is_empty(articles):  # hani
            articles = soup.findAll('div', {'class': 'article-text'})
        if self.is_empty(articles):  # etnews
            articles = soup.findAll('div', {'class': 'article_body'})
        if self.is_empty(articles):  # kbs
            articles = soup.findAll('div', {'id': 'cont_newstext'})
        if self.is_empty(articles):  # mk
            articles = soup.findAll('div', {'id': 'artText'})
        if self.is_empty(articles):  # chosun
            articles = soup.findAll('div', {'id': 'news_body_id'})
        return ''.join([' '.join(a.get_text().split())
                        for a in articles
                        if len(a.text.strip()) > 0])
def main(argv):
    """Main."""
    try:
        FLAGS(argv)
    except gflags.FlagsError, ex:
        print '%s' % ex
        sys.exit(1)
    news = News()
    # Dispatch on whichever flags were supplied; they are not mutually
    # exclusive, so several actions may run in one invocation.
    if FLAGS.crawl:
        news.crawl(feed_list_file=FLAGS.crawl)
    if FLAGS.parse:
        news.parse()
    if FLAGS.feed:
        news.parse_feed(day_and_domain=FLAGS.feed)


if __name__ == '__main__':
    main(sys.argv)
| |
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns (y_true, y_pred, probas_pred) for the held-out second half of
    the shuffled dataset; probas_pred is 1-d (positive-class column) when
    binary, otherwise one column per class.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    p = np.arange(n_samples)
    # Fixed seed: deterministic shuffle so expected AUC values in the
    # tests below stay stable.
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)
    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
    """Test Area under Receiver Operating Characteristic (ROC) curve"""
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    # Cross-check the trapezoidal AUC against the naive pairwise-ranking
    # implementation and both public AUC entry points.
    expected_auc = _auc(y_true, probas_pred)
    assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
    assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
    assert_almost_equal(roc_auc,
                        ignore_warnings(auc_score)(y_true, probas_pred))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at
    # 1 even in corner cases (here: heavily tied integer scores).
    rng = np.random.RandomState(0)
    y_true = np.array([0] * 50 + [1] * 50)
    y_pred = rng.randint(3, size=100)
    fpr, tpr, thr = roc_curve(y_true, y_pred)
    assert_equal(fpr[0], 0)
    assert_equal(fpr[-1], 1)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
    """Test whether the returned threshold matches up with tpr"""
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # use the given thresholds to determine the tpr: recompute each point's
    # true-positive rate directly from scores >= threshold.
    tpr_correct = []
    for t in thresholds:
        tp = np.sum((probas_pred >= t) & y_true)
        p = np.sum(y_true)
        tpr_correct.append(1.0 * tp / p)

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
    """Test to ensure that we don't return spurious repeating thresholds.

    Duplicated thresholds can arise due to machine precision issues.
    """
    dataset = datasets.load_digits()
    X = dataset['data']
    y = dataset['target']

    # This random forest classifier can only return probabilities
    # significant to two decimal places
    clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)

    # How well can the classifier predict whether a digit is less than 5?
    # This task contributes floating point roundoff errors to the probabilities
    train, test = slice(None, None, 2), slice(1, None, 2)
    probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
    y_score = probas_pred[:, :5].sum(axis=1)  # roundoff errors begin here
    y_true = [yy < 5 for yy in y[test]]

    # Check for repeating values in the thresholds: rounding to the two
    # significant decimals must not merge any thresholds.
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
    """roc_curve not applicable for multi-class problems"""
    y_true, _, probas_pred = make_prediction(binary=False)

    # probas_pred is 2-d (one column per class) here, which roc_curve
    # must reject.
    assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
    """roc_curve for confidence scores"""
    y_true, _, probas_pred = make_prediction(binary=True)

    # Shifting probabilities by -0.5 yields signed confidence scores; the
    # monotonic transform must leave the curve (and AUC) unchanged.
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.90, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
    """roc_curve for hard decisions"""
    y_true, pred, probas_pred = make_prediction(binary=True)

    # always predict one: a constant score carries no ranking
    # information, so AUC is chance level.
    trivial_pred = np.ones(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # always predict zero
    trivial_pred = np.zeros(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # hard decisions: 0/1 class labels used as scores
    fpr, tpr, thresholds = roc_curve(y_true, pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.78, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
    """roc_curve when y_true holds a single class: the undefined rate is NaN."""
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    w = UndefinedMetricWarning
    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # assert there are warnings
    fpr, tpr, thresholds = assert_warns(w, roc_curve,
                                        [1 - x for x in y_true],
                                        y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
    """roc_curve and roc_auc_score on hand-checked toy inputs."""
    # NOTE(review): roc_curve returns (fpr, tpr, thresholds), but the
    # unpacking below names them in the swapped order `tpr, fpr, _`. The
    # expected arrays are consistent with that swap, so the assertions
    # hold; only the local variable names are misleading -- confirm
    # against the roc_curve API before reusing them.

    # Binary classification
    y_true = [0, 1]
    y_score = [0, 1]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    y_true = [0, 1]
    y_score = [1, 0]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1, 1])
    assert_array_almost_equal(fpr, [0, 0, 1])
    assert_almost_equal(roc_auc, 0.)

    y_true = [1, 0]
    y_score = [1, 1]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [0, 1])
    assert_almost_equal(roc_auc, 0.5)

    y_true = [1, 0]
    y_score = [1, 0]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    y_true = [1, 0]
    y_score = [0.5, 0.5]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [0, 1])
    assert_almost_equal(roc_auc, .5)

    # Degenerate inputs: only one class present, so the AUC is undefined
    # (raises) and one of the rates is NaN.
    y_true = [0, 0]
    y_score = [0.25, 0.75]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(tpr, [0., 0.5, 1.])
    assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])

    y_true = [1, 1]
    y_score = [0.25, 0.75]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(tpr, [np.nan, np.nan])
    assert_array_almost_equal(fpr, [0.5, 1.])

    # Multi-label classification task
    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [0, 1]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)

    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
    """Test Area Under Curve (AUC) computation"""
    # Each case: (x, y, expected trapezoidal area), checked in order.
    cases = [
        ([0, 1], [0, 1], 0.5),
        ([1, 0], [0, 1], 0.5),
        ([1, 0, 0], [0, 1, 1], 0.5),
        ([0, 1], [1, 1], 1),
        ([0, 0.5, 1], [0, 0.5, 1], 0.5),
    ]
    for x, y, expected_area in cases:
        assert_array_almost_equal(auc(x, y), expected_area)
def test_auc_duplicate_values():
    # Test Area Under Curve (AUC) computation with duplicate values

    # auc() was previously sorting the x and y arrays according to the indices
    # from numpy.argsort(x), which was reordering the tied 0's in this example
    # and resulting in an incorrect area computation. This test detects the
    # error.
    x = [-2.0, 0.0, 0.0, 0.0, 1.0]
    y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
    y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
    y3 = [2.0, 1.0, 0.5, 0.0, 1.0]

    # The area must not depend on how ties in x happen to be ordered.
    for y in (y1, y2, y3):
        assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
    """auc must reject malformed x/y inputs."""
    # Incompatible shapes
    assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])

    # Too few x values
    assert_raises(ValueError, auc, [0.0], [0.1])

    # x is not in order
    assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
    """Test that roc_auc_score function returns an error when trying
    to compute AUC for non-binary class values.
    """
    def check_errors():
        # Shared by both passes below: the same degenerate inputs must
        # raise whether or not warnings are being captured. (The body was
        # previously duplicated verbatim.)
        rng = check_random_state(404)
        y_pred = rng.rand(10)
        # y_true contains only one class value
        y_true = np.zeros(10, dtype="int")
        assert_raise_message(ValueError, "ROC AUC score is not defined",
                             roc_auc_score, y_true, y_pred)
        y_true = np.ones(10, dtype="int")
        assert_raise_message(ValueError, "ROC AUC score is not defined",
                             roc_auc_score, y_true, y_pred)
        y_true = -np.ones(10, dtype="int")
        assert_raise_message(ValueError, "ROC AUC score is not defined",
                             roc_auc_score, y_true, y_pred)
        # y_true contains three different class values
        y_true = rng.randint(0, 3, size=10)
        assert_raise_message(ValueError, "multiclass format is not supported",
                             roc_auc_score, y_true, y_pred)

    check_errors()

    # Repeat with a clean warning registry and warnings captured: the
    # errors must be raised regardless of warning state.
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        check_errors()
def test_precision_recall_curve():
    """Precision-recall curve on SVC predictions and a hand-checked toy case."""
    y_true, _, probas_pred = make_prediction(binary=True)
    _test_precision_recall_curve(y_true, probas_pred)

    # Use {-1, 1} for labels; make sure original labels aren't modified
    y_true[np.where(y_true == 0)] = -1
    y_true_copy = y_true.copy()
    _test_precision_recall_curve(y_true, probas_pred)
    assert_array_equal(y_true_copy, y_true)

    # Hand-checked toy curve; thresholds are the distinct scores ascending.
    labels = [1, 0, 0, 1]
    predict_probas = [1, 2, 3, 4]
    p, r, t = precision_recall_curve(labels, predict_probas)
    assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
    assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
    assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
    assert_equal(p.size, r.size)
    assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
    """pos_label= must match binarizing y_true == pos_label by hand."""
    y_true, _, probas_pred = make_prediction(binary=False)
    pos_label = 2
    p, r, thresholds = precision_recall_curve(y_true,
                                              probas_pred[:, pos_label],
                                              pos_label=pos_label)
    p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
                                                 probas_pred[:, pos_label])
    assert_array_almost_equal(p, p2)
    assert_array_almost_equal(r, r2)
    assert_array_almost_equal(thresholds, thresholds2)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    """Check the precision-recall curve and the area under it."""
    prec, rec, thresholds = precision_recall_curve(y_true, probas_pred)
    pr_auc = auc(rec, prec)
    assert_array_almost_equal(pr_auc, 0.85, 2)
    assert_array_almost_equal(pr_auc,
                              average_precision_score(y_true, probas_pred))
    assert_almost_equal(_average_precision(y_true, probas_pred), pr_auc, 1)
    assert_equal(prec.size, rec.size)
    assert_equal(prec.size, thresholds.size + 1)

    # Smoke test: constant (all-zero) scores must still yield a valid curve.
    prec, rec, thresholds = precision_recall_curve(y_true,
                                                   np.zeros_like(probas_pred))
    pr_auc = auc(rec, prec)
    assert_array_almost_equal(pr_auc, 0.75, 3)
    assert_equal(prec.size, rec.size)
    assert_equal(prec.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
    """Non-binary y_true must be rejected with a ValueError."""
    assert_raises(ValueError, precision_recall_curve,
                  [0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
    """Exhaustive hand-computed checks of precision_recall_curve and
    average_precision_score on two-sample binary and multi-label inputs,
    including ties and degenerate single-class cases."""
    # errstate(all="raise") turns silent numpy warnings (e.g. 0/0) into errors.
    with np.errstate(all="raise"):
        # Binary classification
        y_true = [0, 1]
        y_score = [0, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)
        # Perfectly inverted scores.
        y_true = [0, 1]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 0., 1.])
        assert_array_almost_equal(r, [1., 0., 0.])
        assert_almost_equal(auc_prc, 0.25)
        # Tied scores: the positive cannot be separated from the negative.
        y_true = [1, 0]
        y_score = [1, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1., 0])
        assert_almost_equal(auc_prc, .75)
        y_true = [1, 0]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)
        y_true = [1, 0]
        y_score = [0.5, 0.5]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1, 0.])
        assert_almost_equal(auc_prc, .75)
        # Degenerate cases: y_true contains a single class.
        y_true = [0, 0]
        y_score = [0.25, 0.75]
        assert_raises(Exception, precision_recall_curve, y_true, y_score)
        assert_raises(Exception, average_precision_score, y_true, y_score)
        y_true = [1, 1]
        y_score = [0.25, 0.75]
        p, r, _ = precision_recall_curve(y_true, y_score)
        assert_almost_equal(average_precision_score(y_true, y_score), 1.)
        assert_array_almost_equal(p, [1., 1., 1.])
        assert_array_almost_equal(r, [1, 0.5, 0.])
        # Multi-label classification task
        # macro/weighted averaging raise here (a column has no positives);
        # samples/micro remain well-defined.
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [0, 1]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="samples"), 1.)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="micro"), 1.)
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="samples"), 0.625)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="micro"), 0.625)
        # Both labels have positives: every averaging mode is defined.
        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="macro"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="weighted"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="samples"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="micro"), 0.25)
        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="macro"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="weighted"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="samples"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="micro"), 0.75)
def test_score_scale_invariance():
    """Scaling or shifting the scores must not change ranking-based metrics."""
    y_true, _, probas_pred = make_prediction(binary=True)

    # roc_auc_score and the deprecated auc_score (warnings silenced) must
    # both be invariant under affine transforms of the scores.
    for scorer in (roc_auc_score, ignore_warnings(auc_score)):
        baseline = scorer(y_true, probas_pred)
        assert_equal(baseline, scorer(y_true, 100 * probas_pred))
        assert_equal(baseline, scorer(y_true, probas_pred - 10))

    # Same invariance for average precision.
    pr_auc = average_precision_score(y_true, probas_pred)
    assert_equal(pr_auc, average_precision_score(y_true, 100 * probas_pred))
    assert_equal(pr_auc, average_precision_score(y_true, probas_pred - 10))
def check_lrap_toy(lrap_score):
    """Check label ranking average precision on small hand-computed examples.

    Each expected value is the mean, over relevant labels, of the fraction
    of relevant labels ranked at or above that label's position.
    """
    assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 1) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
    # Tie handling
    assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
    assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
                        3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
    """LRAP is defined as 1 when no label (or every label) is relevant."""
    rng = check_random_state(0)

    for n_labels in range(2, 5):
        scores = rng.uniform(size=(1, n_labels))
        tied_scores = np.zeros_like(scores)

        # All-irrelevant, then all-relevant truth vectors; the score must be
        # exactly 1 whether the predictions are distinct or fully tied.
        for truth in (np.zeros((1, n_labels)), np.ones((1, n_labels))):
            assert_equal(lrap_score(truth, scores), 1.)
            assert_equal(lrap_score(truth, tied_scores), 1.)

    # Degenerate case: a single label per sample.
    assert_almost_equal(lrap_score([[1], [0], [1], [0]],
                                   [[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    """Check that malformed inputs raise ValueError."""
    # Raise value error if not appropriate format
    assert_raises(ValueError, lrap_score,
                  [0, 1, 0], [0.25, 0.3, 0.2])
    assert_raises(ValueError, lrap_score, [0, 1, 2],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    assert_raises(ValueError, lrap_score, [(0), (1), (2)],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
    """With fully tied scores, LRAP equals the fraction of relevant labels."""
    # Growing label space, every score identical.
    for n_labels in range(2, 10):
        tied_scores = np.ones((1, n_labels))
        # Growing runs of consecutive relevant labels ...
        for n_relevant in range(1, n_labels):
            # ... placed at every possible starting position.
            for start in range(n_labels - n_relevant):
                truth = np.zeros((1, n_labels))
                truth[0, start:start + n_relevant] = 1
                assert_almost_equal(lrap_score(truth, tied_scores),
                                    n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    """Check label ranking average precision with distinct, strictly
    decreasing scores over a growing label space."""
    # Basic check with increasing label space size and decreasing score
    for n_labels in range(2, 10):
        # Scores are n_labels-1, n_labels-2, ..., 0: label 0 is ranked first.
        y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
        # First and last labels relevant
        y_true = np.zeros((1, n_labels))
        y_true[0, 0] = 1
        y_true[0, -1] = 1
        assert_almost_equal(lrap_score(y_true, y_score),
                            (2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for every possible starting position
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                # Closed-form expected value: label at offset r has rank
                # pos + r + 1 and r + 1 relevant labels at or above it.
                assert_almost_equal(lrap_score(y_true, y_score),
                                    sum((r + 1) / ((pos + r + 1) * n_relevant)
                                        for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
    """Simple reference implementation of label ranking average precision.

    For each sample, averages over the relevant labels the fraction of
    relevant labels ranked at or above that label; the final score is the
    mean over samples. Ties share the worst rank of their group.
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    n_samples, n_labels = y_true.shape
    score = np.empty((n_samples, ))
    for i in range(n_samples):
        # The best rank corresponds to 1; ranks greater than 1 are worse.
        # np.unique sorts ascending, so inverting against n_ranks gives
        # rank 1 to the highest score.
        unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
        n_ranks = unique_rank.size
        rank = n_ranks - inv_rank
        # Ranks need to be corrected to account for ties:
        # e.g. two labels tied at rank 1 are both treated as rank 2.
        corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
        rank = corr_rank[rank]
        relevant = y_true[i].nonzero()[0]
        # By convention the score is 1 when no label, or every label,
        # is relevant.
        if relevant.size == 0 or relevant.size == n_labels:
            score[i] = 1
            continue
        score[i] = 0.
        for label in relevant:
            # Count the relevant labels with a better (smaller) or equal rank.
            n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
            # Weight by the rank of the actual label
            score[i] += n_ranked_above / rank[label]
        score[i] /= relevant.size
    return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    """Compare label_ranking_average_precision_score against the reference
    implementation (_my_lrap) on random multi-label data, with and without
    tied scores."""
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               return_indicator=True,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)
    # Score with ties (sparse matrix has many zero entries -> tied scores)
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)
    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
    # Uniform score (ties almost surely absent)
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
    """Yield the LRAP check suite for both implementations (nose generator)."""
    checks = (check_lrap_toy,
              check_lrap_without_tie_and_increasing_score,
              check_lrap_only_ties,
              check_zero_or_all_relevant_labels)
    for scorer in [label_ranking_average_precision_score, _my_lrap]:
        for check in checks:
            yield check, scorer

    # Input validation only needs checking on the public implementation.
    yield check_lrap_error_raised, label_ranking_average_precision_score

    for n_samples, n_classes, random_state in product((1, 2, 8, 20),
                                                      (2, 5, 10),
                                                      range(1)):
        yield (check_alternative_lrap_implementation,
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state)
| |
from datetime import datetime
from django.conf import settings
from django.db.models import (
DateField, DateTimeField, DurationField, Field, IntegerField, TimeField,
Transform,
)
from django.db.models.lookups import (
YearExact, YearGt, YearGte, YearLt, YearLte,
)
from django.utils import timezone
class TimezoneMixin:
    tzinfo = None

    def get_tzname(self):
        """Return the timezone name to convert with, or None when USE_TZ is off.

        Timezone conversions must happen to the input datetime *before*
        applying a function. 2015-12-31 23:00:00 -02:00 is stored in the
        database as 2016-01-01 01:00:00 +00:00. Any results should be
        based on the input datetime not the stored datetime.
        """
        if not settings.USE_TZ:
            return None
        if self.tzinfo is None:
            return timezone.get_current_timezone_name()
        return timezone._get_timezone_name(self.tzinfo)
class Extract(TimezoneMixin, Transform):
    """Extract a named component (year, month, hour, ...) from a date/time
    expression as an integer.

    Subclasses set ``lookup_name``; the generic form accepts it as a
    constructor argument instead.
    """
    lookup_name = None
    output_field = IntegerField()

    def __init__(self, expression, lookup_name=None, tzinfo=None, **extra):
        # A class-level lookup_name (set by subclasses) takes precedence over
        # the constructor argument.
        if self.lookup_name is None:
            self.lookup_name = lookup_name
        if self.lookup_name is None:
            raise ValueError('lookup_name must be provided')
        self.tzinfo = tzinfo
        super().__init__(expression, **extra)

    def as_sql(self, compiler, connection):
        # Dispatch to the backend-specific extract implementation according
        # to the type of the wrapped expression.
        sql, params = compiler.compile(self.lhs)
        lhs_output_field = self.lhs.output_field
        if isinstance(lhs_output_field, DateTimeField):
            # Datetimes are converted to the active timezone before extracting.
            tzname = self.get_tzname()
            sql = connection.ops.datetime_extract_sql(self.lookup_name, sql, tzname)
        elif isinstance(lhs_output_field, DateField):
            sql = connection.ops.date_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, TimeField):
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, DurationField):
            if not connection.features.has_native_duration_field:
                raise ValueError('Extract requires native DurationField database support.')
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        else:
            # resolve_expression has already validated the output_field so this
            # assert should never be hit.
            assert False, "Tried to Extract from an invalid type."
        return sql, params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Validate the input field type up front so as_sql can't be reached
        # with an unsupported field.
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)):
            raise ValueError(
                'Extract input expression must be DateField, DateTimeField, '
                'TimeField, or DurationField.'
            )
        # Passing dates to functions expecting datetimes is most likely a mistake.
        if type(field) == DateField and copy.lookup_name in ('hour', 'minute', 'second'):
            raise ValueError(
                "Cannot extract time component '%s' from DateField '%s'. " % (copy.lookup_name, field.name)
            )
        return copy
class ExtractYear(Extract):
    """Extract the year."""
    lookup_name = 'year'


class ExtractMonth(Extract):
    """Extract the month."""
    lookup_name = 'month'


class ExtractDay(Extract):
    """Extract the day of the month."""
    lookup_name = 'day'


class ExtractWeek(Extract):
    """
    Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the
    week.
    """
    lookup_name = 'week'


class ExtractWeekDay(Extract):
    """
    Return Sunday=1 through Saturday=7.
    To replicate this in Python: (mydatetime.isoweekday() % 7) + 1
    """
    lookup_name = 'week_day'


class ExtractQuarter(Extract):
    """Extract the quarter of the year."""
    lookup_name = 'quarter'


class ExtractHour(Extract):
    """Extract the hour."""
    lookup_name = 'hour'


class ExtractMinute(Extract):
    """Extract the minute."""
    lookup_name = 'minute'


class ExtractSecond(Extract):
    """Extract the second."""
    lookup_name = 'second'
# Register the extract transforms as field lookups (e.g. __year, __week_day).
# Time components are registered only on fields that carry a time.
DateField.register_lookup(ExtractYear)
DateField.register_lookup(ExtractMonth)
DateField.register_lookup(ExtractDay)
DateField.register_lookup(ExtractWeekDay)
DateField.register_lookup(ExtractWeek)
DateField.register_lookup(ExtractQuarter)
TimeField.register_lookup(ExtractHour)
TimeField.register_lookup(ExtractMinute)
TimeField.register_lookup(ExtractSecond)
DateTimeField.register_lookup(ExtractHour)
DateTimeField.register_lookup(ExtractMinute)
DateTimeField.register_lookup(ExtractSecond)
# Allow comparison lookups (exact/gt/gte/lt/lte) on an extracted year.
ExtractYear.register_lookup(YearExact)
ExtractYear.register_lookup(YearGt)
ExtractYear.register_lookup(YearGte)
ExtractYear.register_lookup(YearLt)
ExtractYear.register_lookup(YearLte)
class TruncBase(TimezoneMixin, Transform):
    """Base for the Trunc* transforms: truncate a date/time expression to
    the precision named by ``kind``."""
    kind = None
    tzinfo = None

    def __init__(self, expression, output_field=None, tzinfo=None, **extra):
        self.tzinfo = tzinfo
        super().__init__(expression, output_field=output_field, **extra)

    def as_sql(self, compiler, connection):
        # Dispatch to the backend trunc implementation matching the
        # (already validated) output field.
        inner_sql, inner_params = compiler.compile(self.lhs)
        # Escape any params because trunc_sql will format the string.
        inner_sql = inner_sql.replace('%s', '%%s')
        if isinstance(self.output_field, DateTimeField):
            tzname = self.get_tzname()
            sql = connection.ops.datetime_trunc_sql(self.kind, inner_sql, tzname)
        elif isinstance(self.output_field, DateField):
            sql = connection.ops.date_trunc_sql(self.kind, inner_sql)
        elif isinstance(self.output_field, TimeField):
            sql = connection.ops.time_trunc_sql(self.kind, inner_sql)
        else:
            raise ValueError('Trunc only valid on DateField, TimeField, or DateTimeField.')
        return sql, inner_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Validate input/output field combinations before the SQL is built.
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        # DateTimeField is a subclass of DateField so this works for both.
        assert isinstance(field, (DateField, TimeField)), (
            "%r isn't a DateField, TimeField, or DateTimeField." % field.name
        )
        # If self.output_field was None, then accessing the field will trigger
        # the resolver to assign it to self.lhs.output_field.
        if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)):
            raise ValueError('output_field must be either DateField, TimeField, or DateTimeField')
        # Passing dates or times to functions expecting datetimes is most
        # likely a mistake.
        class_output_field = self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None
        output_field = class_output_field or copy.output_field
        has_explicit_output_field = class_output_field or field.__class__ is not copy.output_field.__class__
        if type(field) == DateField and (
                isinstance(output_field, DateTimeField) or copy.kind in ('hour', 'minute', 'second', 'time')):
            raise ValueError("Cannot truncate DateField '%s' to %s. " % (
                field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        elif isinstance(field, TimeField) and (
                isinstance(output_field, DateTimeField) or copy.kind in ('year', 'quarter', 'month', 'day', 'date')):
            raise ValueError("Cannot truncate TimeField '%s' to %s. " % (
                field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        return copy

    def convert_value(self, value, expression, connection):
        # Convert the raw database value into the Python type matching the
        # output field (aware datetime, date, or time).
        if isinstance(self.output_field, DateTimeField):
            if settings.USE_TZ:
                if value is None:
                    raise ValueError(
                        "Database returned an invalid datetime value. "
                        "Are time zone definitions for your database installed?"
                    )
                value = value.replace(tzinfo=None)
                value = timezone.make_aware(value, self.tzinfo)
        elif isinstance(value, datetime):
            # Some backends return full datetimes; narrow to the output type.
            if isinstance(self.output_field, DateField):
                value = value.date()
            elif isinstance(self.output_field, TimeField):
                value = value.time()
        return value
class Trunc(TruncBase):
    """Generic truncation whose ``kind`` is given at call time rather than
    by a dedicated subclass."""
    def __init__(self, expression, kind, output_field=None, tzinfo=None, **extra):
        self.kind = kind
        super().__init__(expression, output_field=output_field, tzinfo=tzinfo, **extra)
class TruncYear(TruncBase):
    """Truncate to year precision."""
    kind = 'year'


class TruncQuarter(TruncBase):
    """Truncate to quarter precision."""
    kind = 'quarter'


class TruncMonth(TruncBase):
    """Truncate to month precision."""
    kind = 'month'


class TruncDay(TruncBase):
    """Truncate to day precision."""
    kind = 'day'
class TruncDate(TruncBase):
    """Coerce a datetime expression to its date part via a SQL cast."""
    kind = 'date'
    lookup_name = 'date'
    output_field = DateField()

    def as_sql(self, compiler, connection):
        # A cast to date is used here rather than a truncation.
        sql, params = compiler.compile(self.lhs)
        if settings.USE_TZ:
            tzname = timezone.get_current_timezone_name()
        else:
            tzname = None
        return connection.ops.datetime_cast_date_sql(sql, tzname), params
class TruncTime(TruncBase):
    """Coerce a datetime expression to its time part via a SQL cast."""
    kind = 'time'
    lookup_name = 'time'
    output_field = TimeField()

    def as_sql(self, compiler, connection):
        # Cast to time rather than truncate to time.
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_time_sql(lhs, tzname)
        return sql, lhs_params
class TruncHour(TruncBase):
    """Truncate to hour precision."""
    kind = 'hour'


class TruncMinute(TruncBase):
    """Truncate to minute precision."""
    kind = 'minute'


class TruncSecond(TruncBase):
    """Truncate to second precision."""
    kind = 'second'
# Expose __date and __time lookups on DateTimeField via the cast-based truncs.
DateTimeField.register_lookup(TruncDate)
DateTimeField.register_lookup(TruncTime)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Base AWS Hook.
.. seealso::
For more information on how to use this hook, take a look at the guide:
:ref:`howto/connection:AWSHook`
"""
import configparser
import datetime
import logging
from functools import wraps
from typing import Any, Callable, Dict, Optional, Tuple, Union
import boto3
import botocore
import botocore.session
import tenacity
from botocore.config import Config
from botocore.credentials import ReadOnlyCredentials
try:
from functools import cached_property
except ImportError:
from cached_property import cached_property
from dateutil.tz import tzlocal
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models.connection import Connection
from airflow.utils.log.logging_mixin import LoggingMixin
class _SessionFactory(LoggingMixin):
def __init__(self, conn: Connection, region_name: Optional[str], config: Config) -> None:
super().__init__()
self.conn = conn
self.region_name = region_name
self.config = config
self.extra_config = self.conn.extra_dejson
def create_session(self) -> boto3.session.Session:
"""Create AWS session."""
session_kwargs = {}
if "session_kwargs" in self.extra_config:
self.log.info(
"Retrieving session_kwargs from Connection.extra_config['session_kwargs']: %s",
self.extra_config["session_kwargs"],
)
session_kwargs = self.extra_config["session_kwargs"]
session = self._create_basic_session(session_kwargs=session_kwargs)
role_arn = self._read_role_arn_from_extra_config()
# If role_arn was specified then STS + assume_role
if role_arn is None:
return session
return self._impersonate_to_role(role_arn=role_arn, session=session, session_kwargs=session_kwargs)
def _create_basic_session(self, session_kwargs: Dict[str, Any]) -> boto3.session.Session:
aws_access_key_id, aws_secret_access_key = self._read_credentials_from_connection()
aws_session_token = self.extra_config.get("aws_session_token")
region_name = self.region_name
if self.region_name is None and 'region_name' in self.extra_config:
self.log.info("Retrieving region_name from Connection.extra_config['region_name']")
region_name = self.extra_config["region_name"]
self.log.info(
"Creating session with aws_access_key_id=%s region_name=%s",
aws_access_key_id,
region_name,
)
return boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name,
aws_session_token=aws_session_token,
**session_kwargs,
)
def _impersonate_to_role(
self, role_arn: str, session: boto3.session.Session, session_kwargs: Dict[str, Any]
) -> boto3.session.Session:
assume_role_kwargs = self.extra_config.get("assume_role_kwargs", {})
assume_role_method = self.extra_config.get('assume_role_method')
self.log.info("assume_role_method=%s", assume_role_method)
if not assume_role_method or assume_role_method == 'assume_role':
sts_client = session.client("sts", config=self.config)
sts_response = self._assume_role(
sts_client=sts_client, role_arn=role_arn, assume_role_kwargs=assume_role_kwargs
)
elif assume_role_method == 'assume_role_with_saml':
sts_client = session.client("sts", config=self.config)
sts_response = self._assume_role_with_saml(
sts_client=sts_client, role_arn=role_arn, assume_role_kwargs=assume_role_kwargs
)
elif assume_role_method == 'assume_role_with_web_identity':
botocore_session = self._assume_role_with_web_identity(
role_arn=role_arn,
assume_role_kwargs=assume_role_kwargs,
base_session=session._session, # pylint: disable=protected-access
)
return boto3.session.Session(
region_name=session.region_name,
botocore_session=botocore_session,
**session_kwargs,
)
else:
raise NotImplementedError(
f'assume_role_method={assume_role_method} in Connection {self.conn.conn_id} Extra.'
'Currently "assume_role" or "assume_role_with_saml" are supported.'
'(Exclude this setting will default to "assume_role").'
)
# Use credentials retrieved from STS
credentials = sts_response["Credentials"]
aws_access_key_id = credentials["AccessKeyId"]
aws_secret_access_key = credentials["SecretAccessKey"]
aws_session_token = credentials["SessionToken"]
self.log.info(
"Creating session with aws_access_key_id=%s region_name=%s",
aws_access_key_id,
session.region_name,
)
return boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=session.region_name,
aws_session_token=aws_session_token,
**session_kwargs,
)
def _read_role_arn_from_extra_config(self) -> Optional[str]:
aws_account_id = self.extra_config.get("aws_account_id")
aws_iam_role = self.extra_config.get("aws_iam_role")
role_arn = self.extra_config.get("role_arn")
if role_arn is None and aws_account_id is not None and aws_iam_role is not None:
self.log.info("Constructing role_arn from aws_account_id and aws_iam_role")
role_arn = f"arn:aws:iam::{aws_account_id}:role/{aws_iam_role}"
self.log.info("role_arn is %s", role_arn)
return role_arn
def _read_credentials_from_connection(self) -> Tuple[Optional[str], Optional[str]]:
aws_access_key_id = None
aws_secret_access_key = None
if self.conn.login:
aws_access_key_id = self.conn.login
aws_secret_access_key = self.conn.password
self.log.info("Credentials retrieved from login")
elif "aws_access_key_id" in self.extra_config and "aws_secret_access_key" in self.extra_config:
aws_access_key_id = self.extra_config["aws_access_key_id"]
aws_secret_access_key = self.extra_config["aws_secret_access_key"]
self.log.info("Credentials retrieved from extra_config")
elif "s3_config_file" in self.extra_config:
aws_access_key_id, aws_secret_access_key = _parse_s3_config(
self.extra_config["s3_config_file"],
self.extra_config.get("s3_config_format"),
self.extra_config.get("profile"),
)
self.log.info("Credentials retrieved from extra_config['s3_config_file']")
else:
self.log.info("No credentials retrieved from Connection")
return aws_access_key_id, aws_secret_access_key
def _assume_role(
self, sts_client: boto3.client, role_arn: str, assume_role_kwargs: Dict[str, Any]
) -> Dict:
if "external_id" in self.extra_config: # Backwards compatibility
assume_role_kwargs["ExternalId"] = self.extra_config.get("external_id")
role_session_name = f"Airflow_{self.conn.conn_id}"
self.log.info(
"Doing sts_client.assume_role to role_arn=%s (role_session_name=%s)",
role_arn,
role_session_name,
)
return sts_client.assume_role(
RoleArn=role_arn, RoleSessionName=role_session_name, **assume_role_kwargs
)
def _assume_role_with_saml(
self, sts_client: boto3.client, role_arn: str, assume_role_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
saml_config = self.extra_config['assume_role_with_saml']
principal_arn = saml_config['principal_arn']
idp_auth_method = saml_config['idp_auth_method']
if idp_auth_method == 'http_spegno_auth':
saml_assertion = self._fetch_saml_assertion_using_http_spegno_auth(saml_config)
else:
raise NotImplementedError(
f'idp_auth_method={idp_auth_method} in Connection {self.conn.conn_id} Extra.'
'Currently only "http_spegno_auth" is supported, and must be specified.'
)
self.log.info("Doing sts_client.assume_role_with_saml to role_arn=%s", role_arn)
return sts_client.assume_role_with_saml(
RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=saml_assertion, **assume_role_kwargs
)
def _fetch_saml_assertion_using_http_spegno_auth(self, saml_config: Dict[str, Any]) -> str:
import requests
# requests_gssapi will need paramiko > 2.6 since you'll need
# 'gssapi' not 'python-gssapi' from PyPi.
# https://github.com/paramiko/paramiko/pull/1311
import requests_gssapi
from lxml import etree
idp_url = saml_config["idp_url"]
self.log.info("idp_url= %s", idp_url)
idp_request_kwargs = saml_config["idp_request_kwargs"]
auth = requests_gssapi.HTTPSPNEGOAuth()
if 'mutual_authentication' in saml_config:
mutual_auth = saml_config['mutual_authentication']
if mutual_auth == 'REQUIRED':
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.REQUIRED)
elif mutual_auth == 'OPTIONAL':
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.OPTIONAL)
elif mutual_auth == 'DISABLED':
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.DISABLED)
else:
raise NotImplementedError(
f'mutual_authentication={mutual_auth} in Connection {self.conn.conn_id} Extra.'
'Currently "REQUIRED", "OPTIONAL" and "DISABLED" are supported.'
'(Exclude this setting will default to HTTPSPNEGOAuth() ).'
)
# Query the IDP
idp_response = requests.get(idp_url, auth=auth, **idp_request_kwargs)
idp_response.raise_for_status()
# Assist with debugging. Note: contains sensitive info!
xpath = saml_config['saml_response_xpath']
log_idp_response = 'log_idp_response' in saml_config and saml_config['log_idp_response']
if log_idp_response:
self.log.warning(
'The IDP response contains sensitive information, but log_idp_response is ON (%s).',
log_idp_response,
)
self.log.info('idp_response.content= %s', idp_response.content)
self.log.info('xpath= %s', xpath)
# Extract SAML Assertion from the returned HTML / XML
xml = etree.fromstring(idp_response.content)
saml_assertion = xml.xpath(xpath)
if isinstance(saml_assertion, list):
if len(saml_assertion) == 1:
saml_assertion = saml_assertion[0]
if not saml_assertion:
raise ValueError('Invalid SAML Assertion')
return saml_assertion
def _assume_role_with_web_identity(self, role_arn, assume_role_kwargs, base_session):
    """Build a botocore session whose credentials come from STS
    AssumeRoleWithWebIdentity, refreshed lazily on first use.

    Currently only the 'google' federation source is supported.
    """
    base_session = base_session or botocore.session.get_session()
    client_creator = base_session.create_client
    federation = self.extra_config.get('assume_role_with_web_identity_federation')
    if federation == 'google':
        web_identity_token_loader = self._get_google_identity_token_loader()
    else:
        raise AirflowException(
            f'Unsupported federation: {federation}. Currently "google" only are supported.'
        )
    # The fetcher exchanges the web-identity token for temporary STS credentials.
    fetcher = botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher(
        client_creator=client_creator,
        web_identity_token_loader=web_identity_token_loader,
        role_arn=role_arn,
        extra_args=assume_role_kwargs or {},
    )
    # Deferred credentials: nothing is fetched until boto3 first needs them,
    # and they auto-refresh on expiry.
    aws_creds = botocore.credentials.DeferredRefreshableCredentials(
        method='assume-role-with-web-identity',
        refresh_using=fetcher.fetch_credentials,
        time_fetcher=lambda: datetime.datetime.now(tz=tzlocal()),
    )
    botocore_session = botocore.session.Session()
    # Inject the refreshable credentials into the fresh botocore session.
    botocore_session._credentials = aws_creds  # pylint: disable=protected-access
    return botocore_session
def _get_google_identity_token_loader(self):
    """Return a zero-argument callable that yields a (refreshed) Google OIDC
    identity token for the configured federation audience."""
    from google.auth.transport import requests as requests_transport

    from airflow.providers.google.common.utils.id_token_credentials import (
        get_default_id_token_credentials,
    )

    audience = self.extra_config.get('assume_role_with_web_identity_federation_audience')
    google_id_token_credentials = get_default_id_token_credentials(target_audience=audience)

    def web_identity_token_loader():
        # Refresh only when the cached token is missing or expired.
        if not google_id_token_credentials.valid:
            request_adapter = requests_transport.Request()
            google_id_token_credentials.refresh(request=request_adapter)
        return google_id_token_credentials.token

    return web_identity_token_loader
class AwsBaseHook(BaseHook):
    """
    Interact with AWS.
    This class is a thin wrapper around the boto3 python library.

    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is None or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :type aws_conn_id: str
    :param verify: Whether or not to verify SSL certificates.
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
    :type verify: Union[bool, str, None]
    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :type region_name: Optional[str]
    :param client_type: boto3.client client_type. Eg 's3', 'emr' etc
    :type client_type: Optional[str]
    :param resource_type: boto3.resource resource_type. Eg 'dynamodb' etc
    :type resource_type: Optional[str]
    :param config: Configuration for botocore client.
        (https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html)
    :type config: Optional[botocore.client.Config]
    """

    conn_name_attr = 'aws_conn_id'
    default_conn_name = 'aws_default'
    conn_type = 'aws'
    hook_name = 'Amazon Web Services'

    def __init__(
        self,
        aws_conn_id: Optional[str] = default_conn_name,
        verify: Union[bool, str, None] = None,
        region_name: Optional[str] = None,
        client_type: Optional[str] = None,
        resource_type: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> None:
        super().__init__()
        self.aws_conn_id = aws_conn_id
        self.verify = verify
        self.client_type = client_type
        self.resource_type = resource_type
        self.region_name = region_name
        self.config = config

        # One of the two must be set so that `conn` can build something.
        if not (self.client_type or self.resource_type):
            raise AirflowException('Either client_type or resource_type must be provided.')

    def _get_credentials(self, region_name: Optional[str]) -> Tuple[boto3.session.Session, Optional[str]]:
        """Return ``(boto3 session, optional endpoint_url override)``.

        Falls back to the default boto3 credential chain when no Airflow
        connection id is configured or the connection cannot be used.
        """
        if not self.aws_conn_id:
            session = boto3.session.Session(region_name=region_name)
            return session, None

        self.log.info("Airflow Connection: aws_conn_id=%s", self.aws_conn_id)

        try:
            # Fetch the Airflow connection object
            connection_object = self.get_connection(self.aws_conn_id)
            extra_config = connection_object.extra_dejson
            # "host" in Extra doubles as a custom endpoint URL (e.g. for localstack).
            endpoint_url = extra_config.get("host")

            # https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config
            if "config_kwargs" in extra_config:
                self.log.info(
                    "Retrieving config_kwargs from Connection.extra_config['config_kwargs']: %s",
                    extra_config["config_kwargs"],
                )
                self.config = Config(**extra_config["config_kwargs"])

            session = _SessionFactory(
                conn=connection_object, region_name=region_name, config=self.config
            ).create_session()

            return session, endpoint_url
        except AirflowException:
            self.log.warning("Unable to use Airflow Connection for credentials.")
            self.log.info("Fallback on boto3 credential strategy")
            # http://boto3.readthedocs.io/en/latest/guide/configuration.html

        self.log.info(
            "Creating session using boto3 credential strategy region_name=%s",
            region_name,
        )
        session = boto3.session.Session(region_name=region_name)
        return session, None

    def get_client_type(
        self,
        client_type: str,
        region_name: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> boto3.client:
        """Get the underlying boto3 client using boto3 session"""
        session, endpoint_url = self._get_credentials(region_name)

        # No AWS Operators use the config argument to this method.
        # Keep backward compatibility with other users who might use it
        if config is None:
            config = self.config

        return session.client(client_type, endpoint_url=endpoint_url, config=config, verify=self.verify)

    def get_resource_type(
        self,
        resource_type: str,
        region_name: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> boto3.resource:
        """Get the underlying boto3 resource using boto3 session"""
        session, endpoint_url = self._get_credentials(region_name)

        # No AWS Operators use the config argument to this method.
        # Keep backward compatibility with other users who might use it
        if config is None:
            config = self.config

        return session.resource(resource_type, endpoint_url=endpoint_url, config=config, verify=self.verify)

    @cached_property
    def conn(self) -> Union[boto3.client, boto3.resource]:
        """
        Get the underlying boto3 client/resource (cached)

        :return: boto3.client or boto3.resource
        :rtype: Union[boto3.client, boto3.resource]
        """
        if self.client_type:
            return self.get_client_type(self.client_type, region_name=self.region_name)
        elif self.resource_type:
            return self.get_resource_type(self.resource_type, region_name=self.region_name)
        else:
            # Rare possibility - subclasses have not specified a client_type or resource_type
            raise NotImplementedError('Could not get boto3 connection!')

    def get_conn(self) -> Union[boto3.client, boto3.resource]:
        """
        Get the underlying boto3 client/resource (cached)

        Implemented so that caching works as intended. It exists for compatibility
        with subclasses that rely on a super().get_conn() method.

        :return: boto3.client or boto3.resource
        :rtype: Union[boto3.client, boto3.resource]
        """
        # Compat shim
        return self.conn

    def get_session(self, region_name: Optional[str] = None) -> boto3.session.Session:
        """Get the underlying boto3.session."""
        session, _ = self._get_credentials(region_name)
        return session

    def get_credentials(self, region_name: Optional[str] = None) -> ReadOnlyCredentials:
        """
        Get the underlying `botocore.Credentials` object.

        This contains the following authentication attributes: access_key, secret_key and token.
        """
        session, _ = self._get_credentials(region_name)
        # Credentials are refreshable, so accessing your access key and
        # secret key separately can lead to a race condition.
        # See https://stackoverflow.com/a/36291428/8283373
        return session.get_credentials().get_frozen_credentials()

    def expand_role(self, role: str) -> str:
        """
        If the IAM role is a role name, get the Amazon Resource Name (ARN) for the role.
        If IAM role is already an IAM role ARN, no change is made.

        :param role: IAM role name or ARN
        :return: IAM role ARN
        """
        # NOTE(review): "/" is used as the ARN heuristic — role names cannot
        # contain "/", while ARNs always do (arn:aws:iam::.../name).
        if "/" in role:
            return role
        else:
            return self.get_client_type("iam").get_role(RoleName=role)["Role"]["Arn"]

    @staticmethod
    def retry(should_retry: Callable[[Exception], bool]):
        """
        A decorator that provides a mechanism to repeat requests in response to exceeding a temporary
        quota limit.

        Retry behaviour is configured via an optional ``retry_args`` dict on the
        decorated hook instance (keys: multiplier, min, max, stop_after_delay).
        """

        def retry_decorator(fun: Callable):
            @wraps(fun)
            def decorator_f(self, *args, **kwargs):
                retry_args = getattr(self, 'retry_args', None)
                if retry_args is None:
                    # BUG FIX: args/kwargs were previously dropped (`fun(self)`),
                    # silently ignoring any arguments passed by the caller.
                    return fun(self, *args, **kwargs)
                multiplier = retry_args.get('multiplier', 1)
                min_limit = retry_args.get('min', 1)
                max_limit = retry_args.get('max', 1)
                stop_after_delay = retry_args.get('stop_after_delay', 10)
                tenacity_logger = tenacity.before_log(self.log, logging.DEBUG) if self.log else None
                default_kwargs = {
                    'wait': tenacity.wait_exponential(multiplier=multiplier, max=max_limit, min=min_limit),
                    'retry': tenacity.retry_if_exception(should_retry),
                    'stop': tenacity.stop_after_delay(stop_after_delay),
                    'before': tenacity_logger,
                    'after': tenacity_logger,
                }
                # BUG FIX: forward args/kwargs to the retried call as well.
                return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)

            return decorator_f

        return retry_decorator
def _parse_s3_config(
config_file_name: str, config_format: Optional[str] = "boto", profile: Optional[str] = None
) -> Tuple[Optional[str], Optional[str]]:
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
config = configparser.ConfigParser()
if config.read(config_file_name): # pragma: no cover
sections = config.sections()
else:
raise AirflowException(f"Couldn't read {config_file_name}")
# Setting option names depending on file format
if config_format is None:
config_format = "boto"
conf_format = config_format.lower()
if conf_format == "boto": # pragma: no cover
if profile is not None and "profile " + profile in sections:
cred_section = "profile " + profile
else:
cred_section = "Credentials"
elif conf_format == "aws" and profile is not None:
cred_section = profile
else:
cred_section = "default"
# Option names
if conf_format in ("boto", "aws"): # pragma: no cover
key_id_option = "aws_access_key_id"
secret_key_option = "aws_secret_access_key"
# security_token_option = 'aws_security_token'
else:
key_id_option = "access_key"
secret_key_option = "secret_key"
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = config.get(cred_section, key_id_option)
secret_key = config.get(cred_section, secret_key_option)
except Exception:
logging.warning("Option Error in parsing s3 config file")
raise
return access_key, secret_key
| |
"""Usage:
constituentretrofit_word2vec.py -v <vectorsFile> [options]
constituentretrofit_word2vec.py (-h | --help)
Arguments:
-v <vectorsFile> to specify word2vec input file
Options:
-t <testPhrase> specify test phrases to leave out
-o <outputFile> set output word sense vectors file (<vectorsFile>.sense)
-n <numiters> set the number of retrofitting iterations [default: 8]
-e <epsilon> set the convergence threshold [default: 0.001]
--phrase <phraseSeparator> phrase separator [default: |]
-f <inputFormat> can be set to gensim, binary, or txt [default: gensim]
-h --help (this message is displayed)
Copyright (C) 2015 Sujay Kumar Jauhar <sjauhar@cs.cmu.edu>
Modified to include constituent by Han-Chin Shing <shing@cs.umd.edu>
Licenced under the Apache Licence, v2.0 - http://www.apache.org/licenses/LICENSE-2.0
"""
import sys
import numpy
# import gzip
import json
from scipy.sparse import lil_matrix
# from copy import deepcopy
from itertools import izip
import re
from docopt import docopt
phraseSeparator = '|'
def writeWordVectors(wordVectors, vectorDim, filename):
    """Persist the (retrofitted) vectors via the model's own save() method.

    vectorDim is unused here; it is kept for a uniform writer signature.
    """
    report = sys.stderr.write
    report('Writing vectors to file...\n')
    wordVectors.save(filename)
    report('Finished writing vectors.\n')
def selectTestVocab(vocab, testPhraseFile):
    """Return the set of held-out test phrases, or an empty set when no file is given.

    A candidate phrase is any vocab entry containing the phrase separator
    whose every constituent token is itself present in the vocab.
    """
    candidates = set()
    for entry in vocab:
        if phraseSeparator not in entry:
            continue
        if all(token in vocab for token in entry.split(phraseSeparator)):
            candidates.add(entry)
    sys.stderr.write('possible phrases count is ' + str(len(candidates)) + '.\n')

    if not testPhraseFile:
        return set()

    sys.stderr.write('generating test phrases...\n')
    with open(testPhraseFile, 'r') as fp:
        testph = json.load(fp)
    # Test phrases are stored space-separated; map them onto '|'-joined vocab keys.
    testPhrase = set(re.sub(' ', '|', ph) for ph in testph) & candidates
    sys.stderr.write('test phrases count is ' + str(len(testPhrase)) + '.\n')
    return testPhrase
def lowercase(s):
    """Return *s* with only its first character lowercased (empty string unchanged)."""
    # s[:1] is '' for an empty string, so no length check is needed.
    return s[:1].lower() + s[1:]
# link constituent
def linkConstituent(vocab, testVocab, vocabLength, fixPhrase=False):
    """Build the sparse weight matrix linking each phrase to its constituent tokens.

    :param vocab: dict mapping word/phrase -> matrix index
    :param testVocab: held-out phrases that must NOT be linked
    :param vocabLength: number of vocab entries (matrix is vocabLength x vocabLength)
    :param fixPhrase: when True, phrase rows are left empty so phrase vectors
        stay fixed during retrofitting
    :return: the linkage matrix converted to COO format (fast row/col/data iteration)
    """
    missWordCount = 0
    linksCount = 0
    sys.stderr.write('Building linkage between phrase and tokens...\n')
    constituentMatrix = lil_matrix((vocabLength, vocabLength))
    for word in vocab:
        # Only link phrases, and only those not held out for testing.
        if phraseSeparator in word and word not in testVocab:
            buildLink = True
            phraseIndex = vocab[word]
            tokenList = word.split(phraseSeparator)
            tokenIndexList = []
            for token in tokenList:
                if token in vocab:
                    tokenIndexList.append(vocab[token])
                # elif lowercase(token) in vocab:
                #     tokenIndexList.append(vocab[lowercase(token)])
                else:
                    # A constituent token is missing: skip this phrase entirely.
                    missWordCount += 1
                    buildLink = False
            if buildLink:
                linksCount += 1
                # Link weight shared between the phrase and each constituent.
                weightOfConstituent = 1.0 / float(len(tokenIndexList) + 1.0)
                weightOfIdentity = 1
                if not fixPhrase:
                    constituentMatrix[phraseIndex, phraseIndex] = weightOfIdentity
                for tokenIndex in tokenIndexList:
                    if not fixPhrase:
                        constituentMatrix[phraseIndex, tokenIndex] = weightOfConstituent / 2.0
                    constituentMatrix[tokenIndex, tokenIndex] = weightOfIdentity
                    # Negative weights between sibling tokens; positive weight
                    # pulling each token toward its phrase.
                    for tokenIndex2 in tokenIndexList:
                        if not tokenIndex == tokenIndex2:
                            constituentMatrix[tokenIndex, tokenIndex2] = -1 * weightOfConstituent / 4.0
                    constituentMatrix[tokenIndex, phraseIndex] = weightOfConstituent / 2.0
    sys.stderr.write('Finished building linkage.\n')
    sys.stderr.write('missing ' + str(missWordCount) + ' words\n')
    sys.stderr.write('built ' + str(linksCount) + ' links\n')
    return constituentMatrix.tocoo()
# Return the maximum differential between old and new vectors to check for convergence.
def maxVectorDiff(newVecs, oldVecs):
    """Return the largest L2 distance between corresponding vectors (convergence metric)."""
    if not newVecs:
        return 0.0
    return max(numpy.linalg.norm(newVecs[key] - oldVecs[key]) for key in newVecs)
def update(wordVectors, newSenseVectors):
    """Write each retrofitted sense vector back into wordVectors in place."""
    for word, vec in newSenseVectors.items():
        # Overwrite the stored vector's leading entries element-wise, in place.
        numpy.put(wordVectors[word], range(len(vec)), vec)
# Run the retrofitting procedure.
def retrofit(wordVectors, vectorDim, vocab, constituentMatrix, numIters, epsilon, fixPhrase=False):
    """Run constituent retrofitting over the linkage matrix.

    :param wordVectors: dict-like mapping word -> numpy vector (updated in place at the end)
    :param vectorDim: dimensionality of the vectors
    :param vocab: dict mapping word -> matrix index
    :param constituentMatrix: COO linkage matrix from linkConstituent()
    :param numIters: number of retrofitting passes
    :param epsilon: convergence threshold (currently unused; the convergence
        check is commented out and all numIters passes are run)
    :param fixPhrase: when True, phrase vectors are never rewritten
    :return: wordVectors, updated with the retrofitted vectors
    """
    sys.stderr.write('Starting the retrofitting procedure...\n')
    # map index to word/phrase
    senseVocab = {vocab[k]: k for k in vocab}
    # initialize sense vectors
    newSenseVectors = {}

    def flushRow(rowIndex, vector, norm):
        # Store the accumulated vector for a finished row (skip phrases when fixPhrase).
        if not fixPhrase or phraseSeparator not in senseVocab[rowIndex]:
            newSenseVectors[senseVocab[rowIndex]] = vector / norm

    # run for a maximum number of iterations
    for it in range(numIters):
        newVector = None
        normalizer = None
        prevRow = None
        sys.stderr.write('Running retrofitting iter ' + str(it + 1) + '... ')
        # loop through all the non-zero weights in the adjacency matrix
        # (zip replaces the Python-2-only itertools.izip; iteration is identical)
        for row, col, val in zip(constituentMatrix.row, constituentMatrix.col, constituentMatrix.data):
            # a new sense has started
            if row != prevRow:
                # BUG FIX: the original tested `if prevRow`, which is falsy for
                # row index 0, so the first sense was never flushed.
                if prevRow is not None:
                    flushRow(prevRow, newVector, normalizer)
                newVector = numpy.zeros(vectorDim, dtype=float)
                normalizer = 0.0
                prevRow = row
            isPhrase = phraseSeparator in senseVocab[row]
            # in the case that senseVocab[row] is not a phrase
            if not isPhrase:
                # add the identity vector
                if row == col:
                    newVector += val * wordVectors[senseVocab[row]]
                    normalizer += val
                # add the constituent vector
                else:
                    if senseVocab[col] not in newSenseVectors:
                        newSenseVectors[senseVocab[col]] = wordVectors[senseVocab[col]]
                    newVector += val * newSenseVectors[senseVocab[col]]
                    if val >= 0:
                        normalizer += val / 2
            # in the case that senseVocab[row] is a phrase
            elif not fixPhrase:
                # add the identity vector
                if row == col:
                    newVector += val * wordVectors[senseVocab[row]]
                    normalizer += val
                # add the constituent vector
                else:
                    if senseVocab[col] not in newSenseVectors:
                        newSenseVectors[senseVocab[col]] = wordVectors[senseVocab[col]]
                    newVector += val * newSenseVectors[senseVocab[col]]
                    normalizer += val
        # BUG FIX: flush the final row, which the original loop never stored.
        if prevRow is not None:
            flushRow(prevRow, newVector, normalizer)
        sys.stderr.write('Done!\n')
    update(wordVectors, newSenseVectors)
    sys.stderr.write('Finished running retrofitting.\n')
    return wordVectors
def setup(commandParse):
    """Resolve the output path and select the vector-reader module for the -f format.

    Also installs the --phrase separator into the module-level global.
    """
    global phraseSeparator
    phraseSeparator = commandParse["--phrase"]
    outputFile = commandParse["-o"] or (commandParse["-v"] + ".cons")

    fmt = commandParse["-f"]
    if fmt == "txt":
        import constituentretrofit_fixed as consfit
    elif fmt == "binary":
        import constituentretrofit_fixed_word2vec as consfit
    else:
        if fmt != "gensim":
            sys.stderr.write('unknown format specify, use gensim format instead\n')
        import constituentretrofit_fixed_word2vec_native as consfit
    return outputFile, consfit
if __name__ == "__main__":
    # parse command line input
    commandParse = docopt(__doc__)
    print commandParse
    # choose output file and the format-specific reader module
    outputFile, consfit = setup(commandParse)
    print outputFile
    print consfit
    # try opening the specified files
    vocab, vectors, vectorDim = consfit.readWordVectors(commandParse["-v"])
    # vocab is {word: frequency rank}
    # vectors is {word: vector}
    sys.stderr.write('vocab length is ' + str(len(vocab.keys())) + '\n')
    # optionally hold out the test phrases listed in the -t JSON file
    testVocab = selectTestVocab(vocab, commandParse["-t"])
    # build the sparse phrase<->token linkage matrix
    constituentMatrix = linkConstituent(vocab, testVocab, len(vocab.keys()))
    numIters = int(commandParse["-n"])
    epsilon = float(commandParse["-e"])
    # run retrofitting and write to output file
    vectors = retrofit(vectors, vectorDim, vocab, constituentMatrix, numIters, epsilon)
    writeWordVectors(vectors, vectorDim, outputFile)
    sys.stderr.write('All done!\n')
| |
import numpy as np
from .cython.bbox import bbox_overlaps_cython
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def bbox_overlaps(boxes, query_boxes):
    # Thin wrapper around the compiled Cython implementation (same contract as
    # bbox_overlaps_py below, but fast).
    return bbox_overlaps_cython(boxes, query_boxes)
def bbox_overlaps_py(boxes, query_boxes):
    """
    determine overlaps (IoU) between boxes and query_boxes, pure-python reference
    :param boxes: n * 4 bounding boxes
    :param query_boxes: k * 4 bounding boxes
    :return: overlaps: n * k overlaps
    """
    n_ = boxes.shape[0]
    k_ = query_boxes.shape[0]
    # FIX: `np.float` was a deprecated alias for the builtin `float` and was
    # removed in NumPy 1.24; plain `float` is the documented replacement.
    overlaps = np.zeros((n_, k_), dtype=float)
    for k in range(k_):
        # "+ 1" convention: boxes include both corner pixels.
        query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
        for n in range(n_):
            iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
            if iw > 0:
                ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
                if ih > 0:
                    box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
                    all_area = float(box_area + query_box_area - iw * ih)
                    overlaps[n, k] = iw * ih / all_area
    return overlaps
def clip_boxes(boxes, im_shape):
    """
    Clip boxes (in place) to image boundaries.
    :param boxes: [N, 4 * num_classes]
    :param im_shape: tuple of 2 (height, width)
    :return: [N, 4 * num_classes]
    """
    x_max = im_shape[1] - 1
    y_max = im_shape[0] - 1
    # Columns 0/2 are x1/x2, columns 1/3 are y1/y3; clamp each into [0, max].
    for offset, upper in ((0, x_max), (1, y_max), (2, x_max), (3, y_max)):
        boxes[:, offset::4] = np.maximum(np.minimum(boxes[:, offset::4], upper), 0)
    return boxes
def nonlinear_transform(ex_rois, gt_rois):
    """
    compute bounding box regression targets from ex_rois to gt_rois
    :param ex_rois: [N, 4]
    :param gt_rois: [N, 4]
    :return: [N, 4] columns are (dx, dy, dw, dh)
    """
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'

    def _centers_and_sizes(rois):
        # Widths/heights use the inclusive "+1" pixel convention.
        widths = rois[:, 2] - rois[:, 0] + 1.0
        heights = rois[:, 3] - rois[:, 1] + 1.0
        ctr_x = rois[:, 0] + 0.5 * (widths - 1.0)
        ctr_y = rois[:, 1] + 0.5 * (heights - 1.0)
        return ctr_x, ctr_y, widths, heights

    ex_cx, ex_cy, ex_w, ex_h = _centers_and_sizes(ex_rois)
    gt_cx, gt_cy, gt_w, gt_h = _centers_and_sizes(gt_rois)

    # Center deltas are normalized by the example box size; sizes use log ratios.
    targets_dx = (gt_cx - ex_cx) / (ex_w + 1e-14)
    targets_dy = (gt_cy - ex_cy) / (ex_h + 1e-14)
    targets_dw = np.log(gt_w / ex_w)
    targets_dh = np.log(gt_h / ex_h)

    return np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
def nonlinear_pred(boxes, box_deltas):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
dw = np.minimum(dw, BBOX_XFORM_CLIP)
dh = np.minimum(dh, BBOX_XFORM_CLIP)
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1.0)
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1.0)
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1.0)
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
return pred_boxes
def iou_transform(ex_rois, gt_rois):
    """Targets for the IoU loss are the ground-truth boxes themselves."""
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
    return gt_rois
def iou_pred(boxes, box_deltas):
    """
    Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas), here as direct per-corner
    additive offsets (IoU-loss parameterization).
    :param boxes: !important [N 4]
    :param box_deltas: [N, 4 * num_classes]
    :return: [N 4 * num_classes]
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))
    # FIX: `np.float` alias was removed in NumPy 1.24; builtin `float` is
    # the documented, equivalent replacement.
    boxes = boxes.astype(float, copy=False)
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    dx1 = box_deltas[:, 0::4]
    dy1 = box_deltas[:, 1::4]
    dx2 = box_deltas[:, 2::4]
    dy2 = box_deltas[:, 3::4]

    pred_boxes = np.zeros(box_deltas.shape)
    # x1
    pred_boxes[:, 0::4] = dx1 + x1[:, np.newaxis]
    # y1
    pred_boxes[:, 1::4] = dy1 + y1[:, np.newaxis]
    # x2
    pred_boxes[:, 2::4] = dx2 + x2[:, np.newaxis]
    # y2
    pred_boxes[:, 3::4] = dy2 + y2[:, np.newaxis]

    return pred_boxes
def flip_boxes(boxes, im_width):
    """Mirror boxes horizontally inside an image of width *im_width* (input untouched)."""
    flipped = boxes.copy()
    # New x1 comes from the old x2 and vice versa: x -> (im_width - 1) - x.
    flipped[:, 0::4] = (im_width - 1) - boxes[:, 2::4]
    flipped[:, 2::4] = (im_width - 1) - boxes[:, 0::4]
    return flipped
def box_voting(top_dets, all_dets, thresh=0.5, scoring_method='ID', beta=1.0):
    """Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.
    See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
    referenced paper) can be applied by setting `scoring_method` appropriately.
    """
    # top_dets is [N, 5] each row is [x1 y1 x2 y2, score]
    # all_dets is [N, 5] each row is [x1 y1 x2 y2, score]
    top_dets_out = top_dets.copy()
    top_boxes = top_dets[:, :4]
    all_boxes = all_dets[:, :4]
    all_scores = all_dets[:, 4]
    top_to_all_overlaps = bbox_overlaps(top_boxes, all_boxes)
    for k in range(top_dets_out.shape[0]):
        # Every detection overlapping this top box by >= thresh casts a vote,
        # weighted by its detection score.
        inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
        boxes_to_vote = all_boxes[inds_to_vote, :]
        ws = all_scores[inds_to_vote]
        top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
        if scoring_method == 'ID':
            # Identity, nothing to do
            pass
        elif scoring_method == 'TEMP_AVG':
            # Average probabilities (considered as P(detected class) vs.
            # P(not the detected class)) after smoothing with a temperature
            # hyperparameter.
            # NOTE(review): np.log(P / P_max) is -inf if a score is exactly 0
            # or 1 — presumably scores lie strictly inside (0, 1); confirm.
            P = np.vstack((ws, 1.0 - ws))
            P_max = np.max(P, axis=0)
            X = np.log(P / P_max)
            X_exp = np.exp(X / beta)
            P_temp = X_exp / np.sum(X_exp, axis=0)
            P_avg = P_temp[0].mean()
            top_dets_out[k, 4] = P_avg
        elif scoring_method == 'AVG':
            # Combine new probs from overlapping boxes
            top_dets_out[k, 4] = ws.mean()
        elif scoring_method == 'IOU_AVG':
            # Average scores weighted by IoU with the top box instead of by score.
            P = ws
            ws = top_to_all_overlaps[k, inds_to_vote]
            P_avg = np.average(P, weights=ws)
            top_dets_out[k, 4] = P_avg
        elif scoring_method == 'GENERALIZED_AVG':
            # Generalized (power) mean of the scores with exponent beta.
            P_avg = np.mean(ws**beta)**(1.0 / beta)
            top_dets_out[k, 4] = P_avg
        elif scoring_method == 'QUASI_SUM':
            # Sum of scores, damped by the vote count raised to beta.
            top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
        else:
            raise NotImplementedError(
                'Unknown scoring method {}'.format(scoring_method)
            )
    return top_dets_out
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Maxwell Morais and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, encode
import os
import sys
import inspect
import traceback
import linecache
import pydoc
import cgitb
import types
import datetime
import json
def make_error_snapshot(exception):
	"""Serialize the current exception context as a JSON file under the site's
	error-snapshots folder. No-op when disabled via site config; never raises
	(snapshot failures are only logged)."""
	if frappe.conf.disable_error_snapshot:
		return

	logger = frappe.logger(__name__, with_more_info=False)

	try:
		# Unique, sortable id: timestamp + request ip + short random hash.
		error_id = '{timestamp:s}-{ip:s}-{hash:s}'.format(
			timestamp=cstr(datetime.datetime.now()),
			ip=frappe.local.request_ip or '127.0.0.1',
			hash=frappe.generate_hash(length=3),
		)
		snapshot_folder = get_error_snapshot_path()
		frappe.create_folder(snapshot_folder)
		snapshot_file_path = os.path.join(snapshot_folder, "{0}.json".format(error_id))

		payload = frappe.as_json(get_snapshot(exception))
		with open(encode(snapshot_file_path), 'wb') as error_file:
			error_file.write(encode(payload))

		logger.error('New Exception collected with id: {}'.format(error_id))
	except Exception as e:
		# Never let snapshot collection itself break the request.
		logger.error('Could not take error snapshot: {0}'.format(e), exc_info=True)
def get_snapshot(exception, context=10):
	"""
	Return a dict describing a given traceback (based on cgitb.text)

	NOTE: Python-2-only code — it uses backtick repr syntax and
	``types.ClassType``, neither of which exists in Python 3.
	"""
	etype, evalue, etb = sys.exc_info()

	# Old-style (classic) exception classes: record the class name instead.
	if isinstance(etype, types.ClassType):
		etype = etype.__name__

	# creates a snapshot dict with some basic information
	s = {
		'pyver': 'Python {version:s}: {executable:s} (prefix: {prefix:s})'.format(
			version = sys.version.split()[0],
			executable = sys.executable,
			prefix = sys.prefix
		),
		'timestamp': cstr(datetime.datetime.now()),
		'traceback': traceback.format_exc(),
		'frames': [],
		'etype': cstr(etype),
		'evalue': cstr(`evalue`),
		'exception': {},
		'locals': {}
	}

	# start to process frames
	records = inspect.getinnerframes(etb, 5)
	for frame, file, lnum, func, lines, index in records:
		file = file and os.path.abspath(file) or '?'
		args, varargs, varkw, locals = inspect.getargvalues(frame)
		call = ''
		if func != '?':
			call = inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '={}'.format(pydoc.text.repr(value)))

		# basic frame information
		f = {'file': file, 'func': func, 'call': call, 'lines': {}, 'lnum': lnum}

		def reader(lnum=[lnum]):
			# Stateful line reader for cgitb.scanvars: returns successive source lines.
			try:
				return linecache.getline(file, lnum[0])
			finally:
				lnum[0] += 1

		vars = cgitb.scanvars(reader, frame, locals)

		# if it is a view, replace with generated code
		# if file.endswith('html'):
		# lmin = lnum > context and (lnum - context) or 0
		# lmax = lnum + context
		# lines = code.split("\n")[lmin:lmax]
		# index = min(context, lnum) - 1

		# capture the source lines around the failing line
		if index is not None:
			i = lnum - index
			for line in lines:
				f['lines'][i] = line.rstrip()
				i += 1

		# dump local variable (referenced in current line only)
		f['dump'] = {}
		for name, where, value in vars:
			if name in f['dump']:
				continue
			if value is not cgitb.__UNDEF__:
				if where == 'global':
					name = 'global {name:s}'.format(name=name)
				elif where != 'local':
					name = where + ' ' + name.split('.')[-1]
				f['dump'][name] = pydoc.text.repr(value)
			else:
				f['dump'][name] = 'undefined'

		s['frames'].append(f)

	# add exception type, value and attributes
	if isinstance(evalue, BaseException):
		for name in dir(evalue):
			# prevent py26 DeprecationWarning
			# NOTE(review): `(2.6)` is a float, not the tuple `(2, 6)`, so this
			# never behaves as a version comparison — confirm intent.
			if (name != 'messages' or sys.version_info < (2.6)) and not name.startswith('__'):
				value = pydoc.text.repr(getattr(evalue, name))

				# render multilingual string properly
				# NOTE(review): eval() of repr output — safe only because the
				# input is our own repr, never external data.
				if type(value)==str and value.startswith(b"u'"):
					value = eval(value)

				s['exception'][name] = encode(value)

	# add all local values (of last frame) to the snapshot
	for name, value in locals.items():
		if type(value)==str and value.startswith(b"u'"):
			value = eval(value)
		s['locals'][name] = pydoc.text.repr(value)

	return s
def collect_error_snapshots():
	"""Scheduled task to collect error snapshots from files and push into Error Snapshot table.

	Each successfully imported file is deleted; empty/corrupt files are discarded.
	"""
	if frappe.conf.disable_error_snapshot:
		return

	try:
		folder = get_error_snapshot_path()
		if not os.path.exists(folder):
			return

		for name in os.listdir(folder):
			file_path = os.path.join(folder, name)

			try:
				with open(file_path, 'rb') as handle:
					payload = json.load(handle)
			except ValueError:
				# empty or corrupt file: discard it and move on
				os.remove(file_path)
				continue

			# Nested structures are stored as JSON strings on the doc.
			for field in ('locals', 'exception', 'frames'):
				payload[field] = frappe.as_json(payload[field])

			doc = frappe.new_doc('Error Snapshot')
			doc.update(payload)
			doc.save()
			frappe.db.commit()
			os.remove(file_path)

		clear_old_snapshots()
	except Exception as e:
		make_error_snapshot(e)
		# prevent creation of unlimited error snapshots
		raise
def clear_old_snapshots():
	"""Clear snapshots that are older than a month (both DB rows and on-disk files)."""
	frappe.db.sql("""delete from `tabError Snapshot`
		where creation < date_sub(now(), interval 1 month)""")

	path = get_error_snapshot_path()
	today = datetime.datetime.now()

	for file in os.listdir(path):
		p = os.path.join(path, file)
		ctime = datetime.datetime.fromtimestamp(os.path.getctime(p))
		if (today - ctime).days > 31:
			# BUG FIX: `p` is already the joined path; the original called
			# os.path.join(path, p) again, which doubled the directory prefix
			# (and so removed nothing) whenever `path` was relative.
			os.remove(p)
def get_error_snapshot_path():
	"""Return the site folder where error snapshot JSON files are written."""
	return frappe.get_site_path('error-snapshots')
| |
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cassandra.cqlengine import operators
from cassandra.cqlengine.named import NamedKeyspace
from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator
from cassandra.cqlengine.query import ResultObject
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage
class TestQuerySetOperation(BaseCassEngTestCase):
    """Integration tests for filter/expression parsing against a NamedKeyspace table."""

    @classmethod
    def setUpClass(cls):
        super(TestQuerySetOperation, cls).setUpClass()
        # Bind to the pre-created test keyspace/table by name.
        cls.keyspace = NamedKeyspace('cqlengine_test')
        cls.table = cls.keyspace.table('test_model')

    def test_query_filter_parsing(self):
        """
        Tests the queryset filter method parses its kwargs properly
        """
        query1 = self.table.objects(test_id=5)
        assert len(query1._where) == 1

        op = query1._where[0]
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5

        # filter() should append, not replace, where clauses.
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2

        op = query2._where[1]
        assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1

    def test_query_expression_parsing(self):
        """ Tests that query expressions are evaluated properly """
        query1 = self.table.filter(self.table.column('test_id') == 5)
        assert len(query1._where) == 1

        op = query1._where[0]
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5

        query2 = query1.filter(self.table.column('expected_result') >= 1)
        assert len(query2._where) == 2

        op = query2._where[1]
        assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1

    def test_filter_method_where_clause_generation(self):
        """
        Tests the where clause creation (kwarg-style filters)
        """
        query1 = self.table.objects(test_id=5)
        self.assertEqual(len(query1._where), 1)
        where = query1._where[0]
        self.assertEqual(where.field, 'test_id')
        self.assertEqual(where.value, 5)

        query2 = query1.filter(expected_result__gte=1)
        self.assertEqual(len(query2._where), 2)

        # The original clause must be preserved unchanged.
        where = query2._where[0]
        self.assertEqual(where.field, 'test_id')
        self.assertIsInstance(where.operator, EqualsOperator)
        self.assertEqual(where.value, 5)

        where = query2._where[1]
        self.assertEqual(where.field, 'expected_result')
        self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)
        self.assertEqual(where.value, 1)

    def test_query_expression_where_clause_generation(self):
        """
        Tests the where clause creation (column-expression-style filters)
        """
        query1 = self.table.objects(self.table.column('test_id') == 5)
        self.assertEqual(len(query1._where), 1)
        where = query1._where[0]
        self.assertEqual(where.field, 'test_id')
        self.assertEqual(where.value, 5)

        query2 = query1.filter(self.table.column('expected_result') >= 1)
        self.assertEqual(len(query2._where), 2)

        where = query2._where[0]
        self.assertEqual(where.field, 'test_id')
        self.assertIsInstance(where.operator, EqualsOperator)
        self.assertEqual(where.value, 5)

        where = query2._where[1]
        self.assertEqual(where.field, 'expected_result')
        self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)
        self.assertEqual(where.value, 1)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
    """Count/iteration/get tests run through a NamedTable instead of a model.

    Relies on the fixture data loaded by BaseQuerySetUsage (12 rows of
    TestModel, 4 rows per test_id in 0..2).
    """

    @classmethod
    def setUpClass(cls):
        super(TestQuerySetCountSelectionAndIteration, cls).setUpClass()

        # Point a NamedKeyspace/NamedTable at the same table TestModel uses.
        from tests.integration.cqlengine.query.test_queryset import TestModel

        ks,tn = TestModel.column_family_name().split('.')
        cls.keyspace = NamedKeyspace(ks)
        cls.table = cls.keyspace.table(tn)

    def test_count(self):
        """ Tests that adding filtering statements affects the count query as expected """
        assert self.table.objects.count() == 12

        q = self.table.objects(test_id=0)
        assert q.count() == 4

    def test_query_expression_count(self):
        """ Tests that adding query statements affects the count query as expected """
        assert self.table.objects.count() == 12

        q = self.table.objects(self.table.column('test_id') == 0)
        assert q.count() == 4

    def test_iteration(self):
        """ Tests that iterating over a query set pulls back all of the expected results """
        q = self.table.objects(test_id=0)
        #tuple of expected attempt_id, expected_result values
        compare_set = set([(0,5), (1,10), (2,15), (3,20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        # Every expected row must have been seen exactly once.
        assert len(compare_set) == 0

        # test with regular filtering
        q = self.table.objects(attempt_id=3).allow_filtering()
        assert len(q) == 3
        #tuple of expected test_id, expected_result values
        compare_set = set([(0,20), (1,20), (2,75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

        # test with query method
        q = self.table.objects(self.table.column('attempt_id') == 3).allow_filtering()
        assert len(q) == 3
        #tuple of expected test_id, expected_result values
        compare_set = set([(0,20), (1,20), (2,75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

    def test_multiple_iterations_work_properly(self):
        """ Tests that iterating over a query set more than once works """
        # test with both the filtering method and the query method
        for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):
            #tuple of expected attempt_id, expected_result values
            compare_set = set([(0,5), (1,10), (2,15), (3,20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0

            #try it again
            compare_set = set([(0,5), (1,10), (2,15), (3,20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0

    def test_multiple_iterators_are_isolated(self):
        """
        tests that the use of one iterator does not affect the behavior of another
        """
        for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):
            q = q.order_by('attempt_id')
            expected_order = [0,1,2,3]
            # Two concurrent iterators must advance independently.
            iter1 = iter(q)
            iter2 = iter(q)
            for attempt_id in expected_order:
                assert next(iter1).attempt_id == attempt_id
                assert next(iter2).attempt_id == attempt_id

    def test_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = self.table.objects.get(test_id=0, attempt_id=0)
        assert isinstance(m, ResultObject)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = self.table.objects(test_id=0, attempt_id=0)
        m = q.get()
        assert isinstance(m, ResultObject)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = self.table.objects(test_id=0)
        m = q.get(attempt_id=0)
        assert isinstance(m, ResultObject)
        assert m.test_id == 0
        assert m.attempt_id == 0

    def test_query_expression_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = self.table.get(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)
        assert isinstance(m, ResultObject)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = self.table.objects(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)
        m = q.get()
        assert isinstance(m, ResultObject)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = self.table.objects(self.table.column('test_id') == 0)
        m = q.get(self.table.column('attempt_id') == 0)
        assert isinstance(m, ResultObject)
        assert m.test_id == 0
        assert m.attempt_id == 0

    def test_get_doesnotexist_exception(self):
        """
        Tests that get calls that don't return a result raises a DoesNotExist error
        """
        with self.assertRaises(self.table.DoesNotExist):
            self.table.objects.get(test_id=100)

    def test_get_multipleobjects_exception(self):
        """
        Tests that get calls that return multiple results raise a MultipleObjectsReturned error
        """
        with self.assertRaises(self.table.MultipleObjectsReturned):
            self.table.objects.get(test_id=1)
| |
"""
ColorHelper.
Copyright (c) 2015 - 2017 Isaac Muse <isaacmuse@gmail.com>
License: MIT
"""
import sublime
import sublime_plugin
from .lib.coloraide import Color
import threading
from time import time, sleep
import re
import os
import mdpopups
from .lib import colorbox
from . import ch_util as util
import traceback
from .lib.multiconf import get as qualify_settings
from collections import namedtuple
# Minimal HTML wrapper for an inline preview phantom.  The anchor carries the
# swatch image and a unique href used to map clicks back to a color region.
PREVIEW_IMG = (
    '<style>'
    'html, body {{margin: 0; padding: 0;}} a {{line-height: 0;}}'
    '</style>'
    '<a href="{}"{}>{}</a>'
)
PREVIEW_BORDER_SIZE = 1  # border width (px) drawn around each swatch

# Module state shared between the commands, the event listener, and the
# background preview thread.
reload_flag = False  # set on settings reload to force a full re-scan
ch_last_updated = None  # timestamp of the last settings reload
ch_settings = None  # the loaded 'color_helper.sublime-settings' object
unloading = False  # True while previews are being torn down

# Preserve a running preview thread across in-place plugin reloads.
if 'ch_preview_thread' not in globals():
    ch_preview_thread = None
def preview_is_on_left():
    """Return True when inline previews should be rendered on the left side."""
    position = ch_settings.get('inline_preview_position')
    return position != 'right'
class ColorSwatch(namedtuple('ColorSwatch', ['start', 'end', 'pid', 'uid'])):
    """Color swatch: text region bounds, phantom id, and unique click href id."""
class Extent(namedtuple('Extent', ['start', 'end'])):
    """Range of a single layout dimension (start/end coordinates)."""
class Dimensions(namedtuple('Dimensions', ['x', 'y'])):
    """Pair of layout coordinates/extents (x and y)."""
class ColorHelperPreviewOverrideCommand(sublime_plugin.TextCommand):
    """Override current scanning state.

    Presents a quick panel whose choices depend on the current override so
    the user is never offered the state they are already in.
    """

    def run(self, edit):
        """Override the current scan "allow" state for this specific view."""
        override = self.view.settings().get("color_helper.scan_override", "Remove override")
        if override == "Force enable":
            self.options = ["Force disable", "Remove override"]
        elif override == "Force disable":
            self.options = ["Force enable", "Remove override"]
        else:
            self.options = ["Force enable", "Force disable"]

        self.view.window().show_quick_panel(self.options, self.done)

    def done(self, value):
        """Apply the chosen override (value == -1 means the panel was cancelled).

        Note: the setting is written once per branch; the old code wrote it
        unconditionally first and then wrote it again (or erased it), doing
        redundant settings updates.
        """
        if value != -1:
            option = self.options[value]
            if option == "Force enable":
                # Enable scanning and nudge the preview thread to re-render.
                self.view.settings().set("color_helper.scan_override", option)
                ch_preview_thread.modified = True
                ch_preview_thread.time = time()
            elif option == "Force disable":
                # Disable scanning and clear any phantoms already shown.
                self.view.settings().set("color_helper.scan_override", option)
                self.view.window().run_command("color_helper_preview", {"clear": True})
            else:
                # "Remove override": revert to the rule-based behavior.
                self.view.settings().erase("color_helper.scan_override")
                self.view.window().run_command("color_helper_preview", {"clear": True})
class ColorHelperPreviewCommand(sublime_plugin.WindowCommand):
"""Color Helper preview with phantoms."""
def __init__(self, window):
"""Setup."""
super().__init__(window)
self.previous_region = {}
self.previews = {}
self.color_classes = {}
for view in window.views():
view.erase_phantoms("color_helper")
    def on_navigate(self, href):
        """Handle color box click: select the clicked color and show its info popup."""
        self.view.sel().clear()
        for k, v in self.previews[self.view.buffer_id()].items():
            if href == v.uid:
                # Confirm the phantom still exists before acting on it.
                phantom = self.view.query_phantom(v.pid)
                if phantom:
                    # Select end->start so the caret lands at the color's start.
                    self.view.sel().add(sublime.Region(int(v.end), int(v.start)))
                    # Defer the popup so the new selection settles first.
                    sublime.set_timeout(
                        lambda cmd="color_helper", args={"mode": "info"}: self.view.run_command(cmd, args),
                        100
                    )
                break
def calculate_box_size(self):
"""Calculate the preview box size."""
# Calculate size of preview boxes
settings = self.view.settings()
size_offset = int(qualify_settings(ch_settings, 'inline_preview_offset', 0))
top_pad = settings.get('line_padding_top', 0)
bottom_pad = settings.get('line_padding_bottom', 0)
# Sometimes we strangely get None
if top_pad is None:
top_pad = 0
if bottom_pad is None:
bottom_pad = 0
box_height = util.get_line_height(self.view) - int(top_pad + bottom_pad) + size_offset
return box_height
def is_selected(self, region, sels):
"""Check if region is in selections."""
for s in sels:
if region.intersects(s) or s.a == region.a:
return True
return False
    def get_selections(self, bounds):
        """Get selections that intersect the visible viewport *bounds*."""
        selections = []
        for s in self.view.sel():
            b = Dimensions(*self.view.text_to_layout(s.b))
            if b.y <= bounds.y.start:
                # We are past the viewport
                continue
            a = Dimensions(*self.view.text_to_layout(s.a))
            if a.y >= bounds.y.end:
                # We haven't reached the viewport yet; selections are ordered,
                # so nothing later can intersect either.
                break
            # Keep the selection when either endpoint is horizontally visible,
            # or when it cannot be proven to lie entirely off to one side.
            # NOTE(review): assumes s.a precedes s.b in layout order -- confirm.
            if (
                (bounds.x.start <= a.x <= bounds.x.end or bounds.x.start <= b.x <= bounds.x.end) or
                not (
                    (a.x >= bounds.x.end and b.x >= a.x and a.y == b.y) or
                    (b.x <= bounds.x.start and a.x <= b.x and a.y == b.y) or
                    (a.x >= bounds.x.end and b.x <= bounds.x.start and a.y + 1 == b.y)
                )
            ):
                selections.append(s)
        return selections
    def source_iter(self, visible_region, bounds):
        """
        Iterate through source in the viewport.

        We don't want to provide all the content in a really wide scrollable view,
        so clip each line in the visible region to the visible viewport.
        Return content in consecutive chunks (as `sublime.Region` objects).
        """
        # Get all the lines
        lines = self.view.split_by_newlines(visible_region)

        # Calculate regions of consecutive lines that do not have gaps due to clipping
        last_start = None
        last_end = None
        for line in lines:
            # Line start
            start_clipped = False
            start_vector = Dimensions(*self.view.text_to_layout(line.begin()))
            if start_vector.x < bounds.x.start:
                start_pt = self.view.layout_to_text((bounds.x.start, start_vector.y))
                if start_pt == line.begin():
                    # Entire line is left of the viewport; flush any pending chunk.
                    if last_start is not None:
                        yield sublime.Region(last_start, last_end)
                        last_start = None
                        last_end = None
                    continue
                start_clipped = True
            else:
                start_pt = line.begin()

            # Line end
            end_clipped = False
            end_vector = Dimensions(*self.view.text_to_layout(line.end()))
            if end_vector.x > bounds.x.end:
                end_pt = self.view.layout_to_text((bounds.x.end, end_vector.y))
                end_clipped = True
            else:
                end_pt = line.end()

            # This region should not be included with the last
            # as there is a gap between the last content and this content.
            if start_clipped and last_start is not None:
                yield sublime.Region(last_start, last_end)
                last_start = None
                last_end = None

            # This content has been clipped and will have a gap between
            # this and the next region, so just send it now.
            if end_clipped:
                yield sublime.Region(last_start if last_start is not None else start_pt, end_pt)
                last_start = None
                last_end = None
                continue

            # Track this region to see if we can include more in one chunk
            # If we already have a start, just update the end.
            if last_start is None:
                last_start = start_pt
            last_end = end_pt

        # Return anything we haven't already
        if last_start is not None:
            yield sublime.Region(last_start, last_end)
    def get_color_class(self, pt, classes):
        """Get color class (and filters) based on the scope at *pt*.

        Returns ``(color_class, filters)``; ``color_class`` is ``None`` when no
        configured class matches the scope.
        """
        view_id = self.view.buffer_id()
        # Rebuild the per-buffer class stash when empty or a refresh is flagged.
        if not self.color_classes[view_id] or self.view.settings().get('color_helper.refresh', True):
            util.debug("Clear color class stash")
            self.view.settings().set('color_helper.refresh', False)
            self.color_classes[view_id] = util.get_settings_colors()

        # Check if the first point within the color matches our scope rules
        # and load up the appropriate color class
        color_class = None
        filters = []
        for item in classes:
            try:
                value = self.view.score_selector(pt, item["scopes"])
                if not value:
                    continue
                else:
                    class_options = self.color_classes[view_id].get(item["class"])
                    if class_options is None:
                        continue
                    module = class_options.get("class", "ColorHelper.lib.coloraide.Color")
                    if isinstance(module, str):
                        # Initialize the color module and cache it for this view
                        color_class = util.import_color(module)
                        class_options["class"] = color_class
                    else:
                        color_class = module
                    filters = class_options.get("filters", [])
                    break
            except Exception:
                # A malformed class entry should not break scanning entirely.
                pass
        return color_class, filters
def setup_gamut_options(self):
"""Setup gamut options."""
self.show_out_of_gamut_preview = ch_settings.get('show_out_of_gamut_preview', True)
self.gamut_space = ch_settings.get('gamut_space', 'srgb')
if self.gamut_space not in util.GAMUT_SPACES:
self.gamut_space = 'srgb'
self.out_of_gamut = Color("transparent").convert(self.gamut_space)
self.out_of_gamut_border = Color(self.view.style().get('redish', "red")).convert(self.gamut_space)
def do_search(self, force=False):
"""
Perform the search for the highlighted word.
TODO: This function is a big boy. We should look into breaking it up.
With that said, this is low priority.
"""
# Since the plugin has been reloaded, force update.
global reload_flag
settings = self.view.settings()
colors = []
view_id = self.view.buffer_id()
# Allow per view scan override
option = settings.get("color_helper.scan_override", None)
if option in ("Force enable", "Force disable"):
override = option == "Force enable"
else:
override = None
# Get the rules and use them to get the needed scopes.
# The scopes will be used to get the searchable regions.
rules = util.get_rules(self.view)
# Bail if this if this view has no valid rule or scanning is disabled.
if (
rules is None or not rules.get("enabled", False) or
(not rules.get("allow_scanning", True) and not override) or
override is False
):
self.erase_phantoms()
return
if reload_flag:
reload_flag = False
force = True
# Calculate size of preview boxes
box_height = self.calculate_box_size()
check_size = int((box_height - 2) / 4)
if check_size < 2:
check_size = 2
# If desired preview boxes are different than current,
# we need to reload the boxes.
old_box_height = int(settings.get('color_helper.box_height', 0))
current_color_scheme = settings.get('color_scheme')
if (
force or old_box_height != box_height or
current_color_scheme != settings.get('color_helper.color_scheme', '') or
settings.get('color_helper.refresh')
):
self.erase_phantoms()
settings.set('color_helper.color_scheme', current_color_scheme)
settings.set('color_helper.box_height', box_height)
force = True
# Get viewable bounds so we can constrain both vertically and horizontally.
visible_region = self.view.visible_region()
position = self.view.viewport_position()
dimensions = self.view.viewport_extent()
bounds = Dimensions(
Extent(position[0], position[0] + dimensions[0] - 1),
Extent(position[1], position[1] + dimensions[1] - 1)
)
# If we don't need to force previews,
# quit if visible region is the same as last time
if not force and self.previous_region[view_id] == bounds:
return
self.previous_region[view_id] = bounds
# Setup "preview on select"
preview_on_select = ch_settings.get("preview_on_select", False)
show_preview = True
sels = []
if preview_on_select:
show_preview = False
sels = self.get_selections(bounds)
if sels:
show_preview = True
# Get the scan scopes
scanning = rules.get("scanning")
classes = rules.get("color_class", "css-level-4")
if show_preview and visible_region.size() and scanning and classes:
# Get out of gamut related options
self.setup_gamut_options()
# Get triggers that identify where colors are likely
color_trigger = re.compile(rules.get("color_trigger", util.RE_COLOR_START))
# Find source content in the visible region.
# We will return consecutive content, but if the lines are too wide
# horizontally, they will be clipped and returned as separate chunks.
for src_region in self.source_iter(visible_region, bounds):
source = self.view.substr(src_region)
start = 0
# Find colors in this source chunk.
for m in color_trigger.finditer(source):
# Test if we have found a valid color
start = m.start()
src_start = src_region.begin() + start
# Check if the first point within the color matches our scope rules
# and load up the appropriate color class
color_class, filters = self.get_color_class(src_start, classes)
if color_class is None:
continue
# Check if scope matches for scanning
try:
value = self.view.score_selector(src_start, scanning)
if not value:
continue
except Exception:
continue
obj = color_class.match(source, start=start, filters=filters)
if obj is not None:
# Calculate true start and end of the color source
src_end = src_region.begin() + obj.end
region = sublime.Region(src_start, src_end)
# If "preview on select" is enabled, only show preview if within a selection
# or if the selection as no width and the color comes right after.
if preview_on_select and not self.is_selected(region, sels):
continue
else:
continue
# Calculate point at which we which to insert preview
position_on_left = preview_is_on_left()
pt = src_start if position_on_left else src_end
if str(region.begin()) in self.previews[view_id]:
# Already exists
continue
# Calculate a reasonable border color for our image at this location and get color strings
hsl = Color(
mdpopups.scope2style(self.view, self.view.scope_name(pt))['background'],
filters=util.CSS_SRGB_SPACES
).convert("hsl")
hsl.lightness = hsl.lightness + (0.3 if hsl.luminance() < 0.5 else -0.3)
preview_border = hsl.convert(self.gamut_space, fit=True).set('alpha', 1)
color = Color(obj.color)
title = ''
if self.gamut_space == 'srgb':
check_space = self.gamut_space if color.space() not in util.SRGB_SPACES else color.space()
else:
check_space = self.gamut_space
if not color.in_gamut(check_space):
title = ' title="Preview out of gamut"'
if self.show_out_of_gamut_preview:
pcolor = color.convert(self.gamut_space, fit=True)
preview1 = pcolor.clone().set('alpha', 1)
preview2 = pcolor
else:
preview1 = self.out_of_gamut
preview2 = self.out_of_gamut
preview_border = self.out_of_gamut_border
else:
pcolor = color.convert(self.gamut_space, fit=True)
preview1 = pcolor.clone().set('alpha', 1)
preview2 = pcolor
# Create preview
unique_id = str(time()) + str(region)
html = PREVIEW_IMG.format(
unique_id,
title,
colorbox.color_box(
[preview1, preview2], preview_border,
height=box_height, width=box_height,
border_size=PREVIEW_BORDER_SIZE, check_size=check_size,
gamut_space=self.gamut_space
)
)
colors.append(
(
html,
pt,
region.begin(),
region.end(),
unique_id
)
)
# Add all previews
self.add_phantoms(colors)
# The phantoms may have altered the viewable region,
# so set previous region to the current viewable region
visible_region = self.view.visible_region()
position = self.view.viewport_position()
dimensions = self.view.viewport_extent()
bounds = Dimensions(
Extent(position[0], position[0] + dimensions[0] - 1),
Extent(position[1], position[1] + dimensions[1] - 1)
)
self.previous_region[view_id] = bounds
def add_phantoms(self, colors):
"""Add phantoms."""
i = self.view.buffer_id()
for html, pt, start, end, unique_id in colors:
pid = self.view.add_phantom(
'color_helper',
sublime.Region(pt),
html,
0,
on_navigate=self.on_navigate
)
self.previews[i][str(start)] = ColorSwatch(start, end, pid, unique_id)
def reset_previous(self):
"""Reset previous region."""
self.previous_region[self.view.buffer_id()] = sublime.Region(0)
def erase_phantoms(self):
"""Erase phantoms."""
# Obliterate!
self.view.erase_phantoms('color_helper')
self.previews[self.view.buffer_id()].clear()
self.reset_previous()
    def run(self, clear=False, force=False):
        """Prune stale buffer caches, then clear or rebuild previews for the active view."""
        self.view = self.window.active_view()

        # Drop cache entries for buffers that no longer have views in this window.
        ids = set([view.buffer_id() for view in self.window.views()])
        keys = set(self.previews.keys())
        diff = keys - ids
        for i in diff:
            del self.previews[i]
            del self.previous_region[i]
            del self.color_classes[i]

        # Ensure the active buffer has cache entries.
        i = self.view.buffer_id()
        if i not in self.previews:
            self.previews[i] = {}
        if i not in self.previous_region:
            self.previous_region[i] = sublime.Region(0, 0)
        if i not in self.color_classes:
            self.color_classes[i] = {}

        # Guard against re-entry while another scan holds the thread lock flag.
        if ch_preview_thread.ignore_all:
            return
        else:
            ch_preview_thread.ignore_all = True

        try:
            if clear:
                self.erase_phantoms()
            else:
                self.do_search(force)
        except Exception:
            # Fail safe: drop previews and log the traceback.
            self.erase_phantoms()
            util.debug('ColorHelper: \n' + str(traceback.format_exc()))

        # Release the re-entry guard and reset the debounce timer.
        ch_preview_thread.ignore_all = False
        ch_preview_thread.time = time()
class ChPreviewThread(threading.Thread):
"""Load up defaults."""
def __init__(self):
"""Setup the thread."""
self.reset()
threading.Thread.__init__(self)
def reset(self):
"""Reset the thread variables."""
self.wait_time = 0.12
self.scroll_wait_time = 0.5
self.sleep = 0.25
self.time = time()
self.modified = False
self.ignore_all = False
self.abort = False
self.scroll = False
self.last_view = -1
self.scroll_view = None
    def scroll_check(self):
        """Check if we should issue a scroll event."""
        view = sublime.active_window().active_view()
        if view is None:
            return
        vid = view.id()
        this_scroll = vid
        # Switching to a different view counts as a scroll so it gets previews.
        if self.last_view != this_scroll:
            self.last_view = this_scroll
            self.scroll = True
        else:
            # Otherwise compare viewport position/extent against the last check.
            scroll_view = view.viewport_position(), view.viewport_extent()
            if scroll_view != self.scroll_view:
                self.scroll = True
                self.scroll_view = scroll_view
                # Restart the debounce window so rendering waits for quiet.
                self.time = time()
    def payload(self):
        """Run a preview refresh on the active view (called on the async queue)."""
        view = sublime.active_window().active_view()
        if view is None:
            return
        if not self.ignore_all:
            clear = False
            force = False
            # Edits force a full refresh; a plain scroll only re-renders.
            if self.modified:
                force = True
                self.modified = False
                self.scroll = False
            else:
                self.scroll = False
            # Ignore selection and edit events inside the routine
            try:
                args = {"clear": clear, "force": force}
                window = view.window()
                if window is not None:
                    window.run_command('color_helper_preview', args)
            except Exception:
                util.debug(str(traceback.format_exc()))
def kill(self):
"""Kill thread."""
self.abort = True
while self.is_alive():
pass
self.reset()
    def run(self):
        """Thread loop: debounce edits/scrolls, otherwise poll for scrolling."""
        while not self.abort:
            if not self.ignore_all:
                delta = time() - self.time
                # After the debounce window, render if something changed;
                # otherwise periodically check whether the user scrolled.
                if delta > self.wait_time and (self.modified is True or self.scroll is True):
                    sublime.set_timeout_async(self.payload, 0)
                elif delta > self.scroll_wait_time:
                    sublime.set_timeout_async(self.scroll_check, 0)
            sleep(self.sleep)
class ColorHelperListener(sublime_plugin.EventListener):
"""Color Helper listener."""
def on_modified(self, view):
"""Flag that we need to show a tooltip or that we need to add phantoms."""
if self.ignore_event(view):
return
if ch_preview_thread is not None:
ch_preview_thread.modified = True
ch_preview_thread.time = time()
def on_selection_modified(self, view):
"""Flag that we need to show a tooltip."""
if self.ignore_event(view):
return
if ch_preview_thread is not None and ch_settings.get("preview_on_select", False):
# We only render previews when things change or a scroll occurs.
# On selection, we just need to force the change.
ch_preview_thread.time = time()
ch_preview_thread.modified = True
def on_activated(self, view):
"""On activated."""
if self.ignore_event(view):
return
if self.should_update(view):
ch_preview_thread.modified = True
ch_preview_thread.time = time()
self.set_file_scan_rules(view)
def set_file_scan_rules(self, view):
"""Set the scan rules for the current view."""
if ch_preview_thread:
ch_preview_thread.ignore_all = True
view.settings().clear_on_change('color_helper.reload')
view.window().run_command("color_helper_preview", {"clear": True})
file_name = view.file_name()
ext = os.path.splitext(file_name)[1].lower() if file_name is not None else None
s = sublime.load_settings('color_helper.sublime-settings')
rules = util.get_settings_rules()
syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0]
# Check if view meets criteria for on of our rule sets
matched = False
for rule in rules:
results = []
# Check if enabled.
if not rule.get("enabled", True):
continue
# Does the base scope match?
passed = True
base_scopes = rule.get("base_scopes", [])
if base_scopes:
passed = False
results.append(False)
for base in rule.get("base_scopes", []):
if view.score_selector(0, base):
passed = True
break
if not passed:
continue
# Does the syntax match?
syntax_files = rule.get("syntax_files", [])
syntax_filter = rule.get("syntax_filter", "allowlist")
syntax_okay = bool(
not syntax_files or (
(syntax_filter == "allowlist" and syntax in syntax_files) or
(syntax_filter == "blocklist" and syntax not in syntax_files)
)
)
if not syntax_okay:
continue
# Does the extension match?
extensions = [e.lower() for e in rule.get("extensions", [])]
passed = not extensions or (ext is not None and ext in extensions)
if not passed:
continue
# Gather options if rule matches
scanning = ','.join(rule.get("scanning", []))
classes = rule.get("color_class", "css-level-4")
if isinstance(classes, str):
classes = [{"class": classes, "scopes": ""}]
allow_scanning = bool(rule.get("allow_scanning", True) and scanning)
color_trigger = rule.get("color_trigger", util.RE_COLOR_START)
matched = True
break
# Couldn't find any explicit options, so associate a generic option set to allow basic functionality..
if not matched:
generic = s.get("generic", {})
scanning = ",".join(generic.get("scanning", []))
classes = generic.get("color_class", "css-level-4")
if not isinstance(classes, str):
classes = []
else:
classes = [{"class": classes, "scopes": ""}]
allow_scanning = bool(generic.get("allow_scanning", True) and scanning)
color_trigger = generic.get("color_trigger", util.RE_COLOR_START)
matched = True
# Add user configuration
if matched:
view.settings().set(
'color_helper.scan',
{
"enabled": True,
"allow_scanning": allow_scanning,
"scanning": scanning,
"current_ext": ext,
"current_syntax": syntax,
"last_updated": ch_last_updated,
"color_trigger": color_trigger,
"color_class": classes
}
)
else:
# Nothing enabled here.
view.settings().set(
'color_helper.scan',
{
"enabled": False,
"current_ext": ext,
"current_syntax": syntax,
"last_updated": ch_last_updated
}
)
# Watch for settings changes so we can update if necessary.
if ch_preview_thread is not None:
if not unloading:
view.settings().add_on_change(
'color_helper.reload', lambda view=view: self.on_view_settings_change(view)
)
ch_preview_thread.ignore_all = False
def should_update(self, view):
"""Check if an update should be performed."""
force_update = False
rules = view.settings().get('color_helper.scan', None)
if rules:
last_updated = rules.get('last_updated', None)
if last_updated is None or last_updated < ch_last_updated:
force_update = True
file_name = view.file_name()
ext = os.path.splitext(file_name)[1].lower() if file_name is not None else None
old_ext = rules.get('current_ext')
if ext != old_ext:
force_update = True
syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0]
old_syntax = rules.get("current_syntax")
if old_syntax is None or old_syntax != syntax:
force_update = True
else:
force_update = True
return force_update
def on_view_settings_change(self, view):
"""Post text command event to catch syntax setting."""
if not unloading:
settings = view.settings()
rules = settings.get('color_helper.scan', None)
if rules:
syntax = os.path.splitext(settings.get('syntax').replace('Packages/', '', 1))[0]
old_syntax = rules.get("current_syntax")
if old_syntax is None or old_syntax != syntax:
self.on_activated(view)
if settings.get('color_scheme') != settings.get('color_helper.color_scheme', ''):
ch_preview_thread.modified = True
ch_preview_thread.time = time()
def ignore_event(self, view):
"""Check if event should be ignored."""
return (
view.settings().get('is_widget', False) or
ch_preview_thread is None or
ch_preview_thread.ignore_all or
unloading
)
###########################
# Plugin Initialization
###########################
def settings_reload():
    """Respond to a settings reload: flag a forced re-scan and rebuild previews."""
    global reload_flag
    global ch_last_updated
    # Mark every view stale so the next scan refreshes from scratch.
    reload_flag = True
    ch_last_updated = time()
    # Tear down and (if enabled) restart the preview thread.
    setup_previews()
def setup_previews():
    """Tear down all preview state, then restart previews if enabled."""
    global ch_preview_thread
    global unloading

    # Teardown phase: stop the old thread and scrub every view's state.
    unloading = True
    if ch_preview_thread is not None:
        ch_preview_thread.kill()
    for w in sublime.windows():
        for v in w.views():
            v.settings().clear_on_change('color_helper.reload')
            v.settings().erase('color_helper.scan')
            v.settings().erase('color_helper.scan_override')
            v.settings().set('color_helper.refresh', True)
            v.erase_phantoms('color_helper')
    unloading = False

    # Restart the preview thread only when inline previews are enabled.
    if ch_settings.get('inline_previews', False):
        ch_preview_thread = ChPreviewThread()
        ch_preview_thread.start()
def plugin_loaded():
    """Setup plugin."""
    global ch_settings
    global ch_last_updated

    # Setup settings
    ch_settings = sublime.load_settings('color_helper.sublime-settings')

    # Setup reload events (clear first so repeated loads don't stack handlers)
    ch_settings.clear_on_change('reload')
    ch_settings.add_on_change('reload', settings_reload)
    settings_reload()

    # Start event thread
    setup_previews()
def plugin_unloaded():
    """Kill threads and scrub per-view state on plugin unload."""
    global unloading
    unloading = True

    if ch_preview_thread is not None:
        ch_preview_thread.kill()

    # Clear view events
    ch_settings.clear_on_change('reload')
    for w in sublime.windows():
        for v in w.views():
            v.settings().clear_on_change('color_helper.reload')
            v.settings().erase('color_helper.scan')
            v.settings().erase('color_helper.scan_override')
            v.erase_phantoms('color_helper')

    unloading = False
| |
#!/usr/bin/env python
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import sys
import traceback
from cliff import command
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from six import moves
from six.moves.urllib import parse as urlparse
from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest import config
import tempest.lib.common.http
# Global tempest configuration plus the mutable parser used by --update.
CONF = config.CONF
CONF_PARSER = None  # assigned a ConfigParser instance when updating the file
LOG = logging.getLogger(__name__)
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
path = os.path.join(conf_dir, conf_file)
fd = open(path, 'rw')
return fd
def change_option(option, group, value):
    """Set *option* under *group* in the config parser, creating the section if needed."""
    parser = CONF_PARSER
    if not parser.has_section(group):
        parser.add_section(group)
    parser.set(group, option, str(value))
def print_and_or_update(option, group, value, update):
    """Report a recommended config change; apply it when *update* is true."""
    message = ('Config option %s in group %s should be changed to: %s'
               % (option, group, value))
    print(message)
    if update:
        change_option(option, group, value)
def contains_version(prefix, versions):
    """Return True if any version string in *versions* starts with *prefix*.

    Uses a generator of booleans instead of the old
    ``any([x for x in versions if x.startswith(prefix)])``, which built a
    throwaway list before testing it.
    """
    return any(v.startswith(prefix) for v in versions)
def verify_glance_api_versions(os, update):
    """Compare the configured glance api_v1/api_v2 flags with the live API.

    Prints (and with *update* applies) a flip of each flag that disagrees
    with what the image service actually advertises.
    """
    # Check glance api versions
    _, versions = os.image_client.get_versions()
    for option, prefix in (('api_v1', 'v1.'), ('api_v2', 'v2.')):
        configured = getattr(CONF.image_feature_enabled, option)
        if configured != contains_version(prefix, versions):
            print_and_or_update(option, 'image-feature-enabled',
                                not configured, update)
def _get_unversioned_endpoint(base_url):
endpoint_parts = urlparse.urlparse(base_url)
endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
return endpoint
def _get_api_versions(os, service):
    """Return the version ids advertised by *service*'s unversioned endpoint.

    Queries the root (unversioned) URL of the service endpoint with a raw
    HTTP GET and parses the JSON version document.  Raises on a non-JSON
    response after logging a truncated copy of the body.
    """
    client_dict = {
        'nova': os.servers_client,
        'keystone': os.identity_client,
        'cinder': os.volumes_client,
    }
    # Temporarily strip the version path so base_url points at the root;
    # reset_path() below restores the client afterwards.
    client_dict[service].skip_path()
    endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
    http = tempest.lib.common.http.ClosingHttp(
        CONF.identity.disable_ssl_certificate_validation,
        CONF.identity.ca_certificates_file)
    __, body = http.request(endpoint, 'GET')
    client_dict[service].reset_path()
    try:
        body = json.loads(body)
    except ValueError:
        LOG.error(
            'Failed to get a JSON response from unversioned endpoint %s '
            '(versioned endpoint was %s). Response is:\n%s',
            endpoint, client_dict[service].base_url, body[:100])
        raise
    # Keystone nests its version list one level deeper than nova/cinder.
    if service == 'keystone':
        versions = map(lambda x: x['id'], body['versions']['values'])
    else:
        versions = map(lambda x: x['id'], body['versions'])
    # list() materializes the map object on Python 3.
    return list(versions)
def verify_keystone_api_versions(os, update):
    """Compare the configured identity api_v2/api_v3 flags with the live API.

    Prints (and with *update* applies) a flip of each flag that disagrees
    with what keystone's unversioned endpoint advertises.
    """
    # Check keystone api versions
    versions = _get_api_versions(os, 'keystone')
    for option, prefix in (('api_v2', 'v2.'), ('api_v3', 'v3.')):
        configured = getattr(CONF.identity_feature_enabled, option)
        if configured != contains_version(prefix, versions):
            print_and_or_update(option, 'identity-feature-enabled',
                                not configured, update)
def verify_cinder_api_versions(os, update):
    """Compare the configured volume api_v1/api_v2 flags with the live API.

    Prints (and with *update* applies) a flip of each flag that disagrees
    with what cinder's unversioned endpoint advertises.
    """
    # Check cinder api versions
    versions = _get_api_versions(os, 'cinder')
    for option, prefix in (('api_v1', 'v1.'), ('api_v2', 'v2.')):
        configured = getattr(CONF.volume_feature_enabled, option)
        if configured != contains_version(prefix, versions):
            print_and_or_update(option, 'volume-feature-enabled',
                                not configured, update)
def verify_api_versions(os, service, update):
    """Dispatch to the per-service API version checker, if one exists.

    Services without a checker (e.g. swift, nova) are silently skipped.
    """
    checkers = {
        'cinder': verify_cinder_api_versions,
        'glance': verify_glance_api_versions,
        'keystone': verify_keystone_api_versions,
    }
    checker = checkers.get(service)
    if checker is None:
        return
    checker(os, update)
def get_extension_client(os, service):
    """Return the extensions/capabilities client for *service*.

    Exits the program with status 1 when *service* has no known
    extensions client.
    """
    # NOTE (e0ne): Use Cinder API v2 by default because v1 is deprecated
    if CONF.volume_feature_enabled.api_v2:
        cinder_ext_client = os.volumes_v2_extension_client
    else:
        cinder_ext_client = os.volumes_extension_client
    extensions_client = {
        'nova': os.extensions_client,
        'cinder': cinder_ext_client,
        'neutron': os.network_extensions_client,
        'swift': os.account_client,
    }
    if service not in extensions_client:
        print('No tempest extensions client for %s' % service)
        sys.exit(1)
    return extensions_client[service]
def get_enabled_extensions(service):
    """Return the configured extensions list option for *service*.

    Exits the program with status 1 when *service* has no corresponding
    config option.
    """
    extensions_options = {
        'nova': CONF.compute_feature_enabled.api_extensions,
        'cinder': CONF.volume_feature_enabled.api_extensions,
        'neutron': CONF.network_feature_enabled.api_extensions,
        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    try:
        return extensions_options[service]
    except KeyError:
        print('No supported extensions list option for %s' % service)
        sys.exit(1)
def verify_extensions(os, service, results):
    """Compare the extensions advertised by *service* with the config.

    Records, per extension, whether its enabled/disabled state matches the
    tempest configuration, mutating and returning *results* as
    ``results[service][extension] -> bool`` (True means consistent).
    """
    extensions_client = get_extension_client(os, service)
    if service != 'swift':
        resp = extensions_client.list_extensions()
    else:
        # Swift's account client returns a (response, body) pair.
        __, resp = extensions_client.list_extensions()
    # For Nova, Cinder and Neutron we use the alias name rather than the
    # 'name' field because the alias is considered to be the canonical
    # name.
    if isinstance(resp, dict):
        if service == 'swift':
            # Remove Swift general information from extensions list
            resp.pop('swift')
            extensions = resp.keys()
        else:
            extensions = map(lambda x: x['alias'], resp['extensions'])
    else:
        extensions = map(lambda x: x['alias'], resp)
    # map()/keys() return views/iterators on Python 3; materialize for reuse.
    extensions = list(extensions)
    if not results.get(service):
        results[service] = {}
    extensions_opt = get_enabled_extensions(service)
    if extensions_opt[0] == 'all':
        # 'all' cannot be verified item by item; just record what we found.
        results[service]['extensions'] = extensions
        return results
    # Verify that all configured extensions are actually enabled
    for extension in extensions_opt:
        results[service][extension] = extension in extensions
    # Verify that there aren't additional extensions enabled that aren't
    # specified in the config list
    for extension in extensions:
        if extension not in extensions_opt:
            results[service][extension] = False
    return results
def display_results(results, update, replace):
    """Print the extension verification results and optionally update config.

    *results* is the mapping built by verify_extensions().  With *update*
    the recomputed extension lists are written into CONF_PARSER; *replace*
    controls whether a configured 'all' is expanded to the discovered list.
    """
    # Maps the service codename to its config section name.
    update_dict = {
        'swift': 'object-storage-feature-enabled',
        'nova': 'compute-feature-enabled',
        'cinder': 'volume-feature-enabled',
        'neutron': 'network-feature-enabled',
    }
    for service in results:
        # If all extensions are specified as being enabled there is no way to
        # verify this so we just assume this to be true
        if results[service].get('extensions'):
            if replace:
                output_list = results[service].get('extensions')
            else:
                output_list = ['all']
        else:
            extension_list = get_enabled_extensions(service)
            output_list = []
            for extension in results[service]:
                # False means the config and the live API disagree.
                if not results[service][extension]:
                    if extension in extension_list:
                        print("%s extension: %s should not be included in the "
                              "list of enabled extensions" % (service,
                                                              extension))
                    else:
                        print("%s extension: %s should be included in the list"
                              " of enabled extensions" % (service, extension))
                        output_list.append(extension)
                else:
                    output_list.append(extension)
        if update:
            # Sort List
            output_list.sort()
            # Convert list to a string
            output_string = ', '.join(output_list)
            # Swift stores its list under a differently named option.
            if service == 'swift':
                change_option('discoverable_apis', update_dict[service],
                              output_string)
            else:
                change_option('api_extensions', update_dict[service],
                              output_string)
def check_service_availability(os, update):
    """Cross-check the keystone catalog against the service_available config.

    Returns the list of service codenames that are actually present in the
    catalog (and hence safe to run extension checks against).  With
    *update*, mismatched service_available options are rewritten.
    """
    services = []
    avail_services = []
    # Maps tempest config section names to service codenames.
    codename_match = {
        'volume': 'cinder',
        'network': 'neutron',
        'image': 'glance',
        'object_storage': 'swift',
        'compute': 'nova',
        'orchestration': 'heat',
        'metering': 'ceilometer',
        'telemetry': 'ceilometer',
        'data_processing': 'sahara',
        'baremetal': 'ironic',
        'identity': 'keystone',
        'database': 'trove'
    }
    # Get catalog list for endpoints to use for validation
    _token, auth_data = os.auth_provider.get_auth()
    # Keystone v2 and v3 name the catalog key differently.
    if os.auth_version == 'v2':
        catalog_key = 'serviceCatalog'
    else:
        catalog_key = 'catalog'
    for entry in auth_data[catalog_key]:
        services.append(entry['type'])
    # Pull all catalog types from config file and compare against endpoint list
    for cfgname in dir(CONF._config):
        cfg = getattr(CONF, cfgname)
        catalog_type = getattr(cfg, 'catalog_type', None)
        if not catalog_type:
            continue
        else:
            if cfgname == 'identity':
                # Keystone is a required service for tempest
                continue
            if catalog_type not in services:
                # Configured as available but missing from the catalog.
                if getattr(CONF.service_available, codename_match[cfgname]):
                    print('Endpoint type %s not found either disable service '
                          '%s or fix the catalog_type in the config file' % (
                              catalog_type, codename_match[cfgname]))
                    if update:
                        change_option(codename_match[cfgname],
                                      'service_available', False)
            else:
                # Present in the catalog but configured as unavailable.
                if not getattr(CONF.service_available,
                               codename_match[cfgname]):
                    print('Endpoint type %s is available, service %s should be'
                          ' set as available in the config file.' % (
                              catalog_type, codename_match[cfgname]))
                    if update:
                        change_option(codename_match[cfgname],
                                      'service_available', True)
                        # If we are going to enable this we should allow
                        # extension checks.
                        avail_services.append(codename_match[cfgname])
                else:
                    avail_services.append(codename_match[cfgname])
    return avail_services
def _parser_add_args(parser):
parser.add_argument('-u', '--update', action='store_true',
help='Update the config file with results from api '
'queries. This assumes whatever is set in the '
'config file is incorrect. In the case of '
'endpoint checks where it could either be the '
'incorrect catalog type or the service available '
'option the service available option is assumed '
'to be incorrect and is thus changed')
parser.add_argument('-o', '--output',
help="Output file to write an updated config file to. "
"This has to be a separate file from the "
"original config file. If one isn't specified "
"with -u the new config file will be printed to "
"STDOUT")
parser.add_argument('-r', '--replace-ext', action='store_true',
help="If specified the all option will be replaced "
"with a full list of extensions")
def parse_args():
    """Build the standalone CLI parser and parse sys.argv.

    Returns the parsed options namespace.
    """
    parser = argparse.ArgumentParser()
    _parser_add_args(parser)
    return parser.parse_args()
def main(opts=None):
    """Entry point: verify the tempest config against live endpoints.

    When *opts* is None the CLI is parsed directly (legacy standalone
    invocation, now deprecated); otherwise *opts* comes from the cliff
    command.  With ``--update`` the config file is re-read into the global
    CONF_PARSER and the corrected version written to ``--output``.
    """
    print('Running config verification...')
    if opts is None:
        print("Use of: 'verify-tempest-config' is deprecated, "
              "please use: 'tempest verify-config'")
        opts = parse_args()
    update = opts.update
    replace = opts.replace_ext
    global CONF_PARSER
    outfile = sys.stdout
    if update:
        conf_file = _get_config_file()
        # NOTE(review): SafeConfigParser/readfp are the Python 2 era APIs;
        # optionxform = str preserves option-name case when rewriting.
        CONF_PARSER = moves.configparser.SafeConfigParser()
        CONF_PARSER.optionxform = str
        CONF_PARSER.readfp(conf_file)
    icreds = credentials.get_credentials_provider('verify_tempest_config')
    try:
        os = clients.Manager(icreds.get_primary_creds())
        services = check_service_availability(os, update)
        results = {}
        # Only services present in the catalog get extension checks.
        for service in ['nova', 'cinder', 'neutron', 'swift']:
            if service not in services:
                continue
            results = verify_extensions(os, service, results)
        # Verify API versions of all services in the keystone catalog and
        # keystone itself.
        services.append('keystone')
        for service in services:
            verify_api_versions(os, service, update)
        display_results(results, update, replace)
        if update:
            conf_file.close()
            if opts.output:
                with open(opts.output, 'w+') as outfile:
                    CONF_PARSER.write(outfile)
    finally:
        # Always release the allocated credentials.
        icreds.clear_creds()
class TempestVerifyConfig(command.Command):
    """Verify your current tempest configuration"""
    def get_parser(self, prog_name):
        # Extend the base cliff parser with the verify-config options.
        parser = super(TempestVerifyConfig, self).get_parser(prog_name)
        _parser_add_args(parser)
        return parser
    def take_action(self, parsed_args):
        # Delegate to main(); log and re-raise so cliff reports the failure.
        try:
            return main(parsed_args)
        except Exception:
            LOG.exception("Failure verifying configuration.")
            traceback.print_exc()
            raise
        return 0  # NOTE(review): unreachable -- the try either returns or raises.
# Allow running this module directly as a script; the supported entry point
# is the 'tempest verify-config' cliff command (TempestVerifyConfig).
if __name__ == "__main__":
    main()
| |
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.vpn import tables
class IPSecSiteConnectionsTab(tabs.TableTab):
    """Tab listing the current tenant's IPSec site connections."""
    table_classes = (tables.IPSecSiteConnectionsTable,)
    name = _("IPSec Site Connections")
    slug = "ipsecsiteconnections"
    template_name = ("horizon/common/_detail_table.html")

    def get_ipsecsiteconnectionstable_data(self):
        """Fetch IPSec site connections; empty list (with a user-visible
        error) on failure."""
        request = self.tab_group.request
        try:
            connections = api.vpn.ipsecsiteconnection_list(
                request, tenant_id=self.request.user.tenant_id)
        except Exception:
            connections = []
            exceptions.handle(
                request,
                _('Unable to retrieve IPSec Site Connections list.'))
        for connection in connections:
            connection.set_id_as_name_if_empty()
        return connections
class VPNServicesTab(tabs.TableTab):
    """Tab listing the current tenant's VPN services."""
    table_classes = (tables.VPNServicesTable,)
    name = _("VPN Services")
    slug = "vpnservices"
    template_name = ("horizon/common/_detail_table.html")

    def get_vpnservicestable_data(self):
        """Fetch VPN services; empty list (with a user-visible error) on
        failure."""
        request = self.tab_group.request
        try:
            services = api.vpn.vpnservice_list(
                request, tenant_id=self.request.user.tenant_id)
        except Exception:
            services = []
            exceptions.handle(request,
                              _('Unable to retrieve VPN Services list.'))
        for service in services:
            service.set_id_as_name_if_empty()
        return services
class IKEPoliciesTab(tabs.TableTab):
    """Tab listing the current tenant's IKE policies."""
    table_classes = (tables.IKEPoliciesTable,)
    name = _("IKE Policies")
    slug = "ikepolicies"
    template_name = ("horizon/common/_detail_table.html")

    def get_ikepoliciestable_data(self):
        """Fetch IKE policies; empty list (with a user-visible error) on
        failure."""
        request = self.tab_group.request
        try:
            policies = api.vpn.ikepolicy_list(
                request, tenant_id=self.request.user.tenant_id)
        except Exception:
            policies = []
            exceptions.handle(request,
                              _('Unable to retrieve IKE Policies list.'))
        for policy in policies:
            policy.set_id_as_name_if_empty()
        return policies
class IPSecPoliciesTab(tabs.TableTab):
    """Tab listing the current tenant's IPSec policies."""
    table_classes = (tables.IPSecPoliciesTable,)
    name = _("IPSec Policies")
    slug = "ipsecpolicies"
    template_name = ("horizon/common/_detail_table.html")

    def get_ipsecpoliciestable_data(self):
        """Fetch IPSec policies; empty list (with a user-visible error) on
        failure."""
        request = self.tab_group.request
        try:
            policies = api.vpn.ipsecpolicy_list(
                request, tenant_id=self.request.user.tenant_id)
        except Exception:
            policies = []
            exceptions.handle(request,
                              _('Unable to retrieve IPSec Policies list.'))
        for policy in policies:
            policy.set_id_as_name_if_empty()
        return policies
class VPNTabs(tabs.TabGroup):
    # Top-level tab group for the VPN panel index page; 'sticky' keeps the
    # last selected tab active across page loads.
    slug = "vpntabs"
    tabs = (IKEPoliciesTab, IPSecPoliciesTab,
            VPNServicesTab, IPSecSiteConnectionsTab,)
    sticky = True
class IKEPolicyDetailsTab(tabs.Tab):
    """Tab rendering the details of a single IKE policy."""
    name = _("IKE Policy Details")
    slug = "ikepolicydetails"
    template_name = "project/vpn/_ikepolicy_details.html"
    failure_url = reverse_lazy('horizon:project:vpn:index')

    def get_context_data(self, request):
        """Look up the IKE policy from the tab group kwargs; redirect to the
        index on failure."""
        policy_id = self.tab_group.kwargs['ikepolicy_id']
        try:
            ikepolicy = api.vpn.ikepolicy_get(request, policy_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve IKE Policy details.'),
                              redirect=self.failure_url)
        return {'ikepolicy': ikepolicy}
class IKEPolicyDetailsTabs(tabs.TabGroup):
    # Single-tab group for the IKE policy details page.
    slug = "ikepolicytabs"
    tabs = (IKEPolicyDetailsTab,)
class IPSecPolicyDetailsTab(tabs.Tab):
    """Tab rendering the details of a single IPSec policy."""
    name = _("IPSec Policy Details")
    slug = "ipsecpolicydetails"
    template_name = "project/vpn/_ipsecpolicy_details.html"
    failure_url = reverse_lazy('horizon:project:vpn:index')

    def get_context_data(self, request):
        """Look up the IPSec policy from the tab group kwargs; redirect to
        the index on failure."""
        policy_id = self.tab_group.kwargs['ipsecpolicy_id']
        try:
            ipsecpolicy = api.vpn.ipsecpolicy_get(request, policy_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve IPSec Policy details.'),
                              redirect=self.failure_url)
        return {'ipsecpolicy': ipsecpolicy}
class IPSecPolicyDetailsTabs(tabs.TabGroup):
    # Single-tab group for the IPSec policy details page.
    slug = "ipsecpolicytabs"
    tabs = (IPSecPolicyDetailsTab,)
class VPNServiceDetailsTab(tabs.Tab):
    """Tab rendering a VPN service together with its IPSec connections."""
    name = _("VPN Service Details")
    slug = "vpnservicedetails"
    template_name = "project/vpn/_vpnservice_details.html"
    failure_url = reverse_lazy('horizon:project:vpn:index')

    def get_context_data(self, request):
        """Look up the VPN service and attach its connection list; redirect
        to the index when the service itself cannot be fetched."""
        service_id = self.tab_group.kwargs['vpnservice_id']
        try:
            vpnservice = api.vpn.vpnservice_get(request, service_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve VPN Service details.'),
                              redirect=self.failure_url)
        # The connection list is best-effort; a failure leaves it empty
        # rather than breaking the whole page.
        try:
            vpnservice.vpnconnections = api.vpn.ipsecsiteconnection_list(
                request, vpnservice_id=service_id)
        except Exception:
            vpnservice.vpnconnections = []
        return {'vpnservice': vpnservice}
class VPNServiceDetailsTabs(tabs.TabGroup):
    # Single-tab group for the VPN service details page.
    slug = "vpnservicetabs"
    tabs = (VPNServiceDetailsTab,)
class IPSecSiteConnectionDetailsTab(tabs.Tab):
    """Tab rendering the details of a single IPSec site connection."""
    name = _("IPSec Site Connection Details")
    slug = "ipsecsiteconnectiondetails"
    template_name = "project/vpn/_ipsecsiteconnection_details.html"
    failure_url = reverse_lazy('horizon:project:vpn:index')

    def get_context_data(self, request):
        """Look up the connection from the tab group kwargs; redirect to the
        index on failure."""
        connection_id = self.tab_group.kwargs['ipsecsiteconnection_id']
        try:
            ipsecsiteconn = api.vpn.ipsecsiteconnection_get(request,
                                                            connection_id)
        except Exception:
            exceptions.handle(
                request,
                _('Unable to retrieve IPSec Site Connection details.'),
                redirect=self.failure_url)
        return {'ipsecsiteconnection': ipsecsiteconn}
class IPSecSiteConnectionDetailsTabs(tabs.TabGroup):
    # Single-tab group for the IPSec site connection details page.
    slug = "ipsecsiteconnectiontabs"
    tabs = (IPSecSiteConnectionDetailsTab,)
| |
#!/usr/bin/env python
from spider import *
import re
sys.path.append("..")
from record import Record
class VideolecturesSpider(Spider):
    """Python 2 spider that crawls videolectures.net event, lecture and
    author pages and stores records through the base Spider db helpers
    (open_db/write_db/close_db and the upgrade bookkeeping)."""
    def __init__(self):
        Spider.__init__(self)
        # Site codename; used to build the on-disk storage paths.
        self.school = 'videolectures'
        # Display label -> type code used in the ajax drilldown URLs.
        # NOTE(review): 'Lecture ' contains a trailing space -- presumably
        # matching the site's label verbatim; confirm before changing.
        self.type_map = {'Lecture ' : 'vl',\
                         'Tutorial' : 'vtt',\
                         'Keynote' : 'vkn',\
                         'Interview' : 'viv',\
                         'Other' : '__'}
        # Subject name -> site category id used in the drilldown URLs.
        self.subject_cid_map = {'Machine Learning' : '16',\
                                'Data Mining' : '36',\
                                'Computer Vision' : '71',\
                                'Network Analysis' : '28',\
                                'Data Visualisation' : '41',\
                                'Natural Language Processing' : '144',\
                                'Pattern Recognition' : '395',\
                                'Text Mining' : '37',\
                                'Web Mining' : '127',\
                                'Robotics' : '69',\
                                'Artificial Intelligence' : '136',\
                                'Big Data' : '602',\
                                'Semantic Web' : '27',\
                                'Web Search' : '163',\
                                'Optimization Methods' : '232'}
    def findLastPage(self, soup):
        """Extract the last page number from the ' Last ' pagination link;
        defaults to 1 when no such link exists."""
        max_page = 1
        for a in soup.find_all('a'):
            if a.text == ' Last ':
                # The page number is embedded in a javascript call like
                # 'foo(12)'; pull out the digits between the parentheses.
                max_page = int(a['href'][a['href'].find('(') + 1 : a['href'].find(')')])
                break
        return max_page
    def processEventData(self, subject):
        """Crawl the event listing pages (cid=13) and store one record per
        event, replacing the db file only when the row count changed."""
        r = requests.get('http://videolectures.net/site/ajax/drilldown/?t=evt&cid=13&w=5')
        soup = BeautifulSoup(r.text)
        max_page = self.findLastPage(soup)
        file_name = self.get_file_name('eecs/' + self.school + '/' + subject, self.school)
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        urls_list = []
        for page in range(1, max_page + 1):
            r = requests.get('http://videolectures.net/site/ajax/drilldown/?o=top&t=evt&p=' + str(page) + '&cid=13&w=5')
            soup = BeautifulSoup(r.text)
            # Anchors carrying a 'lang' attribute are the event links.
            for a in soup.find_all('a'):
                if a.attrs.has_key('lang'):
                    urls_list.append('http://videolectures.net' + a['href'])
            i = 0
            title = ''
            desc = ''
            # Spans come in groups of three per event: title, then two
            # description fragments.
            for span in soup.find_all('span'):
                i += 1
                if i == 1:
                    # NOTE(review): prints the *previous* event's title
                    # (title is assigned on the next line) -- looks like a
                    # misplaced debug print; confirm before relying on it.
                    print title
                    title = span.text.strip()
                if i == 2:
                    desc = 'description:' + span.text.strip() + ' '
                if i == 3:
                    desc += span.text.strip()
                    self.count += 1
                    self.write_db(f, subject + '-' + str(self.count), title, urls_list[self.count - 1], desc)
                    i = 0
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"
    def processData(self, subject):
        """Crawl every content type of *subject* (via subject_cid_map) and
        store one record per lecture thumbnail found."""
        file_name = self.get_file_name('eecs/' + self.school + '/' + subject, self.school)
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        print 'processing ' + subject
        for s in self.type_map.keys():
            r = requests.get('http://videolectures.net/site/ajax/drilldown/?t=' + self.type_map.get(s) + '&cid=' + self.subject_cid_map.get(subject) + '&w=5')
            soup = BeautifulSoup(r.text)
            max_page = self.findLastPage(soup)
            for page in range(1, max_page + 1):
                r = requests.get('http://videolectures.net/site/ajax/drilldown/?o=top&t=' + self.type_map.get(s) + '&p=' + str(page) + '&cid=' + self.subject_cid_map.get(subject) + '&w=5')
                soup = BeautifulSoup(r.text)
                for div in soup.find_all('div', class_='lec_thumb'):
                    instructors = ''
                    title = div.a.span.span.text.strip()
                    url = 'http://videolectures.net' + div.a['href']
                    # Re-parse the thumbnail fragment to find the author div.
                    soup1 = BeautifulSoup(div.prettify())
                    div = soup1.find('div', class_='author')
                    if div != None and div.span != None:
                        instructors = 'instructors:' + div.span.text.strip()
                    self.count += 1
                    vl_num = 'vl-' + str(self.subject_cid_map.get(subject)) + '-' + str(self.count)
                    print vl_num + ' ' + title
                    self.write_db(f, vl_num, title, url, instructors)
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"
    def upFirstChar(self, text):
        """Return *text* with the first letter of every space-separated word
        upper-cased (simple title-casing), stripped of outer whitespace."""
        result = ''
        for i in range(0, len(text)):
            if (i > 0 and text[i - 1] == ' ') or i == 0:
                result += str(text[i]).upper()
            else:
                result += text[i]
        return result.strip()
    def getNameAndDescription(self, url):
        """Fetch an author page and return (name, description), where the
        description accumulates homepage and bio text when present."""
        name = ''
        homepage = ''
        desc = ''
        r = requests.get(url)
        soup = BeautifulSoup(r.text)
        span_name = soup.find('span', class_='auth_name')
        span_desc = soup.find("span", id="auth_desc_edit")
        if span_name != None and span_name.a != None:
            # Replace non-breaking spaces with plain spaces.
            name = span_name.a.text.replace(' ',' ').strip()
            homepage = span_name.a['href']
            desc += 'homepage:' + homepage + ' '
        if span_desc != None:
            desc += 'description:' + span_desc.text.replace('\n', ' ').strip()
        return name, desc
    def processUserData(self):
        """Crawl the author index (23 pages), rank authors by video count,
        and store them; only the top 100 get their profile page fetched."""
        print 'processing user data'
        file_name = self.get_file_name('eecs/' + self.school + '/user', self.school)
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        user_dict= {}
        for page in range(1, 24):
            r = requests.get('http://videolectures.net/site/list/authors/?page=' + str(page))
            soup = BeautifulSoup(r.text)
            for tr in soup.find_all('tr'):
                # Skip the header row (contains 'Author').
                if tr.text.find('Author') == -1:
                    soup1 = BeautifulSoup(tr.prettify())
                    video_pos = tr.text.find('video')
                    views_pos = tr.text.find('views')
                    url = 'http://videolectures.net' + soup1.find('a')['href']
                    desc = ''
                    vl_id = ''
                    # Derive a display name from the URL slug.
                    title = self.upFirstChar(soup1.find('a')['href'][1:].replace('/','').replace('_', ' '))
                    self.count += 1
                    # vl_id is '<video count>-<running index>' so the sort
                    # below can rank authors by number of videos.
                    if tr.text.find('videos') != -1:
                        vl_id = str(tr.text[video_pos + 6 : views_pos].strip()) + '-' + str(self.count)
                    else:
                        vl_id = str(tr.text[video_pos + 5 : views_pos].strip()) + '-' + str(self.count)
                        desc = 'organization:' + tr.text[views_pos + 5 :]
                    if views_pos == -1:
                        vl_id = '0' + '-' + str(self.count)
                        desc = 'organization:' + tr.text[video_pos + 5 :]
                    print vl_id + ' ' + title
                    user_dict[vl_id] = Record(self.get_storage_format(vl_id, title, url, desc))
        self.count = 0
        # Sort by video count (the prefix of the id), descending.
        for item in sorted(user_dict.items(), key=lambda user_dict:int(user_dict[1].get_id()[0 : user_dict[1].get_id().find('-')].strip()), reverse=True):
            self.count += 1
            name = ''
            desc = ''
            # Only the 100 most prolific authors get a profile-page fetch.
            if self.count <= 100 and item[1].get_url().strip().startswith('http'):
                name, desc = self.getNameAndDescription(item[1].get_url().strip())
            uid = 'vl-' + item[1].get_id()[0 : item[1].get_id().find('-')] + '-' + str(self.count)
            if name == '':
                name = item[1].get_title().strip()
            #print uid + ' ' + name
            self.write_db(f, uid, name, item[1].get_url().strip(), item[1].get_describe().strip() + ' ' + desc)
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"
    def doWork(self):
        """Run the full crawl: events, every subject, then authors."""
        self.processEventData('event')
        for subject in self.subject_cid_map.keys():
            self.processData(subject)
        self.processUserData()
# Guard the crawl behind a __main__ check so that importing this module
# (e.g. for reuse or testing) does not kick off a full network crawl.
if __name__ == '__main__':
    start = VideolecturesSpider()
    start.doWork()
| |
import copy
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase
from django.test.client import Client
from guardian.admin import GuardedModelAdmin
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_perms_for_model
class ContentTypeGuardedAdmin(GuardedModelAdmin):
    # Minimal GuardedModelAdmin subclass used as the admin under test.
    pass
# Register it for ContentType so the object-permission admin views exist.
admin.site.register(ContentType, ContentTypeGuardedAdmin)
class AdminTests(TestCase):
def setUp(self):
self.admin = User.objects.create_superuser('admin', 'admin@example.com',
'admin')
self.user = User.objects.create_user('joe', 'joe@example.com', 'joe')
self.group = Group.objects.create(name='group')
self.client = Client()
self.obj = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
self.obj_info = self.obj._meta.app_label, self.obj._meta.module_name
def tearDown(self):
self.client.logout()
def _login_superuser(self):
self.client.login(username='admin', password='admin')
def test_view_manage_wrong_obj(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
kwargs={'object_pk': -10, 'user_id': self.user.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['object'], self.obj)
def test_view_manage_wrong_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
kwargs={'object_pk': self.obj.pk, 'user_id': -10})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view_manage_user_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': self.user.username, 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_user' %
self.obj_info, kwargs={'object_pk': self.obj.pk,
'user_id': self.user.id})
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_negative_user_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
self.user = User.objects.create(username='negative_id_user', id=-2010)
data = {'user': self.user.username, 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_user' %
self.obj_info, args=[self.obj.pk, self.user.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_user_form_wrong_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': 'wrong-user', 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('user' in response.context['user_form'].errors)
def test_view_manage_user_form_wrong_field(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': '<xss>', 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('user' in response.context['user_form'].errors)
def test_view_manage_user_form_empty_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': '', 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('user' in response.context['user_form'].errors)
def test_view_manage_user_wrong_perms(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
args=[self.obj.pk, self.user.id])
perms = ['change_user'] # This is not self.obj related permission
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue('permissions' in response.context['form'].errors)
def test_view_manage_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
args=[self.obj.pk, self.user.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
choices = set([c[0] for c in
response.context['form'].fields['permissions'].choices])
self.assertEqual(
set([ p.codename for p in get_perms_for_model(self.obj)]),
choices,
)
# Add some perms and check if changes were persisted
perms = ['change_%s' % self.obj_info[1], 'delete_%s' % self.obj_info[1]]
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.user, self.obj)),
set(perms),
)
# Remove perm and check if change was persisted
perms = ['change_%s' % self.obj_info[1]]
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.user, self.obj)),
set(perms),
)
def test_view_manage_group_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': self.group.name, 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_negative_group_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
self.group = Group.objects.create(name='neagive_id_group', id=-2010)
data = {'group': self.group.name, 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_group_form_wrong_group(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': 'wrong-group', 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('group' in response.context['group_form'].errors)
def test_view_manage_group_form_wrong_field(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': '<xss>', 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('group' in response.context['group_form'].errors)
def test_view_manage_group_form_empty_group(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': '', 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('group' in response.context['group_form'].errors)
def test_view_manage_group_wrong_perms(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
perms = ['change_user'] # This is not self.obj related permission
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue('permissions' in response.context['form'].errors)
def test_view_manage_group(self):
    """End-to-end check of the group object-permissions management view.

    Verifies that the form offers exactly the model's permissions, then
    that adding and removing permissions through POSTs is persisted.
    """
    self._login_superuser()
    url = reverse('admin:%s_%s_permissions_manage_group' %
                  self.obj_info, args=[self.obj.pk, self.group.id])
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    # The choices rendered must be exactly the codenames declared for the model.
    choices = set([c[0] for c in
                   response.context['form'].fields['permissions'].choices])
    self.assertEqual(
        set([p.codename for p in get_perms_for_model(self.obj)]),
        choices,
    )

    # Add some perms and check if changes were persisted
    perms = ['change_%s' % self.obj_info[1], 'delete_%s' % self.obj_info[1]]
    data = {'permissions': perms}
    response = self.client.post(url, data, follow=True)
    # Successful save redirects (exactly one 302 hop) back to the view.
    self.assertEqual(len(response.redirect_chain), 1)
    self.assertEqual(response.redirect_chain[0][1], 302)
    self.assertEqual(
        set(get_perms(self.group, self.obj)),
        set(perms),
    )

    # Remove perm and check if change was persisted
    perms = ['delete_%s' % self.obj_info[1]]
    data = {'permissions': perms}
    response = self.client.post(url, data, follow=True)
    self.assertEqual(len(response.redirect_chain), 1)
    self.assertEqual(response.redirect_chain[0][1], 302)
    self.assertEqual(
        set(get_perms(self.group, self.obj)),
        set(perms),
    )
if 'django.contrib.admin' not in settings.INSTALLED_APPS:
    # Skip admin tests if admin app is not registered: we simply replace
    # AdminTests with an empty TestCase subclass so the runner discovers
    # no admin-related tests.
    AdminTests = type('AdminTests', (TestCase,), {})
class GuardedModelAdminTests(TestCase):
    """Tests for ``GuardedModelAdmin`` attribute overrides and owner filtering."""

    def _get_gma(self, attrs=None, name=None, model=None):
        """
        Returns ``GuardedModelAdmin`` instance.

        Args:
            attrs: optional dict of class attributes for the dynamic subclass.
            name: optional name for the dynamic subclass (default 'GMA').
            model: optional model class to register (default ``User``).
        """
        attrs = attrs or {}
        name = name or 'GMA'
        model = model or User
        GMA = type(name, (GuardedModelAdmin,), attrs)
        gma = GMA(model, admin.site)
        return gma

    # NOTE: these attribute tests previously used assertTrue(a, b), which
    # treats the second argument as a failure *message* and therefore always
    # passed. assertEqual performs the intended comparison (and matches the
    # style of GrappelliGuardedModelAdminTests below).
    def test_obj_perms_manage_template_attr(self):
        attrs = {'obj_perms_manage_template': 'foobar.html'}
        gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_template(), 'foobar.html')

    def test_obj_perms_manage_user_template_attr(self):
        attrs = {'obj_perms_manage_user_template': 'foobar.html'}
        gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_user_template(),
                         'foobar.html')

    def test_obj_perms_manage_user_form_attr(self):
        attrs = {'obj_perms_manage_user_form': forms.Form}
        gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_user_form(), forms.Form)

    def test_obj_perms_manage_group_template_attr(self):
        attrs = {'obj_perms_manage_group_template': 'foobar.html'}
        gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_group_template(),
                         'foobar.html')

    def test_obj_perms_manage_group_form_attr(self):
        attrs = {'obj_perms_manage_group_form': forms.Form}
        gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_group_form(), forms.Form)

    def test_user_can_acces_owned_objects_only(self):
        """A non-superuser should only see entries owned via the configured field."""
        attrs = {
            'user_can_access_owned_objects_only': True,
            'user_owned_objects_field': 'user',
        }
        gma = self._get_gma(attrs=attrs, model=LogEntry)
        joe = User.objects.create_user('joe', 'joe@example.com', 'joe')
        jane = User.objects.create_user('jane', 'jane@example.com', 'jane')
        ctype = ContentType.objects.get_for_model(User)
        joe_entry = LogEntry.objects.create(user=joe, content_type=ctype,
                                            object_id=joe.id, action_flag=1,
                                            change_message='foo')
        LogEntry.objects.create(user=jane, content_type=ctype,
                                object_id=jane.id, action_flag=1,
                                change_message='bar')
        request = HttpRequest()
        request.user = joe
        qs = gma.queryset(request)
        self.assertEqual([e.pk for e in qs], [joe_entry.pk])

    def test_user_can_acces_owned_objects_only_unless_superuser(self):
        """The owner filter must not apply to superusers: they see everything."""
        attrs = {
            'user_can_access_owned_objects_only': True,
            'user_owned_objects_field': 'user',
        }
        gma = self._get_gma(attrs=attrs, model=LogEntry)
        joe = User.objects.create_superuser('joe', 'joe@example.com', 'joe')
        jane = User.objects.create_user('jane', 'jane@example.com', 'jane')
        ctype = ContentType.objects.get_for_model(User)
        joe_entry = LogEntry.objects.create(user=joe, content_type=ctype,
                                            object_id=joe.id, action_flag=1,
                                            change_message='foo')
        jane_entry = LogEntry.objects.create(user=jane, content_type=ctype,
                                             object_id=jane.id, action_flag=1,
                                             change_message='bar')
        request = HttpRequest()
        request.user = joe
        qs = gma.queryset(request)
        self.assertItemsEqual([e.pk for e in qs], [joe_entry.pk, jane_entry.pk])
class GrappelliGuardedModelAdminTests(TestCase):
    """Checks that grappelli-specific templates are chosen when the
    'grappelli' app is in ``INSTALLED_APPS``."""

    def _get_gma(self, attrs=None, name=None, model=None):
        """
        Returns ``GuardedModelAdmin`` instance.
        """
        attrs = attrs or {}
        name = name or 'GMA'
        model = model or User
        GMA = type(name, (GuardedModelAdmin,), attrs)
        gma = GMA(model, admin.site)
        return gma

    def setUp(self):
        # Remember the original app list so tearDown can restore it.  The
        # previous approach (copying the settings object at class-definition
        # time and rebinding this module's ``settings`` global) never
        # actually restored INSTALLED_APPS for other modules holding a
        # reference to the shared settings object.
        self._orig_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = ['grappelli'] + list(settings.INSTALLED_APPS)

    def tearDown(self):
        settings.INSTALLED_APPS = self._orig_installed_apps

    def test_get_obj_perms_manage_template(self):
        gma = self._get_gma()
        self.assertEqual(gma.get_obj_perms_manage_template(),
                         'admin/guardian/contrib/grappelli/obj_perms_manage.html')

    def test_get_obj_perms_manage_user_template(self):
        gma = self._get_gma()
        self.assertEqual(gma.get_obj_perms_manage_user_template(),
                         'admin/guardian/contrib/grappelli/obj_perms_manage_user.html')

    def test_get_obj_perms_manage_group_template(self):
        gma = self._get_gma()
        self.assertEqual(gma.get_obj_perms_manage_group_template(),
                         'admin/guardian/contrib/grappelli/obj_perms_manage_group.html')
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the bindings for command line integration and dynamic loading of tasks
If you don't want to run luigi from the command line, you may use the methods
defined in this module to programmatically run luigi.
"""
import logging
import logging.config
import os
import sys
import tempfile
import signal
import warnings
from luigi import configuration
from luigi import lock
from luigi import parameter
from luigi import rpc
from luigi import scheduler
from luigi import task
from luigi import worker
from luigi import execution_summary
from luigi.cmdline_parser import CmdlineParser
def setup_interface_logging(conf_file='', level_name='DEBUG'):
    """Configure the 'luigi-interface' logger, at most once per process.

    :param conf_file: path to a ``logging.config.fileConfig`` file; when it is
        the empty string a simple stderr handler is installed instead.
    :param level_name: name of the log level to use in the default setup
        (ignored when *conf_file* is given).
    """
    # One-shot guard kept as an attribute on the function object itself.
    if getattr(setup_interface_logging, "has_run", False):
        return

    if conf_file != '':
        # Delegate everything to the user-supplied logging config file.
        logging.config.fileConfig(conf_file, disable_existing_loggers=False)
    else:
        # No log config given: emit "LEVEL: message" lines to stderr.
        chosen_level = getattr(logging, level_name, logging.DEBUG)
        handler = logging.StreamHandler()
        handler.setLevel(chosen_level)
        handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
        interface_logger = logging.getLogger('luigi-interface')
        interface_logger.setLevel(chosen_level)
        interface_logger.addHandler(handler)

    setup_interface_logging.has_run = True
class core(task.Config):
    ''' Keeps track of a bunch of environment params.

    Uses the internal luigi parameter mechanism.
    The nice thing is that we can instantiate this class
    and get an object with all the environment variables set.
    This is arguably a bit of a hack.
    '''
    # Options here are global, so they are not prefixed with "--core-" on
    # the command line.
    use_cmdline_section = False

    # --- scheduler selection -------------------------------------------------
    local_scheduler = parameter.BoolParameter(
        default=False,
        description='Use an in-memory central scheduler. Useful for testing.',
        always_in_help=True)
    scheduler_host = parameter.Parameter(
        default='localhost',
        description='Hostname of machine running remote scheduler',
        config_path=dict(section='core', name='default-scheduler-host'))
    scheduler_port = parameter.IntParameter(
        default=8082,
        description='Port of remote scheduler api process',
        config_path=dict(section='core', name='default-scheduler-port'))
    # When set, takes precedence over scheduler_host/scheduler_port
    # (see _schedule_and_run).
    scheduler_url = parameter.Parameter(
        default='',
        description='Full path to remote scheduler',
        config_path=dict(section='core', name='default-scheduler-url'),
    )

    # --- process locking -----------------------------------------------------
    lock_size = parameter.IntParameter(
        default=1,
        description="Maximum number of workers running the same command")
    no_lock = parameter.BoolParameter(
        default=False,
        description='Ignore if similar process is already running')
    lock_pid_dir = parameter.Parameter(
        default=os.path.join(tempfile.gettempdir(), 'luigi'),
        description='Directory to store the pid file')
    take_lock = parameter.BoolParameter(
        default=False,
        description='Signal other processes to stop getting work if already running')

    # --- worker and logging --------------------------------------------------
    workers = parameter.IntParameter(
        default=1,
        description='Maximum number of parallel tasks to run')
    logging_conf_file = parameter.Parameter(
        default='',
        description='Configuration file for logging')
    log_level = parameter.ChoiceParameter(
        default='DEBUG',
        choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        description="Default log level to use when logging_conf_file is not set")

    # --- misc ----------------------------------------------------------------
    module = parameter.Parameter(
        default='',
        description='Used for dynamic loading of modules',
        always_in_help=True)
    parallel_scheduling = parameter.BoolParameter(
        default=False,
        description='Use multiprocessing to do scheduling in parallel.')
    parallel_scheduling_processes = parameter.IntParameter(
        default=0,
        description='The number of processes to use for scheduling in parallel.'
                    ' By default the number of available CPUs will be used')
    assistant = parameter.BoolParameter(
        default=False,
        description='Run any task from the scheduler.')
    help = parameter.BoolParameter(
        default=False,
        description='Show most common flags and all task-specific flags',
        always_in_help=True)
    help_all = parameter.BoolParameter(
        default=False,
        description='Show all command line flags',
        always_in_help=True)
class _WorkerSchedulerFactory(object):
    """Default factory for the scheduler and worker used by :func:`_schedule_and_run`."""

    def create_local_scheduler(self):
        # In-process scheduler: prune aggressively, no task history recorded.
        return scheduler.Scheduler(prune_on_get_work=True, record_task_history=False)

    def create_remote_scheduler(self, url):
        # RPC client talking to a central scheduler at the given URL.
        return rpc.RemoteScheduler(url)

    def create_worker(self, scheduler, worker_processes, assistant=False):
        return worker.Worker(
            scheduler=scheduler, worker_processes=worker_processes, assistant=assistant)
def _schedule_and_run(tasks, worker_scheduler_factory=None, override_defaults=None):
    """
    Schedule *tasks* on a local or remote scheduler and run them with a worker.

    :param tasks: list of task instances to schedule.
    :param worker_scheduler_factory: factory for the scheduler/worker pair;
        defaults to :class:`_WorkerSchedulerFactory`.
    :param override_defaults: dict of :class:`core` parameter overrides.
    :return: dict with keys ``success`` (True if all tasks and their
        dependencies were successfully run, or already completed; False if any
        error occurred) and ``worker`` (the worker instance used).
    :raises PidLockAlreadyTakenExit: if the pid lock could not be acquired.
    """
    if worker_scheduler_factory is None:
        worker_scheduler_factory = _WorkerSchedulerFactory()
    if override_defaults is None:
        override_defaults = {}
    env_params = core(**override_defaults)
    # search for logging configuration path first on the command line, then
    # in the application config file
    logging_conf = env_params.logging_conf_file
    if logging_conf != '' and not os.path.exists(logging_conf):
        raise Exception(
            "Error: Unable to locate specified logging configuration file!"
        )

    if not configuration.get_config().getboolean(
            'core', 'no_configure_logging', False):
        setup_interface_logging(logging_conf, env_params.log_level)

    # Optionally signal competing processes (SIGUSR1) to stop taking work.
    kill_signal = signal.SIGUSR1 if env_params.take_lock else None
    if (not env_params.no_lock and
            not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size, kill_signal))):
        raise PidLockAlreadyTakenExit()

    if env_params.local_scheduler:
        sch = worker_scheduler_factory.create_local_scheduler()
    else:
        # An explicit scheduler_url wins over host/port configuration.
        if env_params.scheduler_url != '':
            url = env_params.scheduler_url
        else:
            url = 'http://{host}:{port:d}/'.format(
                host=env_params.scheduler_host,
                port=env_params.scheduler_port,
            )
        sch = worker_scheduler_factory.create_remote_scheduler(url=url)

    worker = worker_scheduler_factory.create_worker(
        scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant)

    success = True
    logger = logging.getLogger('luigi-interface')
    with worker:
        for t in tasks:
            success &= worker.add(t, env_params.parallel_scheduling, env_params.parallel_scheduling_processes)
        logger.info('Done scheduling tasks')
        success &= worker.run()
    logger.info(execution_summary.summary(worker))
    return dict(success=success, worker=worker)
class PidLockAlreadyTakenExit(SystemExit):
    """
    The exception thrown by :py:func:`luigi.run`, when the lock file is inaccessible
    """
def run(*args, **kwargs):
    """
    Run luigi from the command line; thin wrapper around :func:`_run`.

    :return: True if all tasks and their dependencies were successfully run.
    """
    return _run(*args, **kwargs)['success']
def _run(cmdline_args=None, main_task_cls=None,
         worker_scheduler_factory=None, use_dynamic_argparse=None, local_scheduler=False):
    """
    Please dont use. Instead use `luigi` binary.

    Run from cmdline using argparse.

    :param cmdline_args: command line arguments; defaults to ``sys.argv[1:]``.
    :param main_task_cls: task class whose family is prepended so it becomes
        the task to run.
    :param worker_scheduler_factory: forwarded to :func:`_schedule_and_run`.
    :param use_dynamic_argparse: Deprecated and ignored
    :param local_scheduler: when True, appends ``--local-scheduler``.
    :return: dict with ``success`` and ``worker`` keys
        (see :func:`_schedule_and_run`).
    """
    if use_dynamic_argparse is not None:
        warnings.warn("use_dynamic_argparse is deprecated, don't set it.",
                      DeprecationWarning, stacklevel=2)
    if cmdline_args is None:
        cmdline_args = sys.argv[1:]

    if main_task_cls:
        # NOTE: mutates the caller-supplied list when cmdline_args was given.
        cmdline_args.insert(0, main_task_cls.task_family)
    if local_scheduler:
        cmdline_args.append('--local-scheduler')
    with CmdlineParser.global_instance(cmdline_args) as cp:
        return _schedule_and_run([cp.get_task_obj()], worker_scheduler_factory)
def build(tasks, worker_scheduler_factory=None, **env_params):
    """
    Run internally, bypassing the cmdline parsing.

    Useful if you have some luigi code that you want to run internally.
    Example:

    .. code-block:: python

        luigi.build([MyTask1(), MyTask2()], local_scheduler=True)

    One notable difference is that `build` defaults to not using
    the identical process lock. Otherwise, `build` would only be
    callable once from each process.

    :param tasks:
    :param worker_scheduler_factory:
    :param env_params:
    :return: True if there were no scheduling errors, even if tasks may fail.
    """
    # Skip the per-process pid lock unless the caller explicitly asked for it.
    env_params.setdefault("no_lock", True)
    outcome = _schedule_and_run(tasks, worker_scheduler_factory,
                                override_defaults=env_params)
    return outcome['success']
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates windows and posix stub files for a given set of signatures.
For libraries that need to be loaded outside of the standard executable startup
path mechanism, stub files need to be generated for the wanted functions. In
windows, this is done via "def" files and the delay load mechanism. On a posix
system, a set of stub functions need to be generated that dispatch to functions
found via dlsym.
This script takes a set of files, where each file is a list of C-style
signatures (one signature per line). The output is either a windows def file,
or a header + implementation file of stubs suitable for use in a posix system.
This script also handles variadic functions, e.g.
void printf(const char* s, ...);
TODO(hclam): Fix the situation for variadic functions.
Stub for the above function will be generated and inside the stub function it
is translated to:
void printf(const char* s, ...) {
printf_ptr(s, (void*)arg1);
}
Only one argument from the variadic arguments is used and it will be used as
type void*.
"""
__author__ = 'ajwong@chromium.org (Albert J. Wong)'
import optparse
import os
import re
import string
import subprocess
import sys
class Error(Exception):
  """Base class for all errors raised by this script."""


class BadSignatureError(Error):
  """Raised when an input line cannot be parsed as a C function signature."""


class SubprocessError(Error):
  """Raised when an invoked tool exits with a non-zero return code.

  Attributes:
    message: Human-readable description of the failed operation.
    error_code: The subprocess's numeric exit status.
  """

  def __init__(self, message, error_code):
    Error.__init__(self)
    self.message = message
    self.error_code = error_code

  def __str__(self):
    # Bug fix: the original interpolated (message, error_code) into
    # 'Failed with code %s: %s', printing the message where the code
    # belongs.  Interpolate them in the order the template implies.
    return 'Failed with code %s: %s' % (self.error_code, self.message)
# Regular expression used to parse function signatures in the input files.
# The regex is built around identifying the "identifier" for the function name.
# We consider the identifier to be the string that follows these constraints:
#
# 1) Starts with [_a-zA-Z] (C++ spec 2.10).
# 2) Continues with [_a-zA-Z0-9] (C++ spec 2.10).
# 3) Precedes an opening parenthesis by 0 or more whitespace chars.
#
# From that, all preceding characters are considered the return value.
# Trailing characters should have a substring matching the form (.*). That
# is considered the arguments.
# Raw strings: otherwise \s and \( are invalid Python string escapes
# (a SyntaxWarning/DeprecationWarning on modern interpreters).
SIGNATURE_REGEX = re.compile(r'(?P<return_type>.+?)'
                             r'(?P<name>[_a-zA-Z][_a-zA-Z0-9]+)\s*'
                             r'\((?P<params>.*?)\)')

# Used for generating C++ identifiers.
INVALID_C_IDENT_CHARS = re.compile(r'[^_a-zA-Z0-9]')
# Constants defining the supported file type options.
FILE_TYPE_WIN_X86 = 'windows_lib'
FILE_TYPE_WIN_X64 = 'windows_lib_x64'
FILE_TYPE_POSIX_STUB = 'posix_stubs'
FILE_TYPE_WIN_DEF = 'windows_def'
# Template for generating a stub function definition. Includes a forward
# declaration marking the symbol as weak. This template takes the following
# named parameters.
# return_type: The return type.
# name: The name of the function.
# params: The parameters to the function.
# return_prefix: 'return ' if this function is not void. '' otherwise.
# arg_list: The arguments used to call the stub function.
STUB_FUNCTION_DEFINITION = (
"""extern %(return_type)s %(name)s(%(params)s) __attribute__((weak));
%(return_type)s %(name)s(%(params)s) {
%(return_prefix)s%(name)s_ptr(%(arg_list)s);
}""")
# Template for generating a variadic stub function definition with return
# value.
# Includes a forward declaration marking the symbol as weak.
# This template takes the following named parameters.
# return_type: The return type.
# name: The name of the function.
# params: The parameters to the function.
# arg_list: The arguments used to call the stub function without the
# variadic argument.
# last_named_arg: Name of the last named argument before the variadic
# argument.
VARIADIC_STUB_FUNCTION_DEFINITION = (
"""extern %(return_type)s %(name)s(%(params)s) __attribute__((weak));
%(return_type)s %(name)s(%(params)s) {
va_list args___;
va_start(args___, %(last_named_arg)s);
%(return_type)s ret___ = %(name)s_ptr(%(arg_list)s, va_arg(args___, void*));
va_end(args___);
return ret___;
}""")
# Template for generating a variadic stub function definition without
# return value.
# Includes a forward declaration marking the symbol as weak.
# This template takes the following named parameters.
# name: The name of the function.
# params: The parameters to the function.
# arg_list: The arguments used to call the stub function without the
# variadic argument.
# last_named_arg: Name of the last named argument before the variadic
# argument.
VOID_VARIADIC_STUB_FUNCTION_DEFINITION = (
"""extern void %(name)s(%(params)s) __attribute__((weak));
void %(name)s(%(params)s) {
va_list args___;
va_start(args___, %(last_named_arg)s);
%(name)s_ptr(%(arg_list)s, va_arg(args___, void*));
va_end(args___);
}""")
# Template for the preamble for the stub header file with the header guards,
# standard set of includes, and namespace opener. This template takes the
# following named parameters:
# guard_name: The macro to use as the header guard.
# namespace: The namespace for the stub functions.
STUB_HEADER_PREAMBLE = """// This is generated file. Do not modify directly.
#ifndef %(guard_name)s
#define %(guard_name)s
#include <map>
#include <string>
#include <vector>
#include "base/logging.h"
namespace %(namespace)s {
"""
# Template for the end of the stub header. This closes the namespace and the
# header guards. This template takes the following named parameters:
# guard_name: The macro to use as the header guard.
# namespace: The namespace for the stub functions.
STUB_HEADER_CLOSER = """} // namespace %(namespace)s
#endif // %(guard_name)s
"""
# The standard includes needed for the stub implementation file. Takes one
# string substitution with the path to the associated stub header file.
IMPLEMENTATION_PREAMBLE = """// This is generated file. Do not modify directly.
#include "%s"
#include <stdlib.h> // For NULL.
#include <dlfcn.h> // For dysym, dlopen.
#include <map>
#include <vector>
"""
# The start and end templates for the enum definitions used by the Umbrella
# initializer.
UMBRELLA_ENUM_START = """// Enum and typedef for umbrella initializer.
enum StubModules {
"""
UMBRELLA_ENUM_END = """ kNumStubModules
};
"""
# Start and end of the extern "C" section for the implementation contents.
IMPLEMENTATION_CONTENTS_C_START = """extern "C" {
"""
IMPLEMENTATION_CONTENTS_C_END = """
} // extern "C"
"""
# Templates for the start and end of a namespace. Takes one parameter, the
# namespace name.
NAMESPACE_START = """namespace %s {
"""
NAMESPACE_END = """} // namespace %s
"""
# Comment to include before the section declaring all the function pointers
# used by the stub functions.
FUNCTION_POINTER_SECTION_COMMENT = (
"""// Static pointers that will hold the location of the real function
// implementations after the module has been loaded.
""")
# Template for the module initialization check function. This template
# takes two parameters: the function name, and the conditional used to
# verify the module's initialization.
MODULE_INITIALIZATION_CHECK_FUNCTION = (
"""// Returns true if all stubs have been properly initialized.
bool %s() {
if (%s) {
return true;
} else {
return false;
}
}
""")
# Template for the line that initialize the stub pointer. This template takes
# the following named parameters:
# name: The name of the function.
# return_type: The return type.
# params: The parameters to the function.
STUB_POINTER_INITIALIZER = """ %(name)s_ptr =
reinterpret_cast<%(return_type)s (*)(%(parameters)s)>(
dlsym(module, "%(name)s"));
VLOG_IF(1, !%(name)s_ptr) << "Couldn't load %(name)s, dlerror() says:\\n"
<< dlerror();
"""
# Template for module initializer function start and end. This template takes
# one parameter which is the initializer function name.
MODULE_INITIALIZE_START = """// Initializes the module stubs.
void %s(void* module) {
"""
MODULE_INITIALIZE_END = """}
"""
# Template for module uninitializer function start and end. This template
# takes one parameter which is the initializer function name.
MODULE_UNINITIALIZE_START = (
"""// Uninitialize the module stubs. Reset pointers to NULL.
void %s() {
""")
MODULE_UNINITIALIZE_END = """}
"""
# Open namespace and add typedef for internal data structures used by the
# umbrella initializer.
UMBRELLA_INITIALIZER_START = """namespace %s {
typedef std::map<StubModules, void*> StubHandleMap;
"""
# Function to close DSOs on error and clean up dangling references.
UMBRELLA_INITIALIZER_CLEANUP_FUNCTION = (
"""static void CloseLibraries(StubHandleMap* stub_handles) {
for (StubHandleMap::const_iterator it = stub_handles->begin();
it != stub_handles->end();
++it) {
dlclose(it->second);
}
stub_handles->clear();
}
""")
# Function to initialize each DSO for the given paths.
UMBRELLA_INITIALIZER_INITIALIZE_FUNCTION_START = (
"""bool InitializeStubs(const StubPathMap& path_map) {
StubHandleMap opened_libraries;
for (int i = 0; i < kNumStubModules; ++i) {
StubModules cur_module = static_cast<StubModules>(i);
// If a module is missing, we fail.
StubPathMap::const_iterator it = path_map.find(cur_module);
if (it == path_map.end()) {
CloseLibraries(&opened_libraries);
return false;
}
// Otherwise, attempt to dlopen the library.
const std::vector<std::string>& paths = it->second;
bool module_opened = false;
for (std::vector<std::string>::const_iterator dso_path = paths.begin();
!module_opened && dso_path != paths.end();
++dso_path) {
void* handle = dlopen(dso_path->c_str(), RTLD_LAZY);
if (handle != NULL) {
module_opened = true;
opened_libraries[cur_module] = handle;
} else {
VLOG(1) << "dlopen(" << dso_path->c_str() << ") failed, "
<< "dlerror() says:\\n" << dlerror();
}
}
if (!module_opened) {
CloseLibraries(&opened_libraries);
return false;
}
}
""")
# Template to generate code to check if each module initializer correctly
# completed, and cleanup on failures. This template takes the following
# named parameters.
# conditional: The conditional expression for successful initialization.
# uninitializers: The statements needed to uninitialize the modules.
UMBRELLA_INITIALIZER_CHECK_AND_CLEANUP = (
""" // Check that each module is initialized correctly.
// Close all previously opened libraries on failure.
if (%(conditional)s) {
%(uninitializers)s;
CloseLibraries(&opened_libraries);
return false;
}
return true;
}
""")
# Template for Initialize, Unininitialize, and IsInitialized functions for each
# module. This template takes the following named parameters:
# initialize: Name of the Initialize function.
# uninitialize: Name of the Uninitialize function.
# is_initialized: Name of the IsInitialized function.
MODULE_FUNCTION_PROTOTYPES = """bool %(is_initialized)s();
void %(initialize)s(void* module);
void %(uninitialize)s();
"""
# Template for umbrella initializer declaration and associated datatypes.
UMBRELLA_INITIALIZER_PROTOTYPE = (
"""typedef std::map<StubModules, std::vector<std::string> > StubPathMap;
// Umbrella initializer for all the modules in this stub file.
bool InitializeStubs(const StubPathMap& path_map);
""")
def ExtractModuleName(infile_path):
  """Infers the module name from the input file path.

  The input filename is expected to look like "ModuleName.sigs".  All
  dot-separated suffixes are stripped from the basename of the path and
  the remainder is returned as the module name.

  Args:
    infile_path: String holding the path to the input file.

  Returns:
    The module name as a string.
  """
  module_name = os.path.basename(infile_path)
  # Repeatedly strip extensions ("a.b.c" -> "a"); stop once nothing changes.
  while True:
    stripped = os.path.splitext(module_name)[0]
    if stripped == module_name:
      return module_name
    module_name = stripped
def ParseSignatures(infile):
  """Parses function signatures in the input file.

  This function parses a file of signatures into a list of dictionaries that
  represent the function signatures in the input file.  Each dictionary has
  the following keys:
    return_type: A string with the return type.
    name: A string with the name of the function.
    params: A list of each function parameter declaration (type + name)

  The format of the input file is one C-style function signature per line, no
  trailing semicolon.  Empty lines are allowed.  An empty line is a line that
  consists purely of whitespace.  Lines that begin with a # are considered
  comment lines and are ignored.

  We assume that "int foo(void)" is the same as "int foo()", which is not
  true in C where "int foo()" is equivalent to "int foo(...)".  Our generated
  code is C++, and we do not handle varargs, so this is a case that can be
  ignored for now.

  Args:
    infile: File object holding a text file of function signatures.

  Returns:
    A list of dictionaries, where each dictionary represents one function
    signature.

  Raises:
    BadSignatureError: A line could not be parsed as a signature.
  """
  parsed = []
  for raw_line in infile:
    sig_line = raw_line.strip()
    # Skip blank lines and '#' comment lines.
    if not sig_line or sig_line.startswith('#'):
      continue
    match = SIGNATURE_REGEX.match(sig_line)
    if match is None:
      raise BadSignatureError('Unparsable line: %s' % sig_line)
    parsed.append({
        'return_type': match.group('return_type').strip(),
        'name': match.group('name').strip(),
        'params': [param.strip() for param in match.group('params').split(',')],
    })
  return parsed
def WriteWindowsDefFile(module_name, signatures, outfile):
  """Writes a windows def file to the given output file object.

  The def file format is simply a LIBRARY line, an EXPORTS line, and then
  one exported function name per line, each preceded by two spaces.

  Args:
    module_name: The name of the module we are writing a stub for.
    signatures: The list of signature hashes, as produced by ParseSignatures,
                to create stubs for.
    outfile: File handle to populate with definitions.
  """
  header = ['LIBRARY %s\n' % module_name, 'EXPORTS\n']
  exports = ['  %s\n' % sig['name'] for sig in signatures]
  outfile.writelines(header + exports)
def QuietRun(args, filter=None, write_to=sys.stdout):
  """Runs |args| via subprocess, suppressing lines that start with |filter|.

  Args:
    args: Command line (list of strings) passed to subprocess.Popen.
    filter: Optional string; stdout lines beginning with it are dropped.
        (The name shadows the builtin but is kept for keyword-argument
        compatibility with existing callers.)
    write_to: File-like object receiving the surviving output lines.

  Returns:
    The subprocess's return code.
  """
  # universal_newlines=True makes communicate() return text instead of
  # bytes, so startswith() and the '\n' concatenation below also work on
  # Python 3 (the original bytes output would raise TypeError there).
  popen = subprocess.Popen(args, stdout=subprocess.PIPE,
                           universal_newlines=True)
  out, _ = popen.communicate()
  for line in out.splitlines():
    if not filter or not line.startswith(filter):
      write_to.write(line + '\n')
  return popen.returncode
def CreateWindowsLib(module_name, signatures, intermediate_dir, outdir_path,
                     machine):
  """Creates a windows library file.

  Calling this function will create a lib file in the outdir_path that exports
  the signatures passed into the object.  A temporary def file will be created
  in the intermediate_dir.

  Args:
    module_name: The name of the module we are writing a stub for.
    signatures: The list of signature hashes, as produced by ParseSignatures,
                to create stubs for.
    intermediate_dir: The directory where the generated .def files should go.
    outdir_path: The directory where generated .lib files should go.
    machine: String holding the machine type, 'X86' or 'X64'.

  Raises:
    SubprocessError: If invoking the windows "lib" tool fails, this is raised
                     with the error code.
  """
  def_file_path = os.path.join(intermediate_dir,
                               module_name + '.def')
  lib_file_path = os.path.join(outdir_path,
                               module_name + '.lib')
  # 'with' replaces the original try/finally and guarantees the def file is
  # flushed and closed before the "lib" tool reads it.
  with open(def_file_path, 'w') as outfile:
    WriteWindowsDefFile(module_name, signatures, outfile)

  # Invoke the "lib" program on Windows to create stub .lib files for the
  # generated definitions.  These .lib files can then be used during
  # delayloading of the dynamic libraries.
  ret = QuietRun(['lib', '/nologo',
                  '/machine:' + machine,
                  '/def:' + def_file_path,
                  '/out:' + lib_file_path],
                 filter=' Creating library')
  if ret != 0:
    raise SubprocessError(
        'Failed creating %s for %s' % (lib_file_path, def_file_path),
        ret)
class PosixStubWriter(object):
"""Creates a file of stub functions for a library that is opened via dlopen.
Windows provides a function in their compiler known as delay loading, which
effectively generates a set of stub functions for a dynamic library that
delays loading of the dynamic library/resolution of the symbols until one of
the needed functions are accessed.
In posix, RTLD_LAZY does something similar with DSOs. This is the default
link mode for DSOs. However, even though the symbol is not resolved until
first usage, the DSO must be present at load time of the main binary.
To simulate the windows delay load procedure, we need to create a set of
stub functions that allow for correct linkage of the main binary, but
dispatch to the dynamically resolved symbol when the module is initialized.
This class takes a list of function signatures, and generates a set of stub
functions plus initialization code for them.
"""
def __init__(self, module_name, signatures):
  """Initializes PosixStubWriter for this set of signatures and module_name.

  Args:
    module_name: The name of the module we are writing a stub for.
    signatures: The list of signature hashes, as produced by ParseSignatures,
                to create stubs for.
  """
  # Stored unmodified; the generation methods read from these attributes.
  self.signatures = signatures
  self.module_name = module_name
@classmethod
def CStyleIdentifier(cls, identifier):
  """Generates a C style identifier.

  The module_name has all invalid identifier characters removed (anything
  that's not [_a-zA-Z0-9]) and is run through string.capwords to try
  and approximate camel case.

  Args:
    identifier: The string with the module name to turn to C-style.

  Returns:
    A string that can be used as part of a C identifier.
  """
  # Invalid chars are removed (not replaced with spaces) before capwords,
  # so capitalization only applies at pre-existing whitespace boundaries.
  return string.capwords(re.sub(INVALID_C_IDENT_CHARS, '', identifier))
@classmethod
def EnumName(cls, module_name):
  """Gets the enum name for the module.

  The module name is munged into a valid C identifier and then prefixed
  with "kModule" to produce a Google-style enum value name.

  Args:
    module_name: The name of the module to generate an enum name for.

  Returns:
    A string with the name of the enum value representing this module.
  """
  return 'kModule' + cls.CStyleIdentifier(module_name)
@classmethod
def IsInitializedName(cls, module_name):
  """Gets the name of function that checks initialization of this module.

  The result has the shape Is<Module>Initialized, where <Module> is the
  module name munged into a valid C identifier.

  Args:
    module_name: The name of the module to generate the function name for.

  Returns:
    A string with the name of the initialization check function.
  """
  return 'Is' + cls.CStyleIdentifier(module_name) + 'Initialized'
@classmethod
def InitializeModuleName(cls, module_name):
    """Returns the name of the function that initializes a module.

    The name has the form Initialize<Module>, where <Module> is the module
    name munged into a valid C identifier.

    Args:
      module_name: The name of the module to generate the function name for.

    Returns:
      A string with the name of the initialization function.
    """
    return 'Initialize{0}'.format(PosixStubWriter.CStyleIdentifier(module_name))
@classmethod
def UninitializeModuleName(cls, module_name):
    """Returns the name of the function that uninitializes a module.

    The name has the form Uninitialize<Module>, where <Module> is the module
    name munged into a valid C identifier.

    Args:
      module_name: The name of the module to generate the function name for.

    Returns:
      A string with the name of the uninitialization function.
    """
    return 'Uninitialize{0}'.format(PosixStubWriter.CStyleIdentifier(module_name))
@classmethod
def StubFunctionPointer(cls, signature):
    """Generates a function pointer declaration for the given signature.

    Args:
      signature: A signature hash, as produced by ParseSignatures,
        representing the function signature.

    Returns:
      A string with the declaration of the function pointer for the signature.
    """
    # Build the parameter list separately for readability; the pointer is
    # named <function>_ptr and starts out NULL until dlsym fills it in.
    param_list = ', '.join(signature['params'])
    return 'static %s (*%s_ptr)(%s) = NULL;' % (signature['return_type'],
                                                signature['name'],
                                                param_list)
@classmethod
def StubFunction(cls, signature):
    """Generates a stub function definition for the given signature.

    The stub dispatches through the corresponding <name>_ptr function
    pointer, forwarding the return value unless the return type is void.
    Variadic signatures (last parameter '...') are emitted via the
    va_list-based variadic templates instead.

    Args:
      signature: A signature hash, as produced by ParseSignatures,
        representing the function signature.

    Returns:
      A string with the stub function definition.
    """
    return_prefix = ''
    if signature['return_type'] != 'void':
        return_prefix = 'return '
    # Generate the argument list: the last token of each parameter
    # declaration (after splitting on '*', '&' and spaces) is the parameter
    # name.  Use a raw string for the pattern; the original non-raw
    # '[\*& ]' relies on an invalid escape sequence that Python 3 warns
    # about (and will eventually reject).
    arguments = [re.split(r'[*& ]', arg)[-1].strip() for arg in
                 signature['params']]
    arg_list = ', '.join(arguments)
    if arg_list == 'void':
        # C spells an empty parameter list '(void)'; call with no arguments.
        arg_list = ''
    if arg_list != '' and len(arguments) > 1 and arguments[-1] == '...':
        # If the last argument is ... then this is a variadic function.
        # C requires a named parameter before '...', so arguments[-2] (the
        # last named argument, needed for va_start) is always present here.
        if return_prefix != '':
            return VARIADIC_STUB_FUNCTION_DEFINITION % {
                'return_type': signature['return_type'],
                'name': signature['name'],
                'params': ', '.join(signature['params']),
                'arg_list': ', '.join(arguments[0:-1]),
                'last_named_arg': arguments[-2]}
        else:
            return VOID_VARIADIC_STUB_FUNCTION_DEFINITION % {
                'name': signature['name'],
                'params': ', '.join(signature['params']),
                'arg_list': ', '.join(arguments[0:-1]),
                'last_named_arg': arguments[-2]}
    else:
        # This is a regular (non-variadic) function.
        return STUB_FUNCTION_DEFINITION % {
            'return_type': signature['return_type'],
            'name': signature['name'],
            'params': ', '.join(signature['params']),
            'return_prefix': return_prefix,
            'arg_list': arg_list}
@classmethod
def WriteImplementationPreamble(cls, header_path, outfile):
    """Write the necessary includes for the implementation file.

    Args:
      header_path: The path to the generated stub header file, included by
        the implementation.
      outfile: The file handle to populate.
    """
    outfile.write(IMPLEMENTATION_PREAMBLE % header_path)
@classmethod
def WriteUmbrellaInitializer(cls, module_names, namespace, outfile):
    """Writes a single function that will open + initialize each module.

    This initializer will take in an stl map that lists the correct
    dlopen target for each module. The map type is
    std::map<enum StubModules, vector<std::string>> which matches one module
    to a list of paths to try in dlopen.

    This function is an all-or-nothing function. If any module fails to load,
    all other modules are dlclosed, and the function returns. Though it is
    not enforced, this function should only be called once.

    Args:
      module_names: A list with the names of the modules in this stub file.
      namespace: The namespace these functions should be in.
      outfile: The file handle to populate with pointer definitions.
    """
    outfile.write(UMBRELLA_INITIALIZER_START % namespace)
    outfile.write(UMBRELLA_INITIALIZER_CLEANUP_FUNCTION)
    # Create the initialization function that calls all module initializers,
    # checks if they succeeded, and backs out module loads on an error.
    outfile.write(UMBRELLA_INITIALIZER_INITIALIZE_FUNCTION_START)
    outfile.write(
        '\n // Initialize each module if we have not already failed.\n')
    for module in module_names:
        outfile.write(' %s(opened_libraries[%s]);\n' %
                      (PosixStubWriter.InitializeModuleName(module),
                       PosixStubWriter.EnumName(module)))
    outfile.write('\n')
    # Output code to check the initialization status, clean up on error.
    initializer_checks = ['!%s()' % PosixStubWriter.IsInitializedName(name)
                          for name in module_names]
    uninitializers = ['%s()' % PosixStubWriter.UninitializeModuleName(name)
                      for name in module_names]
    outfile.write(UMBRELLA_INITIALIZER_CHECK_AND_CLEANUP % {
        'conditional': ' ||\n '.join(initializer_checks),
        'uninitializers': ';\n '.join(uninitializers)})
    outfile.write('\n} // namespace %s\n' % namespace)
@classmethod
def WriteHeaderContents(cls, module_names, namespace, header_guard, outfile):
    """Writes a header file for the stub file generated for module_names.

    The header file exposes the following:
      1) An enum, StubModules, with an entry for each module.
      2) A typedef for a StubPathMap allowing for specification of paths to
         search for each module.
      3) The IsInitialized/Initialize/Uninitialize functions for each module.
      4) An umbrella initialize function for all modules.

    Args:
      module_names: A list with the names of each module in this stub file.
      namespace: The namespace these functions should be in.
      header_guard: The macro to use as our header guard.
      outfile: The output handle to populate.
    """
    outfile.write(STUB_HEADER_PREAMBLE %
                  {'guard_name': header_guard, 'namespace': namespace})
    # Generate the initializer prototypes for each module.
    outfile.write('// Individual module initializer functions.\n')
    for name in module_names:
        outfile.write(MODULE_FUNCTION_PROTOTYPES % {
            'is_initialized': PosixStubWriter.IsInitializedName(name),
            'initialize': PosixStubWriter.InitializeModuleName(name),
            'uninitialize': PosixStubWriter.UninitializeModuleName(name)})
    # Generate the enum for the umbrella initializer; the first entry is
    # pinned to 0 so the values are stable and usable as map keys.
    outfile.write(UMBRELLA_ENUM_START)
    outfile.write(' %s = 0,\n' % PosixStubWriter.EnumName(module_names[0]))
    for name in module_names[1:]:
        outfile.write(' %s,\n' % PosixStubWriter.EnumName(name))
    outfile.write(UMBRELLA_ENUM_END)
    outfile.write(UMBRELLA_INITIALIZER_PROTOTYPE)
    outfile.write(STUB_HEADER_CLOSER % {
        'namespace': namespace, 'guard_name':
        header_guard})
def WriteImplementationContents(self, namespace, outfile):
    """Given a file handle, write out the stub definitions for this module.

    Emits the C-linkage section (function pointers plus stub bodies),
    followed by the namespaced initialize/uninitialize helpers.

    Args:
      namespace: The namespace these functions should be in.
      outfile: The file handle to populate.
    """
    outfile.write(IMPLEMENTATION_CONTENTS_C_START)
    self.WriteFunctionPointers(outfile)
    self.WriteStubFunctions(outfile)
    outfile.write(IMPLEMENTATION_CONTENTS_C_END)
    outfile.write(NAMESPACE_START % namespace)
    self.WriteModuleInitializeFunctions(outfile)
    outfile.write(NAMESPACE_END % namespace)
def WriteFunctionPointers(self, outfile):
    """Write the function pointer declarations needed by the stubs.

    We need function pointers to hold the actual location of the function
    implementation returned by dlsym. This function outputs a pointer
    definition for each signature in the module.

    Pointers will be named with the following pattern "FunctionName_ptr".

    Args:
      outfile: The file handle to populate with pointer definitions.
    """
    outfile.write(FUNCTION_POINTER_SECTION_COMMENT)
    for sig in self.signatures:
        outfile.write('%s\n' % PosixStubWriter.StubFunctionPointer(sig))
    outfile.write('\n')
def WriteStubFunctions(self, outfile):
    """Write the function stubs to handle dispatching to real implementations.

    Functions that have a return type other than void will look as follows:
      ReturnType FunctionName(A a) {
        return FunctionName_ptr(a);
      }

    Functions with a return type of void will look as follows:
      void FunctionName(A a) {
        FunctionName_ptr(a);
      }

    Args:
      outfile: The file handle to populate.
    """
    outfile.write('// Stubs that dispatch to the real implementations.\n')
    for sig in self.signatures:
        outfile.write('%s\n' % PosixStubWriter.StubFunction(sig))
def WriteModuleInitializeFunctions(self, outfile):
    """Write functions to initialize/query initialization of the module.

    This creates 2 functions, IsModuleInitialized and InitializeModule, where
    "Module" is replaced with the module name, first letter capitalized.

    The InitializeModule function takes a handle that is retrieved from dlopen
    and attempts to assign each function pointer above via dlsym.

    The IsModuleInitialized function returns true if none of the required
    function pointers are NULL.

    Args:
      outfile: The file handle to populate.
    """
    ptr_names = ['%s_ptr' % sig['name'] for sig in self.signatures]
    # Construct the conditional expression to check the initialization of
    # all the function pointers above. It should generate a conjunction
    # with each pointer on its own line, indented to match the indentation
    # level of MODULE_INITIALIZATION_CHECK_FUNCTION.
    initialization_conditional = ' &&\n '.join(ptr_names)
    outfile.write(MODULE_INITIALIZATION_CHECK_FUNCTION % (
        PosixStubWriter.IsInitializedName(self.module_name),
        initialization_conditional))
    # Create function that initializes the module: one dlsym-based
    # assignment per signature.
    outfile.write(MODULE_INITIALIZE_START %
                  PosixStubWriter.InitializeModuleName(self.module_name))
    for sig in self.signatures:
        outfile.write(STUB_POINTER_INITIALIZER % {
            'name': sig['name'],
            'return_type': sig['return_type'],
            'parameters': ', '.join(sig['params'])})
    outfile.write(MODULE_INITIALIZE_END)
    # Create function that uninitializes the module (sets all pointers to
    # NULL).
    outfile.write(MODULE_UNINITIALIZE_START %
                  PosixStubWriter.UninitializeModuleName(self.module_name))
    for sig in self.signatures:
        outfile.write(' %s_ptr = NULL;\n' % sig['name'])
    outfile.write(MODULE_UNINITIALIZE_END)
def CreateOptionParser():
    """Creates an OptionParser for the configuration options of script.

    Returns:
      An optparse.OptionParser object configured with all supported flags.
    """
    parser = optparse.OptionParser(usage='usage: %prog [options] input')
    parser.add_option('-o',
                      '--output',
                      dest='out_dir',
                      default=None,
                      help='Output location.')
    parser.add_option('-i',
                      '--intermediate_dir',
                      dest='intermediate_dir',
                      default=None,
                      help=('Location of intermediate files. Ignored for %s type'
                            % FILE_TYPE_WIN_DEF))
    parser.add_option('-t',
                      '--type',
                      dest='type',
                      default=None,
                      help=('Type of file. Valid types are "%s" or "%s" or "%s" '
                            'or "%s"' %
                            (FILE_TYPE_POSIX_STUB, FILE_TYPE_WIN_X86,
                             FILE_TYPE_WIN_X64, FILE_TYPE_WIN_DEF)))
    parser.add_option('-s',
                      '--stubfile_name',
                      dest='stubfile_name',
                      default=None,
                      help=('Name of posix_stubs output file. Only valid with '
                            '%s type.' % FILE_TYPE_POSIX_STUB))
    parser.add_option('-p',
                      '--path_from_source',
                      dest='path_from_source',
                      default=None,
                      help=('The relative path from the project root that the '
                            'generated file should consider itself part of (eg. '
                            'third_party/ffmpeg). This is used to generate the '
                            'header guard and namespace for our initializer '
                            'functions and does NOT affect the physical output '
                            'location of the file like -o does. Ignored for '
                            '%s and %s types.' %
                            (FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64)))
    parser.add_option('-e',
                      '--extra_stub_header',
                      dest='extra_stub_header',
                      default=None,
                      help=('File to insert after the system includes in the '
                            'generated stub implemenation file. Ignored for '
                            '%s and %s types.' %
                            (FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64)))
    parser.add_option('-m',
                      '--module_name',
                      dest='module_name',
                      default=None,
                      help=('Name of output DLL or LIB for DEF creation using '
                            '%s type.' % FILE_TYPE_WIN_DEF))
    return parser
def ParseOptions():
    """Parses the options and terminates program if they are not sane.

    Returns:
      The pair (optparse.OptionValues, [string]), that is the output of
      a successful call to parser.parse_args().
    """
    parser = CreateOptionParser()
    options, args = parser.parse_args()
    if not args:
        parser.error('No inputs specified')
    if options.out_dir is None:
        parser.error('Output location not specified')
    if (options.type not in
        [FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64, FILE_TYPE_POSIX_STUB,
         FILE_TYPE_WIN_DEF]):
        parser.error('Invalid output file type: %s' % options.type)
    # Per-type flag requirements: posix stubs need a file name and a source
    # relative path; DEF creation needs the output module name.
    if options.type == FILE_TYPE_POSIX_STUB:
        if options.stubfile_name is None:
            parser.error('Output file name needed for %s' % FILE_TYPE_POSIX_STUB)
        if options.path_from_source is None:
            parser.error('Path from source needed for %s' % FILE_TYPE_POSIX_STUB)
    if options.type == FILE_TYPE_WIN_DEF:
        if options.module_name is None:
            parser.error('Module name needed for %s' % FILE_TYPE_WIN_DEF)
    return options, args
def EnsureDirExists(dir):
    """Creates |dir| (and missing parents), tolerating concurrent creation.

    Does not use the more obvious 'if not exists: create' pattern to avoid a
    race with other invocations of the same code, which would error out on
    makedirs if another invocation had succeeded in creating the directory
    since the existence check.

    Args:
      dir: Path of the directory to create.

    Raises:
      OSError: if creation failed for any reason other than the directory
        already existing.
    """
    try:
        os.makedirs(dir)
    except OSError:
        # Only swallow filesystem errors; the original bare 'except:' also
        # hid programming errors such as passing a non-string path.
        if not os.path.isdir(dir):
            raise
def CreateOutputDirectories(options):
    """Creates the intermediate and final output directories.

    Given the parsed options, create the intermediate and final output
    directories if they do not exist. Returns the paths to both directories
    as a pair.

    Args:
      options: An OptionParser.OptionValues object with the parsed options.

    Returns:
      The pair (out_dir, intermediate_dir), both of which are strings.
    """
    out_dir = os.path.normpath(options.out_dir)
    # Check for a missing --intermediate_dir BEFORE normalizing; the original
    # code called os.path.normpath(None) first, which raised a TypeError
    # whenever the flag was omitted instead of falling back to out_dir.
    if options.intermediate_dir is None:
        intermediate_dir = out_dir
    else:
        intermediate_dir = os.path.normpath(options.intermediate_dir)
    EnsureDirExists(out_dir)
    EnsureDirExists(intermediate_dir)
    return out_dir, intermediate_dir
def CreateWindowsLibForSigFiles(sig_files, out_dir, intermediate_dir, machine):
    """For each signature file, create a windows lib.

    Args:
      sig_files: Array of strings with the paths to each signature file.
      out_dir: String holding path to directory where the generated libs go.
      intermediate_dir: String holding path to directory for generated
        intermediate artifacts.
      machine: String holding the machine type, 'X86' or 'X64'.
    """
    for sig_path in sig_files:
        # 'with' guarantees the signature file is closed even if parsing or
        # lib generation raises, matching the original try/finally.
        with open(sig_path, 'r') as sig_file:
            parsed_sigs = ParseSignatures(sig_file)
            module = ExtractModuleName(os.path.basename(sig_path))
            CreateWindowsLib(module, parsed_sigs, intermediate_dir, out_dir,
                             machine)
def CreateWindowsDefForSigFiles(sig_files, out_dir, module_name):
    """For all signature files, create a single windows def file.

    Args:
      sig_files: Array of strings with the paths to each signature file.
      out_dir: String holding path to directory where the generated def goes.
      module_name: Name of the output DLL or LIB which will link in the def
        file.
    """
    # Collect every signature from every input file into one list.
    all_sigs = []
    for sig_path in sig_files:
        with open(sig_path, 'r') as sig_file:
            all_sigs.extend(ParseSignatures(sig_file))
    # The def file is named after the module, with its extension replaced.
    base_name = os.path.splitext(os.path.basename(module_name))[0]
    def_file_path = os.path.join(out_dir, base_name + '.def')
    with open(def_file_path, 'w') as def_file:
        WriteWindowsDefFile(module_name, all_sigs, def_file)
def CreatePosixStubsForSigFiles(sig_files, stub_name, out_dir,
                                intermediate_dir, path_from_source,
                                extra_stub_header):
    """Create a posix stub library with a module for each signature file.

    Args:
      sig_files: Array of strings with the paths to each signature file.
      stub_name: String with the basename of the generated stub file.
      out_dir: String holding path to directory for the .h files.
      intermediate_dir: String holding path to directory for the .cc files.
      path_from_source: String with relative path of generated files from the
        project root.
      extra_stub_header: String with path to file of extra lines to insert
        into the generated header for the stub library.
    """
    header_base_name = stub_name + '.h'
    header_path = os.path.join(out_dir, header_base_name)
    impl_path = os.path.join(intermediate_dir, stub_name + '.cc')
    module_names = [ExtractModuleName(path) for path in sig_files]
    # Namespace and header guard are derived from the source-relative path,
    # e.g. third_party/ffmpeg -> namespace third_party_ffmpeg.
    namespace = path_from_source.replace('/', '_').lower()
    header_guard = '%s_' % namespace.upper()
    header_include_path = os.path.join(path_from_source, header_base_name)
    # First create the implementation file.
    impl_file = open(impl_path, 'w')
    try:
        # Open the file, and create the preamble which consists of a file
        # header plus any necessary includes.
        PosixStubWriter.WriteImplementationPreamble(header_include_path,
                                                    impl_file)
        if extra_stub_header is not None:
            # Splice the user-supplied extra header verbatim after the
            # standard preamble.
            extra_header_file = open(extra_stub_header, 'r')
            try:
                impl_file.write('\n')
                for line in extra_header_file:
                    impl_file.write(line)
                impl_file.write('\n')
            finally:
                extra_header_file.close()
        # For each signature file, generate the stub population functions
        # for that file. Each file represents one module.
        for input_path in sig_files:
            name = ExtractModuleName(input_path)
            infile = open(input_path, 'r')
            try:
                signatures = ParseSignatures(infile)
            finally:
                infile.close()
            writer = PosixStubWriter(name, signatures)
            writer.WriteImplementationContents(namespace, impl_file)
        # Lastly, output the umbrella function for the file.
        PosixStubWriter.WriteUmbrellaInitializer(module_names, namespace,
                                                 impl_file)
    finally:
        impl_file.close()
    # Then create the associated header file.
    header_file = open(header_path, 'w')
    try:
        PosixStubWriter.WriteHeaderContents(module_names, namespace,
                                            header_guard, header_file)
    finally:
        header_file.close()
def main():
    """Parses command-line options and dispatches to the requested generator."""
    options, args = ParseOptions()
    out_dir, intermediate_dir = CreateOutputDirectories(options)
    # Each output type maps to one generator; options were already validated
    # by ParseOptions, so an unknown type simply does nothing here.
    if options.type == FILE_TYPE_WIN_X86:
        CreateWindowsLibForSigFiles(args, out_dir, intermediate_dir, 'X86')
    elif options.type == FILE_TYPE_WIN_X64:
        CreateWindowsLibForSigFiles(args, out_dir, intermediate_dir, 'X64')
    elif options.type == FILE_TYPE_POSIX_STUB:
        CreatePosixStubsForSigFiles(args, options.stubfile_name, out_dir,
                                    intermediate_dir, options.path_from_source,
                                    options.extra_stub_header)
    elif options.type == FILE_TYPE_WIN_DEF:
        CreateWindowsDefForSigFiles(args, out_dir, options.module_name)


if __name__ == '__main__':
    main()
| |
from collections import OrderedDict
from django import forms
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.utils.text import capfirst
from django.contrib.admin.utils import get_deleted_objects
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.util import model_format_dict, model_ngettext
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.base import filter_hook, ModelAdminView
# POST key under which the change-list form submits the selected row pks.
ACTION_CHECKBOX_NAME = '_selected_action'
# Widget used to render the per-row selection checkbox (never pre-checked).
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)


def action_checkbox(obj):
    """Render the selection checkbox for one result row, valued by its pk."""
    return checkbox.render(ACTION_CHECKBOX_NAME, force_unicode(obj.pk))

# The column header is a "select all" toggle; the flags below tell the list
# view to render the HTML unescaped, to skip this pseudo-column on export,
# and that it is not a real data column.
action_checkbox.short_description = mark_safe(
    '<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
action_checkbox.allow_export = False
action_checkbox.is_column = False
class BaseActionView(ModelAdminView):
    """Base class for bulk actions invoked from a model list view."""

    # Identifier submitted as the POST 'action' value.
    action_name = None
    # Human-readable label; may contain %(verbose_name_plural)s placeholders.
    description = None
    # CSS icon classes shown next to the action in the chooser.
    icon = 'fa fa-tasks'
    # Model permission the user must hold for this action to be offered.
    model_perm = 'change'

    @classmethod
    def has_perm(cls, list_view):
        # Offered only when the current user has model_perm on the model.
        return list_view.get_model_perms()[cls.model_perm]

    def init_action(self, list_view):
        # Bind this action instance to the list view that triggered it.
        self.list_view = list_view
        self.admin_site = list_view.admin_site

    @filter_hook
    def do_action(self, queryset):
        # Subclasses override this to perform the work on the selection.
        pass
class DeleteSelectedAction(BaseActionView):
    """Bulk action that deletes the selected objects after confirmation."""

    action_name = "delete_selected"
    description = _(u'Delete selected %(verbose_name_plural)s')

    # Optional template overrides for the confirmation page.
    delete_confirmation_template = None
    delete_selected_confirmation_template = None

    # When True delete via a single queryset.delete(); when False delete
    # object-by-object so per-model delete() overrides run.
    delete_models_batch = True

    model_perm = 'delete'
    icon = 'fa fa-times'

    @filter_hook
    def delete_models(self, queryset):
        """Delete everything in queryset and flash a success message."""
        n = queryset.count()
        if n:
            if self.delete_models_batch:
                queryset.delete()
            else:
                for obj in queryset:
                    obj.delete()
            self.message_user(_("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(self.opts, n)
            }, 'success')

    @filter_hook
    def do_action(self, queryset):
        """Delete the selection, or render a confirmation page first.

        Returns None after a confirmed deletion (redisplaying the change
        list) or a TemplateResponse asking the user to confirm.
        """
        # Check that the user has delete permission for the actual model
        if not self.has_delete_permission():
            raise PermissionDenied
        using = router.db_for_write(self.model)
        # Populate deletable_objects, a data structure of all related objects
        # that will also be deleted.
        deletable_objects, model_count, perms_needed, protected = get_deleted_objects(
            queryset, self.opts, self.user, self.admin_site, using)
        # The user has already confirmed the deletion.
        # Do the deletion and return a None to display the change list view
        # again.
        if self.request.POST.get('post'):
            if perms_needed:
                raise PermissionDenied
            self.delete_models(queryset)
            # Return None to display the change list page again.
            return None
        if len(queryset) == 1:
            objects_name = force_unicode(self.opts.verbose_name)
        else:
            objects_name = force_unicode(self.opts.verbose_name_plural)
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": objects_name}
        else:
            title = _("Are you sure?")
        context = self.get_context()
        context.update({
            "title": title,
            "objects_name": objects_name,
            "deletable_objects": [deletable_objects],
            'queryset': queryset,
            "perms_lacking": perms_needed,
            "protected": protected,
            "opts": self.opts,
            "app_label": self.app_label,
            'action_checkbox_name': ACTION_CHECKBOX_NAME,
        })
        # Display the confirmation page
        return TemplateResponse(self.request, self.delete_selected_confirmation_template or
                                self.get_template_list('views/model_delete_selected_confirm.html'), context, current_app=self.admin_site.name)
class ActionPlugin(BaseAdminPlugin):
    """List-view plugin that adds bulk "actions" support.

    Adds a selection-checkbox column and an action chooser to the change
    list, and dispatches the chosen action over the selected queryset on
    POST.
    """

    # Extra per-view actions; may be overridden on the admin view class.
    actions = []
    # Whether to render the "x of y selected" counter.
    actions_selection_counter = True
    # Actions offered on every model list view.
    global_actions = [DeleteSelectedAction]

    def init_request(self, *args, **kwargs):
        # The plugin activates only when at least one action is available.
        self.actions = self.get_actions()
        return bool(self.actions)

    def get_list_display(self, list_display):
        # Prepend the checkbox pseudo-column when actions are enabled.
        if self.actions:
            list_display.insert(0, 'action_checkbox')
            self.admin_view.action_checkbox = action_checkbox
        return list_display

    def get_list_display_links(self, list_display_links):
        if self.actions:
            # Never use the checkbox column as the row link; fall back to
            # the first real data column instead.
            if len(list_display_links) == 1 and list_display_links[0] == 'action_checkbox':
                return list(self.admin_view.list_display[1:2])
        return list_display_links

    def get_context(self, context):
        """Inject selection notes and the action choices into the template."""
        if self.actions and self.admin_view.result_count:
            av = self.admin_view
            selection_note_all = ungettext('%(total_count)s selected',
                                           'All %(total_count)s selected', av.result_count)
            new_context = {
                'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(av.result_list)},
                'selection_note_all': selection_note_all % {'total_count': av.result_count},
                'action_choices': self.get_action_choices(),
                'actions_selection_counter': self.actions_selection_counter,
            }
            context.update(new_context)
        return context

    def post_response(self, response, *args, **kwargs):
        """Intercept POSTs that carry an 'action' and run the action."""
        request = self.admin_view.request
        av = self.admin_view

        # Actions with no confirmation
        if self.actions and 'action' in request.POST:
            action = request.POST['action']

            if action not in self.actions:
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                av.message_user(msg)
            else:
                ac, name, description, icon = self.actions[action]
                select_across = request.POST.get('select_across', False) == '1'
                selected = request.POST.getlist(ACTION_CHECKBOX_NAME)

                if not selected and not select_across:
                    # Reminder that something needs to be selected or
                    # nothing will happen
                    msg = _("Items must be selected in order to perform "
                            "actions on them. No items have been changed.")
                    av.message_user(msg)
                else:
                    queryset = av.list_queryset._clone()
                    if not select_across:
                        # Perform the action only on the selected objects
                        queryset = av.list_queryset.filter(pk__in=selected)
                    response = self.response_action(ac, queryset)
                    # Actions may return an HttpResponse, which will be used
                    # as the response from the POST. If not, we'll be a good
                    # little HTTP citizen and redirect back to the changelist
                    # page.
                    if isinstance(response, HttpResponse):
                        return response
                    else:
                        return HttpResponseRedirect(request.get_full_path())
        return response

    def response_action(self, ac, queryset):
        """Run a class-based or callable action over queryset."""
        if isinstance(ac, type) and issubclass(ac, BaseActionView):
            action_view = self.get_model_view(ac, self.admin_view.model)
            action_view.init_action(self.admin_view)
            return action_view.do_action(queryset)
        else:
            return ac(self.admin_view, self.request, queryset)

    def get_actions(self):
        """Collect global + per-view actions into an OrderedDict by name."""
        if self.actions is None:
            return OrderedDict()

        actions = [self.get_action(action) for action in self.global_actions]

        # Walk the MRO bottom-up so subclass actions override base ones.
        for klass in self.admin_view.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            if not class_actions:
                continue
            actions.extend(
                [self.get_action(action) for action in class_actions])

        # get_action might have returned None, so filter any of those out.
        actions = filter(None, actions)

        # Convert the actions into a OrderedDict keyed by name.
        actions = OrderedDict([
            (name, (ac, name, desc, icon))
            for ac, name, desc, icon in actions
        ])

        return actions

    def get_action_choices(self):
        """
        Return a list of choices for use in a form object. Each choice is a
        tuple (name, description, icon).
        """
        choices = []
        # Use values() instead of the Python-2-only itervalues(); values()
        # behaves identically here on both Python 2 and Python 3.
        for ac, name, description, icon in self.actions.values():
            choice = (name, description % model_format_dict(self.opts), icon)
            choices.append(choice)
        return choices

    def get_action(self, action):
        """Normalize an action spec into a (func, name, description, icon) tuple."""
        if isinstance(action, type) and issubclass(action, BaseActionView):
            if not action.has_perm(self.admin_view):
                return None
            return action, getattr(action, 'action_name'), getattr(action, 'description'), getattr(action, 'icon')

        elif callable(action):
            func = action
            action = action.__name__

        elif hasattr(self.admin_view.__class__, action):
            func = getattr(self.admin_view.__class__, action)

        else:
            return None

        if hasattr(func, 'short_description'):
            description = func.short_description
        else:
            description = capfirst(action.replace('_', ' '))
        return func, action, description, getattr(func, 'icon', 'tasks')

    # View Methods
    def result_header(self, item, field_name, row):
        if item.attr and field_name == 'action_checkbox':
            item.classes.append("action-checkbox-column")
        return item

    def result_item(self, item, obj, field_name, row):
        if item.field is None and field_name == u'action_checkbox':
            item.classes.append("action-checkbox")
        return item

    # Media
    def get_media(self, media):
        if self.actions and self.admin_view.result_count:
            media = media + self.vendor('xadmin.plugin.actions.js', 'xadmin.plugins.css')
        return media

    # Block Views
    def block_results_bottom(self, context, nodes):
        if self.actions and self.admin_view.result_count:
            nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_bottom.actions.html',
                                                 context=get_context_dict(context)))
site.register_plugin(ActionPlugin, ListAdminView)
| |
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Benchmarks for testing various possible implementations
# of the is_Dict(), is_List() and is_String() functions in
# src/engine/SCons/Util.py.
import types
try:
from collections import UserDict, UserList, UserString
except ImportError:
# No 'collections' module or no UserFoo in collections
exec('from UserDict import UserDict')
exec('from UserList import UserList')
exec('from UserString import UserString')
# Python 2 compatibility aliases for the benchmark candidates below.
# NOTE(review): types.InstanceType only exists on Python 2 (old-style class
# instances); on Python 3 the next line raises AttributeError, so this
# benchmark is Python-2-only as written.
InstanceType = types.InstanceType
DictType = dict
ListType = list
StringType = str

# Detect whether a distinct 'unicode' type exists (Python 2).  On Python 3
# evaluating the bare name raises NameError and UnicodeType stays None.
try: unicode
except NameError:
    UnicodeType = None
else:
    UnicodeType = unicode
# The original implementations, pretty straightforward checks for the
# type of the object and whether it's an instance of the corresponding
# User* type.
def original_is_Dict(e):
    """Original check: is e a dict or a UserDict instance."""
    return isinstance(e, dict) or isinstance(e, UserDict)
def original_is_List(e):
    """Original check: is e a list or a UserList instance."""
    return isinstance(e, list) or isinstance(e, UserList)
# The string check depends on whether a separate 'unicode' type exists
# (Python 2); the fallback branch checks str/UserString only.
if UnicodeType is not None:
    def original_is_String(e):
        """Original check: str, unicode or UserString."""
        return isinstance(e, (str,unicode,UserString))
else:
    def original_is_String(e):
        """Original check: str or UserString."""
        return isinstance(e, (str,UserString))
# New candidates that explicitly check for whether the object is an
# InstanceType before calling isinstance() on the corresponding User*
# type.
# Candidate family 2: check isinstance against the builtin type first, and
# only fall through to the User* isinstance test when the object is an
# old-style instance (types.InstanceType, Python 2 only).
def checkInstanceType_is_Dict(e):
    return isinstance(e, dict) or \
        (isinstance(e, types.InstanceType) and isinstance(e, UserDict))

def checkInstanceType_is_List(e):
    return isinstance(e, list) \
        or (isinstance(e, types.InstanceType) and isinstance(e, UserList))

if UnicodeType is not None:
    def checkInstanceType_is_String(e):
        return isinstance(e, str) \
            or isinstance(e, unicode) \
            or (isinstance(e, types.InstanceType) and isinstance(e, UserString))
else:
    def checkInstanceType_is_String(e):
        return isinstance(e, str) \
            or (isinstance(e, types.InstanceType) and isinstance(e, UserString))
# Improved candidates that cache the type(e) result in a variable
# before doing any checks.
# Candidate family 3: cache type(e) in a local before any checks, and use
# identity tests against the exact builtin types (note: identity tests do
# not match subclasses, unlike isinstance).
def cache_type_e_is_Dict(e):
    t = type(e)
    return t is dict or \
        (t is types.InstanceType and isinstance(e, UserDict))

def cache_type_e_is_List(e):
    t = type(e)
    return t is list \
        or (t is types.InstanceType and isinstance(e, UserList))

if UnicodeType is not None:
    def cache_type_e_is_String(e):
        t = type(e)
        return t is str \
            or t is unicode \
            or (t is types.InstanceType and isinstance(e, UserString))
else:
    def cache_type_e_is_String(e):
        t = type(e)
        return t is str \
            or (t is types.InstanceType and isinstance(e, UserString))
# Improved candidates that cache the type(e) result in a variable
# before doing any checks, but using the global names for
# DictType, ListType and StringType.
# Candidate family 4: same as family 3, but compare against the module-level
# aliases (DictType, ListType, ...) instead of attribute lookups on 'types',
# trading an attribute access for a global load per call.
def global_cache_type_e_is_Dict(e):
    t = type(e)
    return t is DictType or \
        (t is InstanceType and isinstance(e, UserDict))

def global_cache_type_e_is_List(e):
    t = type(e)
    return t is ListType \
        or (t is InstanceType and isinstance(e, UserList))

if UnicodeType is not None:
    def global_cache_type_e_is_String(e):
        t = type(e)
        return t is StringType \
            or t is UnicodeType \
            or (t is InstanceType and isinstance(e, UserString))
else:
    def global_cache_type_e_is_String(e):
        t = type(e)
        return t is StringType \
            or (t is InstanceType and isinstance(e, UserString))
# Alternative that uses a myType() function to map the User* objects
# to their corresponding underlying types.
# Candidate family 5: normalize an object's type through a lookup table that
# maps the User* wrapper classes to their underlying builtin types, then
# compare the normalized type directly.
instanceTypeMap = {
    UserDict : dict,
    UserList : list,
    UserString : str,
}

if UnicodeType is not None:
    def myType(obj):
        """Map User* instances (and unicode) to their canonical builtin type."""
        t = type(obj)
        if t is types.InstanceType:
            t = instanceTypeMap.get(obj.__class__, t)
        elif t is unicode:
            t = str
        return t
else:
    def myType(obj):
        """Map User* instances to their canonical builtin type."""
        t = type(obj)
        if t is types.InstanceType:
            t = instanceTypeMap.get(obj.__class__, t)
        return t

def myType_is_Dict(e):
    return myType(e) is dict

def myType_is_List(e):
    return myType(e) is list

def myType_is_String(e):
    return myType(e) is str
# Benchmark drivers: each FuncNN repeatedly calls one candidate over the
# module-level IterationList (defined elsewhere in this harness); the
# docstring names the candidate so the timing report can label each run.
def Func01(obj):
    """original_is_String"""
    for i in IterationList:
        original_is_String(obj)

def Func02(obj):
    """original_is_List"""
    for i in IterationList:
        original_is_List(obj)

def Func03(obj):
    """original_is_Dict"""
    for i in IterationList:
        original_is_Dict(obj)

def Func04(obj):
    """checkInstanceType_is_String"""
    for i in IterationList:
        checkInstanceType_is_String(obj)

def Func05(obj):
    """checkInstanceType_is_List"""
    for i in IterationList:
        checkInstanceType_is_List(obj)

def Func06(obj):
    """checkInstanceType_is_Dict"""
    for i in IterationList:
        checkInstanceType_is_Dict(obj)

def Func07(obj):
    """cache_type_e_is_String"""
    for i in IterationList:
        cache_type_e_is_String(obj)

def Func08(obj):
    """cache_type_e_is_List"""
    for i in IterationList:
        cache_type_e_is_List(obj)

def Func09(obj):
    """cache_type_e_is_Dict"""
    for i in IterationList:
        cache_type_e_is_Dict(obj)

def Func10(obj):
    """global_cache_type_e_is_String"""
    for i in IterationList:
        global_cache_type_e_is_String(obj)

def Func11(obj):
    """global_cache_type_e_is_List"""
    for i in IterationList:
        global_cache_type_e_is_List(obj)

def Func12(obj):
    """global_cache_type_e_is_Dict"""
    for i in IterationList:
        global_cache_type_e_is_Dict(obj)

# The myType-based drivers are deliberately disabled (kept for reference).
#def Func13(obj):
#    """myType_is_String"""
#    for i in IterationList:
#        myType_is_String(obj)
#
#def Func14(obj):
#    """myType_is_List"""
#    for i in IterationList:
#        myType_is_List(obj)
#
#def Func15(obj):
#    """myType_is_Dict"""
#    for i in IterationList:
#        myType_is_Dict(obj)
# Data to pass to the functions on each run. Each entry is a
# three-element tuple:
#
# (
# "Label to print describing this data run",
# ('positional', 'arguments'),
# {'keyword' : 'arguments'},
# ),
class A(object):
    # Plain object with no special behaviour; the "Object" entry in Data
    # below uses an instance of this class to probe the negative case.
    pass
# One entry per probe value: built-in str/list/dict, their User* wrappers,
# and a plain object.  Each FuncNN is run against each entry's positional
# arguments.
Data = [
    (
        "String",
        ('',),
        {},
    ),
    (
        "List",
        ([],),
        {},
    ),
    (
        "Dict",
        ({},),
        {},
    ),
    (
        "UserString",
        (UserString(''),),
        {},
    ),
    (
        "UserList",
        (UserList([]),),
        {},
    ),
    (
        "UserDict",
        (UserDict({}),),
        {},
    ),
    (
        "Object",
        (A(),),
        {},
    ),
]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
################ NLP Runner ##################
# This will be the program that runs all NLP #
# routines. It will call different NLP tools #
# depending on language and function #
##############################################
import csv
import json
import tempfile
import time
from collections import defaultdict
import matplotlib.pyplot as plt
import networkx
from newspaper import Article
from nltk import sentiment
from gat.core.nlp import language_detector, file_io, spacy_nlp
from gat.core.nlp import radar
from gat.core.nlp import sentiment
############ Low Level Functions #############
def getTexts(directory):
    """Load every .txt file found (recursively) under *directory*.

    Returns a list with the full contents of each file in memory.
    """
    pathnames = file_io.getFilesRecurse(directory, '.txt')
    return [file_io.openFile(pathname) for pathname in pathnames]
def getArticles(urls):
    """Download and parse news articles.

    Accepts a single URL string or a list of URLs.  Returns newspaper
    Article objects -- use article.text for parsing with spaCy; the other
    attributes carry useful metadata.
    """
    if isinstance(urls, str):
        urls = [urls]
    fetched = []
    for link in urls:
        piece = Article(link)
        piece.download()
        piece.parse()
        fetched.append(piece)
    return fetched
def languageDict(texts):
    """Bucket text documents by detected language.

    Returns a defaultdict(list) keyed by language name.
    """
    buckets = defaultdict(list)
    for document in texts:
        detected = language_detector.stopword_detect_language(document)
        buckets[detected].append(document)
    return buckets
def loadAllModels(languages):
    """Load one NLP model per requested language.

    Returns a defaultdict mapping language name -> loaded model.  Only
    English is wired up so far; other languages will need similar branching
    (spaCy will likely differ slightly from syntaxnet).
    """
    languageModels = defaultdict()
    for language in languages:
        if language != 'english':
            continue
        languageModels[language] = spacy_nlp.loadModel('en')
    return languageModels
def parseTexts(texts, models):
    """Run the language-appropriate pipeline over each text bucket.

    Args:
        texts: dict of language -> list of raw texts.
        models: dict of language -> loaded model (same keys as *texts*).

    Returns:
        defaultdict of language -> parsed documents.  Only English is
        handled so far; other languages are silently skipped.
    """
    # Renamed the accumulator: the old code bound it to the name `dict`,
    # shadowing the builtin.
    parsed = defaultdict(list)
    for language in texts.keys():
        if language == 'english':
            parsed[language] = spacy_nlp.pipeline(models[language], texts[language])
    return parsed
def readLexicon():
    """Load the bundled emotion lexicon.

    Only works for this one lexicon (static/resources/nlp/lexicon.txt):
    every word occupies ten consecutive tab-separated affect rows in a
    fixed alphabetical order, which are re-ordered here.

    Returns:
        dict mapping word -> [positive, negative, anger, anticipation,
        disgust, fear, joy, sadness, surprise, trust] as ints.
    """
    # Context manager so the handle is always closed (the old code opened
    # the file and never closed it).
    with open('static/resources/nlp/lexicon.txt') as f:
        raw = f.read().split('\n')
    lexicon = {}
    for n in range(0, len(raw), 10):
        word = raw[n].split('\t')[0]
        lexicon[word] = []
        lexicon[word].append(int(raw[n + 6].split('\t')[2]))  # positive
        lexicon[word].append(int(raw[n + 5].split('\t')[2]))  # negative
        lexicon[word].append(int(raw[n + 0].split('\t')[2]))  # anger
        lexicon[word].append(int(raw[n + 1].split('\t')[2]))  # anticipation
        lexicon[word].append(int(raw[n + 2].split('\t')[2]))  # disgust
        lexicon[word].append(int(raw[n + 3].split('\t')[2]))  # fear
        lexicon[word].append(int(raw[n + 4].split('\t')[2]))  # joy
        lexicon[word].append(int(raw[n + 7].split('\t')[2]))  # sadness
        lexicon[word].append(int(raw[n + 8].split('\t')[2]))  # surprise
        lexicon[word].append(int(raw[n + 9].split('\t')[2]))  # trust
    return lexicon
################# JSON Serialization ##################
def marshall(object):
    """Serialize *object* to a JSON string."""
    encoded = json.dumps(object)
    return encoded
def unmarshall(object):
    """Deserialize JSON text back into Python data."""
    decoded = json.loads(object)
    return decoded
def csvInfo(entityUsages):
    """Compute the widest column count needed per label.

    Args:
        entityUsages: nested dict actor -> label -> entity -> usages.

    Returns:
        dict mapping label -> maximum number of entities any actor has
        under that label (i.e. how many CSV columns the label needs).
    """
    labels = {}
    for key in entityUsages.keys():
        for label in entityUsages[key]:
            count = len(entityUsages[key][label])
            if label not in labels or count > labels[label]:
                labels[label] = count
    return labels
def csvWrite(entityUsages):
    """Dump entity usages to sheet.csv, one row per actor.

    The header repeats each label once per column it needs; rows are padded
    with empty strings so every row has the full width.
    """
    info = csvInfo(entityUsages)
    header = ['Actor']
    headerRef = []
    for label, columns in info.items():
        headerRef.append((label, columns))
        header.extend([label] * columns)
    # newline='' is required by the csv module to avoid blank rows on
    # Windows (the old code omitted it).
    with open('sheet.csv', 'w', newline='') as csvFile:
        writer = csv.writer(csvFile, delimiter=',')
        writer.writerow(header)
        for key in entityUsages.keys():
            row = [key]
            for label, columns in headerRef:
                entities = list(entityUsages[key].get(label, {}))
                row.extend(entities)
                # Pad to the label's full column count.
                row.extend([''] * (columns - len(entities)))
            writer.writerow(row)
################# Feature Functions ################
def preProcess(texts):
    """Tokenize, tag and parse raw texts.

    Args:
        texts: list of raw text strings.

    Returns:
        Language-keyed dict of fully tokenized, tagged and parsed docs.
    """
    textDict = languageDict(texts)
    # The old code called the unbound form dict.keys(textDict); the normal
    # method call is equivalent and idiomatic.
    languages = textDict.keys()
    modelDict = loadAllModels(languages)
    parsedDocs = parseTexts(textDict, modelDict)
    return parsedDocs
def crossEntitySentiment(docs, central_type=None):
    """Average sentiment between pairs of entities.

    Args:
        docs: spaCy-parsed documents.
        central_type: optional entity type to center the pairing on.

    Returns:
        Nested dict entity -> label -> entity -> (neg, neu, pos, compound,
        sentence_count), averaged over all shared sentences.
    """
    entitySentences = spacy_nlp.crossEntityUsages(docs, central_type)
    for key in entitySentences.keys():
        for label in entitySentences[key].keys():
            for keytwo in entitySentences[key][label].keys():
                spans = entitySentences[key][label][keytwo]
                sentences = [sent.text for sent in spans]
                sentiments = sentiment.VaderSentiment(sentences)
                uses = len(sentiments)
                # Average each Vader component over the shared sentences.
                neg = sum(s['neg'] for s in sentiments) / uses
                neu = sum(s['neu'] for s in sentiments) / uses
                pos = sum(s['pos'] for s in sentiments) / uses
                comp = sum(s['compound'] for s in sentiments) / uses
                entitySentences[key][label][keytwo] = (neg, neu, pos, comp, uses)
    return entitySentences
def emotionalMultiValences(docs, lexicon):
    """Score emotions between pairs of entities using the lexicon.

    Replaces each entity-pair sentence list with its emotionScore.

    NOTE: this is an absolute scale (emotion per word).  Display as
    percentages?  Scaling it around 1 is a problem.
    """
    entitySentences = spacy_nlp.crossEntityUsages(docs)
    for key in entitySentences.keys():
        for label in entitySentences[key].keys():
            for keytwo in entitySentences[key][label].keys():
                sentences = list(entitySentences[key][label][keytwo])
                entitySentences[key][label][keytwo] = sentiment.emotionScore(sentences, lexicon)
    return entitySentences
################## Display Functions ####################
def docSummary(docs):
    """Summarize a corpus of parsed docs.

    Args:
        docs: iterable of parsed documents supporting len().

    Returns:
        (num, path, totLength): document count, file name of a histogram
        PNG of document lengths saved under out/nlp, and total word count.
    """
    num = len(docs)
    lengths = []
    totLength = 0
    for doc in docs:
        length = len(doc)
        lengths.append(length)
        totLength += length
    # The old code sorted the lengths, then assigned lengths[0] to a local
    # "max" and lengths[-1] to "min" -- both swapped (ascending sort puts
    # the minimum first) and never used afterwards; removed.
    # Histogram Creation
    plt.figure(figsize=(12, 9))
    # Remove plot frame lines
    ax = plt.subplot(111)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    # Axis ticks only bottom and left
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xlabel("Document Length (Words)", fontsize=16)
    plt.ylabel("Count (Documents)", fontsize=16)
    # Plot histogram. Bin number might need tweaking.
    plt.hist(lengths, edgecolor='black', color='#3ad43f', bins=10)
    # Save as PNG. bbox_inches="tight" removes all extra whitespace on edges of plot
    f = tempfile.NamedTemporaryFile(dir='out/nlp', suffix='.png', delete=False)
    plt.savefig(f, bbox_inches='tight')
    f.close()
    # Return just the base name; the template only needs the file's name.
    path = f.name.split('/')[-1]
    path = path.split('\\')[-1]
    return num, path, totLength
def entitySentiment(docs):
    """Approximate sentiment toward each named entity.

    Methodology: sentiment toward an entity is the average Vader sentiment
    over all of the sentences it appears in.  Entities seen only once are
    filtered out to keep the table a reasonable size.

    Returns:
        list of [entity, neg, neu, pos, compound, uses] rows (floats
        formatted to two decimals) for a sortable HTML table.
    """
    entityInfo = []
    entitySentences = spacy_nlp.improvesUsages(docs)
    for label in entitySentences.keys():
        for entity in entitySentences[label]:
            # Entries are spaCy span objects; Vader wants plain text.
            sentences = [span.text for span in entitySentences[label][entity]]
            sentiments = sentiment.VaderSentiment(sentences)
            uses = len(sentiments)
            # Combined sentiment = mean of the per-sentence components.
            neg = sum(s['neg'] for s in sentiments) / uses
            neu = sum(s['neu'] for s in sentiments) / uses
            pos = sum(s['pos'] for s in sentiments) / uses
            comp = sum(s['compound'] for s in sentiments) / uses
            if uses > 1:
                entityInfo.append([entity, "%.2f" % neg, "%.2f" % neu,
                                   "%.2f" % pos, "%.2f" % comp, uses])
    return entityInfo
def sentimentGraph(docs):
    # Input: spacy parsed docs
    # Output: graph with cross-entity sentiment on edges
    # NOTE(review): crossEntitySentiment() returns a THREE-level mapping
    # (entity -> label -> entity -> tuple), but this loop indexes it as two
    # levels and then takes [3] of what would be a dict -- looks broken;
    # confirm the intended structure before relying on this function.
    crossEnt = crossEntitySentiment(docs)
    graph = networkx.Graph()
    for k1 in crossEnt.keys():
        for k2 in crossEnt[k1].keys():
            graph.add_edge(k1, k2, sentiment=crossEnt[k1][k2][3])
    return graph
def emotionalValences(docs, lexicon):
    """Build radar charts of per-entity emotion scores.

    Formerly what built the tropes.

    Args:
        docs: spaCy-parsed documents.
        lexicon: emotion lexicon as produced by readLexicon().

    Returns:
        list of image file names (PNGs saved under out/nlp) for up to six
        entities' radar charts.
    """
    paths = []
    emotionScores = []
    emotionLabels = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust']
    entitySentences = spacy_nlp.improvesUsages(docs)
    # Track the largest single emotion score so all charts share one scale.
    # (Renamed from "max", which shadowed the builtin.)
    peak = 0
    for label in entitySentences.keys():
        for ent in entitySentences[label].keys():
            sentences = entitySentences[label][ent]
            if len(sentences) > 1:
                scores = sentiment.emotionScore(sentences, lexicon)
                for score in scores:
                    if score > peak:
                        peak = score
                emotionScores.append((ent, scores))
    optimum = [peak] * 8
    # Render a radar chart for (at most) the first six entities.
    for ent, scores in emotionScores[:6]:
        # Save the file in the temp folder
        f = tempfile.NamedTemporaryFile(dir='out/nlp', suffix='.png', delete=False)
        radar.graph(trope=ent, values=scores, labels=emotionLabels, optimum=optimum, file_name=f)
        # get the file's name (the template will need that)
        path = f.name.split('/')[-1]
        path = path.split('\\')[-1]
        paths.append(path)
    return paths
def sourceAnalysis(articles):
    # TODO: unimplemented stub -- always returns None.
    return
if __name__ == '__main__':
    # Ad-hoc driver: time a full pre-process + cross-entity sentiment run
    # over the local 'corpus' directory and dump the result to sheet.csv.
    st = time.time()
    texts = getTexts('corpus')
    parsedDocs = preProcess(texts)
    docs = parsedDocs['english']
    sent = crossEntitySentiment(docs, 'PERSON')
    csvWrite(sent)
    # lexicon = sentiment.readLexicon()
    # e = emotionalValences(docs, lexicon)
    print(time.time() - st)
# graph = sentimentGraph(docs)
# graph = jgraph.draw(graph)
# f = open("yourpage.html", "w")
# f.write(graph)
# f.close()
# graph = sentimentGraph(docs)
# sna = SNA.SNA
# sna.G = graph
# graph = spacy_nlp.entityGraph(docs)
# print(graph.edges())
# SENTIMENT TESTING
# entities = spacy_nlp.namedEntities(docs, True)
# spacy_nlp.crossEntitySentiment(entities)
# for doc in docs:
# sentences = [s.text for s in doc.sents]
# print(sentiment.VaderSentiment(sentences))
# GRAPH TESTING
# graph = spacyNLP.entityGraph(docs)
# json = marshall(graph)
# TODO: Understand what will go into NLP box, build it all, format it right, etc.
# Overall Summary: Number of Docs. Document length distribution (histogram of document lengths). Total word count.
# FUNCTION: docSummary returns num. docs, doc lengths (potentially direct to histogram later), total word count.
# Entity analysis: Only give 100 per page or something like that
# Open new box with top N refined entities analyzed. Sortable Fields -> Entity Name, Sentiment, Number of appearances
# FUNCTION: EntitySentiment give us entity names, number of uses, sentiment
# Need to turn it into an HTML Table. Include Pictures.
# Visual analysis: Lexical dispersion plot. Radar charts.
# FUNCTION: Current radar chart implementation. LDP implementation existing can be run on the text.
# Source Analysis: No separate box.
# If didn't use url scraper - Message explaining
# Else - Pie chart of docs by source.
# List of sources: Subjectivity score. Date range of articles. Most informative articles?
# Most mentioned entities and sentiment towards (Common subjects).
# Network Analysis: Cross entity sentiment
# MAKE SURE GRAPH IS UNDIRECTED
# List of entity types --> Which should be included as nodes? Color nodes based on type.
# Open new box running JGraph. With sentiment between. Legend if possible of node types.
# Geospatial Analysis: Open separate box
# Depends on mordecai implementation. Location tagging. Test implementation could just be dots that get larger
# as a location is recognized more
# Word clouds?
| |
import json
import requests
from requests.auth import HTTPBasicAuth
from tabpy.tabpy_tools.rest import RequestsNetworkWrapper, ServiceClient
import unittest
from unittest.mock import Mock
class TestRequestsNetworkWrapper(unittest.TestCase):
    """Unit tests for RequestsNetworkWrapper.

    A mocked requests.Session is injected in setUp, so no real HTTP traffic
    happens; each verb helper (GET/POST/PUT/DELETE) is checked for the exact
    arguments it forwards to the underlying session.
    """
    def test_init(self):
        RequestsNetworkWrapper()
    def test_init_with_session(self):
        session = {}
        rnw = RequestsNetworkWrapper(session=session)
        self.assertIs(session, rnw.session)
    def mock_response(self, status_code):
        # Helper (not a test): fake requests.Response whose .json() always
        # returns the string "json".
        response = Mock(requests.Response)
        response.json.return_value = "json"
        response.status_code = status_code
        return response
    def setUp(self):
        # Fresh mocked session per test; all verbs succeed by default.
        session = Mock(requests.Session)
        session.get.return_value = self.mock_response(200)
        session.post.return_value = self.mock_response(200)
        session.put.return_value = self.mock_response(200)
        session.delete.return_value = self.mock_response(204)
        self.rnw = RequestsNetworkWrapper(session=session)
    def test_GET(self):
        url = "abc"
        data = {"foo": "bar"}
        self.assertEqual(self.rnw.GET(url, data), "json")
        self.rnw.session.get.assert_called_once_with(
            url, params=data, timeout=None, auth=None
        )
    def test_GET_InvalidData(self):
        url = "abc"
        data = {"cat"}
        with self.assertRaises(TypeError):
            self.rnw.session.get.return_value = self.mock_response(404)
            self.rnw.GET(url, data)
    def test_GET_InvalidURL(self):
        url = ""
        data = {"foo": "bar"}
        with self.assertRaises(TypeError):
            self.rnw.session.get.return_value = self.mock_response(404)
            self.rnw.GET(url, data)
    def test_POST(self):
        url = "abc"
        data = {"foo": "bar"}
        self.assertEqual(self.rnw.POST(url, data), "json")
        self.rnw.session.post.assert_called_once_with(
            url,
            data=json.dumps(data),
            headers={"content-type": "application/json"},
            timeout=None,
            auth=None,
        )
    def test_POST_InvalidURL(self):
        url = ""
        data = {"foo": "bar"}
        with self.assertRaises(TypeError):
            self.rnw.session.post.return_value = self.mock_response(404)
            self.rnw.POST(url, data)
    def test_POST_InvalidData(self):
        # A set is not JSON-serializable, so POST must raise TypeError.
        url = "url"
        data = {"cat"}
        with self.assertRaises(TypeError):
            self.rnw.POST(url, data)
    def test_PUT(self):
        url = "abc"
        data = {"foo": "bar"}
        self.assertEqual(self.rnw.PUT(url, data), "json")
        self.rnw.session.put.assert_called_once_with(
            url,
            data=json.dumps(data),
            headers={"content-type": "application/json"},
            timeout=None,
            auth=None,
        )
    def test_PUT_InvalidData(self):
        url = "url"
        data = {"cat"}
        with self.assertRaises(TypeError):
            self.rnw.PUT(url, data)
    def test_PUT_InvalidURL(self):
        url = ""
        data = {"foo:bar"}
        with self.assertRaises(TypeError):
            self.rnw.PUT(url, data)
    def test_DELETE(self):
        url = "abc"
        data = {"foo": "bar"}
        self.assertIs(self.rnw.DELETE(url, data), None)
        self.rnw.session.delete.assert_called_once_with(
            url, data=json.dumps(data), timeout=None, auth=None
        )
    def test_DELETE_InvalidData(self):
        url = "abc"
        data = {"cat"}
        with self.assertRaises(TypeError):
            self.rnw.DELETE(url, data)
    def test_DELETE_InvalidURL(self):
        url = ""
        data = {"foo:bar"}
        with self.assertRaises(TypeError):
            self.rnw.DELETE(url, data)
    def test_set_credentials(self):
        expected_auth = None
        self.assertEqual(self.rnw.auth, expected_auth)
        username, password = "username", "password"
        expected_auth = HTTPBasicAuth(username, password)
        self.rnw.set_credentials(username, password)
        self.assertEqual(self.rnw.auth, expected_auth)
    def _test_METHOD_with_credentials(
        self,
        http_method_function,
        http_session_method_function,
        headers=None,
        params=False,
        data=False,
        response=None,
    ):
        # Shared body for the *_with_credentials tests: set credentials,
        # invoke the verb, and check the exact forwarded arguments.
        username, password = "username", "password"
        self.rnw.set_credentials(username, password)
        url = "url"
        _data = {"foo": "bar"}
        self.assertEqual(http_method_function(url, _data), response)
        # One-element set; unpacking it as *pargs yields the single URL.
        pargs = {url}
        kwargs = {"timeout": None, "auth": self.rnw.auth}
        if data:
            kwargs["data"] = json.dumps(_data)
        if headers:
            kwargs["headers"] = headers
        if params:
            kwargs["params"] = _data
        http_session_method_function.assert_called_once_with(*pargs, **kwargs)
        self.assertEqual(self.rnw.auth, HTTPBasicAuth(username, password))
    def test_GET_with_credentials(self):
        self._test_METHOD_with_credentials(
            self.rnw.GET, self.rnw.session.get, params=True, response="json"
        )
    def test_POST_with_credentials(self):
        self._test_METHOD_with_credentials(
            self.rnw.POST,
            self.rnw.session.post,
            headers={"content-type": "application/json"},
            data=True,
            response="json",
        )
    def test_PUT_with_credentials(self):
        self._test_METHOD_with_credentials(
            self.rnw.PUT,
            self.rnw.session.put,
            data=True,
            headers={"content-type": "application/json"},
            response="json",
        )
    def test_DELETE_with_credentials(self):
        self._test_METHOD_with_credentials(
            self.rnw.DELETE, self.rnw.session.delete, data=True
        )
class TestServiceClient(unittest.TestCase):
    """Unit tests for ServiceClient endpoint handling and delegation."""
    def setUp(self):
        # Both clients share ONE mocked network wrapper, so call assertions
        # can be made through either client's .network_wrapper attribute.
        nw = Mock(RequestsNetworkWrapper())
        nw.GET.return_value = "GET"
        nw.POST.return_value = "POST"
        nw.PUT.return_value = "PUT"
        nw.DELETE.return_value = "DELETE"
        self.sc = ServiceClient("endpoint/", network_wrapper=nw)
        self.scClientDoesNotEndWithSlash = ServiceClient("endpoint", network_wrapper=nw)
    def test_GET(self):
        self.assertEqual(self.sc.GET("test"), "GET")
        self.sc.network_wrapper.GET.assert_called_once_with("endpoint/test", None, None)
    def test_POST(self):
        self.assertEqual(self.sc.POST("test"), "POST")
        self.sc.network_wrapper.POST.assert_called_once_with(
            "endpoint/test", None, None
        )
    def test_PUT(self):
        self.assertEqual(self.sc.PUT("test"), "PUT")
        self.sc.network_wrapper.PUT.assert_called_once_with("endpoint/test", None, None)
    def test_DELETE(self):
        self.assertEqual(self.sc.DELETE("test"), None)
        self.sc.network_wrapper.DELETE.assert_called_once_with(
            "endpoint/test", None, None
        )
    def test_FixEndpoint(self):
        # A missing trailing slash must be added by ServiceClient.
        self.assertEqual(self.scClientDoesNotEndWithSlash.GET("test"), "GET")
        # Assert through the client under test.  (The old code asserted via
        # self.sc, which only worked because both clients share one mock.)
        self.scClientDoesNotEndWithSlash.network_wrapper.GET.assert_called_once_with(
            "endpoint/test", None, None
        )
    def test_set_credentials(self):
        username, password = "username", "password"
        self.sc.set_credentials(username, password)
        self.sc.network_wrapper.set_credentials.assert_called_once_with(
            username, password
        )
| |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, assert_almost_equal, assert_equal,
assert_, assert_raises, run_module_suite,
assert_allclose, assert_array_equal)
import scipy.signal.waveforms as waveforms
# These chirp_* functions are the instantaneous frequencies of the signals
# returned by chirp().
def chirp_linear(t, f0, f1, t1):
    """Instantaneous frequency of a linear chirp at time *t*."""
    sweep = (f1 - f0) * t / t1
    return f0 + sweep
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
    """Instantaneous frequency of a quadratic chirp at time *t*.

    With vertex_zero the parabola's vertex sits at t=0; otherwise at t=t1.
    """
    if vertex_zero:
        return f0 + (f1 - f0) * t**2 / t1**2
    return f1 - (f1 - f0) * (t1 - t)**2 / t1**2
def chirp_geometric(t, f0, f1, t1):
    """Instantaneous frequency of a geometric (logarithmic) chirp."""
    ratio = f1 / f0
    return f0 * ratio ** (t / t1)
def chirp_hyperbolic(t, f0, f1, t1):
    """Instantaneous frequency of a hyperbolic chirp."""
    denominator = (f0 - f1) * t + f1 * t1
    return f0 * f1 * t1 / denominator
def compute_frequency(t, theta):
    """
    Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).

    Assumes theta and t are 1-D numpy arrays and t is uniformly spaced.
    Returns (tf, f): sample midpoints and the finite-difference frequency.
    """
    step = t[1] - t[0]
    inst_freq = np.diff(theta)/(2*np.pi) / step
    midpoints = 0.5*(t[1:] + t[:-1])
    return midpoints, inst_freq
class TestChirp(TestCase):
    """Tests for waveforms.chirp / waveforms._chirp_phase.

    Frequency checks differentiate the returned phase numerically via
    compute_frequency() and compare against the closed-form chirp_*
    helpers defined above.
    """
    def test_linear_at_zero(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
        assert_almost_equal(w, 1.0)
    def test_linear_freq_01(self):
        method = 'linear'
        f0 = 1.0
        f1 = 2.0
        t1 = 1.0
        t = np.linspace(0, t1, 100)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_linear_freq_02(self):
        method = 'linear'
        f0 = 200.0
        f1 = 100.0
        t1 = 10.0
        t = np.linspace(0, t1, 100)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_quadratic_at_zero(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
        assert_almost_equal(w, 1.0)
    def test_quadratic_at_zero2(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
                            vertex_zero=False)
        assert_almost_equal(w, 1.0)
    def test_quadratic_freq_01(self):
        method = 'quadratic'
        f0 = 1.0
        f1 = 2.0
        t1 = 1.0
        t = np.linspace(0, t1, 2000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_quadratic_freq_02(self):
        method = 'quadratic'
        f0 = 20.0
        f1 = 10.0
        t1 = 10.0
        t = np.linspace(0, t1, 2000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_logarithmic_at_zero(self):
        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
        assert_almost_equal(w, 1.0)
    def test_logarithmic_freq_01(self):
        method = 'logarithmic'
        f0 = 1.0
        f1 = 2.0
        t1 = 1.0
        t = np.linspace(0, t1, 10000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_logarithmic_freq_02(self):
        method = 'logarithmic'
        f0 = 200.0
        f1 = 100.0
        t1 = 10.0
        t = np.linspace(0, t1, 10000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_logarithmic_freq_03(self):
        # Degenerate case: f0 == f1 (constant frequency).
        method = 'logarithmic'
        f0 = 100.0
        f1 = 100.0
        t1 = 10.0
        t = np.linspace(0, t1, 10000)
        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
        tf, f = compute_frequency(t, phase)
        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
        assert_(abserr < 1e-6)
    def test_hyperbolic_at_zero(self):
        w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
        assert_almost_equal(w, 1.0)
    def test_hyperbolic_freq_01(self):
        method = 'hyperbolic'
        t1 = 1.0
        t = np.linspace(0, t1, 10000)
        #           f0     f1
        cases = [[10.0, 1.0],
                 [1.0, 10.0],
                 [-10.0, -1.0],
                 [-1.0, -10.0]]
        for f0, f1 in cases:
            phase = waveforms._chirp_phase(t, f0, t1, f1, method)
            tf, f = compute_frequency(t, phase)
            expected = chirp_hyperbolic(tf, f0, f1, t1)
            assert_allclose(f, expected)
    def test_hyperbolic_zero_freq(self):
        # f0=0 or f1=0 must raise a ValueError.
        method = 'hyperbolic'
        t1 = 1.0
        t = np.linspace(0, t1, 5)
        assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
        assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
    def test_unknown_method(self):
        method = "foo"
        f0 = 10.0
        f1 = 20.0
        t1 = 1.0
        t = np.linspace(0, t1, 10)
        assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
    def test_integer_t1(self):
        f0 = 10.0
        f1 = 20.0
        t = np.linspace(-1, 1, 11)
        t1 = 3.0
        float_result = waveforms.chirp(t, f0, t1, f1)
        t1 = 3
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 't1=3' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_f0(self):
        f1 = 20.0
        t1 = 3.0
        t = np.linspace(-1, 1, 11)
        f0 = 10.0
        float_result = waveforms.chirp(t, f0, t1, f1)
        f0 = 10
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 'f0=10' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_f1(self):
        f0 = 10.0
        t1 = 3.0
        t = np.linspace(-1, 1, 11)
        f1 = 20.0
        float_result = waveforms.chirp(t, f0, t1, f1)
        f1 = 20
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 'f1=20' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_all(self):
        f0 = 10
        t1 = 3
        f1 = 20
        t = np.linspace(-1, 1, 11)
        float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
        int_result = waveforms.chirp(t, f0, t1, f1)
        err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
        assert_equal(int_result, float_result, err_msg=err_msg)
class TestSweepPoly(TestCase):
    """Tests for waveforms._sweep_poly_phase.

    Each test differentiates the phase numerically and checks it matches
    the polynomial frequency profile p(t).
    """
    def test_sweep_poly_quad1(self):
        p = np.poly1d([1.0, 0.0, 1.0])
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_const(self):
        p = np.poly1d(2.0)
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_linear(self):
        p = np.poly1d([-1.0, 10.0])
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_quad2(self):
        p = np.poly1d([1.0, 0.0, -2.0])
        t = np.linspace(0, 3.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_cubic(self):
        p = np.poly1d([2.0, 1.0, 0.0, -2.0])
        t = np.linspace(0, 2.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = p(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_cubic2(self):
        """Use an array of coefficients instead of a poly1d."""
        p = np.array([2.0, 1.0, 0.0, -2.0])
        t = np.linspace(0, 2.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = np.poly1d(p)(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
    def test_sweep_poly_cubic3(self):
        """Use a list of coefficients instead of a poly1d."""
        p = [2.0, 1.0, 0.0, -2.0]
        t = np.linspace(0, 2.0, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        tf, f = compute_frequency(t, phase)
        expected = np.poly1d(p)(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-6)
class TestGaussPulse(TestCase):
    """Verify gausspulse('cutoff') treats int and float arguments alike."""
    def _assert_int_float_match(self, name, value):
        # Compare the result for a float argument with the int equivalent.
        float_result = waveforms.gausspulse('cutoff', **{name: float(value)})
        int_result = waveforms.gausspulse('cutoff', **{name: int(value)})
        err_msg = "Integer input '%s=%d' gives wrong result" % (name, value)
        assert_equal(int_result, float_result, err_msg=err_msg)
    def test_integer_fc(self):
        self._assert_int_float_match('fc', 1000)
    def test_integer_bw(self):
        self._assert_int_float_match('bw', 1)
    def test_integer_bwr(self):
        self._assert_int_float_match('bwr', -6)
    def test_integer_tpr(self):
        self._assert_int_float_match('tpr', -60)
class TestUnitImpulse(TestCase):
    """Tests for waveforms.unit_impulse (shape, index handling, dtype)."""
    def test_no_index(self):
        assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
        assert_array_equal(waveforms.unit_impulse((3, 3)),
                           [[1, 0, 0], [0, 0, 0], [0, 0, 0]])
    def test_index(self):
        assert_array_equal(waveforms.unit_impulse(10, 3),
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
        assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)),
                           [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        # Broadcasting
        imp = waveforms.unit_impulse((4, 4), 2)
        assert_array_equal(imp, np.array([[0, 0, 0, 0],
                                          [0, 0, 0, 0],
                                          [0, 0, 1, 0],
                                          [0, 0, 0, 0]]))
    def test_mid(self):
        assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'),
                           [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_array_equal(waveforms.unit_impulse(9, 'mid'),
                           [0, 0, 0, 0, 1, 0, 0, 0, 0])
    def test_dtype(self):
        imp = waveforms.unit_impulse(7)
        # np.float / np.complex were deprecated aliases of the builtins and
        # were removed in NumPy 1.24; test against the abstract scalar
        # types instead.
        assert_(np.issubdtype(imp.dtype, np.floating))
        imp = waveforms.unit_impulse(5, 3, dtype=int)
        assert_(np.issubdtype(imp.dtype, np.integer))
        imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
        assert_(np.issubdtype(imp.dtype, np.complexfloating))
if __name__ == "__main__":
    # NOTE(review): numpy.testing.run_module_suite is deprecated and removed
    # in modern NumPy -- running this file directly may fail there; confirm
    # the targeted NumPy version.
    run_module_suite()
| |
"""Support for Met.no weather service."""
import logging
from random import randrange
import metno
import voluptuous as vol
from homeassistant.components.weather import PLATFORM_SCHEMA, WeatherEntity
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
EVENT_CORE_CONFIG_UPDATE,
LENGTH_FEET,
LENGTH_METERS,
LENGTH_MILES,
PRESSURE_HPA,
PRESSURE_INHG,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_call_later
from homeassistant.util.distance import convert as convert_distance
import homeassistant.util.dt as dt_util
from homeassistant.util.pressure import convert as convert_pressure
from .const import CONF_TRACK_HOME
_LOGGER = logging.getLogger(__name__)
# Attribution string required when displaying met.no forecast data.
ATTRIBUTION = (
    "Weather forecast from met.no, delivered by the Norwegian "
    "Meteorological Institute."
)
DEFAULT_NAME = "Met.no"
# met.no locationforecast endpoint (API v1.9).
URL = "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/1.9/"
# Platform schema: latitude and longitude must be given together; elevation
# is optional and defaults to the core elevation at setup time.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Inclusive(
            CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
        ): cv.latitude,
        vol.Inclusive(
            CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
        ): cv.longitude,
        vol.Optional(CONF_ELEVATION): int,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Met.no weather platform."""
    _LOGGER.warning("Loading Met.no via platform config is deprecated")
    # Fill in the core elevation as the default, keeping explicit values.
    merged = {CONF_ELEVATION: hass.config.elevation, **config}
    # No latitude configured means we follow the Home Assistant location.
    if merged.get(CONF_LATITUDE) is None:
        merged[CONF_TRACK_HOME] = True
    async_add_entities([MetWeather(merged, hass.config.units.is_metric)])
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add a weather entity from a config_entry."""
    entity = MetWeather(config_entry.data, hass.config.units.is_metric)
    async_add_entities([entity])
class MetWeather(WeatherEntity):
"""Implementation of a Met.no weather condition."""
    def __init__(self, config, is_metric):
        """Initialise the platform with a data instance and site."""
        self._config = config
        self._is_metric = is_metric
        # Unsubscribe callbacks for the core-config listener and the
        # scheduled fetch; None while not armed.
        self._unsub_track_home = None
        self._unsub_fetch_data = None
        # metno.MetWeatherData instance; created in _init_data().
        self._weather_data = None
        self._current_weather_data = {}
        self._forecast_data = None
async def async_added_to_hass(self):
"""Start fetching data."""
self._init_data()
await self._fetch_data()
if self._config.get(CONF_TRACK_HOME):
self._unsub_track_home = self.hass.bus.async_listen(
EVENT_CORE_CONFIG_UPDATE, self._core_config_updated
)
    @callback
    def _init_data(self):
        """Initialize a data object."""
        conf = self._config
        if self.track_home:
            # Follow the Home Assistant core location.
            latitude = self.hass.config.latitude
            longitude = self.hass.config.longitude
            elevation = self.hass.config.elevation
        else:
            latitude = conf[CONF_LATITUDE]
            longitude = conf[CONF_LONGITUDE]
            elevation = conf[CONF_ELEVATION]
            if not self._is_metric:
                # Presumably imperial installs configure elevation in feet
                # and the API expects meters -- confirm the intended unit.
                elevation = int(
                    round(convert_distance(elevation, LENGTH_FEET, LENGTH_METERS))
                )
        # The met.no API takes coordinates as strings.
        coordinates = {
            "lat": str(latitude),
            "lon": str(longitude),
            "msl": str(elevation),
        }
        self._weather_data = metno.MetWeatherData(
            coordinates, async_get_clientsession(self.hass), URL
        )
async def _core_config_updated(self, _event):
"""Handle core config updated."""
self._init_data()
if self._unsub_fetch_data:
self._unsub_fetch_data()
self._unsub_fetch_data = None
await self._fetch_data()
async def will_remove_from_hass(self):
"""Handle entity will be removed from hass."""
if self._unsub_track_home:
self._unsub_track_home()
self._unsub_track_home = None
if self._unsub_fetch_data:
self._unsub_fetch_data()
self._unsub_fetch_data = None
async def _fetch_data(self, *_):
"""Get the latest data from met.no."""
if not await self._weather_data.fetching_data():
# Retry in 15 to 20 minutes.
minutes = 15 + randrange(6)
_LOGGER.error("Retrying in %i minutes", minutes)
self._unsub_fetch_data = async_call_later(
self.hass, minutes * 60, self._fetch_data
)
return
# Wait between 55-65 minutes. If people update HA on the hour, this
# will make sure it will spread it out.
self._unsub_fetch_data = async_call_later(
self.hass, randrange(55, 65) * 60, self._fetch_data
)
self._update()
def _update(self, *_):
"""Get the latest data from Met.no."""
self._current_weather_data = self._weather_data.get_current_weather()
time_zone = dt_util.DEFAULT_TIME_ZONE
self._forecast_data = self._weather_data.get_forecast(time_zone)
self.async_write_ha_state()
@property
def track_home(self):
"""Return if we are tracking home."""
return self._config.get(CONF_TRACK_HOME, False)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return unique ID."""
if self.track_home:
return "home"
return f"{self._config[CONF_LATITUDE]}-{self._config[CONF_LONGITUDE]}"
@property
def name(self):
"""Return the name of the sensor."""
name = self._config.get(CONF_NAME)
if name is not None:
return name
if self.track_home:
return self.hass.config.location_name
return DEFAULT_NAME
@property
def condition(self):
"""Return the current condition."""
return self._current_weather_data.get("condition")
@property
def temperature(self):
"""Return the temperature."""
return self._current_weather_data.get("temperature")
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
pressure_hpa = self._current_weather_data.get("pressure")
if self._is_metric or pressure_hpa is None:
return pressure_hpa
return round(convert_pressure(pressure_hpa, PRESSURE_HPA, PRESSURE_INHG), 2)
@property
def humidity(self):
"""Return the humidity."""
return self._current_weather_data.get("humidity")
@property
def wind_speed(self):
"""Return the wind speed."""
speed_m_s = self._current_weather_data.get("wind_speed")
if self._is_metric or speed_m_s is None:
return speed_m_s
speed_mi_s = convert_distance(speed_m_s, LENGTH_METERS, LENGTH_MILES)
speed_mi_h = speed_mi_s / 3600.0
return int(round(speed_mi_h))
@property
def wind_bearing(self):
"""Return the wind direction."""
return self._current_weather_data.get("wind_bearing")
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def forecast(self):
"""Return the forecast array."""
return self._forecast_data
| |
"""Extract wikilinks from pages.
The output format is csv.
"""
import csv
import datetime
import functools
import collections
import mwxml
import jsonable
import more_itertools
import fuzzywuzzy.process
from typing import Iterable, Iterator, Mapping, NamedTuple, Optional
from .. import dumper, extractors, utils
# Mako template rendered by dumper.render_template() to emit the run's
# <stats> XML report ("| x" applies XML escaping to each value).
stats_template = '''
<stats>
<performance>
<start_time>${stats['performance']['start_time'] | x}</start_time>
<end_time>${stats['performance']['end_time'] | x}</end_time>
<revisions_analyzed>${stats['performance']['revisions_analyzed'] | x}</revisions_analyzed>
<pages_analyzed>${stats['performance']['pages_analyzed'] | x}</pages_analyzed>
</performance>
</stats>
'''

# One analyzed revision; `wikilinks` is a lazy iterable of extracted links.
Revision = NamedTuple('Revision', [
    ('id', int),
    ('parent_id', int),
    ('user', Optional[mwxml.Revision.User]),
    ('minor', bool),
    ('comment', str),
    ('model', str),
    ('format', str),
    ('timestamp', jsonable.Type),
    ('text', str),
    ('wikilinks', Iterable[extractors.misc.Wikilink])
])

# One analyzed page; `revisions` is a lazy iterable of Revision records.
Page = NamedTuple('Page', [
    ('id', str),
    ('namespace', int),
    ('title', str),
    ('revisions', Iterable[Revision]),
])
def extract_revisions(
        mw_page: mwxml.Page,
        stats: Mapping,
        only_last_revision: bool,
        debug: bool) -> Iterator[Revision]:
    """Extract the internal links (wikilinks) from the revisions.

    Yields one Revision per analyzed revision of *mw_page*. The
    ``wikilinks`` field is a lazy generator: link extraction runs only when
    the caller iterates it. The revisions_analyzed counter is incremented
    after each yield, so it counts only revisions actually consumed.
    """
    # peekable lets us detect the last revision without reading ahead.
    revisions = more_itertools.peekable(mw_page)
    for mw_revision in revisions:
        utils.dot()  # progress indicator

        is_last_revision = not utils.has_next(revisions)
        if only_last_revision and not is_last_revision:
            continue

        # Strip HTML comments before extraction; tolerate missing text.
        text = utils.remove_comments(mw_revision.text or '')

        # Lazy generator: keeps only the wikilink, dropping the span info.
        wikilinks = (wikilink
                     for wikilink, _
                     in extractors.wikilinks(
                         page_title=mw_page.title,
                         source=text,
                         sections=extractors.sections(text),
                         debug=debug,
                     )
                     )

        yield Revision(
            id=mw_revision.id,
            parent_id=mw_revision.parent_id,
            user=mw_revision.user,
            minor=mw_revision.minor,
            comment=mw_revision.comment,
            model=mw_revision.model,
            format=mw_revision.format,
            timestamp=mw_revision.timestamp.to_json(),
            text=text,
            wikilinks=wikilinks
        )

        stats['performance']['revisions_analyzed'] += 1
def extract_pages(
        dump: Iterable[mwxml.Page],
        stats: Mapping,
        only_last_revision: bool,
        debug: bool) -> Iterator[Page]:
    """Yield one Page (with a lazy revision stream) per article in *dump*."""
    for page in dump:
        utils.log("Processing", page.title)

        # Articles live in namespace 0; everything else is skipped.
        if page.namespace != 0:
            utils.log('Skipped (namespace != 0)')
            continue

        yield Page(
            id=page.id,
            namespace=page.namespace,
            title=page.title,
            revisions=extract_revisions(
                page,
                stats=stats,
                only_last_revision=only_last_revision,
                debug=debug,
            ),
        )
        # Counted after the yield: only pages consumed downstream are tallied.
        stats['performance']['pages_analyzed'] += 1
def configure_subparsers(subparsers):
    """Register the 'extract-wikilinks' subcommand and its CLI options."""
    parser = subparsers.add_parser(
        'extract-wikilinks',
        help='Extract internal links (wikilinks)',
    )
    parser.add_argument('-d', '--debug',
                        action='store_true',
                        help='Activate debug mode.')
    parser.add_argument('--only-last-revision',
                        action='store_true',
                        help='Consider only the last revision for each page.')
    parser.set_defaults(func=main)
def main(
        dump: Iterable[mwxml.Page],
        features_output_h,
        stats_output_h,
        args) -> None:
    """Main function that parses the arguments and writes the output.

    Writes one CSV row per wikilink found in the analyzed revisions to
    *features_output_h*, then renders an XML stats report to
    *stats_output_h*.
    """
    stats = {
        'performance': {
            'start_time': None,
            'end_time': None,
            'revisions_analyzed': 0,
            'pages_analyzed': 0,
        },
        # NOTE(review): these counters are never updated in this function —
        # presumably filled elsewhere or vestigial; confirm before relying
        # on them in the stats report.
        'section_names': {
            'global': collections.Counter(),
            'last_revision': collections.Counter(),
        },
    }
    stats['performance']['start_time'] = datetime.datetime.utcnow()

    writer = csv.writer(features_output_h)

    pages_generator = extract_pages(
        dump,
        stats=stats,
        only_last_revision=args.only_last_revision,
        debug=args.debug,
    )

    writer.writerow((
        'page_id',
        'page_title',
        'revision_id',
        'revision_parent_id',
        'revision_timestamp',
        'user_type',
        'user_username',
        'user_id',
        'revision_minor',
        'wikilink.link',
        'wikilink.tosection',
        'wikilink.anchor',
        'wikilink.section_name',
        'wikilink.section_level',
        'wikilink.section_number'
    ))
    for mw_page in pages_generator:
        for revision in mw_page.revisions:
            # Classify the author: registered account, anonymous IP, or
            # absent (e.g. suppressed) — encoded with sentinel ids -1/-2.
            if revision.user is None:
                user_type = 'None'
                user_username = 'None'
                user_id = -2
            elif revision.user.id is not None:
                user_type = 'registered'
                user_username = revision.user.text
                user_id = revision.user.id
            else:
                user_type = 'ip'
                user_username = revision.user.text
                user_id = -1

            # Use -1 as the sentinel for a page's first revision.
            revision_parent_id = revision.parent_id
            if revision.parent_id is None:
                revision_parent_id = -1

            revision_minor = 1 if revision.minor else 0

            for wikilink in revision.wikilinks:
                writer.writerow((
                    mw_page.id,
                    mw_page.title,
                    revision.id,
                    # Bug fix: previously wrote revision.parent_id directly,
                    # leaving the -1 sentinel computed above unused and
                    # emitting an empty field for first revisions.
                    revision_parent_id,
                    revision.timestamp,
                    user_type,
                    user_username,
                    user_id,
                    revision_minor,
                    wikilink.link,
                    wikilink.tosection,
                    wikilink.anchor,
                    wikilink.section_name,
                    wikilink.section_level,
                    wikilink.section_number
                ))

    stats['performance']['end_time'] = datetime.datetime.utcnow()
    with stats_output_h:
        dumper.render_template(
            stats_template,
            stats_output_h,
            stats=stats,
        )
| |
from __future__ import absolute_import
__all__ = ("Csp", "Hpkp", "ExpectCT", "ExpectStaple")
from six.moves.urllib.parse import urlsplit, urlunsplit
from sentry.interfaces.base import Interface
from sentry.utils import json
from sentry.utils.cache import memoize
from sentry.web.helpers import render_to_string
# Default block list sourced from personal experience as well as
# reputable blogs from Twitter and Dropbox
# (duplicate entries removed: "*.superfish.com", "v.zilionfast.in")
DEFAULT_DISALLOWED_SOURCES = (
    "about",  # Noise from Chrome about page.
    "ms-browser-extension",
    "chrome://*",
    "chrome-extension://*",
    "chromeinvokeimmediate://*",
    "chromenull://*",
    "data:text/html,chromewebdata",
    "safari-extension://*",
    "mxaddon-pkg://*",
    "jar://*",
    "webviewprogressproxy://*",
    "ms-browser-extension://*",
    "tmtbff://*",
    "mbinit://*",
    "symres://*",
    "resource://*",
    "moz-extension://*",
    "*.metrext.com",
    "static.image2play.com",
    "*.tlscdn.com",
    "73a5b0806e464be8bd4e694c744624f0.com",
    "020dfefc4ac745dab7594f2f771c1ded.com",
    "*.superfish.com",
    "addons.mozilla.org",
    "v.zilionfast.in",
    "widgets.amung.us",
    "xls.searchfun.in",
    "istatic.datafastguru.info",
    "localhost",
    "resultshub-a.akamaihd.net",
    "pulseadnetwork.com",
    "gateway.zscalertwo.net",
    "www.passpack.com",
    "middlerush-a.akamaihd.net",
    "www.websmartcenter.com",
    "a.linkluster.com",
    "saveyoutime.ru",
    "cdncache-a.akamaihd.net",
    "x.rafomedia.com",
    "savingsslider-a.akamaihd.net",
    "injections.adguard.com",
    "icontent.us",
    "amiok.org",
    "connectionstrenth.com",
    "siteheart.net",
    "netanalitics.space",
    "printapplink.com",
    "godlinkapp.com",
    "devappstor.com",
    "hoholikik.club",
    "smartlink.cool",
    "promfflinkdev.com",
)  # yapf: disable
class SecurityReport(Interface):
    """
    A browser security violation report.

    Abstract base for the concrete report interfaces below (Hpkp,
    ExpectStaple, ExpectCT, Csp); each subclass overrides ``title``.
    """

    # Overridden by each concrete report type.
    title = None
class Hpkp(SecurityReport):
    """
    A HTTP Public Key Pinning pin validation failure report.

    See also: https://tools.ietf.org/html/rfc7469#section-3
    >>> {
    >>>     "date-time": "2014-04-06T13:00:50Z",
    >>>     "hostname": "www.example.com",
    >>>     "port": 443,
    >>>     "effective-expiration-date": "2014-05-01T12:40:50Z",
    >>>     "include-subdomains": False,
    >>>     "served-certificate-chain": [],
    >>>     "validated-certificate-chain": [],
    >>>     "known-pins": [],
    >>> }
    """

    # NOTE(review): score/display_score appear to be interface-ranking
    # weights (identical 1300 across all security reports) — confirm
    # against the Interface base class.
    score = 1300
    display_score = 1300
    title = "HPKP Report"
class ExpectStaple(SecurityReport):
    """
    An OCSP Stapling violation report

    See: https://docs.google.com/document/d/1aISglJIIwglcOAhqNfK-2vtQl-_dWAapc-VLDh-9-BE
    >>> {
    >>>     "date-time": date-time,
    >>>     "hostname": hostname,
    >>>     "port": port,
    >>>     "effective-expiration-date": date-time,
    >>>     "response-status": ResponseStatus,
    >>>     "ocsp-response": ocsp,
    >>>     "cert-status": CertStatus,
    >>>     "served-certificate-chain": [pem1, ... pemN],(MUST be in the order served)
    >>>     "validated-certificate-chain": [pem1, ... pemN](MUST be in the order served)
    >>> }
    """

    # Same ranking weights as the other security report interfaces.
    score = 1300
    display_score = 1300
    title = "Expect-Staple Report"
class ExpectCT(SecurityReport):
    """
    A Certificate Transparency violation report.

    See also: http://httpwg.org/http-extensions/expect-ct.html
    >>> {
    >>>     "date-time": "2014-04-06T13:00:50Z",
    >>>     "hostname": "www.example.com",
    >>>     "port": 443,
    >>>     "effective-expiration-date": "2014-05-01T12:40:50Z",
    >>>     "served-certificate-chain": [],
    >>>     "validated-certificate-chain": [],
    >>>     "scts-pins": [],
    >>> }
    """

    # Same ranking weights as the other security report interfaces.
    score = 1300
    display_score = 1300
    title = "Expect-CT Report"
class Csp(SecurityReport):
    """
    A CSP violation report.

    See also: http://www.w3.org/TR/CSP/#violation-reports
    >>> {
    >>>     "document_uri": "http://example.com/",
    >>>     "violated_directive": "style-src cdn.example.com",
    >>>     "blocked_uri": "http://example.com/style.css",
    >>>     "effective_directive": "style-src",
    >>> }
    """

    # CSP source keyword denoting the page's own origin.
    LOCAL = "'self'"
    score = 1300
    display_score = 1300
    title = "CSP Report"

    @classmethod
    def to_python(cls, data):
        """Build an instance from a raw report dict; missing keys become None."""
        data.setdefault("document_uri", None)
        data.setdefault("violated_directive", None)
        data.setdefault("blocked_uri", None)
        data.setdefault("effective_directive", None)
        return cls(**data)

    def to_string(self, is_public=False, **kwargs):
        # Mirrors the browser wire format: {"csp-report": {...}}.
        return json.dumps({"csp-report": self.get_api_context()}, indent=2)

    def to_email_html(self, event, **kwargs):
        return render_to_string(
            "sentry/partial/interfaces/csp_email.html", {"data": self.get_api_context()}
        )

    @memoize
    def normalized_blocked_uri(self):
        # Cached: blocked_uri reduced to LOCAL, a bare host, or scheme://host.
        return self._normalize_uri(self.blocked_uri)

    @memoize
    def local_script_violation_type(self):
        """
        If this is a locally-sourced script-src error, gives the type
        ("unsafe-inline" / "unsafe-eval"), else None.
        """
        if (
            self.violated_directive
            and self.effective_directive == "script-src"
            and self.normalized_blocked_uri == self.LOCAL
        ):
            if "'unsafe-inline'" in self.violated_directive:
                return "unsafe-inline"
            elif "'unsafe-eval'" in self.violated_directive:
                return "unsafe-eval"
        return None

    def _normalize_uri(self, value):
        # Empty or self-referential values collapse to the LOCAL keyword.
        if value in ("", self.LOCAL, self.LOCAL.strip("'")):
            return self.LOCAL

        # A lot of these values get reported as literally
        # just the scheme. So a value like 'data' or 'blob', which
        # are valid schemes, just not a uri. So we want to
        # normalize it into a uri.
        if ":" not in value:
            scheme, hostname = value, ""
        else:
            scheme, hostname = urlsplit(value)[:2]
            if scheme in ("http", "https"):
                return hostname
        return self._unsplit(scheme, hostname)

    def _unsplit(self, scheme, hostname):
        # Rebuild "scheme://hostname" with no path/query/fragment.
        return urlunsplit((scheme, hostname, "", None, None))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TopK op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class TopKTest(test.TestCase):
  """Correctness tests for nn_ops.top_k."""

  def _validateTopK(self,
                    inputs,
                    k,
                    expected_values,
                    expected_indices,
                    sorted=True):  # pylint: disable=redefined-builtin
    """Evaluate top_k(inputs, k) and compare against the expectations.

    With sorted=True, values must match in order; indices may only differ
    where the corresponding float values are within tolerance. With
    sorted=False, validates index/value pairing, stability of ties, and the
    order-insensitive contents.
    """
    np_expected_values = np.array(expected_values)
    np_expected_indices = np.array(expected_indices)
    with self.cached_session(use_gpu=True):
      values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
      values, indices = self.evaluate([values_op, indices_op])

      self.assertShapeEqual(np_expected_values, values_op)
      self.assertShapeEqual(np_expected_indices, indices_op)

      if sorted:
        self.assertAllClose(np_expected_values, values)
        # Do some special casing of equality of indices: if indices
        # are not the same, but values are floating type, ensure that
        # the values are within epsilon of each other.
        if not np.issubdtype(np_expected_values.dtype, np.floating):
          # Values are not floating point type; check indices exactly
          self.assertAllEqual(np_expected_indices, indices)
        else:
          # Values are floating point; indices may be swapped for
          # values near each other.
          indices_not_equal = np_expected_indices != indices
          if np.any(indices_not_equal):
            values_unsure = values[indices_not_equal]
            # Bug fix: boolean-mask the numpy copy, not the raw argument —
            # callers often pass Python lists, which do not support
            # boolean-array indexing.
            expected_values_unsure = np_expected_values[indices_not_equal]
            self.assertAllClose(expected_values_unsure, values_unsure)
      else:
        np_inputs = np.array(inputs)

        # Check that the indices are valid.
        for result_index, src_index in np.ndenumerate(indices):
          value = values[result_index]
          expected_value = np_inputs[result_index[0], src_index]
          np.testing.assert_almost_equal(value, expected_value)

        # Check that if two elements are equal, the lower-index element
        # appears first.
        shape = values.shape
        for batch_index in range(shape[0]):
          for index in range(shape[1] - 1):
            if np.isclose(values[batch_index, index],
                          values[batch_index, index + 1]):
              self.assertLess(indices[batch_index, index],
                              indices[batch_index, index + 1])

        # Now check the results, ignoring order.
        self.assertAllEqual(np.sort(np_expected_indices), np.sort(indices))
        self.assertAllClose(np.sort(np_expected_values), np.sort(values))

  def testTop1(self):
    inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
    self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]])

  def testTop2(self):
    inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]]
    self._validateTopK(inputs, 2, [[0.4, 0.3], [0.4, 0.3]], [[3, 1], [2, 1]])

  def testTop3(self):
    k = 5
    inputs = np.random.permutation(np.linspace(0, 100, 6140, dtype=np.float64))
    indices = np.argsort(-inputs)[:k]
    values = -np.sort(-inputs)[:k]
    self._validateTopK(inputs, k, values, indices)

  def _testLargeSort(self, dtype):
    b = 10
    n = 5000
    inputs = np.random.permutation(
        np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
    indices = np.argsort(-inputs, axis=1)
    values = -np.sort(-inputs, axis=1)
    # k == n: a full sort.
    self._validateTopK(inputs, n, values, indices)

  def testLargeSort(self):
    self._testLargeSort(np.float32)
    self._testLargeSort(np.float16)

  def _testLargeTopK(self, dtype):
    b = 10
    n = 5000
    k = n - 1
    inputs = np.random.permutation(
        np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
    indices = np.argsort(-inputs, axis=1)[:, :k]
    values = -np.sort(-inputs, axis=1)[:, :k]
    self._validateTopK(inputs, k, values, indices)

  def testLargeTopK(self):
    self._testLargeTopK(np.float32)
    self._testLargeTopK(np.float16)

  def _testMediumTopK(self, dtype):
    b = 5
    n = 500
    k = 50
    inputs = np.random.permutation(
        np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
    indices = np.argsort(-inputs, axis=1)[:, :k]
    values = -np.sort(-inputs, axis=1)[:, :k]
    self._validateTopK(inputs, k, values, indices)

  def testMediumTopK(self):
    self._testMediumTopK(np.float32)
    self._testMediumTopK(np.float16)

  def testStableSort(self):
    b = 5
    n = 500
    for k in [1, 5, 50, 500]:
      # Lots of repeated integers taking values in [0, 3]
      inputs = np.random.permutation(
          np.linspace(0, 3, b * n, dtype=np.int32)).reshape(b, n)
      # Use mergesort, a stable sort, to get the indices.
      indices = np.argsort(-inputs, axis=1, kind="mergesort")[:, :k]
      values = -np.sort(-inputs, axis=1)[:, :k]
      self._validateTopK(inputs, k, values, indices)

  def testTopAll(self):
    inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
    self._validateTopK(inputs, 4, [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
                       [[3, 1, 2, 0], [1, 2, 3, 0]])

  def testTop3Unsorted(self):
    inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.4, 0.3, 0.2]]
    self._validateTopK(
        inputs,
        3, [[0.2, 0.3, 0.4], [0.2, 0.4, 0.3]], [[2, 1, 3], [3, 1, 2]],
        sorted=False)

  def testTop3Vector(self):
    inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
    self._validateTopK(inputs, 3, [19, 18, 17], [11, 3, 7])

  def testTensorK(self):
    inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
    k = constant_op.constant(3)
    self._validateTopK(inputs, k, [19, 18, 17], [11, 3, 7])

  @test_util.run_deprecated_v1
  def testKNegative(self):
    inputs = [[0.1, 0.2], [0.3, 0.4]]
    with self.session(use_gpu=True):
      k = array_ops.placeholder(dtypes.int32)
      values, _ = nn_ops.top_k(inputs, k)
      with self.assertRaisesOpError("Need k >= 0, got -7"):
        values.eval(feed_dict={k: -7})

  @test_util.run_deprecated_v1
  def testKTooLarge(self):
    inputs = [[0.1, 0.2], [0.3, 0.4]]
    with self.assertRaisesRegexp(ValueError,
                                 r"must have last dimension >= k = 4"):
      nn_ops.top_k(inputs, 4)

  @test_util.run_deprecated_v1
  def testTopKGradients(self):
    with self.session(use_gpu=True) as sess:
      inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5])
      values, _ = nn_ops.top_k(inputs, 3)
      grad = sess.run(
          gradients_impl.gradients(
              values, inputs, grad_ys=[[[1., 2., 3.], [4., 5., 6.]]]),
          feed_dict={inputs: [[2., -1., 1000., 3., 4.],
                              [1., 5., 2., 4., 3.]]})[0]
    # Gradient flows back only to the selected (top-3) positions.
    self.assertEqual(
        grad.tolist(), [[0., 0., 1., 3., 2.], [0., 4., 0., 5., 6.]])
class TopKBenchmark(test.Benchmark):
  """Throughput benchmark for top_k over a grid of shapes and k fractions."""

  def benchmarkTopK(self):
    # m: batch size, n: row length, p: fraction of n used as k.
    for (m, n, p, use_gpu) in itertools.product(
        [128],
        [10, 100, 1000, 10000, 100000],
        [0.001, 0.01, 0.5, 0.99, 1.0],
        [False, True]):
      k = int(p * n)
      if k == 0:
        continue
      name = "m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x = random_ops.random_uniform((m, n))
          v = resource_variable_ops.ResourceVariable(x)
          op = nn_ops.top_k(v, k)
        with session.Session() as sess:
          v.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # NOTE(review): m*n/1e9 counts ELEMENTS in billions, not bytes;
          # the "GB/s" label assumes one byte per element — confirm intent.
          gb_processed_input = m * n / 1.0e9
          throughput = gb_processed_input / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
# Standard TensorFlow test entry point: runs the tests/benchmarks above.
if __name__ == "__main__":
  test.main()
| |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Dankcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a dankcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# dankcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from .util import hex_str_to_bytes, bytes_to_hex_str
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
# Protocol version constants.
BIP0031_VERSION = 60000
MY_VERSION = 60001  # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"

MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000

COIN = 100000000  # 1 PEPE in satoshis

# Service bits advertised in version messages.
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)

# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()

# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()

# Serialization/deserialization tools
def sha256(s):
    """Return the SHA-256 digest of byte string *s*."""
    digest = hashlib.new('sha256')
    digest.update(s)
    return digest.digest()
def ripemd160(s):
    """Return the RIPEMD-160 digest of byte string *s*.

    NOTE(review): relies on OpenSSL providing 'ripemd160' via hashlib.new();
    modern OpenSSL 3 builds may not ship it — confirm on target systems.
    """
    return hashlib.new('ripemd160', s).digest()
def hash256(s):
    """Bitcoin-style double-SHA256 of byte string *s*."""
    inner = hashlib.sha256(s).digest()
    return hashlib.sha256(inner).digest()
def deser_string(f):
    """Read a CompactSize-prefixed byte string from the stream *f*."""
    # A first byte of 253/254/255 escapes to a 2/4/8-byte LE length.
    size = struct.unpack("<B", f.read(1))[0]
    if size >= 253:
        fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[size]
        size = struct.unpack(fmt, f.read(width))[0]
    return f.read(size)
def ser_string(s):
    """Serialize byte string *s* with a CompactSize length prefix."""
    n = len(s)
    if n < 253:
        return struct.pack("B", n) + s
    if n < 0x10000:
        return struct.pack("<BH", 253, n) + s
    if n < 0x100000000:
        return struct.pack("<BI", 254, n) + s
    return struct.pack("<BQ", 255, n) + s
def deser_uint256(f):
    """Read a 256-bit integer stored as eight little-endian 32-bit words."""
    value = 0
    for shift in range(0, 256, 32):
        value |= struct.unpack("<I", f.read(4))[0] << shift
    return value
def ser_uint256(u):
    """Serialize *u* as eight little-endian 32-bit words (high bits beyond 256 dropped)."""
    return b"".join(
        struct.pack("<I", (u >> shift) & 0xFFFFFFFF)
        for shift in range(0, 256, 32)
    )
def uint256_from_str(s):
    """Convert the first 32 bytes of *s* (little-endian) to an integer."""
    value = 0
    for index, word in enumerate(struct.unpack("<IIIIIIII", s[:32])):
        value |= word << (index * 32)
    return value
def uint256_from_compact(c):
    """Expand a Bitcoin 'compact' (nBits) value into the full target integer."""
    exponent = (c >> 24) & 0xFF  # size of the target in bytes
    mantissa = c & 0xFFFFFF      # top three bytes of the target
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize count, then that many *c* instances via deserialize()."""
    count = struct.unpack("<B", f.read(1))[0]
    if count >= 253:
        fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[count]
        count = struct.unpack(fmt, f.read(width))[0]
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
    """Serialize *l* with a CompactSize count prefix.

    Each entry is serialized via entry.serialize(), or via the method named
    by *ser_function_name* when one is given.
    """
    n = len(l)
    if n < 253:
        out = struct.pack("B", n)
    elif n < 0x10000:
        out = struct.pack("<BH", 253, n)
    elif n < 0x100000000:
        out = struct.pack("<BI", 254, n)
    else:
        out = struct.pack("<BQ", 255, n)
    for entry in l:
        if ser_function_name:
            out += getattr(entry, ser_function_name)()
        else:
            out += entry.serialize()
    return out
def deser_uint256_vector(f):
    """Read a CompactSize count followed by that many uint256 values."""
    count = struct.unpack("<B", f.read(1))[0]
    if count >= 253:
        fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[count]
        count = struct.unpack(fmt, f.read(width))[0]
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a list of uint256 values with a CompactSize count prefix."""
    n = len(l)
    if n < 253:
        out = struct.pack("B", n)
    elif n < 0x10000:
        out = struct.pack("<BH", 253, n)
    elif n < 0x100000000:
        out = struct.pack("<BI", 254, n)
    else:
        out = struct.pack("<BQ", 255, n)
    for value in l:
        out += ser_uint256(value)
    return out
def deser_string_vector(f):
    """Read a CompactSize count followed by that many length-prefixed strings."""
    count = struct.unpack("<B", f.read(1))[0]
    if count >= 253:
        fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[count]
        count = struct.unpack(fmt, f.read(width))[0]
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a list of byte strings with a CompactSize count prefix."""
    n = len(l)
    if n < 253:
        out = struct.pack("B", n)
    elif n < 0x10000:
        out = struct.pack("<BH", 253, n)
    elif n < 0x100000000:
        out = struct.pack("<BI", 254, n)
    else:
        out = struct.pack("<BQ", 255, n)
    for item in l:
        out += ser_string(item)
    return out
def deser_int_vector(f):
    """Read a CompactSize count followed by that many signed 32-bit LE ints."""
    count = struct.unpack("<B", f.read(1))[0]
    if count >= 253:
        fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[count]
        count = struct.unpack(fmt, f.read(width))[0]
    return [struct.unpack("<i", f.read(4))[0] for _ in range(count)]
def ser_int_vector(l):
    """Serialize a list of signed 32-bit ints with a CompactSize count prefix."""
    n = len(l)
    if n < 253:
        prefix = struct.pack("B", n)
    elif n < 0x10000:
        prefix = struct.pack("<BH", 253, n)
    elif n < 0x100000000:
        prefix = struct.pack("<BI", 254, n)
    else:
        prefix = struct.pack("<BQ", 255, n)
    return prefix + b"".join(struct.pack("<i", i) for i in l)
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
    """Populate *obj* from its hex-encoded serialization and return it."""
    raw = hex_str_to_bytes(hex_string)
    obj.deserialize(BytesIO(raw))
    return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    """Hex-encode the serialization of *obj*."""
    blob = obj.serialize()
    return bytes_to_hex_str(blob)
# Objects that map to dankcoind objects, which can be serialized/deserialized
class CAddress(object):
    """Network address record: services, IPv4-mapped address, and port."""

    def __init__(self):
        self.nServices = 1
        # 10 zero bytes + 2 0xff bytes: the IPv4-in-IPv6 mapping prefix.
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        """Read services (uint64 LE), reserved prefix, IPv4 address, BE port."""
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize(self):
        parts = [
            struct.pack("<Q", self.nServices),
            self.pchReserved,
            socket.inet_aton(self.ip),
            struct.pack(">H", self.port),
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
# Bit OR-ed into a CInv type to request the witness-serialized form (BIP 144).
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
    """Inventory vector entry: a (type, hash) pair."""

    # Human-readable names for the known inv types.
    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
        1 | MSG_WITNESS_FLAG: "WitnessTx",
        2 | MSG_WITNESS_FLAG: "WitnessBlock"
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        self.type = struct.unpack("<i", f.read(4))[0]
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator(object):
    """Block locator: protocol version plus a list of known block hashes."""

    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint(object):
    """Reference to output number *n* of the transaction with *hash*."""

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
    """Transaction input: previous outpoint, scriptSig, and sequence number."""

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return (self.prevout.serialize()
                + ser_string(self.scriptSig)
                + struct.pack("<I", self.nSequence))

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
               self.nSequence)
class CTxOut(object):
    """Transaction output: amount in satoshis and locking script."""

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
    """Witness stack for a single input: a list of byte strings."""

    def __init__(self):
        # stack is a vector of strings
        self.stack = []

    def __repr__(self):
        return "CScriptWitness(%s)" % \
            ",".join(bytes_to_hex_str(item) for item in self.stack)

    def is_null(self):
        """True when the witness stack is empty."""
        return not self.stack
class CTxInWitness(object):
    """Witness data for one transaction input (wraps a CScriptWitness)."""

    def __init__(self):
        self.scriptWitness = CScriptWitness()

    def deserialize(self, f):
        self.scriptWitness.stack = deser_string_vector(f)

    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)

    def __repr__(self):
        return repr(self.scriptWitness)

    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness(object):
    """Witness data for all inputs of a transaction."""

    def __init__(self):
        self.vtxinwit = []

    def deserialize(self, f):
        # No count on the wire: the caller must pre-size vtxinwit to match
        # the transaction's vin vector before calling this.
        for witness in self.vtxinwit:
            witness.deserialize(f)

    def serialize(self):
        # This is different than the usual vector serialization --
        # we omit the length of the vector, which is required to be
        # the same length as the transaction's vin vector.
        return b"".join(witness.serialize() for witness in self.vtxinwit)

    def __repr__(self):
        return "CTxWitness(%s)" % \
            ";".join(repr(witness) for witness in self.vtxinwit)

    def is_null(self):
        return all(witness.is_null() for witness in self.vtxinwit)
class CTransaction(object):
    """A transaction, optionally carrying segwit witness data.

    Regular serialization is without witness; witness serialization must be
    requested explicitly.  Only the witness-less serialization is cached in
    self.sha256/self.hash, which are expected to be the txid.
    """
    def __init__(self, tx=None):
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            self.sha256 = None
            self.hash = None
        else:
            # Copy constructor: deep-copy mutable members so the copy does
            # not alias the source transaction's inputs/outputs/witness.
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)
    def deserialize(self, f):
        """Parse a transaction, transparently handling the segwit format."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            # An empty vin here is the segwit marker; the next byte holds
            # the flags, after which the real vin/vout follow.
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            # matches the implementation in dankcoind
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            # BUGFIX: build one distinct CTxInWitness per input.  The old
            # [CTxInWitness()] * len(self.vin) repeated a single shared
            # instance, so every input aliased (and ended up holding) the
            # last input's witness.
            self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))]
            self.wit.deserialize(f)
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate the cached txid.
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    # Only serialize with witness when explicitly called for
    def serialize_with_witness(self):
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # Segwit marker: an empty vin vector followed by the flag byte.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            if (len(self.wit.vtxinwit) != len(self.vin)):
                # vtxinwit must have the same length as vin
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for i in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r
    # Regular serialization is without witness -- must explicitly
    # call serialize_with_witness to include witness data.
    def serialize(self):
        return self.serialize_without_witness()
    # Recalculate the txid (transaction hash without witness)
    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
    # We will only cache the serialization without witness in
    # self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if with_witness:
            # Don't cache the result, just return it
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
    def is_valid(self):
        """Sanity-check output values against the 21M coin supply cap."""
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
    """An 80-byte block header with a lazily computed, cached hash."""
    def __init__(self, header=None):
        if header is None:
            self.set_null()
        else:
            # Copy constructor; also used to strip a CBlock down to its
            # header (see msg_headers).
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()
    def set_null(self):
        # Reset all fields to defaults and invalidate the hash cache.
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        # Invalidate the cached hash; recomputed on demand.
        self.sha256 = None
        self.hash = None
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r
    def calc_sha256(self):
        """Compute and cache sha256 (int) and hash (reversed-hex string).

        NOTE: the header is intentionally re-serialized inline rather than
        via self.serialize() -- CBlock overrides serialize() to append the
        transactions, but the block hash must cover only the header fields.
        """
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
    def rehash(self):
        # Force recomputation of the cached hash and return it.
        self.sha256 = None
        self.calc_sha256()
        return self.sha256
    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """A full block: the header fields plus a transaction vector."""
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []
    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)
    def serialize(self, with_witness=False):
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            # Serialize each tx via its serialize_with_witness method.
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx)
        return r
    # Calculate the merkle root given a vector of transaction hashes
    def get_merkle_root(self, hashes):
        # Odd-length levels pair the final hash with itself (i2 == i at
        # the last index), matching the reference implementation.
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])
    def calc_merkle_root(self):
        # Root over the witness-less txids.
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)
    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)
    def is_valid(self):
        """Check proof of work, per-tx validity, and the merkle root."""
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True
    def solve(self):
        """Grind nNonce until the header hash meets the compact target."""
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()
    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
    """Payload of a legacy network alert, before signing.

    The field order in serialize()/deserialize() is the wire format and
    must not be changed.
    """
    def __init__(self):
        self.nVersion = 1
        self.nRelayUntil = 0
        self.nExpiration = 0
        self.nID = 0
        self.nCancel = 0
        self.setCancel = []
        self.nMinVer = 0
        self.nMaxVer = 0
        self.setSubVer = []
        self.nPriority = 0
        self.strComment = b""
        self.strStatusBar = b""
        self.strReserved = b""
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
        self.nExpiration = struct.unpack("<q", f.read(8))[0]
        self.nID = struct.unpack("<i", f.read(4))[0]
        self.nCancel = struct.unpack("<i", f.read(4))[0]
        self.setCancel = deser_int_vector(f)
        self.nMinVer = struct.unpack("<i", f.read(4))[0]
        self.nMaxVer = struct.unpack("<i", f.read(4))[0]
        self.setSubVer = deser_string_vector(f)
        self.nPriority = struct.unpack("<i", f.read(4))[0]
        self.strComment = deser_string(f)
        self.strStatusBar = deser_string(f)
        self.strReserved = deser_string(f)
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<q", self.nRelayUntil)
        r += struct.pack("<q", self.nExpiration)
        r += struct.pack("<i", self.nID)
        r += struct.pack("<i", self.nCancel)
        r += ser_int_vector(self.setCancel)
        r += struct.pack("<i", self.nMinVer)
        r += struct.pack("<i", self.nMaxVer)
        r += ser_string_vector(self.setSubVer)
        r += struct.pack("<i", self.nPriority)
        r += ser_string(self.strComment)
        r += ser_string(self.strStatusBar)
        r += ser_string(self.strReserved)
        return r
    def __repr__(self):
        return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
            % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
               self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
               self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
    """Signed alert: an opaque serialized payload plus its signature."""
    def __init__(self):
        self.vchMsg = b""
        self.vchSig = b""

    def deserialize(self, f):
        self.vchMsg = deser_string(f)
        self.vchSig = deser_string(f)

    def serialize(self):
        # Two length-prefixed strings, payload first.
        return ser_string(self.vchMsg) + ser_string(self.vchSig)

    def __repr__(self):
        return "CAlert(vchMsg.sz %d, vchSig.sz %d)" % (
            len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
    """``version`` handshake message.

    Later fields are only present for sufficiently new protocol versions,
    so deserialization is gated on the decoded nVersion.
    """
    command = b"version"
    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = 1
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        # Historical quirk inherited from the reference client: protocol
        # 10300 is remapped to 300.
        if self.nVersion == 10300:
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
            if self.nVersion >= 209:
                self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
            else:
                self.nStartingHeight = None
        else:
            # Pre-106 peers do not send the remaining fields.
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        return r
    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight)
class msg_verack(object):
    """Payload-less ``verack`` acknowledging a peer's version message."""
    command = b"verack"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_verack()"
class msg_addr(object):
    """``addr`` message: a vector of peer CAddress entries."""
    command = b"addr"
    def __init__(self):
        self.addrs = []
    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)
    def serialize(self):
        return ser_vector(self.addrs)
    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
    """``alert`` message wrapping one CAlert payload."""
    command = b"alert"

    def __init__(self):
        self.alert = CAlert()

    def deserialize(self, f):
        fresh = CAlert()
        fresh.deserialize(f)
        self.alert = fresh

    def serialize(self):
        return self.alert.serialize()

    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
    """``inv`` message: a vector of CInv inventory entries."""
    command = b"inv"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % repr(self.inv)
class msg_getdata(object):
    """``getdata`` message requesting the objects named in ``inv``.

    :param inv: optional list of CInv entries; a fresh empty list is used
                when omitted.
    """
    command = b"getdata"
    def __init__(self, inv=None):
        # FIX: use an identity test against None (PEP 8).  The old
        # ``inv != None`` invokes __eq__ and can misbehave for types
        # overriding equality.
        self.inv = inv if inv is not None else []
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
    """``getblocks``: a block locator plus a stop hash (0 = no limit)."""
    command = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        loc = CBlockLocator()
        loc.deserialize(f)
        self.locator = loc
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" % (
            repr(self.locator), self.hashstop)
class msg_tx(object):
    """``tx`` message carrying one transaction (without witness data).

    :param tx: transaction to wrap; a fresh CTransaction is created when
               omitted.
    """
    command = b"tx"
    def __init__(self, tx=None):
        # BUGFIX: the old signature used ``tx=CTransaction()``, a mutable
        # default evaluated once at class-definition time -- every
        # msg_tx() shared (and deserialize() mutated) one transaction.
        self.tx = CTransaction() if tx is None else tx
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize_without_witness()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
    """``tx`` message variant that serializes the witness data as well."""
    def serialize(self):
        # Witness serialization must be requested explicitly on the tx.
        return self.tx.serialize_with_witness()
class msg_block(object):
    """``block`` message carrying a full block (without witness data)."""
    command = b"block"

    def __init__(self, block=None):
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % repr(self.block)
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
    """Raw wire message: the caller supplies both command and payload.

    Used when a test needs tight control over exactly what goes on the
    wire; serialize() returns the payload verbatim with no framing added.
    """
    def __init__(self, command, data=None):
        self.command = command
        self.data = data
    def serialize(self):
        return self.data
    def __repr__(self):
        return "msg_generic()"
class msg_witness_block(msg_block):
    """``block`` message variant that includes segwit witness data."""
    def serialize(self):
        r = self.block.serialize(with_witness=True)
        return r
class msg_getaddr(object):
    """Payload-less ``getaddr`` requesting known peer addresses."""
    command = b"getaddr"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_getaddr()"
class msg_ping_prebip31(object):
    """Pre-BIP31 ``ping``: no nonce, so no pong can be matched to it."""
    command = b"ping"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_ping() (pre-bip31)"
class msg_ping(object):
    """``ping`` message with a 64-bit nonce (BIP 31)."""
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
    """``pong`` reply echoing the nonce of a BIP31 ping."""
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
    """Payload-less ``mempool`` requesting the node's mempool contents."""
    command = b"mempool"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders(object):
    """Payload-less ``sendheaders`` (BIP 130): announce blocks via headers."""
    command = b"sendheaders"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
    """``getheaders``: a block locator plus a stop hash (0 = as many
    headers as the peer will send)."""
    command = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        loc = CBlockLocator()
        loc.deserialize(f)
        self.locator = loc
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" % (
            repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in dankcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
    """``reject`` message: why a previously sent message was rejected.

    The trailing uint256 (``data``) is only on the wire when the rejected
    message was a block or tx and the code is not REJECT_MALFORMED.
    """
    command = b"reject"
    REJECT_MALFORMED = 1
    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0
    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        if (self.code != self.REJECT_MALFORMED and
                (self.message == b"block" or self.message == b"tx")):
            self.data = deser_uint256(f)
    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if (self.code != self.REJECT_MALFORMED and
                (self.message == b"block" or self.message == b"tx")):
            r += ser_uint256(self.data)
        return r
    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
    """Poll *predicate* (under mininode_lock) until it returns True.

    :param predicate: zero-argument callable, evaluated under mininode_lock.
    :param attempts: maximum number of polls before giving up.
    :param timeout: maximum wall-clock seconds before giving up.
    :return: True if the predicate held within the limits, False otherwise.
    """
    attempt = 0
    start = time.time()
    # BUGFIX: measure elapsed time with the clock instead of assuming each
    # iteration takes exactly 0.05s -- slow predicate evaluation previously
    # made the effective timeout overshoot the requested one.
    while attempt < attempts and time.time() - start < timeout:
        with mininode_lock:
            if predicate():
                return True
        attempt += 1
        time.sleep(0.05)
    return False
class msg_feefilter(object):
    """``feefilter`` (BIP 133): minimum feerate for relayed transactions."""
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        (self.feerate,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
    """Callback object for NodeConn events.

    Subclasses override the on_* handlers; deliver() dispatches each parsed
    message to the handler named after its wire command.
    """
    def __init__(self):
        self.verack_received = False
        # deliver_sleep_time is helpful for debugging race conditions in p2p
        # tests; it causes message delivery to sleep for the specified time
        # before acquiring the global lock and delivering the next message.
        self.deliver_sleep_time = None
        # Remember the services our peer has advertised
        self.peer_services = None
    def set_deliver_sleep_time(self, value):
        with mininode_lock:
            self.deliver_sleep_time = value
    def get_deliver_sleep_time(self):
        with mininode_lock:
            return self.deliver_sleep_time
    # Spin until verack message is received from the node.
    # Tests may want to use this as a signal that the test can begin.
    # This can be called from the testing thread, so it needs to acquire the
    # global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)
    def deliver(self, conn, message):
        """Dispatch *message* to on_<command>, serialized by mininode_lock."""
        deliver_sleep = self.get_deliver_sleep_time()
        if deliver_sleep is not None:
            time.sleep(deliver_sleep)
        with mininode_lock:
            try:
                getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
            except:
                # NOTE(review): bare except -- handler errors are printed
                # and swallowed so one bad message doesn't kill the thread.
                print("ERROR delivering %s (%s)" % (repr(message),
                                                    sys.exc_info()[0]))
    def on_version(self, conn, message):
        # Only protocol >= 209 expects a verack reply.
        if message.nVersion >= 209:
            conn.send_message(msg_verack())
        # Negotiate down to the lower of our version and the peer's.
        conn.ver_send = min(MY_VERSION, message.nVersion)
        if message.nVersion < 209:
            conn.ver_recv = conn.ver_send
        conn.nServices = message.nServices
    def on_verack(self, conn, message):
        conn.ver_recv = conn.ver_send
        self.verack_received = True
    def on_inv(self, conn, message):
        # Request every advertised object of a known (non-error) type.
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            conn.send_message(want)
    # Default no-op handlers; subclasses override the ones they care about.
    def on_addr(self, conn, message): pass
    def on_alert(self, conn, message): pass
    def on_getdata(self, conn, message): pass
    def on_getblocks(self, conn, message): pass
    def on_tx(self, conn, message): pass
    def on_block(self, conn, message): pass
    def on_getaddr(self, conn, message): pass
    def on_headers(self, conn, message): pass
    def on_getheaders(self, conn, message): pass
    def on_ping(self, conn, message):
        # Nonce-echoing pong only exists after BIP 31.
        if conn.ver_send > BIP0031_VERSION:
            conn.send_message(msg_pong(message.nonce))
    def on_reject(self, conn, message): pass
    def on_close(self, conn): pass
    def on_mempool(self, conn): pass
    def on_pong(self, conn, message): pass
    def on_feefilter(self, conn, message): pass
    def on_sendheaders(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
    """NodeConnCB specialization for tests that drive exactly one NodeConn.

    Tracks an outstanding ping nonce so tests can synchronize with the node
    via sync_with_ping().
    """
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None      # the single attached NodeConn
        self.ping_counter = 1       # nonce for the next sync ping
        self.last_pong = msg_pong() # most recently received pong
    def add_connection(self, conn):
        self.connection = conn
    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)
    def on_pong(self, conn, message):
        self.last_pong = message
    # Sync up with the node
    def sync_with_ping(self, timeout=30):
        """Send a ping and wait for the matching pong.

        :return: True if the pong arrived within *timeout* seconds.
        """
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.send_message(msg_ping(nonce=self.ping_counter))
        # BUGFIX: pass the deadline by keyword.  wait_until's second
        # positional parameter is `attempts`, so the old call
        # wait_until(received_pong, timeout) capped the wait at `timeout`
        # polls (~0.05s each) instead of `timeout` seconds.
        success = wait_until(received_pong, timeout=timeout)
        self.ping_counter += 1
        return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
    """A p2p connection to one node, driven by the asyncore event loop.

    Handles wire framing (magic + 12-byte command + length and, for
    protocol >= 209, a 4-byte double-SHA256 checksum), parses payloads via
    `messagemap`, and hands parsed messages to the callback object `cb`.
    """
    # Wire command -> message class used to deserialize the payload.
    messagemap = {
        b"version": msg_version,
        b"verack": msg_verack,
        b"addr": msg_addr,
        b"alert": msg_alert,
        b"inv": msg_inv,
        b"getdata": msg_getdata,
        b"getblocks": msg_getblocks,
        b"tx": msg_tx,
        b"block": msg_block,
        b"getaddr": msg_getaddr,
        b"ping": msg_ping,
        b"pong": msg_pong,
        b"headers": msg_headers,
        b"getheaders": msg_getheaders,
        b"reject": msg_reject,
        b"mempool": msg_mempool,
        b"feefilter": msg_feefilter,
        b"sendheaders": msg_sendheaders
    }
    # Per-network start bytes that frame every message.
    MAGIC_BYTES = {
        "mainnet": b"\xf9\xbe\xb4\xd9",   # mainnet
        "testnet3": b"\x0b\x11\x09\x07",  # testnet3
        "regtest": b"\xfa\xbf\xb5\xda",   # regtest
    }
    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK):
        asyncore.dispatcher.__init__(self, map=mininode_socket_map)
        self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sendbuf = b""
        self.recvbuf = b""
        # Assume checksum-bearing protocol (>= 209) until negotiated down.
        self.ver_send = 209
        self.ver_recv = 209
        self.last_sent = 0
        self.state = "connecting"
        self.network = net
        self.cb = callback
        self.disconnect = False
        self.nServices = 0
        # stuff version msg into sendbuf
        vt = msg_version()
        vt.nServices = services
        vt.addrTo.ip = self.dstaddr
        vt.addrTo.port = self.dstport
        vt.addrFrom.ip = "0.0.0.0"
        vt.addrFrom.port = 0
        # pushbuf=True: queue before the socket is actually connected.
        self.send_message(vt, True)
        print('MiniNode: Connecting to Dankcoin Node IP # ' + dstaddr + ':' \
            + str(dstport))
        try:
            self.connect((dstaddr, dstport))
        except:
            # NOTE(review): bare except -- any connection failure is
            # converted into an immediate handle_close().
            self.handle_close()
        self.rpc = rpc
    def show_debug_msg(self, msg):
        self.log.debug(msg)
    def handle_connect(self):
        self.show_debug_msg("MiniNode: Connected & Listening: \n")
        self.state = "connected"
    def handle_close(self):
        """Tear down the connection and notify the callback object."""
        self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
                            % (self.dstaddr, self.dstport))
        self.state = "closed"
        self.recvbuf = b""
        self.sendbuf = b""
        try:
            self.close()
        except:
            pass
        self.cb.on_close(self)
    def handle_read(self):
        try:
            t = self.recv(8192)
            if len(t) > 0:
                self.recvbuf += t
                self.got_data()
        except:
            # NOTE(review): bare except silently drops read errors.
            pass
    def readable(self):
        return True
    def writable(self):
        # asyncore only polls for write-readiness while data is queued.
        with mininode_lock:
            length = len(self.sendbuf)
        return (length > 0)
    def handle_write(self):
        with mininode_lock:
            try:
                sent = self.send(self.sendbuf)
            except:
                self.handle_close()
                return
            self.sendbuf = self.sendbuf[sent:]
    def got_data(self):
        """Parse as many complete framed messages as recvbuf contains."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if self.ver_recv < 209:
                    # Pre-209 framing: magic + command + length, no checksum.
                    if len(self.recvbuf) < 4 + 12 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = None
                    if len(self.recvbuf) < 4 + 12 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4:4+12+4+msglen]
                    self.recvbuf = self.recvbuf[4+12+4+msglen:]
                else:
                    # Post-209 framing adds a 4-byte double-SHA256 checksum.
                    if len(self.recvbuf) < 4 + 12 + 4 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = self.recvbuf[4+12+4:4+12+4+4]
                    if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                    th = sha256(msg)
                    h = sha256(th)
                    if checksum != h[:4]:
                        raise ValueError("got bad checksum " + repr(self.recvbuf))
                    self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command in self.messagemap:
                    f = BytesIO(msg)
                    t = self.messagemap[command]()
                    t.deserialize(f)
                    self.got_message(t)
                else:
                    # NOTE(review): 'command' is bytes here; this str+bytes
                    # concatenation raises TypeError on Python 3 -- confirm.
                    self.show_debug_msg("Unknown command: '" + command + "' " +
                                        repr(msg))
        except Exception as e:
            print('got_data:', repr(e))
            # import traceback
            # traceback.print_tb(sys.exc_info()[2])
    def send_message(self, message, pushbuf=False):
        """Frame *message* and append it to the send buffer.

        :param pushbuf: allow queueing while still connecting (used for the
                        initial version message).
        """
        if self.state != "connected" and not pushbuf:
            raise IOError('Not connected, no pushbuf')
        self.show_debug_msg("Send %s" % repr(message))
        command = message.command
        data = message.serialize()
        tmsg = self.MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        if self.ver_send >= 209:
            # Checksum framing only exists from protocol 209 on.
            th = sha256(data)
            h = sha256(th)
            tmsg += h[:4]
        tmsg += data
        with mininode_lock:
            self.sendbuf += tmsg
            self.last_sent = time.time()
    def got_message(self, message):
        """Log a parsed message, keep-alive if idle, and deliver it."""
        if message.command == b"version":
            if message.nVersion <= BIP0031_VERSION:
                # NOTE(review): this rebinds the class-level messagemap
                # entry shared by all connections -- confirm intended.
                self.messagemap[b'ping'] = msg_ping_prebip31
        # Keep-alive: ping if nothing has been sent for 30 minutes.
        if self.last_sent + 30 * 60 < time.time():
            self.send_message(self.messagemap[b'ping']())
        self.show_debug_msg("Recv %s" % repr(message))
        self.cb.deliver(self, message)
    def disconnect_node(self):
        # Flagged here; the NetworkThread performs the actual close.
        self.disconnect = True
class NetworkThread(Thread):
    """Background thread that runs the asyncore event loop.

    Exits when mininode_socket_map becomes empty, i.e. when every
    connection has been closed.
    """
    def run(self):
        while mininode_socket_map:
            # We check for whether to disconnect outside of the asyncore
            # loop to workaround the behavior of asyncore when using
            # select
            disconnected = []
            for fd, obj in mininode_socket_map.items():
                if obj.disconnect:
                    disconnected.append(obj)
            [ obj.handle_close() for obj in disconnected ]
            # count=1: run a single poll iteration so the disconnect flags
            # are re-checked frequently.
            asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
    """Raised when a p2p or rpc connection drops before the test finishes."""
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
| |
#
# =================================================================
# =================================================================
"""
This is a Task Module that binds the host_seas Network REST API with
the DOM Model by creating AOMs that are used as view objects for
the user.
"""
from nova import db
from nova import exception
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.db.sqlalchemy import api as db_session
from oslo.config import cfg
from paxes_nova.db import api as dom_api
from paxes_nova.db.network import models as dom_model
from paxes_nova.network.common import exception as network_exception
from paxes_nova.network.common.api_suppress_mixin import NetworkAPISuppressor
from paxes_nova.virt.ibmpowervm.vif.common import ras
from nova.network import neutronv2
from nova import network
from paxes_nova import _
LOG = logging.getLogger(__name__)
DO_NOT_USE_STR = "do-not-use"
CONF = cfg.CONF
class HostSeasQuery(NetworkAPISuppressor):
"""
This class handles processing for the host-seas rest api call in PowerVC.
"""
def __init__(self):
    """Initialize the PowerVM API suppressor base and the net-API slot."""
    super(HostSeasQuery, self).__init__()
    # Cache slot for a nova network API client; presumably populated by
    # _build_network_api() -- TODO confirm against the full class.
    self._nova_net_api = None
@lockutils.synchronized('host_seas', 'host-seas-')
def get_host_seas(self, context, host_name=None, vswitch=None, vlan=None,
                  net_id=None, session=None):
    """
    This method will return a dictionary of data that represents the
    Shared Ethernet Adapter information for a given host or a set of hosts.
    If vlan or net_id are passed in, then only SEAs that are valid for the
    given vlan or net_id will be returned.

    :param context: The context for the request.
    :param host_name: The identifying name of the host to request the data
                      If None is passed in, a dictionary representing all
                      of the managed hosts will be found.
    :param vswitch: The vswitch that should be used to help identify the
                    default adapter.  If set to None (the default value),
                    only the VLAN ID will be used.  ie, all vswitches will
                    be candidates.
    :param vlan: The vlan that should be used to help identify the default
                 and candidate adapters.  If set to None (the default
                 value), a VLAN ID of 1 will be used.  This parameter will
                 be ignored if net_id is passed in.
    :param net_id: The network UUID for an existing neutron network.  If
                   this is passed in, then vlan will be ignored and the
                   vlan to use will be obtained from the neutron network.
    :param session: session to be used for db access
    :raise ComputeHostNotFound: if host_name is not a managed host.
    :raise InvalidID: if net_id does not identify an existing network.
    :return: A dictionary of host level Shared Ethernet Adapter data.  Ex:
        {
            "host-seas": [
                {
                    "host_name": "host1",
                    "adapters": [
                        {
                            "default": false,
                            "sea_name": "ent11",
                            "vswitch": "ETHERNET0",
                            "lpar_id": 1,
                            "ha_lpar_id": 2,
                            "ha_mode": "enabled",
                            "pvid": 1,
                            "state": "Available",
                            "ha_state": "Available",
                            "lpar_name": "10-23C2P",
                            "ha_lpar_name": "10-24C2P",
                            "ha_sea": "ent21"
                        }
                    ]
                },
                {
                    "host_name": "host2",
                    "adapters": [
                        {
                            "default": true,
                            "sea_name": "ent5",
                            "vswitch": "ETHERNET0",
                            "lpar_id": 1,
                            "ha_lpar_id": null,
                            "ha_mode": "disabled",
                            "pvid": 1,
                            "state": "Available",
                            "ha_state": null,
                            "lpar_name": "15-34B9Z",
                            "ha_lpar_name": null,
                            "ha_sea": null
                        }
                    ]
                }
            ]
        }
    """
    ras.function_tracepoint(LOG, __name__, ras.TRACE_INFO,
                            ras.vif_get_msg('info', 'GET_HOST_SEAS') %
                            {'host': host_name, 'vlan': vlan,
                             'vswitch': vswitch, 'net_id': net_id})
    # This class should only be used in PowerVM environments
    self.raise_if_not_powervm()
    if session is None:
        session = db_session.get_session()
    hosts = self._find_all_host_names(context)
    if host_name:
        if host_name in hosts:
            # Narrow the scan to just the requested host.
            hosts = [host_name]
        else:
            msg = (ras.vif_get_msg
                   ('info', 'HOST_NOT_FOUND') %
                   {'hostid': host_name})
            ras.function_tracepoint(
                LOG, __name__, ras.TRACE_INFO, msg)
            raise exception.ComputeHostNotFound(host=host_name)
    # Validate that the network exists.  An empty-string net_id is
    # deliberately validated too (and will fail), hence the == '' check.
    if net_id or net_id == '':
        try:
            network.API().get(context, net_id)
        except Exception:
            # BUGFIX: was a bare ``except:`` that also intercepted
            # SystemExit/KeyboardInterrupt before converting to InvalidID.
            raise exception.InvalidID(id=net_id)
    host_dict_list = []
    with session.begin():
        ports = None
        if net_id:
            # Performance optimization -- read network ports before loop
            # because this operation is expensive
            search_opts = {'network_id': net_id}
            network_data = network.API().list_ports(context, **search_opts)
            ports = network_data.get('ports', [])
        for host in hosts:
            resp = self._get_specific_host_seas(context, host, vswitch,
                                                vlan, net_id, session,
                                                ports)
            host_dict_list.append(resp)
    return {
        'host-seas': host_dict_list
    }
def _determine_vlan(self, vlan, net_id, host, context):
    """
    Determines the VLAN that should be used.

    :param vlan: The passed in VLAN from the user.  May be None.
    :param net_id: The neutron network identifier.  May be None.
    :param host: The host being queried.  String value
    :param context: The context for neutron queries
    :return: The VLAN identifier to use.  If both parameters are none,
             then None will be returned, indicating 'all seas' should be
             returned
    """
    # This is the scenario where the user wants all SEAs.
    if vlan is None and net_id is None:
        return None
    # If a network id is passed in, we will override any VLAN info
    # with the info from the network ID.
    if net_id:
        msg = (ras.vif_get_msg('info', 'NET_ID_SUPPLIED') %
               {'vlan_id': vlan, 'host_name': host, 'net_id': net_id})
        ras.function_tracepoint(LOG, __name__, ras.TRACE_INFO, msg)
        try:
            net_api = self._build_network_api()
            neutron_net = net_api.get(context, net_id)
        except neutronv2.exceptions.NeutronClientException as e:
            ras.function_tracepoint(LOG, __name__,
                                    ras.TRACE_ERROR, e.message)
            # We need to stop execution here. Since we didn't get
            # the client
            raise
        # NOTE(review): a falsy segmentation id (e.g. 0) falls through to
        # the 'vlan' key and then to the default of 1 -- confirm intended.
        if neutron_net and neutron_net.\
                get('provider:segmentation_id'):
            vlan = neutron_net.get('provider:segmentation_id', 1)
        elif neutron_net and neutron_net.get('vlan'):
            vlan = neutron_net.get('vlan', 1)
        else:
            # NOTE(review): the message is missing a space before
            # 'setting'; left untouched because it is a runtime string.
            msg = _("Couldn't retrieve VLAN associated with Net_id"
                    "setting default to 1")
            ras.function_tracepoint(LOG, __name__,
                                    ras.TRACE_WARNING, msg)
            vlan = 1
    # Return the passed in value, but make sure its an int
    return int(vlan)
    def _get_specific_host_seas(self, context, host, vswitch=None, vlan=None,
                                net_id=None, session=None, ports=None):
        """
        This method will return the SEA candidates for a given host, and only
        that host.

        The format of the response will be:
        {
            "host_name": "host2",
            "adapters": [
                {
                    "default": true,
                    "sea_name": "ent5",
                    "vswitch": "ETHERNET0",
                    "lpar_id": 1,
                    "ha_lpar_id": null,
                    "ha_mode": "disabled",
                    "pvid": 1,
                    "state": "Available",
                    "ha_state": null,
                    "lpar_name": "15-34B9Z",
                    "ha_lpar_name": null,
                    "ha_sea": null
                }
            ]
        }

        :param context: The context for the request.
        :param host: The host name (as a string)
        :param vswitch: The vswitch that should be used to help identify the
                        default adapter.  If set to None all of the vSwitches
                        will be utilized.
        :param vlan: The vlan that should be used to help identify the default
                     adapter.  If set to None (the default value), a VLAN ID of
                     1 will be used.
        :param net_id: The network UUID of a neutron Network.  This is optional
        :param session: The database session to use for the DOM queries.
        :param ports: An optional list of ports for the specified network.
        :returns: A dict with 'host_name' and 'adapters' keys as shown above.
        """
        # Build basic data to determine targets
        vio_servers = dom_api.vio_server_find_all(context, host, session)
        host_dom = dom_model.Host(host, vio_servers)
        vswitches = self._get_vswitches(host_dom.find_all_primary_seas())

        # If the network id was set, then we should always use that networks
        # vlan id instead of the passed in value.
        vlan = self._determine_vlan(vlan, net_id, host, context)

        # We need to determine if this network has any VMs on the host.  If so,
        # then we can't be set to Do not Use.  We also can't allow them to
        # change vSwitches.
        allow_do_not_use_option = False
        if net_id:
            # We only allow the do not use option if the VM count for this
            # network is 0.  Otherwise a VM is using it, and we can't flip
            # to do not use until all the VMs are done.
            vm_list = dom_api.instances_find(context, host, net_id, ports)
            allow_do_not_use_option = (len(vm_list) == 0)

            # As noted above...if the network association has VMs, then we
            # can't let the user change the vSwitch that the networks are on.
            # Therefore, set the specific_vswitch so that the candidate_sea
            # loop won't take any other vSwitches into account.
            if len(vm_list) > 0:
                net_assn = dom_api.network_association_find(context,
                                                            host_dom.host_name,
                                                            net_id, session)
                # If there is a network association, just override the vSwitch
                # list with the network associations vSwitch.  This limits it
                # to a single vSwitch search scope.
                if net_assn and net_assn.sea:
                    vswitches = [net_assn.sea.primary_vea.vswitch_name]
        else:
            # If there was not a network id, then we assume that this is a
            # new network and therefore do not use should always be returned.
            allow_do_not_use_option = True

        # Variable to store all candidate SEAs
        candidate_seas = []

        # Walk through the vswitches on this host and determine the valid
        # candidates (broken into pools defined by the vswitches).
        for vswitch_name in vswitches:
            # If the user passed in a vswitch, and we don't match, continue
            if vswitch is not None and vswitch != vswitch_name:
                continue

            # Extend the candidates
            candidate_seas.extend(self._get_candidate_seas_for_vswitch(
                host_dom, vswitch_name, vlan))

        # Now we need to find the default adapter...may be None, which
        # indicates that it is a do not use.
        default_sea = self._find_default_adapter(host_dom, candidate_seas,
                                                 net_id, vlan, context,
                                                 session)

        # If the default sea is not selected, and there's only one vswitch
        # we need to determine a default adapter for this VLAN and create
        # that relationship.
        if(default_sea is None and not allow_do_not_use_option and net_id):
            vswitch_with_vlan = []
            # NOTE(review): this loop rebinds the 'vswitch' parameter, and
            # despite its name 'vswitch_with_vlan' collects SEA objects,
            # not vSwitch names -- confirm intent before refactoring.
            for vswitch in vswitches:
                sea = host_dom.find_sea_for_vlan_vswitch(vlan, vswitch)
                if sea:
                    vswitch_with_vlan.append(sea)
            # We would like to set this as the default since in this
            # present call to host-seas - we need to report a default
            if len(vswitch_with_vlan) == 1:
                default_sea = vswitch_with_vlan[0]
                dom_api.network_association_put_sea(context, host, net_id,
                                                    vswitch_with_vlan[0],
                                                    session)

        # Now, build the adapter list to return
        adapter_list = []
        # A 'do-not-use' pseudo adapter is listed first when allowed, or
        # when more than one vSwitch is in play.
        if allow_do_not_use_option or len(vswitches) > 1:
            adapter_list.append(self._format_sea_to_dict_response
                                (host_dom,
                                 None,
                                 default_sea is None))
        # Make sure the default adapter itself is always in the output.
        if default_sea and default_sea not in candidate_seas:
            candidate_seas.append(default_sea)
        for sea in candidate_seas:
            adapter_list.append(self._format_sea_to_dict_response
                                (host_dom, sea,
                                 default_sea == sea))

        msg = (ras.vif_get_msg('info', 'HOST_SEAS_RETURN') %
               {'vlan': vlan,
                'host_name': host,
                'list': adapter_list})

        ras.function_tracepoint(
            LOG, __name__, ras.TRACE_INFO, msg)

        return {
            'host_name': host_dom.host_name,
            'adapters': adapter_list
        }
    def _get_candidate_seas_for_vswitch(self, host_dom, vswitch, vlan):
        """
        Returns the dictionary responses for a given vSwitch on the system.
        Will return a list of SEA DOM objects that are valid for the given
        vlan.

        :param host_dom: The Host DOM object for the element.
        :param vswitch: The name of the vSwitch
        :param vlan: Optional VLAN for the request.  May be set to None
        :return: List of SEAs that are valid candidates.  May be empty.
        """
        # If the VLAN is specified, make sure that the VLAN is not on an orphan
        # VEA or the pvid of a control channel VEA.
        if vlan and vlan in host_dom.get_unusable_vlanids(vswitch):
            return []

        # Find all of the SEAs for this vSwitch
        all_seas = host_dom.find_all_primary_seas()
        specific_seas = []
        for sea in all_seas:
            # if the vswitches don't match, continue on
            if sea.primary_vea.vswitch_name != vswitch:
                continue

            # Need to check to see if the vlan passed in matches the pvid
            # or additional VLANs on the primary VEA.  If so, our only
            # candidate for this vSwitch is this SEA.
            if vlan and\
                    (sea.primary_vea.pvid == vlan or
                     vlan in sea.primary_vea.addl_vlan_ids):
                return [sea]

            # If they're asking for vlan 1, it can only be used if it's the
            # pvid of an SEA (K2 does not allow VLAN 1 to be tagged).
            # Therefore, if we get here with vlan 1, we know it wasn't the pvid
            # because that would have been caught above and we should skip
            # this SEA.  If no SEA's on this host are found with vlan 1, then
            # the candidates list returned will be empty, thus causing this
            # host to properly be marked do-not-use.
            if vlan != 1:
                specific_seas.append(sea)

        LOG.debug('_get_candidate_seas_for_vswitch is returning %s' %
                  specific_seas)
        return specific_seas
def _get_vswitches(self, primary_seas_all):
"""
Lists all of the unique vswitches from the SEAsx
param: primary_seas_all: all the seas of a given vios
return: a List of all the unique vSwitch anmes
"""
vswitch_resp = []
for sea in primary_seas_all:
vswitch = sea.primary_vea.vswitch_name
if vswitch not in vswitch_resp:
vswitch_resp.append(vswitch)
LOG.debug('_get_vswitches is returning %s' % vswitch_resp)
return vswitch_resp
    def _find_default_adapter(self, host_dom, candidate_seas, net_id, vlan,
                              context, session):
        """
        Will return the adapter from 'candidate_seas' that is the appropriate
        default adapter.

        Precedence order: explicit network association, then a peer network
        sharing the same VLAN, then an SEA carrying the VLAN, then the
        available SEA with the lowest pvid.

        :param host_dom: The DOM for the Host
        :param candidate_seas: All of the candidate SEA DOM objects.
        :param net_id: The neutron network id (optional).
        :param vlan: The VLAN for the query.  Optional.
        :param context: The context for all queries
        :param session: The database session
        :return: The sea from the candidate_seas list that is the default.  If
                 None is returned, then 'do-not-use' is the default.
        """
        LOG.debug('_find_default_adapter starting..')
        # First check to see is if a network association exists already,
        # and if so, return that.
        if net_id:
            net_assn = dom_api.network_association_find(context,
                                                        host_dom.host_name,
                                                        net_id, session)
            if net_assn:
                # If the network association has 'none' set for the SEA, then
                # return None
                if net_assn.sea is None:
                    LOG.debug('returning None')
                    return None
                else:
                    # Find the corresponding SEA from our query and return
                    # that.  Matching is by SEA name plus owning VIOS lpar id.
                    for sea in candidate_seas:
                        if sea.name == net_assn.sea.name and\
                                sea.vio_server.lpar_id == \
                                net_assn.sea.vio_server.lpar_id:
                            LOG.debug('returning %s' % sea)
                            return sea

        # Next up, check to see if there is a peer!
        # A peer network is one that shares the same VLAN id as this one.
        # NOTE: This isn't a true peer as we're disregarding vswitch here.  We
        # just know by this point that no SEA was associated with the passed
        # in net_id, so we are free to pick any vswitch that fits the bill.
        peer_sea = self._find_peer_network_sea(context, host_dom.host_name,
                                               vlan, session)
        if peer_sea:
            # Find the corresponding SEA from our candidate list.
            for sea in candidate_seas:
                if sea.name == peer_sea.name and\
                        sea.vio_server.lpar_id == peer_sea.vio_server.lpar_id:
                    LOG.debug('returning %s' % sea)
                    return sea

        # Next up is to check the SEAs to see if any contains the VLAN passed
        # in.
        if vlan:
            for sea in candidate_seas:
                if sea.pvid == vlan or vlan in sea.additional_vlans():
                    LOG.debug('returning %s' % sea)
                    return sea

        # Lastly, we just have to return the lowest adapter...
        return self._find_default_with_no_vlan(candidate_seas)
def _find_default_with_no_vlan(self, primary_seas):
"""
This method finds the default adapter when there's no vlanid specified.
The default SEA is the one that has the lowest pvid in all the primary
seas.
:param: primary_seas: This is a list of all primary_seas for a given
host
:return: A default adapter. Note there can be only one in this case?
"""
lowest_sea = None
# A None should never be returned however
low = 4096
for i in range(0, len(primary_seas)):
# if the sea is not available - we need to find the next available
if primary_seas[i].pvid < low and primary_seas[i].is_available():
low = primary_seas[i].pvid
lowest_sea = primary_seas[i]
msg = (ras.vif_get_msg
('info', 'LOWEST_PVID') % {'lowest_sea':
lowest_sea.name})
ras.function_tracepoint(LOG, __name__, ras.TRACE_INFO,
msg)
# Let's say that none of the seas are available, in this case we pick
# anyone and return
if lowest_sea is None and len(primary_seas) >= 1:
lowest_sea = primary_seas[0]
LOG.info(_('None of the seas are in available state, picking %s'
'as default' % lowest_sea.name))
return lowest_sea
    def _format_sea_to_dict_response(self, host, sea, default=False):
        """
        Will format a shared ethernet adapter to the corresponding REST API
        response dictionary.

        :param host: The owning Host object for the SEA.
        :param sea: The SharedEthernetAdapter to format.  May be None, in
                    which case the 'do-not-use' placeholder entry is built.
        :param default: If set to true, will set the default
                        attribute of the SEA to True.  Defaults to False.
        :return: A dict describing the SEA (and its HA partner, if any).
        """
        # Set a default HA attributes of none.
        ha_lpar_name = None
        ha_sea = None
        ha_mode = 'disabled'
        ha_state = None
        ha_lpar_id = None

        # Placeholder values used when sea is None ('do-not-use' entry).
        sea_name = DO_NOT_USE_STR
        vswitch = DO_NOT_USE_STR
        pri_lparid = 0
        pri_lpar_name = None
        pvid = None
        state = "Not-Applicable"

        if sea:
            primary_sea_vios = sea.vio_server
            pri_lparid = primary_sea_vios.lpar_id
            pri_lpar_name = primary_sea_vios.lpar_name
            vswitch = sea.get_primary_vea().vswitch_name
            sea_name = sea.name
            pvid = sea.pvid

            if sea.is_available():
                state = "available"
            else:
                state = "unavailable"

            sea_chain = host.find_sea_chain_for_sea(sea)
            if len(sea_chain) > 1:
                # Two or more in the SEA chain indicates HA mode; the second
                # element is treated as the HA partner.
                ha_sea_obj = sea_chain[1]
                ha_vios = ha_sea_obj.vio_server
                ha_lpar_id = ha_vios.lpar_id
                ha_lpar_name = ha_vios.lpar_name
                ha_sea = ha_sea_obj.name
                ha_mode = 'enabled'
                if ha_sea_obj.is_available():
                    ha_state = "available"
                else:
                    ha_state = "unavailable"

        # Return the formated data back to the user.
        return {
            'default': default,
            'sea_name': sea_name,
            'vswitch': vswitch,
            'lpar_id': pri_lparid,
            'lpar_name': pri_lpar_name,
            'pvid': pvid,
            'state': state,
            'ha_lpar_id': ha_lpar_id,
            'ha_lpar_name': ha_lpar_name,
            'ha_sea': ha_sea,
            'ha_mode': ha_mode,
            'ha_state': ha_state
        }
def _find_all_host_names(self, context):
"""
This returns a list of compute nodes after querying the Nova DB
:param context: A context object that is used to authorize the DB
access.
:returns: A set of compute nodes.
"""
compute_nodes = db.compute_node_get_all(context)
return_nodes = []
for compute in compute_nodes:
if not compute['service']:
ras.trace(LOG, __name__, ras.TRACE_WARNING,
_('No service for compute ID %s' % compute['id']))
continue
host_to_send = compute['service']['host']
return_nodes.append(host_to_send)
return return_nodes
def _find_peer_network_sea(self, context, host_name, vlanid, session):
"""
This method finds whether a given vlanid exists for a host network or
not. If it finds that this VLAN is part of an existing network, it
returns the corresponding SEA.
:param context: The context used to call the dom API
:param host_id: The host_id of the host for which the network
associations are fetched.
:param vlanid: The vlan id for which the match is needed.
:param session: The database session object.
:returns: either an SEA or None
"""
# The VLAN ID may be none here, indicating a pull for all SEAs.
if vlanid is None:
return None
network_associations = dom_api.\
network_association_find_all(context, host_name, session)
# net_map should always be of size 1
for network_association in network_associations:
if network_association:
try:
net_api = self._build_network_api()
neutron_net = net_api.get(context,
network_association.
neutron_net_id)
except Exception as e:
# Will throw an exception if the neutron client could
# not be found
ras.trace(LOG, __name__, ras.TRACE_WARNING,
_('Neutron client not found for net_id %s' %
network_association.neutron_net_id))
if 'neutron_net' in locals() and neutron_net:
if neutron_net.get("provider:segmentation_id") == vlanid:
# There should be only one SEA per VLAN so we return
# as soon as we find one.
return network_association.sea
if neutron_net.get("vlan") == vlanid:
return network_association.sea
return None
    def _build_network_api(self):
        """
        Builds a nova network API.

        Lazily constructs the API on first call and caches it on the
        instance for subsequent calls.

        :returns: Nova Network API
        """
        if not self._nova_net_api:
            self._nova_net_api = network.API()
        return self._nova_net_api
| |
from collections import OrderedDict
from itertools import zip_longest
from typing import Iterable, Sequence, Tuple, Union
from lxml import etree
from requests import Response
from requests_toolbelt.multipart.decoder import BodyPart
from rets.errors import RetsParseError, RetsApiError, RetsResponseError
from rets.http.data import Metadata, SearchResult, SystemMetadata
DEFAULT_ENCODING = 'utf-8'
ResponseLike = Union[Response, BodyPart]
def parse_xml(response: ResponseLike) -> etree.Element:
    """Parse a RETS XML response body and validate its reply status.

    :param response: A requests Response or multipart BodyPart.
    :raises RetsResponseError: if the body could not be parsed as XML.
    :raises RetsApiError: if the RETS reply code indicates failure.
    :return: The parsed root element.
    """
    encoding = response.encoding or DEFAULT_ENCODING
    try:
        # recover=True lets lxml tolerate mildly malformed XML.
        root = etree.fromstring(response.content.decode(encoding), parser=etree.XMLParser(recover=True))
    except ValueError as e:
        # lxml rejects decoded str input that still carries an XML encoding
        # declaration.  NOTE(review): matching the exact message text is
        # fragile across lxml versions -- verify on upgrade.
        if str(e) == "Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.":
            # parse bytes directly, rather than from string
            root = etree.XML(response.content)
        else:
            raise e

    if root is None:
        raise RetsResponseError(response.content, response.headers)

    reply_code, reply_text = _parse_rets_status(root)
    # Only raise on a non-zero reply code whose text is not the literal
    # success string; a non-zero code paired with "Operation Successful"
    # is tolerated.
    if reply_code and reply_text != "Operation Successful":
        raise RetsApiError(reply_code, reply_text, response.content)
    return root
def parse_capability_urls(response: Response) -> dict:
    """
    Parses the list of capability URLs from the response of a successful
    Login transaction.

    The capability url list is the set of functions or URLs to which the
    Login grants access.  A capability consists of a key and a URL.  The
    list returned from the server in the login reply must include URLs for
    Search, Login, and GetMetadata, and optionally may include URLs for
    Action, ChangePassword, GetObject, LoginComplete, Logout,
    ServerInformation, and Update.

    <RETS ReplyCode="0" ReplyText="Success">
        <RETS-RESPONSE>
            MemberName=member_name
            User=user_id,user_level,user_class,agent_code
            Broker=RETSOFFIC
            MetadataVersion=01.09.02991
            MetadataTimestamp=2016-11-24T05:24:06Z
            MinMetadataTimestamp=2016-11-24T05:24:06Z
            Login=/rets2_1/Login
            Search=/rets2_1/Search
            GetMetadata=/rets2_1/GetMetadata
            GetObject=/rets2_1/GetObject
            Logout=/rets2_1/Logout
        </RETS-RESPONSE>
    </RETS>
    """
    root = parse_xml(response)
    response_elem = root.find('RETS-RESPONSE')
    if response_elem is None:
        return {}
    capabilities = {}
    for line in response_elem.text.strip().split('\n'):
        # Each line is "Key=Value"; split on the first '=' only, since the
        # value (a URL or csv) may itself contain '='.
        key, value = line.split('=', 1)
        capabilities[key.strip()] = value.strip()
    return capabilities
def parse_metadata(response: Response) -> Sequence[Metadata]:
    """
    Parse the information from a GetMetadata transaction.

    <RETS ReplyCode="0" ReplyText="Success">
        <METADATA-RESOURCE Date="2016-11-24T05:24:06Z" Version="01.09.02991">
            <COLUMNS>	ResourceID	StandardName	</COLUMNS>
            <DATA>	ActiveAgent	ActiveAgent	</DATA>
            <DATA>	Office	Office	</DATA>
            <DATA>	OpenHouse	OpenHouse	</DATA>
            <DATA>	Property	Property	</DATA>
            <DATA>	RentalSchedule	RentalSchedule	</DATA>
        </METADATA-RESOURCE>
    </RETS>

    :return: A tuple of Metadata, one per METADATA-* container element.
             Empty when the response carries no metadata containers.
    """
    elem = parse_xml(response)
    # Metadata containers are the top-level children named METADATA-<TYPE>.
    # (The old `if metadata_elems is None` guard was dead code: a list
    # comprehension is never None; an empty list already yields ().)
    metadata_elems = [e for e in elem.findall('*') if e.tag.startswith('METADATA-')]

    def parse_metadata_elem(elem: etree.Element) -> Metadata:
        """ Parses a single <METADATA-X> element """
        return Metadata(
            # Tag is e.g. 'METADATA-RESOURCE'; the type is what follows '-'.
            type_=elem.tag.split('-', 1)[1],
            resource=elem.get('Resource'),
            class_=elem.get('Class'),
            data=tuple(_parse_data(elem)),
        )

    return tuple(parse_metadata_elem(metadata_elem) for metadata_elem in metadata_elems)
def parse_system(response: Response) -> SystemMetadata:
    """
    Parse the server system information from a SYSTEM GetMetadata transaction.

    <RETS ReplyCode="0" ReplyText="Success">
        <METADATA-SYSTEM Date="2016-11-24T05:24:06Z" Version="01.09.02991">
            <SYSTEM SystemDescription="ARMLS" SystemID="az" TimeZoneOffset="-06:00"/>
            <COMMENTS/>
        </METADATA-SYSTEM>
    </RETS>

    :raises RetsParseError: if METADATA-SYSTEM or SYSTEM is missing.
    """
    elem = parse_xml(response)
    metadata_system_elem = _find_or_raise(elem, 'METADATA-SYSTEM')
    system_elem = _find_or_raise(metadata_system_elem, 'SYSTEM')
    comments_elem = metadata_system_elem.find('COMMENTS')
    # BUG FIX: the old `comments_elem and (...)` relied on Element
    # truthiness, which is based on child count -- so a childless
    # <COMMENTS> element (even one containing text) short-circuited and
    # assigned the Element object itself.  Test against None explicitly.
    if comments_elem is not None:
        comments = comments_elem.text or None
    else:
        comments = None
    return SystemMetadata(
        system_id=system_elem.get('SystemID'),
        system_description=system_elem.get('SystemDescription'),
        system_date=metadata_system_elem.get('Date'),
        system_version=metadata_system_elem.get('Version'),

        # Optional fields
        time_zone_offset=system_elem.get('TimeZoneOffset'),
        comments=comments,
    )
def parse_search(response: Response) -> SearchResult:
    """Parse the response of a Search transaction into a SearchResult.

    A server reply code of 20201 (no records found) is translated into an
    empty result rather than an exception.
    """
    try:
        elem = parse_xml(response)
    except RetsApiError as e:
        if e.reply_code == 20201:  # No records found
            return SearchResult(0, False, ())
        raise

    count_elem = elem.find('COUNT')
    if count_elem is not None:
        count = int(count_elem.get('Records'))
    else:
        # Count is only present when the query requested it.
        count = None

    try:
        data = tuple(_parse_data(elem))
    except RetsParseError:
        # A count-only response carries no COLUMNS/DATA payload.
        data = None

    return SearchResult(
        count=count,
        # python xml.etree.ElementTree.Element objects are always considered false-y
        max_rows=elem.find('MAXROWS') is not None,
        data=data,
    )
def _parse_rets_status(root: etree.Element) -> Tuple[int, str]:
    """Return (reply_code, reply_text) for a parsed RETS response.

    If a RETS-STATUS element exists, the client must use it instead of the
    status from the body-start-line.
    """
    status_elem = root.find('RETS-STATUS')
    if status_elem is None:
        status_elem = root
    return int(status_elem.get('ReplyCode')), status_elem.get('ReplyText')
def _parse_data(elem: etree.Element) -> Iterable[dict]:
    """
    Parses a generic container element enclosing a single COLUMNS and multiple DATA elems, and
    returns a generator of dicts with keys given by the COLUMNS elem and values given by each
    DATA elem. The container elem may optionally contain a DELIMITER elem to define the delimiter
    used, otherwise a default of '\t' is assumed.

    <RETS ReplyCode="0" ReplyText="Success">
        <DELIMITER value="09"/>
        <COLUMNS>	LIST_87	LIST_105	LIST_1	</COLUMNS>
        <DATA>	2016-12-01T00:08:10	5489015	20160824051756837742000000	</DATA>
        <DATA>	2016-12-01T00:10:02	5497756	20160915055426038684000000	</DATA>
        <DATA>	2016-12-01T00:10:26	5528935	20161123230848928777000000	</DATA>
        <DATA>	2016-12-01T00:10:52	5528955	20161123234916869427000000	</DATA>
        <DATA>	2016-12-01T00:14:31	5530021	20161127221848669500000000	</DATA>
    </RETS>

    :raises RetsParseError: if the COLUMNS element is missing.
    :return: A lazy generator; rows are only parsed as they are consumed.
    """
    delimiter = _parse_delimiter(elem)

    columns_elem = _find_or_raise(elem, 'COLUMNS')
    columns = _parse_data_line(columns_elem, delimiter)

    data_elems = elem.findall('DATA')

    # zip_longest pads with None, so a short DATA row still yields every
    # column key (with None values) rather than silently dropping columns.
    return (OrderedDict(zip_longest(columns, _parse_data_line(data, delimiter)))
            for data in data_elems)
def _find_or_raise(elem: etree.Element, child_elem_name: str) -> etree.Element:
    """Return the named direct child of *elem*, raising if it is absent."""
    found = elem.find(child_elem_name)
    if found is not None:
        return found
    raise RetsParseError('Missing %s element' % child_elem_name)
def _parse_data_line(elem: etree.Element, delimiter: str = '\t') -> Sequence[str]:
    """Split a COMPACT-format line into its field values.

    DATA elems using the COMPACT format and COLUMN elems all start and end
    with delimiters, so the empty first and last fields are dropped.
    """
    fields = elem.text.split(delimiter)
    return fields[1:-1]
def _parse_delimiter(elem: etree.Element) -> str:
    """Return the field delimiter declared by an optional DELIMITER child.

    The 'value' attribute holds the character's decimal ordinal (e.g. "09"
    for tab); tab is the default when no DELIMITER element is present.
    """
    declared = elem.find('DELIMITER')
    if declared is None:
        return '\t'
    return chr(int(declared.get('value')))
| |
import json
from sqlalchemy import (
Column, Integer, String, Text, Boolean,
)
from superset import utils
from superset.models.helpers import AuditMixinNullable, ImportMixin
class BaseDatasource(AuditMixinNullable, ImportMixin):

    """A common interface to objects that are queryable (tables and datasources)"""

    # ---------------------------------------------------------------
    # class attributes to define when deriving BaseDatasource
    # ---------------------------------------------------------------
    __tablename__ = None  # {connector_name}_datasource
    type = None  # datasoure type, str to be defined when deriving this class
    baselink = None  # url portion pointing to ModelView endpoint
    column_class = None  # link to derivative of BaseColumn
    metric_class = None  # link to derivative of BaseMetric

    # Used to do code highlighting when displaying the query in the UI
    query_language = None

    name = None  # can be a Column or a property pointing to one

    # ---------------------------------------------------------------
    # Columns
    id = Column(Integer, primary_key=True)
    description = Column(Text)
    default_endpoint = Column(Text)
    is_featured = Column(Boolean, default=False)  # TODO deprecating
    filter_select_enabled = Column(Boolean, default=False)
    offset = Column(Integer, default=0)
    cache_timeout = Column(Integer)
    params = Column(String(1000))
    perm = Column(String(1000))

    # placeholder for a relationship to a derivative of BaseColumn
    columns = []
    # placeholder for a relationship to a derivative of BaseMetric
    metrics = []

    @property
    def uid(self):
        """Unique id across datasource types"""
        return "{self.id}__{self.type}".format(**locals())

    @property
    def column_names(self):
        """Sorted names of all columns of this datasource."""
        return sorted([c.column_name for c in self.columns])

    @property
    def main_dttm_col(self):
        """Name of the main datetime column; subclasses may override."""
        return "timestamp"

    @property
    def groupby_column_names(self):
        """Sorted names of the columns flagged as groupable."""
        return sorted([c.column_name for c in self.columns if c.groupby])

    @property
    def filterable_column_names(self):
        """Sorted names of the columns flagged as filterable."""
        return sorted([c.column_name for c in self.columns if c.filterable])

    @property
    def dttm_cols(self):
        """Datetime columns; empty here, meant to be overridden."""
        return []

    @property
    def url(self):
        """Edit URL for this datasource in the ModelView."""
        return '/{}/edit/{}'.format(self.baselink, self.id)

    @property
    def explore_url(self):
        """URL of the explore view, honoring a configured default endpoint."""
        if self.default_endpoint:
            return self.default_endpoint
        else:
            return "/superset/explore/{obj.type}/{obj.id}/".format(obj=self)

    @property
    def column_formats(self):
        """Mapping of metric name to its d3 number format, when set."""
        return {
            m.metric_name: m.d3format
            for m in self.metrics
            if m.d3format
        }

    @property
    def metrics_combo(self):
        """(metric_name, label) pairs sorted by label, for select widgets."""
        return sorted(
            [
                (m.metric_name, m.verbose_name or m.metric_name)
                for m in self.metrics],
            key=lambda x: x[1])

    @property
    def data(self):
        """Data representation of the datasource sent to the frontend"""
        order_by_choices = []
        # Each sortable column yields an ascending and a descending choice,
        # encoded as a JSON [column, ascending] pair.
        for s in sorted(self.column_names):
            order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
            order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))

        # Map raw metric/column names to their human-readable labels.
        verbose_map = {
            o.metric_name: o.verbose_name or o.metric_name
            for o in self.metrics
        }
        verbose_map.update({
            o.column_name: o.verbose_name or o.column_name
            for o in self.columns
        })
        return {
            'all_cols': utils.choicify(self.column_names),
            'column_formats': self.column_formats,
            'edit_url': self.url,
            'filter_select': self.filter_select_enabled,
            'filterable_cols': utils.choicify(self.filterable_column_names),
            'gb_cols': utils.choicify(self.groupby_column_names),
            'id': self.id,
            'metrics_combo': self.metrics_combo,
            'name': self.name,
            'order_by_choices': order_by_choices,
            'type': self.type,
            'metrics': [o.data for o in self.metrics],
            'columns': [o.data for o in self.columns],
            'verbose_map': verbose_map,
        }

    def get_query_str(self, query_obj):
        """Returns a query as a string

        This is used to be displayed to the user so that she/he can
        understand what is taking place behind the scene"""
        raise NotImplementedError()

    def query(self, query_obj):
        """Executes the query and returns a dataframe

        query_obj is a dictionary representing Superset's query interface.
        Should return a ``superset.models.helpers.QueryResult``
        """
        raise NotImplementedError()

    def values_for_column(self, column_name, limit=10000):
        """Given a column, returns an iterable of distinct values

        This is used to populate the dropdown showing a list of
        values in filters in the explore view"""
        raise NotImplementedError()
class BaseColumn(AuditMixinNullable, ImportMixin):
    """Interface for column"""

    __tablename__ = None  # {connector_name}_column

    id = Column(Integer, primary_key=True)
    column_name = Column(String(255))
    verbose_name = Column(String(1024))
    is_active = Column(Boolean, default=True)
    type = Column(String(32))
    groupby = Column(Boolean, default=False)
    count_distinct = Column(Boolean, default=False)
    sum = Column(Boolean, default=False)
    avg = Column(Boolean, default=False)
    max = Column(Boolean, default=False)
    min = Column(Boolean, default=False)
    filterable = Column(Boolean, default=False)
    description = Column(Text)

    # [optional] Set this to support import/export functionality
    export_fields = []

    def __repr__(self):
        return self.column_name

    # Substrings of the (upper-cased) `type` string used to classify columns.
    num_types = (
        'DOUBLE', 'FLOAT', 'INT', 'BIGINT',
        'LONG', 'REAL', 'NUMERIC', 'DECIMAL'
    )
    date_types = ('DATE', 'TIME', 'DATETIME')
    str_types = ('VARCHAR', 'STRING', 'CHAR')

    # NOTE: the classification properties below used any([...]) with a
    # throwaway list; a generator expression short-circuits instead.

    @property
    def is_num(self):
        """Whether the column's declared type looks numeric."""
        if not self.type:
            return self.type
        type_upper = self.type.upper()
        return any(t in type_upper for t in self.num_types)

    @property
    def is_time(self):
        """Whether the column's declared type looks temporal."""
        if not self.type:
            return self.type
        type_upper = self.type.upper()
        return any(t in type_upper for t in self.date_types)

    @property
    def is_string(self):
        """Whether the column's declared type looks string-like."""
        if not self.type:
            return self.type
        type_upper = self.type.upper()
        return any(t in type_upper for t in self.str_types)

    @property
    def expression(self):
        """SQL/derivation expression for the column; subclasses define it."""
        raise NotImplementedError()

    @property
    def data(self):
        """Dict representation of the column sent to the frontend."""
        attrs = ('column_name', 'verbose_name', 'description', 'expression')
        return {s: getattr(self, s) for s in attrs}
class BaseMetric(AuditMixinNullable, ImportMixin):

    """Interface for Metrics"""

    __tablename__ = None  # {connector_name}_metric

    id = Column(Integer, primary_key=True)
    metric_name = Column(String(512))
    verbose_name = Column(String(1024))
    metric_type = Column(String(32))
    description = Column(Text)
    is_restricted = Column(Boolean, default=False, nullable=True)
    d3format = Column(String(128))

    """
    The interface should also declare a datasource relationship pointing
    to a derivative of BaseDatasource, along with a FK

    datasource_name = Column(
        String(255),
        ForeignKey('datasources.datasource_name'))
    datasource = relationship(
        # needs to be altered to point to {Connector}Datasource
        'BaseDatasource',
        backref=backref('metrics', cascade='all, delete-orphan'),
        enable_typechecks=False)
    """

    @property
    def perm(self):
        """Permission string for the metric; subclasses must define it."""
        raise NotImplementedError()

    @property
    def expression(self):
        """SQL/derivation expression for the metric; subclasses define it."""
        raise NotImplementedError()

    @property
    def data(self):
        """Dict representation of the metric sent to the frontend."""
        attrs = ('metric_name', 'verbose_name', 'description', 'expression')
        return {s: getattr(self, s) for s in attrs}
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Cutting Edge QA Marcin Koperski
import os
import os.path
import time
from robot.api import logger
from robot.libraries import DateTime
from selenium.webdriver import FirefoxProfile, ChromeOptions
from selenium.webdriver.common.keys import Keys
from TestToolsMK.robot_instances import validate_create_artifacts_dir, sl, bi
try:
# noinspection PyCompatibility
from urlparse import urljoin
except ImportError: # python3
# noinspection PyCompatibility,PyUnresolvedReferences
from urllib.parse import urljoin
# noinspection PyProtectedMember
class SeleniumLibraryKeywords(object):
    """Extension keywords layered on SeleniumLibrary (via sl()) and
    Robot Framework BuiltIn (via bi())."""

    # Default window geometry used by the browser setup keywords.
    WIDTH_DEFAULT = "1366"
    HEIGHT_DEFAULT = "768"
    # Defaults for execution speed, browser ("ff" = Firefox) and timeout.
    SELENIUM_SPEED = "0 sec"
    SELENIUM_TEST_BROWSER = "ff"
    SELENIUM_TIMEOUT = "5 s"
    # JS snippet: lazily injects the llamalab XPath2 shim into the page
    # head if jQuery is absent.  NOTE(review): loads from a third-party
    # http:// CDN at runtime.
    # noinspection PyPep8
    XPATH2_JS = 'if(!window.jQuery){var headID = document.getElementsByTagName("head")[0]; var newScript = document.createElement(\'script\'); newScript.type=\'text/javascript\'; newScript.src=\'http://llamalab.com/js/xpath/minified/XPath.js\'; headID.appendChild(newScript);}'
    # JS snippet: lazily injects jQuery 2.1.4 into the page head if absent.
    # noinspection PyPep8
    JQUERY_JS = "if(!window.jQuery){var headID = document.getElementsByTagName(\"head\")[0]; var newScript = document.createElement('script'); newScript.type='text/javascript'; newScript.src='http://code.jquery.com/jquery-2.1.4.min.js'; headID.appendChild(newScript);}"
    @staticmethod
    def open_new_tab(url):
        """Hack: uses Control+t on the page body to open a new tab, then
        navigates it to *url*.

        NOTE(review): the fixed 2 second sleep assumes the tab opens in
        time; keyboard tab shortcuts are browser-dependent -- verify on the
        target browser.
        """
        driver = sl().driver
        body = driver.find_element_by_tag_name("body")
        body.send_keys(Keys.CONTROL + 't')
        time.sleep(2)
        sl().go_to(url)
    @staticmethod
    def switch_tab_by_id(id_tab):
        """Hack: uses Control+1, 2, 3 etc. on the page body to switch tabs.

        :param id_tab: The tab number as a string character (e.g. '2').
        """
        driver = sl().driver
        body = driver.find_element_by_tag_name("body")
        body.send_keys(Keys.CONTROL + id_tab)
        # Fixed wait for the browser to complete the tab switch.
        time.sleep(4)
        # actions = ActionChains(driver)
        # actions.key_down(Keys.CONTROL).key_down(Keys.TAB).key_up(Keys.TAB).key_up(Keys.CONTROL).perform()
    @staticmethod
    def press_key_python(command, locator="//body", strategy="XPATH"):
        """Hack !!! example argument | Keys.CONTROL + 't' | Keys.TAB + Keys.SHIFT

        Sends the key expression to the element found by *strategy*/*locator*.

        SECURITY NOTE(review): both arguments are passed to eval(), so this
        keyword executes arbitrary Python from test data -- never expose it
        to untrusted input.
        NOTE(review): `By` is referenced here but does not appear in this
        module's visible imports -- confirm it is in scope.
        """
        driver = sl().driver
        element = driver.find_element(eval("By." + strategy), locator)
        element.send_keys(eval(command))
@staticmethod
def close_tab():
"""Hack it use Control +w to close tab"""
driver = sl().driver
body = driver.find_element_by_tag_name("body")
body.send_keys(Keys.CONTROL + 'w')
@staticmethod
def set_browser_size_and_position(width=WIDTH_DEFAULT, height=HEIGHT_DEFAULT, x=0, y=0):
sl().set_window_size(width, height)
sl().set_window_position(x, y)
@staticmethod
def go_to_smart(url):
"""Redirect only in on different url"""
current_url = sl().get_location()
if url != current_url:
sl().go_to(url)
    @staticmethod
    def click_element_extended(locator, modifier=False, action_chain=False, timeout=None, error_msg=None):
        """
        Click element proceed with following steps
        * wait_until_page_contains_element
        * wait_until_element_is_visible
        * scroll_element_into_view
        * mouse_over
        * click_element

        :param locator: SeleniumLibrary element locator.
        :param modifier: Passed through to SeleniumLibrary Click Element.
        :param action_chain: Passed through to SeleniumLibrary Click Element.
        :param timeout: Wait timeout for both wait keywords.
        :param error_msg: Custom error message for both wait keywords.
        """
        sl().wait_until_page_contains_element(locator, timeout, error_msg)
        sl().wait_until_element_is_visible(locator, timeout, error_msg)
        sl().scroll_element_into_view(locator)
        sl().mouse_over(locator)
        sl().click_element(locator, modifier=modifier, action_chain=action_chain)
    @staticmethod
    def double_click_element_extended(locator, modifier=False, action_chain=False, timeout=None, error=None):
        """
        Double Click element proceed with following steps
        * wait_until_page_contains_element
        * wait_until_element_is_visible
        * scroll_element_into_view
        * mouse_over
        * double_click_element

        :param locator: SeleniumLibrary element locator.
        :param modifier: Passed through to Double Click Element.
        :param action_chain: Passed through to Double Click Element.
        :param timeout: Wait timeout for both wait keywords.
        :param error: Custom error message for both wait keywords.
        """
        sl().wait_until_page_contains_element(locator, timeout, error)
        sl().wait_until_element_is_visible(locator, timeout, error)
        sl().scroll_element_into_view(locator)
        sl().mouse_over(locator)
        sl().double_click_element(locator, modifier=modifier, action_chain=action_chain)
def click_element_extended_and_wait(self, locator, sleep, modifier=False, action_chain=False, timeout=None, error_msg=None, reason=None):
self.click_element_extended(locator, timeout, error_msg)
bi().sleep(sleep, reason)
    @staticmethod
    def open_browser_extension(url, browser="ff", width=WIDTH_DEFAULT, height=HEIGHT_DEFAULT, x="0", y="0", alias=None, remote_url=False,
                               desired_capabilities=None, ff_profile_dir=None, selenium_timeout=SELENIUM_TIMEOUT, keyword_to_run_on_failure="Capture Page Screenshot Extension"):
        """Open a browser on about:blank, configure window geometry, timeout
        and the on-failure keyword, then navigate to *url*.

        Opening about:blank first lets the window be positioned/sized and
        the library configured before the real page starts loading.
        """
        sl().open_browser("about:blank", browser, alias,
                          remote_url, desired_capabilities, ff_profile_dir)
        sl().set_window_position(x, y)
        sl().set_window_size(width, height)
        sl().set_selenium_timeout(selenium_timeout)
        sl().register_keyword_to_run_on_failure(keyword_to_run_on_failure)
        sl().go_to(url)
    def import_xpath2(self):
        """Inject the XPath2 JS shim into the current page (if not present)."""
        sl().execute_javascript(self.XPATH2_JS)
    # noinspection PyPep8Naming,PyPep8Naming
    def import_jQuery(self):
        """Inject jQuery into the current page (if not already present)."""
        sl().execute_javascript(self.JQUERY_JS)
    # noinspection PyProtectedMember
    @staticmethod
    def capture_page_screenshot_extension(prefix="", postfix="", add_time_stamp=True, add_test_case_name=True, add_file_path_to_list="${list of screenshots}",
                                          output_dir="Artifacts/Screenshots"):
        """Capture a page screenshot into *output_dir* and append its path to
        a Robot Framework test variable.

        :param prefix: Text placed before the test case name in the filename.
        :param postfix: Text placed after the test case name.
        :param add_time_stamp: Append a "Y.m.d_H.M.S" timestamp to the name.
        :param add_test_case_name: Include ${TEST_NAME} in the filename.
        :param add_file_path_to_list: Name of the (test-scoped) list variable
            that collects the screenshot paths.
        :param output_dir: Directory (created if needed) for the screenshots.
        :return: The normalized path of the written screenshot file.
        """
        output_dir_normalized = validate_create_artifacts_dir(output_dir)
        if add_time_stamp:
            current_time = " " + \
                DateTime.get_current_date(result_format="%Y.%m.%d_%H.%M.%S")
        else:
            current_time = ""
        if add_test_case_name:
            test_case_name = bi().get_variable_value("${TEST_NAME}")
        else:
            test_case_name = ""

        output_file = output_dir_normalized + "/" + prefix + \
            test_case_name + postfix + current_time + ".png"
        output_file_normalized = os.path.normpath(output_file)
        # sl()driver.get_screenshot_as_file(output_file_normalized)
        sl().capture_page_screenshot(output_file_normalized)

        # Create the collecting list variable on first use, otherwise append
        # to the existing list via Combine Lists.
        results = bi().run_keyword_and_return_status(
            "Variable Should Exist", add_file_path_to_list)
        if not results:
            # NOTE(review): the return value of _get_var_name is unused --
            # presumably called for its validation side effect; confirm.
            bi()._get_var_name(add_file_path_to_list)
            list_with_files = bi().create_list(output_file_normalized)
            bi().set_test_variable(add_file_path_to_list, list_with_files)
        else:
            list_with_files = bi().create_list(output_file_normalized)
            list_with_files = bi().run_keyword(
                "Combine Lists", add_file_path_to_list, list_with_files)
            bi().set_test_variable(add_file_path_to_list, list_with_files)
        return output_file_normalized
@staticmethod
def element_attribute_should_be(locator, attribute, attribute_value_expected, msg=None, values=True):
    """Fail unless ``attribute`` of the element at ``locator`` equals the expected value.

    ``msg``/``values`` are forwarded to BuiltIn's Should Be Equal semantics.
    Both sides are converted to strings before comparison.
    """
    # Old SeleniumLibrary syntax: "<locator>@<attribute>" fetches one attribute.
    actual_value = sl().get_element_attribute(locator + "@" + attribute)
    # noinspection PyProtectedMember
    actual_value, attribute_value_expected = [bi()._convert_to_string( i) for i in (actual_value, attribute_value_expected)]
    bi()._should_be_equal(actual_value, attribute_value_expected, msg, values)
# noinspection SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection
@staticmethod
def create_download_dir_profile_for_firefox(path_to_download, mime_types_file=None, *extensions_files):
    """
    Create a Firefox profile that saves downloads into ``path_to_download`` without prompting,
    optionally installing *.xpi extensions and copying a mimeTypes file into the profile.
    Returns the path of the created profile directory.

    Example use
    | ${profile} | Create Download Dir Profile For Firefox | Artifacts | Resources/mimeTypes.rdf | Resources/webdriver_element_locator-2.0-fx.xpi | Resources/selenium_ide-2.9.1-fx.xpi |
    | Open Browser Extension | https://support.spatialkey.com/spatialkey-sample-csv-data/ | ff_profile_dir=${profile} |
    | Click Element | //a[contains(@href,'sample.csv.zip')] |
    """
    path_to_download_check = validate_create_artifacts_dir(
        path_to_download)
    fp = FirefoxProfile()
    # folderList=2 -> download into the custom dir set in browser.download.dir.
    fp.set_preference("browser.download.folderList", 2)
    fp.set_preference("browser.download.manager.showWhenStarting", False)
    fp.set_preference("browser.download.manager.alertOnEXEOpen", False)
    fp.set_preference("browser.download.dir", path_to_download_check)
    # Allow unsigned test extensions to be installed.
    fp.set_preference("xpinstall.signatures.required", False)
    fp.set_preference("browser.helperApps.alwaysAsk.force", False)
    # Save these MIME types straight to disk instead of opening a dialog.
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk",
                      "application/msword;application/csv;text/csv;image/png;image/jpeg;application/pdf;text/html;text/plain;application/octet-stream")
    # Disable the built-in PDF viewer so PDFs download as files.
    fp.set_preference("pdfjs.disabled", True)
    fp.update_preferences()
    for single_extension in extensions_files:
        fp.add_extension(single_extension)
    if mime_types_file is not None:
        mime_types_file = os.path.abspath(mime_types_file)
        from shutil import copy2
        copy2(os.path.normpath(mime_types_file), fp.profile_dir)
    logger.info("Firefox Profile Created in dir '" + fp.profile_dir + "'")
    return fp.profile_dir
# noinspection SpellCheckingInspection,SpellCheckingInspection
@staticmethod
def create_download_dir_capabilities_for_chrome(path_to_download, *extensions_files):
    """
    Build Chrome desired capabilities that download files into ``path_to_download``,
    optionally installing extension files. Returns the capabilities dictionary.

    Example use
    | ${capabilities} | create_download_dir_capabilities_for_chrome | Artifacts |
    | Open Browser Extension | https://support.spatialkey.com/spatialkey-sample-csv-data/ | gc | desired_capabilities=${capabilities} |
    | Click Element | //a[contains(@href,'sample.csv.zip')] |
    """
    path_to_download_check = validate_create_artifacts_dir(
        path_to_download)
    chrome_options = ChromeOptions()
    prefs = {"download.default_directory": path_to_download_check,
             "directory_upgrade": "true"}
    chrome_options.add_experimental_option("prefs", prefs)
    chrome_options.add_argument("--disable-web-security")
    # NOTE(review): was ``**extensions_files`` -- iterating that dict passed the
    # keyword *names* (not file paths) to add_extension, so extensions could
    # never load. ``*extensions_files`` matches the Firefox sibling keyword,
    # which accepts extension file paths positionally.
    for single_extension in extensions_files:
        chrome_options.add_extension(single_extension)
    logger.info("Chrome Capabilities set download dir '" +
                path_to_download_check + "'")
    return chrome_options.to_capabilities()
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces defined and used by the base layer of RPC Framework."""
import abc
import collections
import enum
# stream is referenced from specification in this module.
from grpc.framework.foundation import stream # pylint: disable=unused-import
@enum.unique
class Outcome(enum.Enum):
    """Operation outcomes.

    Values are short human-readable labels describing how an operation ended.
    """
    COMPLETED = 'completed'
    CANCELLED = 'cancelled'
    EXPIRED = 'expired'
    RECEPTION_FAILURE = 'reception failure'
    TRANSMISSION_FAILURE = 'transmission failure'
    SERVICER_FAILURE = 'servicer failure'
    SERVICED_FAILURE = 'serviced failure'
class OperationContext(object):
    """Provides operation-related information and action.

    Attributes:
      trace_id: A uuid.UUID identifying a particular set of related operations.
    """
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def is_active(self):
        """Describes whether the operation is active or has terminated."""
        raise NotImplementedError()

    @abc.abstractmethod
    def add_termination_callback(self, callback):
        """Adds a function to be called upon operation termination.

        Args:
          callback: A callable that will be passed an Outcome value.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def time_remaining(self):
        """Describes the length of allowed time remaining for the operation.

        Returns:
          A nonnegative float indicating the length of allowed time in seconds
          remaining for the operation to complete before it is considered to have
          timed out.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def fail(self, exception):
        """Indicates that the operation has failed.

        Args:
          exception: An exception germane to the operation failure. May be None.
        """
        raise NotImplementedError()
class Servicer(object):
    """Interface for service implementations."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def service(self, name, context, output_consumer):
        """Services an operation.

        Args:
          name: The name of the operation.
          context: A ServicerContext object affording contextual information and
            actions.
          output_consumer: A stream.Consumer that will accept output values of
            the operation.

        Returns:
          A stream.Consumer that will accept input values for the operation.

        Raises:
          exceptions.NoSuchMethodError: If this Servicer affords no method with the
            given name.
          abandonment.Abandoned: If the operation has been aborted and there no
            longer is any reason to service the operation.
        """
        raise NotImplementedError()
class Operation(object):
    """Representation of an in-progress operation.

    Attributes:
      consumer: A stream.Consumer into which payloads constituting the operation's
        input may be passed.
      context: An OperationContext affording information and action about the
        operation.
    """
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def cancel(self):
        """Cancels this operation."""
        raise NotImplementedError()
class ServicedIngestor(object):
    """Responsible for accepting the result of an operation."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def consumer(self, operation_context):
        """Affords a consumer to which operation results will be passed.

        Args:
          operation_context: An OperationContext object for the current operation.

        Returns:
          A stream.Consumer to which the results of the current operation will be
          passed.

        Raises:
          abandonment.Abandoned: If the operation has been aborted and there no
            longer is any reason to service the operation.
        """
        raise NotImplementedError()
class ServicedSubscription(object):
    """A sum type representing a serviced's interest in an operation.

    Attributes:
      kind: A Kind value.
      ingestor: A ServicedIngestor. Must be present if kind is Kind.FULL. Must
        be None if kind is Kind.TERMINATION_ONLY or Kind.NONE.
    """
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @enum.unique
    class Kind(enum.Enum):
        """Kinds of subscription."""
        # FULL: results are delivered; TERMINATION_ONLY: only termination is
        # observed; NONE: fire-and-forget.
        FULL = 'full'
        TERMINATION_ONLY = 'termination only'
        NONE = 'none'
class End(object):
    """Common type for entry-point objects on both sides of an operation."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def operation_stats(self):
        """Reports the number of terminated operations broken down by outcome.

        Returns:
          A dictionary from Outcome value to an integer identifying the number
          of operations that terminated with that outcome.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_idle_action(self, action):
        """Adds an action to be called when this End has no ongoing operations.

        Args:
          action: A callable that accepts no arguments.
        """
        raise NotImplementedError()
class Front(End):
    """Clientish objects that afford the invocation of operations."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def operate(
            self, name, payload, complete, timeout, subscription, trace_id):
        """Commences an operation.

        Args:
          name: The name of the method invoked for the operation.
          payload: An initial payload for the operation. May be None.
          complete: A boolean indicating whether or not additional payloads to be
            sent to the servicer may be supplied after this call.
          timeout: A length of time in seconds to allow for the operation.
          subscription: A ServicedSubscription for the operation.
          trace_id: A uuid.UUID identifying a set of related operations to which
            this operation belongs.

        Returns:
          An Operation object affording information and action about the operation
          in progress.
        """
        raise NotImplementedError()
class Back(End):
    """Serverish objects that perform the work of operations."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta
class FrontToBackTicket(
    collections.namedtuple(
        'FrontToBackTicket',
        ['operation_id', 'sequence_number', 'kind', 'name', 'subscription',
         'trace_id', 'payload', 'timeout'])):
    """A sum type for all values sent from a front to a back.

    Attributes:
      operation_id: A unique-with-respect-to-equality hashable object identifying
        a particular operation.
      sequence_number: A zero-indexed integer sequence number identifying the
        ticket's place among all the tickets sent from front to back for this
        particular operation. Must be zero if kind is Kind.COMMENCEMENT or
        Kind.ENTIRE. Must be positive for any other kind.
      kind: A Kind value describing the overall kind of ticket.
      name: The name of an operation. Must be present if kind is Kind.COMMENCEMENT
        or Kind.ENTIRE. Must be None for any other kind.
      subscription: An ServicedSubscription.Kind value describing the interest
        the front has in tickets sent from the back. Must be present if
        kind is Kind.COMMENCEMENT or Kind.ENTIRE. Must be None for any other kind.
      trace_id: A uuid.UUID identifying a set of related operations to which this
        operation belongs. May be None.
      payload: A customer payload object. Must be present if kind is
        Kind.CONTINUATION. Must be None if kind is Kind.CANCELLATION. May be None
        for any other kind.
      timeout: An optional length of time (measured from the beginning of the
        operation) to allow for the entire operation. If None, a default value on
        the back will be used. If present and excessively large, the back may
        limit the operation to a smaller duration of its choice. May be present
        for any ticket kind; setting a value on a later ticket allows fronts
        to request time extensions (or even time reductions!) on in-progress
        operations.
    """
    @enum.unique
    class Kind(enum.Enum):
        """Identifies the overall kind of a FrontToBackTicket."""
        COMMENCEMENT = 'commencement'
        CONTINUATION = 'continuation'
        COMPLETION = 'completion'
        # ENTIRE: a one-ticket operation (commencement and completion together).
        ENTIRE = 'entire'
        CANCELLATION = 'cancellation'
        EXPIRATION = 'expiration'
        SERVICER_FAILURE = 'servicer failure'
        SERVICED_FAILURE = 'serviced failure'
        RECEPTION_FAILURE = 'reception failure'
        TRANSMISSION_FAILURE = 'transmission failure'
class BackToFrontTicket(
    collections.namedtuple(
        'BackToFrontTicket',
        ['operation_id', 'sequence_number', 'kind', 'payload'])):
    """A sum type for all values sent from a back to a front.

    Attributes:
      operation_id: A unique-with-respect-to-equality hashable object identifying
        a particular operation.
      sequence_number: A zero-indexed integer sequence number identifying the
        ticket's place among all the tickets sent from back to front for this
        particular operation.
      kind: A Kind value describing the overall kind of ticket.
      payload: A customer payload object. Must be present if kind is
        Kind.CONTINUATION. May be None if kind is Kind.COMPLETION. Must be None
        otherwise.
    """
    @enum.unique
    class Kind(enum.Enum):
        """Identifies the overall kind of a BackToFrontTicket."""
        CONTINUATION = 'continuation'
        COMPLETION = 'completion'
        CANCELLATION = 'cancellation'
        EXPIRATION = 'expiration'
        SERVICER_FAILURE = 'servicer failure'
        SERVICED_FAILURE = 'serviced failure'
        RECEPTION_FAILURE = 'reception failure'
        TRANSMISSION_FAILURE = 'transmission failure'
class ForeLink(object):
    """Accepts back-to-front tickets and emits front-to-back tickets."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def accept_back_to_front_ticket(self, ticket):
        """Accept a BackToFrontTicket.

        Args:
          ticket: Any BackToFrontTicket.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def join_rear_link(self, rear_link):
        """Mates this object with a peer with which it will exchange tickets."""
        raise NotImplementedError()
class RearLink(object):
    """Accepts front-to-back tickets and emits back-to-front tickets."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def accept_front_to_back_ticket(self, ticket):
        """Accepts a FrontToBackTicket.

        Args:
          ticket: Any FrontToBackTicket.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def join_fore_link(self, fore_link):
        """Mates this object with a peer with which it will exchange tickets."""
        raise NotImplementedError()
class FrontLink(Front, ForeLink):
    """Clientish objects that operate by sending and receiving tickets."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta
class BackLink(Back, RearLink):
    """Serverish objects that operate by sending and receiving tickets."""
    # Python 2-style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta
| |
import networkx as nx
import matplotlib.pyplot as plt
import exact_controllability as ECT
from networkx.utils import powerlaw_sequence
import operator
import random
import csv
import copy
import subprocess, os
import time
import numpy as np
from ControllabilityRobustnessBasedOnEdgeAttack import RandomEdgeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeDegreeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeDegreeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeBetweennessAttack
from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeBetweennessAttack
import strutral_controllability as SC
def EdgeAttackBA():
start_time = time.time()
n = 200
m = 3
fraction = 0.2
E = 591
E_rm = 118
run_cnt = 100
#******** Run Node Attack 1 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack1_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 2 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack2_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 3 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack3_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 4 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack4_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 5 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack5_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
print "--- cost time %s seconds ---" %(time.time() - start_time)
def EdgeAttackUSAir():
start_time = time.time()
n = 332
fraction = 0.2
E = 2126
E_rm = int(0.2 * E)
run_cnt = 100
#******** Run Edge Attack 1 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 1;
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
random.seed(rndseed)
ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1;
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack1_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 2 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack2_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 3 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack3_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 4 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack4_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 5 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack5_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
print "--- cost time %s seconds ---" %(time.time() - start_time)
def EdgeAttackErdosNetwork():
start_time = time.time()
n = 429
fraction = 0.2
E = 1312
E_rm = int(0.2 * E)
run_cnt = 30
#******** Run Node Attack 1 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 1
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
random.seed(rndseed)
ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack1_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 2 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack2_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 3 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack3_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 4 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack4_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 5 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack5_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
print "--- cost time %s seconds ---" %(time.time() - start_time)
def ReadPajek(filename):
    """Read a Pajek (.net) file and construct a DiGraph.

    Only ``*Vertices`` and ``*Arcs`` sections are handled; vertex labels and
    edge weights are intentionally ignored. Node ids are the integer Pajek ids.
    (NOTE(review): ``*Edges`` sections, used by undirected Pajek files, are
    silently skipped -- same as the original behavior.)
    """
    G = nx.DiGraph()
    # ``with`` guarantees the file is closed even if parsing raises.
    with open(filename, 'r') as fp:
        line = fp.readline()
        while line:
            if line[0] == '*':
                header = line.strip().split()
                label = header[0]
                number = int(header[1])
                if label == '*Vertices' or label == '*vertices':
                    # One "<id> <label>" line per vertex; label is unused.
                    for _ in range(number):
                        node_fields = fp.readline().strip().split()
                        G.add_node(int(node_fields[0]))
                elif label == '*Arcs' or label == '*arcs':
                    # One "<source> <target> [<weight>]" line per arc.
                    for _ in range(number):
                        edge_fields = fp.readline().strip().split()
                        G.add_edge(int(edge_fields[0]), int(edge_fields[1]))
            line = fp.readline()
    return G
def EdgeAttack(G):
""" Edge attack experiments on real world networks
Params:
G: A directed network of networkx
Returns:
None. Print the network controllability n_D after
5% 10% 15% 20% edges removed
"""
NodesNum = G.number_of_nodes()
EdgesNum = G.number_of_edges()
# Edge remove fraction F0, F1, F2, F3, F4
F1 = 0.05
F2 = 0.10
F3 = 0.15
F4 = 0.20
LRA = []
LID = []
LRD = []
LIB = []
LRB = []
# Following is Edge Random Attack (RA)
print '########## Edge RA ##########'
G1 = copy.deepcopy(G)
RandomEdges = copy.deepcopy(G1.edges())
random.shuffle(RandomEdges)
i = 0
while i < int(F1 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F1, nD
LRA.append(nD)
while i < int(F2 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F2, nD
LRA.append(nD)
while i < int(F3 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F3, nD
LRA.append(nD)
while i < int(F4 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F4, nD
LRA.append(nD)
G1.clear()
RandomEdges = []
# Following is Initial Edge Degree Attack (IDA)
print '########## Edge IDA ##########'
G2 = copy.deepcopy(G)
NodeDegrees = nx.degree(G2)
EdgeDegrees = {}
for u, v in G2.edges_iter(): # Calculate the edge degrees
EdgeDegrees[(u, v)] = NodeDegrees[u] * NodeDegrees[v]
# Sort the edges decrendingly according to edge degree
SortedEdges = sorted(EdgeDegrees, key=EdgeDegrees.get, reverse=True)
i = 0
while i < int(F1 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F1, nD
LID.append(nD)
while i < int(F2 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F2, nD
LID.append(nD)
while i < int(F3 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F3, nD
LID.append(nD)
while i < int(F4 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F4, nD
LID.append(nD)
G2.clear()
NodeDegrees = {}
EdgeDegrees = {}
SortedEdges = []
# Following is Recalculated Edge Degree Attack (RDA)
print '########## Edge RDA ##########'
G3 = copy.deepcopy(G)
i = 0
while i < int(F1 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F1, nD
LRD.append(nD)
while i < int(F2 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F2, nD
LRD.append(nD)
while i < int(F3 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F3, nD
LRD.append(nD)
while i < int(F4 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F4, nD
LRD.append(nD)
G3.clear()
# Folloing is Initial Edge Betweenness Attack (IBA)
print '########## Edge IBA ##########'
G4 = copy.deepcopy(G)
EdgeBetweenness = nx.edge_betweenness_centrality(G4)
SortedBetEdges = sorted(EdgeBetweenness,
key=EdgeBetweenness.get, reverse=True)
i = 0
while i < int(F1 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F1, nD
LIB.append(nD)
while i < int(F2 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F2, nD
LIB.append(nD)
while i < int(F3 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F3, nD
LIB.append(nD)
while i < int(F4 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F4, nD
LIB.append(nD)
G4.clear()
EdgeBetweenness = {}
SortedBetEdges = []
# Following is Recalculated Edge Betweenness Attack (RBA)
print '########## Edge RBA ##########'
G5 = copy.deepcopy(G)
i = 0
while i < int(F1 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F1, nD
LRB.append(nD)
while i < int(F2 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F2, nD
LRB.append(nD)
while i < int(F3 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F3, nD
LRB.append(nD)
while i < int(F4 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F4, nD
LRB.append(nD)
G5.clear()
print 'RA: ', LRA[0], LRA[1], LRA[2], LRA[3]
print 'ID: ', LID[0], LID[1], LID[2], LID[3]
print 'RD: ', LRD[0], LRD[1], LRD[2], LRD[3]
print 'IB: ', LIB[0], LIB[1], LIB[2], LIB[3]
print 'RB: ', LRB[0], LRB[1], LRB[2], LRB[3]
if __name__ == "__main__":
#EdgeAttackBA()
#EdgeAttackUSAir()
# Edge Attack Erdos971 Network
# for random attack, we set the random seed to from 1 to 100 for the
# independent 100 runs. For other deliberate attacks, as the attack order
# is fixed, we reset the seed of random to the initial state, i.e. seed(None)
#EdgeAttackErdosNetwork()
# Regulatory
#G = ReadPajek('./dataset/Regulatory/TRN-Yeast-1.net')
#G = ReadPajek('./dataset/Regulatory/TRN-Yeast-2.net')
#G = ReadPajek('./dataset/Regulatory/TRN-EC-2.net')
#G = ReadPajek('./dataset/Regulatory/Ownership.net')
# World Wide Web (WWW)
#G = ReadPajek('./dataset/WWW/PoliticalBlogs.net')
# Internet
G = ReadPajek('./dataset/Internet/P2P_1.net')
print 'Edge Attack From Temp Temp Temp Files ... '
print 'Internet --- P2P1'
NodesNum = G.number_of_nodes()
EdgesNum = G.number_of_edges()
DriverNodes = SC.control_nodes(G)
nD = len(DriverNodes) / (NodesNum + 0.0)
print 'Nodes Num: ', NodesNum
print 'Edges Num: ', EdgesNum
print 'nD = ', nD
EdgeAttack(G)
G = ReadPajek('./dataset/Internet/P2P_2.net')
print 'Edge Attack From Temp Temp Temp Files ... '
print 'Internet --- P2P1'
NodesNum = G.number_of_nodes()
EdgesNum = G.number_of_edges()
DriverNodes = SC.control_nodes(G)
nD = len(DriverNodes) / (NodesNum + 0.0)
print 'Nodes Num: ', NodesNum
print 'Edges Num: ', EdgesNum
print 'nD = ', nD
EdgeAttack(G)
G = ReadPajek('./dataset/Internet/P2P_3.net')
print 'Edge Attack From Temp Temp Temp Files ... '
print 'Internet --- P2P1'
NodesNum = G.number_of_nodes()
EdgesNum = G.number_of_edges()
DriverNodes = SC.control_nodes(G)
nD = len(DriverNodes) / (NodesNum + 0.0)
print 'Nodes Num: ', NodesNum
print 'Edges Num: ', EdgesNum
print 'nD = ', nD
EdgeAttack(G)
| |
import time
import typing
import json
import jsonpickle
import logging
import uuid
from dateutil import parser
from typing import List, Dict, Text, Any, Type, Optional
if typing.TYPE_CHECKING:
from rasa.core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
def deserialise_events(serialized_events: List[Dict[Text, Any]]
                       ) -> List['Event']:
    """Convert a list of dictionaries to a list of corresponding events.

    Dictionaries without an ``"event"`` key or that fail to parse are
    skipped (with a warning) instead of aborting the whole batch.

    Example format:
        [{"event": "slot", "value": 5, "name": "my_slot"}]
    """
    deserialised = []

    for e in serialized_events:
        if "event" in e:
            event = Event.from_parameters(e)
            if event:
                deserialised.append(event)
            else:
                # Bug fix: the original log line contained a literal "{}"
                # placeholder that was never filled with the offending dict.
                logger.warning("Ignoring event ({}) while deserialising "
                               "events. Couldn't parse it.".format(e))

    return deserialised
def deserialise_entities(entities):
    """Return *entities* as a list of entity dicts.

    Accepts either an already-deserialised list or a JSON encoded string;
    any list element that is not a dict is dropped.
    """
    parsed = json.loads(entities) if isinstance(entities, str) else entities
    return [entity for entity in parsed if isinstance(entity, dict)]
def md_format_message(text, intent, entities):
    """Render a user message in Rasa NLU markdown training-data format."""
    from rasa_nlu.training_data.formats import MarkdownWriter, MarkdownReader

    # Re-parse the text so any inline entity annotations are stripped.
    parsed = MarkdownReader()._parse_training_example(text)
    return MarkdownWriter()._generate_message_md({
        "text": parsed.text,
        "intent": intent,
        "entities": deserialise_entities(entities),
    })
def first_key(d, default_key):
    """Return the first key of *d* that is not *default_key*.

    A single-entry dict returns its sole key (even if it equals
    *default_key*); an empty dict returns ``None``.
    """
    if len(d) == 1:
        return next(iter(d))

    for key in d:
        if key != default_key:
            # the first key that is not the default key wins
            return key
    return None
# noinspection PyProtectedMember
class Event(object):
    """Events describe everything that occurs in
    a conversation and tell the :class:`rasa.core.trackers.DialogueStateTracker`
    how to update its state."""

    type_name = "event"

    def __init__(self, timestamp=None):
        # Default to "now" so every event carries a creation time.
        self.timestamp = timestamp if timestamp else time.time()

    def __ne__(self, other):
        # Not strictly necessary, but to avoid having both x==y and x!=y
        # True at the same time
        return not (self == other)

    def as_story_string(self):
        """Serialise the event for a markdown story file (subclass hook)."""
        raise NotImplementedError

    @staticmethod
    def from_story_string(event_name: Text,
                          parameters: Dict[Text, Any],
                          default: Optional[Type['Event']] = None
                          ) -> Optional[List['Event']]:
        """Instantiate event(s) of type ``event_name`` from story parameters."""
        event = Event.resolve_by_type(event_name, default)

        if event:
            return event._from_story_string(parameters)
        else:
            return None

    @staticmethod
    def from_parameters(parameters: Dict[Text, Any],
                        default: Optional[Type['Event']] = None
                        ) -> Optional['Event']:
        """Instantiate a single event from a dict carrying an "event" key."""
        event_name = parameters.get("event")
        if event_name is None:
            return None

        # Fix: removed a dead ``copied = parameters.copy()`` /
        # ``del copied["event"]`` pair whose result was never used.
        event = Event.resolve_by_type(event_name, default)
        if event:
            return event._from_parameters(parameters)
        else:
            return None

    @classmethod
    def _from_story_string(
            cls,
            parameters: Dict[Text, Any]
    ) -> Optional[List['Event']]:
        """Called to convert a parsed story line into an event."""
        return [cls(parameters.get("timestamp"))]

    def as_dict(self):
        """Serialise the event to a JSON-compatible dict."""
        return {
            "event": self.type_name,
            "timestamp": self.timestamp,
        }

    @classmethod
    def _from_parameters(cls, parameters):
        """Called to convert a dictionary of parameters to a single event.

        By default uses the same implementation as the story line
        conversation ``_from_story_string``. But the subclass might
        decide to handle parameters differently if the parsed parameters
        don't origin from a story file."""

        result = cls._from_story_string(parameters)
        # Fix: guard against ``result`` being ``None`` before taking its
        # length (the original raised a TypeError for such subclasses).
        if result and len(result) > 1:
            logger.warning("Event from parameters called with parameters "
                           "for multiple events. This is not supported, "
                           "only the first event will be returned. "
                           "Parameters: {}".format(parameters))

        return result[0] if result else None

    @staticmethod
    def resolve_by_type(
            type_name: Text,
            default: Optional[Type['Event']] = None
    ) -> Optional[Type['Event']]:
        """Returns a slots class by its type name."""
        from rasa.core import utils

        for cls in utils.all_subclasses(Event):
            if cls.type_name == type_name:
                return cls
        if type_name == "topic":
            return None  # backwards compatibility to support old TopicSet evts
        elif default is not None:
            return default
        else:
            raise ValueError("Unknown event name '{}'.".format(type_name))

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        """Mutate the tracker to reflect this event (subclass hook)."""
        pass
# noinspection PyProtectedMember
class UserUttered(Event):
    """The user has said something to the bot.

    As a side effect a new ``Turn`` will be created in the ``Tracker``."""

    type_name = "user"

    def __init__(self, text,
                 intent=None,
                 entities=None,
                 parse_data=None,
                 timestamp=None,
                 input_channel=None,
                 message_id=None):
        self.text = text
        self.intent = intent if intent else {}
        self.entities = entities if entities else []
        self.input_channel = input_channel
        self.message_id = message_id

        if parse_data:
            self.parse_data = parse_data
        else:
            # Reconstruct a minimal NLU parse result from the pieces given.
            self.parse_data = {
                "intent": self.intent,
                "entities": self.entities,
                "text": text,
            }

        super(UserUttered, self).__init__(timestamp)

    @staticmethod
    def _from_parse_data(text, parse_data, timestamp=None, input_channel=None):
        """Build a ``UserUttered`` from a full NLU parse result dict."""
        return UserUttered(text,
                           parse_data.get("intent"),
                           parse_data.get("entities", []),
                           parse_data,
                           timestamp,
                           input_channel)

    def __hash__(self):
        return hash((self.text, self.intent.get("name"),
                     jsonpickle.encode(self.entities)))

    def __eq__(self, other):
        if not isinstance(other, UserUttered):
            return False
        else:
            return (self.text, self.intent.get("name"),
                    [jsonpickle.encode(ent) for ent in self.entities]) == \
                   (other.text, other.intent.get("name"),
                    [jsonpickle.encode(ent) for ent in other.entities])

    def __str__(self):
        return ("UserUttered(text: {}, intent: {}, "
                "entities: {})".format(self.text, self.intent, self.entities))

    @staticmethod
    def empty():
        return UserUttered(None)

    def as_dict(self):
        d = super(UserUttered, self).as_dict()
        input_channel = None  # for backwards compatibility (persisted events)
        if hasattr(self, "input_channel"):
            input_channel = self.input_channel
        d.update({
            "text": self.text,
            "parse_data": self.parse_data,
            "input_channel": input_channel
        })
        return d

    @classmethod
    def _from_story_string(
            cls,
            parameters: Dict[Text, Any]
    ) -> Optional[List[Event]]:
        try:
            return [cls._from_parse_data(parameters.get("text"),
                                         parameters.get("parse_data"),
                                         parameters.get("timestamp"),
                                         parameters.get("input_channel"))]
        except KeyError as e:
            # Bug fix: the original message said "bot uttered event" here,
            # a copy-paste from BotUttered._from_parameters.
            raise ValueError("Failed to parse user uttered event. "
                             "{}".format(e))

    def as_story_string(self, e2e=False):
        """Serialise as ``intent{entities}`` (or full e2e markdown)."""
        if self.intent:
            if self.entities:
                ent_string = json.dumps({ent['entity']: ent['value']
                                         for ent in self.entities})
            else:
                ent_string = ""

            parse_string = "{intent}{entities}".format(
                intent=self.intent.get("name", ""),
                entities=ent_string)
            if e2e:
                message = md_format_message(self.text,
                                            self.intent,
                                            self.entities)
                return "{}: {}".format(self.intent.get("name"), message)
            else:
                return parse_string
        else:
            return self.text

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        tracker.latest_message = self
        tracker.clear_followup_action()
# noinspection PyProtectedMember
class BotUttered(Event):
    """The bot has said something to the user.

    This class is not used in the story training as it is contained in the
    ``ActionExecuted`` class. An entry is made in the ``Tracker``."""

    type_name = "bot"

    def __init__(self, text=None, data=None, timestamp=None):
        self.text = text
        self.data = data
        super(BotUttered, self).__init__(timestamp)

    def __hash__(self):
        return hash((self.text, jsonpickle.encode(self.data)))

    def __eq__(self, other):
        if not isinstance(other, BotUttered):
            return False
        mine = (self.text, jsonpickle.encode(self.data))
        theirs = (other.text, jsonpickle.encode(other.data))
        return mine == theirs

    def __str__(self):
        return ("BotUttered(text: {}, data: {})"
                "".format(self.text, json.dumps(self.data, indent=2)))

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        # The tracker keeps a handle on the most recent bot message.
        tracker.latest_bot_utterance = self

    def as_story_string(self):
        # Bot utterances are implied by the action that produced them,
        # so they have no story representation of their own.
        return None

    @staticmethod
    def empty():
        return BotUttered()

    def as_dict(self):
        d = super(BotUttered, self).as_dict()
        d.update({
            "text": self.text,
            "data": self.data,
        })
        return d

    @classmethod
    def _from_parameters(cls, parameters):
        try:
            return BotUttered(parameters.get("text"),
                              parameters.get("data"),
                              parameters.get("timestamp"))
        except KeyError as e:
            raise ValueError("Failed to parse bot uttered event. {}".format(e))
# noinspection PyProtectedMember
class SlotSet(Event):
    """The user has specified their preference for the value of a ``slot``.

    Every slot has a name and a value. This event can be used to set a
    value for a slot on a conversation.

    As a side effect the ``Tracker``'s slots will be updated so
    that ``tracker.slots[key]=value``."""

    type_name = "slot"

    def __init__(self, key, value=None, timestamp=None):
        self.key = key
        self.value = value
        super(SlotSet, self).__init__(timestamp)

    def __str__(self):
        return "SlotSet(key: {}, value: {})".format(self.key, self.value)

    def __hash__(self):
        return hash((self.key, jsonpickle.encode(self.value)))

    def __eq__(self, other):
        if not isinstance(other, SlotSet):
            return False
        return (self.key, self.value) == (other.key, other.value)

    def as_story_string(self):
        props = json.dumps({self.key: self.value})
        return "{name}{props}".format(name=self.type_name, props=props)

    @classmethod
    def _from_story_string(
            cls,
            parameters: Dict[Text, Any]
    ) -> Optional[List[Event]]:
        # A story line may set several slots at once - one event per slot.
        slots = [SlotSet(slot_key, slot_val)
                 for slot_key, slot_val in parameters.items()]
        return slots if slots else None

    def as_dict(self):
        d = super(SlotSet, self).as_dict()
        d.update({
            "name": self.key,
            "value": self.value,
        })
        return d

    @classmethod
    def _from_parameters(cls, parameters):
        try:
            return SlotSet(parameters.get("name"),
                           parameters.get("value"),
                           parameters.get("timestamp"))
        except KeyError as e:
            raise ValueError("Failed to parse set slot event. {}".format(e))

    def apply_to(self, tracker):
        tracker._set_slot(self.key, self.value)
# noinspection PyProtectedMember
class Restarted(Event):
    """Conversation should start over & history wiped.

    Instead of deleting all events, this event can be used to reset the
    trackers state (e.g. ignoring any past user messages & resetting all
    the slots)."""

    type_name = "restart"

    def __hash__(self):
        # All restart events are interchangeable, hence a constant hash.
        return hash(32143124312)

    def __eq__(self, other):
        return isinstance(other, Restarted)

    def __str__(self):
        return "Restarted()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker):
        from rasa.core.actions.action import ACTION_LISTEN_NAME

        # Wipe the tracker and immediately wait for the next user message.
        tracker._reset()
        tracker.trigger_followup_action(ACTION_LISTEN_NAME)
# noinspection PyProtectedMember
class UserUtteranceReverted(Event):
    """Bot reverts everything until before the most recent user message.

    The bot will revert all events after the latest `UserUttered`, this
    also means that the last event on the tracker is usually `action_listen`
    and the bot is waiting for a new user message."""

    type_name = "rewind"

    def __hash__(self):
        # Rewind events carry no payload, so all instances hash alike.
        return hash(32143124315)

    def __eq__(self, other):
        return isinstance(other, UserUtteranceReverted)

    def __str__(self):
        return "UserUtteranceReverted()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        # Rebuild the state from scratch; replay honours the rewind.
        tracker._reset()
        tracker.replay_events()
# noinspection PyProtectedMember
class AllSlotsReset(Event):
    """All Slots are reset to their initial values.

    If you want to keep the dialogue history and only want to reset the
    slots, you can use this event to set all the slots to their initial
    values."""

    type_name = "reset_slots"

    def __hash__(self):
        # No payload, so all instances hash alike.
        return hash(32143124316)

    def __eq__(self, other):
        return isinstance(other, AllSlotsReset)

    def __str__(self):
        return "AllSlotsReset()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker):
        tracker._reset_slots()
# noinspection PyProtectedMember
class ReminderScheduled(Event):
    """Allows asynchronous scheduling of action execution.

    As a side effect the message processor will schedule an action to be run
    at the trigger date."""

    type_name = "reminder"

    def __init__(self, action_name, trigger_date_time, name=None,
                 kill_on_user_message=True, timestamp=None):
        """Creates the reminder

        Args:
            action_name: name of the action to be scheduled
            trigger_date_time: date at which the execution of the action
                should be triggered (either utc or with tz)
            name: id of the reminder. if there are multiple reminders with
                 the same id only the last will be run
            kill_on_user_message: ``True`` means a user message before the
                 trigger date will abort the reminder
            timestamp: creation date of the event
        """
        self.action_name = action_name
        self.trigger_date_time = trigger_date_time
        self.kill_on_user_message = kill_on_user_message
        # Auto-generate an id so unnamed reminders never collide.
        self.name = name if name is not None else str(uuid.uuid1())
        super(ReminderScheduled, self).__init__(timestamp)

    def __hash__(self):
        return hash((self.action_name, self.trigger_date_time.isoformat(),
                     self.kill_on_user_message, self.name))

    def __eq__(self, other):
        # Reminders are identified purely by their name.
        return (isinstance(other, ReminderScheduled)
                and self.name == other.name)

    def __str__(self):
        return ("ReminderScheduled("
                "action: {}, trigger_date: {}, name: {}"
                ")".format(self.action_name, self.trigger_date_time,
                           self.name))

    def _data_obj(self):
        # Shared payload for both the story and the dict serialisation.
        return {
            "action": self.action_name,
            "date_time": self.trigger_date_time.isoformat(),
            "name": self.name,
            "kill_on_user_msg": self.kill_on_user_message
        }

    def as_story_string(self):
        return "{name}{props}".format(name=self.type_name,
                                      props=json.dumps(self._data_obj()))

    def as_dict(self):
        d = super(ReminderScheduled, self).as_dict()
        d.update(self._data_obj())
        return d

    @classmethod
    def _from_story_string(
            cls,
            parameters: Dict[Text, Any]
    ) -> Optional[List[Event]]:
        trigger_date_time = parser.parse(parameters.get("date_time"))
        return [ReminderScheduled(parameters.get("action"),
                                  trigger_date_time,
                                  parameters.get("name", None),
                                  parameters.get("kill_on_user_msg", True),
                                  parameters.get("timestamp"))]
# noinspection PyProtectedMember
class ReminderCancelled(Event):
    """Cancel all jobs with a specific name."""

    type_name = "cancel_reminder"

    def __init__(self, action_name, timestamp=None):
        """
        Args:
            action_name: name of the scheduled action to be cancelled
        """
        self.action_name = action_name
        super(ReminderCancelled, self).__init__(timestamp)

    def __hash__(self):
        # Bug fix: the original hashed ``self.name``, an attribute this
        # class never defines (copy-paste from ReminderScheduled), so
        # hashing always raised AttributeError.
        return hash(self.action_name)

    def __eq__(self, other):
        # Bug fix: compare the payload as well; the original compared only
        # the type while __hash__ depends on the action name, which
        # violated the __eq__/__hash__ contract.
        return (isinstance(other, ReminderCancelled)
                and self.action_name == other.action_name)

    def __str__(self):
        return ("ReminderCancelled(action: {})"
                .format(self.action_name))

    def as_story_string(self):
        # Bug fix: the original called ``self._data_obj()``, which is only
        # defined on ReminderScheduled, and therefore raised AttributeError.
        props = json.dumps({"action": self.action_name})
        return "{name}{props}".format(name=self.type_name, props=props)

    @classmethod
    def _from_story_string(
            cls,
            parameters: Dict[Text, Any]
    ) -> Optional[List[Event]]:
        return [ReminderCancelled(parameters.get("action"),
                                  parameters.get("timestamp"))]
# noinspection PyProtectedMember
class ActionReverted(Event):
    """Bot undoes its last action.

    The bot reverts everything until before the most recent action.
    This includes the action itself, as well as any events that
    action created, like set slot events - the bot will now
    predict a new action using the state before the most recent
    action."""

    type_name = "undo"

    def __hash__(self):
        # No payload, so all instances hash alike.
        return hash(32143124318)

    def __eq__(self, other):
        return isinstance(other, ActionReverted)

    def __str__(self):
        return "ActionReverted()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        # Rebuild the state from scratch; replay honours the undo.
        tracker._reset()
        tracker.replay_events()
# noinspection PyProtectedMember
class StoryExported(Event):
    """Story should get dumped to a file."""

    type_name = "export"

    def __init__(self, path=None, timestamp=None):
        # Target file for the story dump; ``None`` disables the export.
        self.path = path
        super(StoryExported, self).__init__(timestamp)

    def __hash__(self):
        return hash(32143124319)

    def __eq__(self, other):
        return isinstance(other, StoryExported)

    def __str__(self):
        return "StoryExported()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        if self.path:
            tracker.export_stories_to_file(self.path)
# noinspection PyProtectedMember
class FollowupAction(Event):
    """Enqueue a followup action."""

    type_name = "followup"

    def __init__(self, name, timestamp=None):
        self.action_name = name
        super(FollowupAction, self).__init__(timestamp)

    def __hash__(self):
        return hash(self.action_name)

    def __eq__(self, other):
        return (isinstance(other, FollowupAction)
                and self.action_name == other.action_name)

    def __str__(self):
        return "FollowupAction(action: {})".format(self.action_name)

    def as_story_string(self):
        return "{name}{props}".format(
            name=self.type_name,
            props=json.dumps({"name": self.action_name}))

    @classmethod
    def _from_story_string(cls,
                           parameters: Dict[Text, Any]
                           ) -> Optional[List[Event]]:
        return [FollowupAction(parameters.get("name"),
                               parameters.get("timestamp"))]

    def as_dict(self):
        d = super(FollowupAction, self).as_dict()
        d.update({"name": self.action_name})
        return d

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        tracker.trigger_followup_action(self.action_name)
# noinspection PyProtectedMember
class ConversationPaused(Event):
    """Ignore messages from the user to let a human take over.

    As a side effect the ``Tracker``'s ``paused`` attribute will
    be set to ``True``."""

    type_name = "pause"

    def __hash__(self):
        # No payload, so all instances hash alike.
        return hash(32143124313)

    def __eq__(self, other):
        return isinstance(other, ConversationPaused)

    def __str__(self):
        return "ConversationPaused()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker):
        tracker._paused = True
# noinspection PyProtectedMember
class ConversationResumed(Event):
    """Bot takes over conversation.

    Inverse of ``PauseConversation``. As a side effect the ``Tracker``'s
    ``paused`` attribute will be set to ``False``."""

    type_name = "resume"

    def __hash__(self):
        # No payload, so all instances hash alike.
        return hash(32143124314)

    def __eq__(self, other):
        return isinstance(other, ConversationResumed)

    def __str__(self):
        return "ConversationResumed()"

    def as_story_string(self):
        return self.type_name

    def apply_to(self, tracker):
        tracker._paused = False
# noinspection PyProtectedMember
class ActionExecuted(Event):
    """An operation describes an action taken + its result.

    It comprises an action and a list of events. operations will be appended
    to the latest ``Turn`` in the ``Tracker.turns``."""

    type_name = "action"

    def __init__(self,
                 action_name,
                 policy=None,
                 confidence=None,
                 timestamp=None):
        self.action_name = action_name
        self.policy = policy
        self.confidence = confidence
        # May be flipped later for actions that cannot be predicted.
        self.unpredictable = False
        super(ActionExecuted, self).__init__(timestamp)

    def __str__(self):
        return ("ActionExecuted(action: {}, policy: {}, confidence: {})"
                "".format(self.action_name, self.policy, self.confidence))

    def __hash__(self):
        return hash(self.action_name)

    def __eq__(self, other):
        return (isinstance(other, ActionExecuted)
                and self.action_name == other.action_name)

    def as_story_string(self):
        # In stories an executed action is just its name on a line.
        return self.action_name

    @classmethod
    def _from_story_string(
            cls,
            parameters: Dict[Text, Any]
    ) -> Optional[List[Event]]:
        return [ActionExecuted(parameters.get("name"),
                               parameters.get("policy"),
                               parameters.get("confidence"),
                               parameters.get("timestamp"))]

    def as_dict(self):
        d = super(ActionExecuted, self).as_dict()

        # for backwards compatibility with persisted events that may
        # predate these attributes
        policy = getattr(self, "policy", None)
        confidence = getattr(self, "confidence", None)

        d.update({
            "name": self.action_name,
            "policy": policy,
            "confidence": confidence
        })
        return d

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        tracker.set_latest_action_name(self.action_name)
        tracker.clear_followup_action()
class AgentUttered(Event):
    """The agent has said something to the user.

    This class is not used in the story training as it is contained in the
    ``ActionExecuted`` class. An entry is made in the ``Tracker``."""

    type_name = "agent"

    def __init__(self, text=None, data=None, timestamp=None):
        self.text = text
        self.data = data
        super(AgentUttered, self).__init__(timestamp)

    def __hash__(self):
        return hash((self.text, jsonpickle.encode(self.data)))

    def __eq__(self, other):
        if not isinstance(other, AgentUttered):
            return False
        mine = (self.text, jsonpickle.encode(self.data))
        theirs = (other.text, jsonpickle.encode(other.data))
        return mine == theirs

    def __str__(self):
        return "AgentUttered(text: {}, data: {})".format(
            self.text, json.dumps(self.data, indent=2))

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        # Agent messages do not change the dialogue state.
        pass

    def as_story_string(self):
        return None

    def as_dict(self):
        d = super(AgentUttered, self).as_dict()
        d.update({
            "text": self.text,
            "data": self.data,
        })
        return d

    @staticmethod
    def empty():
        return AgentUttered()

    @classmethod
    def _from_parameters(cls, parameters):
        try:
            return AgentUttered(parameters.get("text"),
                                parameters.get("data"),
                                parameters.get("timestamp"))
        except KeyError as e:
            raise ValueError("Failed to parse agent uttered event. "
                             "{}".format(e))
class Form(Event):
    """If `name` is not None: activates a form with `name`
    else deactivates active form
    """

    type_name = "form"

    def __init__(self, name, timestamp=None):
        self.name = name
        super(Form, self).__init__(timestamp)

    def __str__(self):
        return "Form({})".format(self.name)

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return isinstance(other, Form) and self.name == other.name

    def as_story_string(self):
        return "{name}{props}".format(
            name=self.type_name,
            props=json.dumps({"name": self.name}))

    @classmethod
    def _from_story_string(cls, parameters):
        """Called to convert a parsed story line into an event."""
        return [Form(parameters.get("name"),
                     parameters.get("timestamp"))]

    def as_dict(self):
        d = super(Form, self).as_dict()
        d.update({"name": self.name})
        return d

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        tracker.change_form_to(self.name)
class FormValidation(Event):
    """Event added by FormPolicy to notify form action
    whether or not to validate the user input."""

    type_name = "form_validation"

    def __init__(self,
                 validate,
                 timestamp=None):
        # ``validate``: should the form action validate the user input?
        self.validate = validate
        super(FormValidation, self).__init__(timestamp)

    def __str__(self):
        return "FormValidation({})".format(self.validate)

    def __hash__(self):
        return hash(self.validate)

    def __eq__(self, other):
        # Bug fix: equality now also compares ``validate``. The original
        # compared only the type, so two events with different flags were
        # "equal" while hashing differently - a violation of the
        # __eq__/__hash__ contract.
        return (isinstance(other, FormValidation)
                and self.validate == other.validate)

    def as_story_string(self):
        # Validation toggles never appear in training stories.
        return None

    @classmethod
    def _from_parameters(cls, parameters):
        return FormValidation(parameters.get("validate"),
                              parameters.get("timestamp"))

    def as_dict(self):
        d = super(FormValidation, self).as_dict()
        d.update({"validate": self.validate})
        return d

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        tracker.set_form_validation(self.validate)
class ActionExecutionRejected(Event):
    """Notify Core that the execution of the action has been rejected."""

    type_name = 'action_execution_rejected'

    def __init__(self,
                 action_name,
                 policy=None,
                 confidence=None,
                 timestamp=None):
        self.action_name = action_name
        self.policy = policy
        self.confidence = confidence
        super(ActionExecutionRejected, self).__init__(timestamp)

    def __str__(self):
        return ("ActionExecutionRejected("
                "action: {}, policy: {}, confidence: {})"
                "".format(self.action_name, self.policy, self.confidence))

    def __hash__(self):
        return hash(self.action_name)

    def __eq__(self, other):
        return (isinstance(other, ActionExecutionRejected)
                and self.action_name == other.action_name)

    @classmethod
    def _from_parameters(cls, parameters):
        return ActionExecutionRejected(parameters.get("name"),
                                       parameters.get("policy"),
                                       parameters.get("confidence"),
                                       parameters.get("timestamp"))

    def as_story_string(self):
        # Rejections never appear in training stories.
        return None

    def as_dict(self):
        d = super(ActionExecutionRejected, self).as_dict()
        d.update({"name": self.action_name,
                  "policy": self.policy,
                  "confidence": self.confidence})
        return d

    def apply_to(self, tracker: 'DialogueStateTracker') -> None:
        tracker.reject_action(self.action_name)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library for converting service configs to OpenAPI (Swagger) specs."""
from __future__ import absolute_import
import hashlib
import json
import logging
import re
from . import api_exceptions
from . import message_parser
from . import message_types
from . import messages
from . import remote
from . import resource_container
from . import util
# Module-level logger for this generator.
_logger = logging.getLogger(__name__)

# Matches a "{variable}" path-template segment and captures its name.
_PATH_VARIABLE_PATTERN = r'{([a-zA-Z_][a-zA-Z_.\d]*)}'

_MULTICLASS_MISMATCH_ERROR_TEMPLATE = (
    'Attempting to implement service %s, version %s, with multiple '
    'classes that aren\'t compatible. See docstring for api() for '
    'examples how to implement a multi-class API.')

_INVALID_AUTH_ISSUER = 'No auth issuer named %s defined in this Endpoints API.'

# Security-definition name and query-parameter name used for API-key auth.
_API_KEY = 'api_key'
_API_KEY_PARAM = 'key'

_DEFAULT_SECURITY_DEFINITION = 'google_id_token'

# Valid API names: a lowercase letter followed by up to 39 lowercase
# alphanumerics.
_VALID_API_NAME = re.compile('^[a-z][a-z0-9]{0,39}$')
def _validate_api_name(name):
  """Return *name* unchanged, raising if it is not a valid API name."""
  if _VALID_API_NAME.match(name) is None:
    raise api_exceptions.InvalidApiNameException(
        'The API name must match the regular expression {}'.format(
            _VALID_API_NAME.pattern[1:-1]))
  return name
class OpenApiGenerator(object):
"""Generates an OpenAPI spec from a ProtoRPC service.
Example:
class HelloRequest(messages.Message):
my_name = messages.StringField(1, required=True)
class HelloResponse(messages.Message):
hello = messages.StringField(1, required=True)
class HelloService(remote.Service):
@remote.method(HelloRequest, HelloResponse)
def hello(self, request):
return HelloResponse(hello='Hello there, %s!' %
request.my_name)
api_config = OpenApiGenerator().pretty_print_config_to_json(HelloService)
The resulting api_config will be a JSON OpenAPI document describing the API
implemented by HelloService.
"""
# Constants for categorizing a request method.
# __NO_BODY - Request without a request body, such as GET and DELETE methods.
# __HAS_BODY - Request (such as POST/PUT/PATCH) with info in the request body.
__NO_BODY = 1 # pylint: disable=invalid-name
__HAS_BODY = 2 # pylint: disable=invalid-name
def __init__(self):
    """Initialize the generator with a fresh schema parser and empty caches."""
    self.__parser = message_parser.MessageTypeToJsonSchema()

    # Maps method id to the request schema id.
    self.__request_schema = {}

    # Maps method id to the response schema id.
    self.__response_schema = {}
def _add_def_paths(self, prop_dict):
  """Recursive method to add relative paths for any $ref objects.

  Args:
    prop_dict: The property dict to alter.

  Side Effects:
    Alters prop_dict in-place.
  """
  for prop_key, prop_value in prop_dict.items():
    # Bug fix: the original tested ``'prop_value'.startswith('#')`` - a
    # string literal, which is always False - so a $ref that was already
    # relative ('#/definitions/...') got the prefix added a second time.
    if prop_key == '$ref' and not prop_value.startswith('#'):
      prop_dict[prop_key] = '#/definitions/' + prop_dict[prop_key]
    elif isinstance(prop_value, dict):
      self._add_def_paths(prop_value)
def _construct_operation_id(self, service_name, protorpc_method_name):
  """Return an operation id for a service method.

  Args:
    service_name: The name of the service.
    protorpc_method_name: The ProtoRPC method name.

  Returns:
    A string representing the operation id.
  """
  # camelCase the ProtoRPC method name before joining it to the service.
  return '{0}_{1}'.format(
      service_name,
      util.snake_case_to_headless_camel_case(protorpc_method_name))
def __get_request_kind(self, method_info):
  """Categorize the type of the request.

  Args:
    method_info: _MethodInfo, method information.

  Returns:
    The kind of request (__NO_BODY or __HAS_BODY).
  """
  # GET and DELETE requests carry no request body; everything else does.
  if method_info.http_method in ('GET', 'DELETE'):
    return self.__NO_BODY
  return self.__HAS_BODY
def __field_to_subfields(self, field):
  """Fully describes data represented by field, including the nested case.

  A non-message field describes itself, so it yields a single chain
  containing just that field. A message field is expanded recursively:
  every chain produced for each of its subfields (ordered by field
  number) is prefixed with the message field itself.

  For example, for a message field ``m_field`` referencing a class with
  fields ``one`` (string) and ``two`` (a message with ``three`` and
  ``four``), the result is::

      [
        [<MessageField "m_field">, <StringField "one">],
        [<MessageField "m_field">, <MessageField "two">, <Field "three">],
        [<MessageField "m_field">, <MessageField "two">, <Field "four">],
      ]

  Args:
    field: An instance of a subclass of messages.Field.

  Returns:
    A list of lists, where each sublist is a chain of fields.
  """
  # Termination condition: a simple field describes itself.
  if not isinstance(field, messages.MessageField):
    return [[field]]

  ordered_subfields = sorted(field.message_type.all_fields(),
                             key=lambda f: f.number)
  result = []
  for subfield in ordered_subfields:
    for chain in self.__field_to_subfields(subfield):
      # Prefix every chain from the recursion with this message field.
      result.append([field] + chain)
  return result
def __field_to_parameter_type_and_format(self, field):
  """Converts the field variant type into a tuple describing the parameter.

  Args:
    field: An instance of a subclass of messages.Field.

  Returns:
    A tuple with the type and format of the field, respectively.

  Raises:
    TypeError: if the field variant is a message variant.
  """
  variant = field.variant
  if variant == messages.Variant.MESSAGE:
    raise TypeError('A message variant can\'t be used in a parameter.')

  # We use lowercase values for types (e.g. 'string' instead of 'STRING').
  # The 64-bit integers are advertised as strings -- this is to
  # accommodate JavaScript, which would otherwise demote them to 32-bit
  # integers.
  custom_variant_map = {
      messages.Variant.DOUBLE: ('number', 'double'),
      messages.Variant.FLOAT: ('number', 'float'),
      messages.Variant.INT64: ('string', 'int64'),
      messages.Variant.SINT64: ('string', 'int64'),
      messages.Variant.UINT64: ('string', 'uint64'),
      messages.Variant.INT32: ('integer', 'int32'),
      messages.Variant.SINT32: ('integer', 'int32'),
      messages.Variant.UINT32: ('integer', 'uint32'),
      messages.Variant.BOOL: ('boolean', None),
      messages.Variant.STRING: ('string', None),
      messages.Variant.BYTES: ('string', 'byte'),
      messages.Variant.ENUM: ('string', None),
  }
  if variant in custom_variant_map:
    return custom_variant_map[variant]
  # Fall back to the lowercased variant name with no explicit format.
  return (variant.name.lower(), None)
def __get_path_parameters(self, path):
  """Parses path paremeters from a URI path and organizes them by parameter.

  Some of the parameters may correspond to message fields, and so will be
  represented as segments corresponding to each subfield; e.g. first.second if
  the field "second" in the message field "first" is pulled from the path.

  The resulting dictionary uses the first segments as keys and each key has as
  value the list of full parameter values with first segment equal to the key.

  If the match path parameter is null, that part of the path template is
  ignored; this occurs if '{}' is used in a template.

  Args:
    path: String; a URI path, potentially with some parameters.

  Returns:
    A dictionary with strings as keys and list of strings as values.
  """
  grouped = {}
  for variable_name in re.findall(_PATH_VARIABLE_PATTERN, path):
    root = variable_name.split('.', 1)[0]
    if root not in grouped:
      grouped[root] = []
    grouped[root].append(variable_name)
  return grouped
def __validate_simple_subfield(self, parameter, field, segment_list,
                               segment_index=0):
  """Verifies that a proposed subfield actually exists and is a simple field.

  Here, simple means it is not a MessageField (nested).

  Args:
    parameter: String; the '.' delimited name of the current field being
      considered. This is relative to some root.
    field: An instance of a subclass of messages.Field. Corresponds to the
      segment of the path before the ones still to be validated; each
      remaining segment must name a field on the preceding message class.
    segment_list: The full list of segments from the '.' delimited subfield
      being validated.
    segment_index: Integer; position in segment_list at which to start
      validating (allows callers to skip already-validated segments).

  Raises:
    TypeError: If the final subfield is a MessageField.
    TypeError: If at any stage the lookup at a segment fails, e.g if a.b
      exists but a.b.c does not exist. This can happen either if a.b is not
      a message field or if a.b.c is not a property on the message class from
      a.b.
  """
  # Iterative equivalent of the original recursive walk: resolve each
  # remaining segment against the current field's message type.
  current_name = parameter
  current_field = field
  for segment in segment_list[segment_index:]:
    current_name += '.' + segment
    try:
      current_field = current_field.type.field_by_name(segment)
    except (AttributeError, KeyError):
      raise TypeError('Subfield %r from path does not exist.'
                      % (current_name,))
  # The fully-resolved field must be simple (not a nested message).
  if isinstance(current_field, messages.MessageField):
    field_class = current_field.__class__.__name__
    raise TypeError('Can\'t use messages in path. Subfield %r was '
                    'included but is a %s.' % (current_name, field_class))
def __validate_path_parameters(self, field, path_parameters):
  """Verifies that all path parameters correspond to an existing subfield.

  Args:
    field: An instance of a subclass of messages.Field. Should be the root
      level property name in each path parameter in path_parameters. For
      example, if the field is called 'foo', then each path parameter should
      begin with 'foo.'.
    path_parameters: A list of Strings representing URI parameter variables.

  Raises:
    TypeError: If one of the path parameters does not start with field.name.
  """
  root_name = field.name
  for param in path_parameters:
    segments = param.split('.')
    if segments[0] != root_name:
      raise TypeError('Subfield %r can\'t come from field %r.'
                      % (param, root_name))
    # The root segment is the field itself; validate the remainder.
    self.__validate_simple_subfield(root_name, field, segments[1:])
def __parameter_default(self, field):
  """Returns default value of field if it has one.

  Args:
    field: A simple field.

  Returns:
    The default value of the field, if any exists, with the exception of an
    enum field, which will have its value cast to a string.
  """
  # Use an identity check against None rather than truthiness: falsy
  # defaults such as 0, False or '' are legitimate defaults and must still
  # be reported to the caller (which itself checks `is not None`).
  if field.default is not None:
    if isinstance(field, messages.EnumField):
      # Enum defaults are serialized by name, not by numeric value.
      return field.default.name
    return field.default
def __parameter_enum(self, param):
  """Returns enum descriptor of a parameter if it is an enum.

  An enum descriptor is a list of keys.

  Args:
    param: A simple field.

  Returns:
    The enum descriptor for the field, if it's an enum descriptor, else
      returns None.
  """
  if not isinstance(param, messages.EnumField):
    return None
  # Order enum names by their numeric value for a stable descriptor.
  entries = sorted(param.type.to_dict().items(), key=lambda entry: entry[1])
  return [name for name, _ in entries]
def __body_parameter_descriptor(self, method_id):
  """Creates the descriptor for a method's request-body parameter.

  Args:
    method_id: string, Unique method identifier (e.g. 'myapi.items.method')

  Returns:
    Dictionary describing the body parameter, referencing the request
      schema previously registered for this method.
  """
  schema_ref = '#/definitions/{0}'.format(self.__request_schema[method_id])
  return {
      'name': 'body',
      'in': 'body',
      'schema': {'$ref': schema_ref},
  }
def __non_body_parameter_descriptor(self, param):
  """Creates descriptor for a parameter.

  Args:
    param: The parameter to be described.

  Returns:
    Dictionary containing a descriptor for the parameter.
  """
  param_type, param_format = self.__field_to_parameter_type_and_format(param)
  descriptor = {
      'name': param.name,
      'type': param_type,
  }
  # Required flag.
  if param.required:
    descriptor['required'] = True
  # Format is optional and only present for some types.
  if param_format:
    descriptor['format'] = param_format
  # Default value, if the field declares one.
  default = self.__parameter_default(param)
  if default is not None:
    descriptor['default'] = default
  # Repeated flag.
  if param.repeated:
    descriptor['repeated'] = True
  # Enum values, if the field is an enum.
  enum_descriptor = self.__parameter_enum(param)
  if enum_descriptor is not None:
    descriptor['enum'] = enum_descriptor
  return descriptor
def __path_parameter_descriptor(self, param):
  """Builds a descriptor for *param* as a (required) path parameter."""
  descriptor = self.__non_body_parameter_descriptor(param)
  # Path parameters are always required in OpenAPI 2.0.
  descriptor.update({'required': True, 'in': 'path'})
  return descriptor
def __query_parameter_descriptor(self, param):
  """Builds a descriptor for *param* as a querystring parameter."""
  descriptor = self.__non_body_parameter_descriptor(param)
  descriptor['in'] = 'query'
  # Repeated fields are rendered as arrays with collectionFormat: multi,
  # replacing the internal 'repeated' marker.
  if param.repeated:
    descriptor['items'] = {'type': descriptor['type']}
    descriptor['type'] = 'array'
    descriptor['collectionFormat'] = 'multi'
    descriptor.pop('repeated', None)
  return descriptor
def __add_parameter(self, param, path_parameters, params):
  """Adds all parameters in a field to a method parameters descriptor.

  Simple fields will only have one parameter, but a message field 'x' that
  corresponds to a message class with fields 'y' and 'z' will result in
  parameters 'x.y' and 'x.z', for example. The mapping from field to
  parameters is mostly handled by __field_to_subfields.

  Args:
    param: Parameter to be added to the descriptor.
    path_parameters: A list of parameters matched from a path for this field.
      For example for the hypothetical 'x' from above if the path was
      '/a/{x.z}/b/{other}' then this list would contain only the element
      'x.z' since 'other' does not match to this field.
    params: List of parameters. Each parameter in the field.
  """
  if isinstance(param, messages.MessageField):
    # Message fields themselves cannot be parameters; only their subfields
    # that appear in the path are described, each as a path parameter.
    for subfield_list in self.__field_to_subfields(param):
      qualified_name = '.'.join(sub.name for sub in subfield_list)
      if qualified_name not in path_parameters:
        continue
      descriptor = self.__path_parameter_descriptor(subfield_list[-1])
      descriptor['required'] = True
      params.append(descriptor)
  else:
    # Simple field: path parameter if it was matched in the path template,
    # otherwise a querystring parameter.
    if param.name in path_parameters:
      descriptor = self.__path_parameter_descriptor(param)
    else:
      descriptor = self.__query_parameter_descriptor(param)
    params.append(descriptor)
def __params_descriptor_without_container(self, message_type,
                                          request_kind, method_id, path):
  """Describe parameters of a method which does not use a ResourceContainer.

  Makes sure that the path parameters are included in the message definition
  and adds any required fields and URL query parameters.

  This method is to preserve backwards compatibility and will be removed in
  a future release.

  Args:
    message_type: messages.Message class, Message with parameters to describe.
    request_kind: The type of request being made.
    method_id: string, Unique method identifier (e.g. 'myapi.items.method')
    path: string, HTTP path to method.

  Returns:
    A list of dicts: Descriptors of the parameters
  """
  params = []

  path_parameter_dict = self.__get_path_parameters(path)
  # Iterate in field-number order so descriptor output is deterministic.
  for field in sorted(message_type.all_fields(), key=lambda f: f.number):
    matched_path_parameters = path_parameter_dict.get(field.name, [])
    self.__validate_path_parameters(field, matched_path_parameters)
    # Path parameters are always described; other fields become query
    # parameters only when the request carries no body.
    if matched_path_parameters or request_kind == self.__NO_BODY:
      self.__add_parameter(field, matched_path_parameters, params)

  # If the request has a body, add the body parameter
  if (message_type != message_types.VoidMessage() and
      request_kind == self.__HAS_BODY):
    params.append(self.__body_parameter_descriptor(method_id))

  return params
def __params_descriptor(self, message_type, request_kind, path, method_id):
  """Describe the parameters of a method.

  If the message_type is not a ResourceContainer, will fall back to
  __params_descriptor_without_container (which will eventually be deprecated).

  If the message type is a ResourceContainer, then all path/query parameters
  will come from the ResourceContainer. This method will also make sure all
  path parameters are covered by the message fields.

  Args:
    message_type: messages.Message or ResourceContainer class, Message with
      parameters to describe.
    request_kind: The type of request being made.
    path: string, HTTP path to method.
    method_id: string, Unique method identifier (e.g. 'myapi.items.method')

  Returns:
    A list of dicts: Descriptors of the parameters.
  """
  path_parameter_dict = self.__get_path_parameters(path)

  if not isinstance(message_type, resource_container.ResourceContainer):
    # Legacy path: plain message types with path parameters are deprecated
    # but still handled; warn so callers migrate to ResourceContainer.
    if path_parameter_dict:
      _logger.warning('Method %s specifies path parameters but you are not '
                      'using a ResourceContainer; instead, you are using %r. '
                      'This will fail in future releases; please switch to '
                      'using ResourceContainer as soon as possible.',
                      method_id, type(message_type))
    return self.__params_descriptor_without_container(
        message_type, request_kind, method_id, path)

  # From here, we can assume message_type is a ResourceContainer.
  params = []

  # Process body parameter, if any
  if message_type.body_message_class != message_types.VoidMessage:
    params.append(self.__body_parameter_descriptor(method_id))

  # Process path/querystring parameters
  params_message_type = message_type.parameters_message_class()

  # Make sure all path parameters are covered.
  # NOTE: iteritems() indicates this module targets Python 2.
  for field_name, matched_path_parameters in path_parameter_dict.iteritems():
    field = params_message_type.field_by_name(field_name)
    self.__validate_path_parameters(field, matched_path_parameters)

  # Add all fields, sort by field.number since we have parameterOrder.
  for field in sorted(params_message_type.all_fields(),
                      key=lambda f: f.number):
    matched_path_parameters = path_parameter_dict.get(field.name, [])
    self.__add_parameter(field, matched_path_parameters, params)

  return params
def __request_message_descriptor(self, request_kind, message_type, method_id,
                                 path):
  """Describes the parameters and body of the request.

  Args:
    request_kind: The type of request being made.
    message_type: messages.Message or ResourceContainer class. The message to
      describe.
    method_id: string, Unique method identifier (e.g. 'myapi.items.method')
    path: string, HTTP path to method.

  Returns:
    Dictionary describing the request.

  Raises:
    ValueError: if the method path and request required fields do not match
  """
  # A ResourceContainer wraps the body message; unwrap it so the request
  # schema describes the actual payload type.
  if isinstance(message_type, resource_container.ResourceContainer):
    base_message_type = message_type.body_message_class()
  else:
    base_message_type = message_type

  # Register a request schema only when there is a real (non-void) body.
  if (request_kind != self.__NO_BODY and
      base_message_type != message_types.VoidMessage()):
    self.__request_schema[method_id] = self.__parser.add_message(
        base_message_type.__class__)

  params = self.__params_descriptor(message_type, request_kind, path,
                                    method_id)

  return params
def __definitions_descriptor(self):
  """Describes the definitions section of the OpenAPI spec.

  Returns:
    Dictionary describing the definitions of the spec.
  """
  # Filter out any keys that aren't 'properties' or 'type'
  result = {}
  # NOTE: iteritems()/itervalues() indicate this module targets Python 2.
  for def_key, def_value in self.__parser.schemas().iteritems():
    if 'properties' in def_value or 'type' in def_value:
      key_result = {}
      required_keys = set()
      if 'type' in def_value:
        key_result['type'] = def_value['type']
      if 'properties' in def_value:
        # OpenAPI 2.0 expresses requiredness as a schema-level list of
        # property names, not per-property flags; collect and strip the
        # per-property markers here (mutating the parser's schema dicts).
        for prop_key, prop_value in def_value['properties'].items():
          if isinstance(prop_value, dict) and 'required' in prop_value:
            required_keys.add(prop_key)
            del prop_value['required']
        key_result['properties'] = def_value['properties']
      # Add in the required fields, if any
      if required_keys:
        key_result['required'] = sorted(required_keys)
      result[def_key] = key_result

  # Add 'type': 'object' to all object properties
  # Also, recursively add relative path to all $ref values
  for def_value in result.itervalues():
    for prop_value in def_value.itervalues():
      if isinstance(prop_value, dict):
        if '$ref' in prop_value:
          prop_value['type'] = 'object'
        self._add_def_paths(prop_value)

  return result
def __response_message_descriptor(self, message_type, method_id):
  """Describes the response.

  Args:
    message_type: messages.Message class, The message to describe.
    method_id: string, Unique method identifier (e.g. 'myapi.items.method')

  Returns:
    Dictionary describing the response.
  """
  # Every method gets a generic 200 entry; non-void responses also get a
  # schema reference registered with the parser.
  success_entry = {'description': 'A successful response'}
  if message_type != message_types.VoidMessage():
    self.__parser.add_message(message_type.__class__)
    schema_ref = self.__parser.ref_for_message_type(message_type.__class__)
    self.__response_schema[method_id] = schema_ref
    success_entry['schema'] = {
        '$ref': '#/definitions/{0}'.format(schema_ref)}
  return {'200': success_entry}
def __x_google_quota_descriptor(self, metric_costs):
  """Describes the metric costs for a call.

  Args:
    metric_costs: Dict of metric definitions to the integer cost value against
      that metric.

  Returns:
    A dict descriptor describing the Quota limits for the endpoint, or None
      when no metric costs are configured.
  """
  if not metric_costs:
    return None
  # A shallow copy is equivalent to rebuilding the mapping entry by entry.
  return {'metricCosts': dict(metric_costs)}
def __x_google_quota_definitions_descriptor(self, limit_definitions):
  """Describes the quota limit definitions for an API.

  Args:
    limit_definitions: List of endpoints.LimitDefinition tuples

  Returns:
    A dict descriptor of the API's quota limit definitions, or None when no
      limit definitions are configured.
  """
  if not limit_definitions:
    return None
  limits = []
  metrics = []
  # Each limit definition contributes both a quota limit and a metric.
  for limit in limit_definitions:
    limits.append({
        'name': limit.metric_name,
        'metric': limit.metric_name,
        'unit': '1/min/{project}',
        'values': {'STANDARD': limit.default_limit},
        'displayName': limit.display_name,
    })
    metrics.append({
        'name': limit.metric_name,
        'valueType': 'INT64',
        'metricKind': 'GAUGE',
    })
  return {
      'quota': {'limits': limits},
      'metrics': metrics,
  }
def __method_descriptor(self, service, method_info, operation_id,
                        protorpc_method_info, security_definitions):
  """Describes a method.

  Args:
    service: endpoints.Service, Implementation of the API as a service.
    method_info: _MethodInfo, Configuration for the method.
    operation_id: string, Operation ID of the method
    protorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC
      description of the method.
    security_definitions: list of dicts, security definitions for the API.
      May be extended in place by __security_descriptor.

  Returns:
    Dictionary describing the method.
  """
  descriptor = {}

  request_message_type = (resource_container.ResourceContainer.
                          get_request_message(protorpc_method_info.remote))
  request_kind = self.__get_request_kind(method_info)
  remote_method = protorpc_method_info.remote

  path = method_info.get_path(service.api_info)

  descriptor['parameters'] = self.__request_message_descriptor(
      request_kind, request_message_type,
      method_info.method_id(service.api_info),
      path)
  descriptor['responses'] = self.__response_message_descriptor(
      remote_method.response_type(), method_info.method_id(service.api_info))
  descriptor['operationId'] = operation_id

  # Insert the auth audiences, if any. Method-level audiences take
  # precedence over API-level audiences.
  api_key_required = method_info.is_api_key_required(service.api_info)
  if method_info.audiences is not None:
    descriptor['security'] = self.__security_descriptor(
        method_info.audiences, security_definitions,
        api_key_required=api_key_required)
  elif service.api_info.audiences is not None or api_key_required:
    descriptor['security'] = self.__security_descriptor(
        service.api_info.audiences, security_definitions,
        api_key_required=api_key_required)

  # Insert the metric costs, if any
  if method_info.metric_costs:
    descriptor['x-google-quota'] = self.__x_google_quota_descriptor(
        method_info.metric_costs)

  return descriptor
def __security_descriptor(self, audiences, security_definitions,
                          api_key_required=False):
  """Builds the OpenAPI 'security' section for a method.

  Args:
    audiences: list/tuple of audience strings, or dict mapping issuer names
      to lists of audiences; may be falsy when no auth is configured.
    security_definitions: dict of security definitions for the API. NOTE:
      extended in place with audience-specific derived definitions.
    api_key_required: bool, whether an API key is required for the method.

  Returns:
    A list of dicts mapping security-definition names to scope lists
      (always empty scope lists here).

  Raises:
    TypeError: if an issuer in audiences has no entry in
      security_definitions.
  """
  if not audiences:
    if not api_key_required:
      # no security
      return []
    # api key only
    return [{_API_KEY: []}]
  # A bare list of audiences is attributed to the default issuer.
  if isinstance(audiences, (tuple, list)):
    audiences = {_DEFAULT_SECURITY_DEFINITION: audiences}
  results = []
  for issuer, issuer_audiences in audiences.items():
    result_dict = {}
    if issuer not in security_definitions:
      raise TypeError('Missing issuer {}'.format(issuer))
    # Each distinct audience set gets its own derived security definition,
    # keyed by the issuer name plus a short hash of the sorted audiences.
    audience_string = ','.join(sorted(issuer_audiences))
    audience_hash = hashfunc(audience_string)
    full_definition_key = '-'.join([issuer, audience_hash])
    result_dict[full_definition_key] = []
    if api_key_required:
      result_dict[_API_KEY] = []
    if full_definition_key not in security_definitions:
      # Clone the issuer's base definition and pin the audiences on the
      # copy; this mutates the caller's security_definitions mapping.
      new_definition = dict(security_definitions[issuer])
      new_definition['x-google-audiences'] = audience_string
      security_definitions[full_definition_key] = new_definition
    results.append(result_dict)
  return results
def __security_definitions_descriptor(self, issuers):
  """Create a descriptor for the security definitions.

  Args:
    issuers: dict, mapping issuer names to Issuer tuples

  Returns:
    The dict representing the security definitions descriptor.
  """
  if not issuers:
    # No issuers configured: fall back to the default Google issuer.
    return {
        _DEFAULT_SECURITY_DEFINITION: {
            'authorizationUrl': '',
            'flow': 'implicit',
            'type': 'oauth2',
            'x-google-issuer': 'https://accounts.google.com',
            'x-google-jwks_uri': 'https://www.googleapis.com/oauth2/v3/certs',
        }
    }

  result = {}
  for issuer_key, issuer_value in issuers.items():
    definition = {
        'authorizationUrl': '',
        'flow': 'implicit',
        'type': 'oauth2',
        'x-google-issuer': issuer_value.issuer,
    }
    # If jwks_uri is omitted, the auth library will use OpenID discovery
    # to find it. Otherwise, include it in the descriptor explicitly.
    if issuer_value.jwks_uri:
      definition['x-google-jwks_uri'] = issuer_value.jwks_uri
    result[issuer_key] = definition
  return result
def __get_merged_api_info(self, services):
  """Builds a description of an API.

  Args:
    services: List of protorpc.remote.Service instances implementing an
      api/version.

  Returns:
    The _ApiInfo object to use for the API that the given services implement.

  Raises:
    ApiConfigurationError: If there's something wrong with the API
      configuration, such as a multiclass API decorated with different API
      descriptors (see the docstring for api()).
  """
  merged_api_info = services[0].api_info

  # Every remaining service must agree it implements the same API as the
  # first one.
  for service in services[1:]:
    if merged_api_info.is_same_api(service.api_info):
      continue
    raise api_exceptions.ApiConfigurationError(
        _MULTICLASS_MISMATCH_ERROR_TEMPLATE % (service.api_info.name,
                                               service.api_info.api_version))

  return merged_api_info
def __api_openapi_descriptor(self, services, hostname=None,
                             x_google_api_name=False):
  """Builds an OpenAPI description of an API.

  Args:
    services: List of protorpc.remote.Service instances implementing an
      api/version.
    hostname: string, Hostname of the API, to override the value set on the
      current service. Defaults to None.
    x_google_api_name: bool, whether to add the x-google-api-name extension
      to the descriptor.

  Returns:
    A dictionary that can be deserialized into JSON and stored as an API
    description document in OpenAPI format.

  Raises:
    ApiConfigurationError: If there's something wrong with the API
      configuration, such as a multiclass API decorated with different API
      descriptors (see the docstring for api()), or a repeated method
      signature.
  """
  merged_api_info = self.__get_merged_api_info(services)
  descriptor = self.get_descriptor_defaults(merged_api_info,
                                            hostname=hostname,
                                            x_google_api_name=x_google_api_name)

  # A single-service API with no explicit description falls back to the
  # service class docstring.
  description = merged_api_info.description
  if not description and len(services) == 1:
    description = services[0].__doc__
  if description:
    descriptor['info']['description'] = description

  security_definitions = self.__security_definitions_descriptor(
      merged_api_info.issuers)

  method_map = {}
  # Track method ids and (verb, path) pairs to reject duplicates across
  # all service classes.
  method_collision_tracker = {}
  rest_collision_tracker = {}

  for service in services:
    remote_methods = service.all_remote_methods()

    # NOTE: iterkeys() indicates this module targets Python 2; sorted for
    # deterministic descriptor output.
    for protorpc_meth_name in sorted(remote_methods.iterkeys()):
      protorpc_meth_info = remote_methods[protorpc_meth_name]
      method_info = getattr(protorpc_meth_info, 'method_info', None)
      # Skip methods that are not decorated with @method
      if method_info is None:
        continue
      method_id = method_info.method_id(service.api_info)
      is_api_key_required = method_info.is_api_key_required(service.api_info)
      path = '/{0}/{1}/{2}'.format(merged_api_info.name,
                                   merged_api_info.path_version,
                                   method_info.get_path(service.api_info))
      verb = method_info.http_method.lower()

      if path not in method_map:
        method_map[path] = {}

      # If an API key is required and the security definitions don't already
      # have the apiKey issuer, add the appropriate notation now
      if is_api_key_required and _API_KEY not in security_definitions:
        security_definitions[_API_KEY] = {
            'type': 'apiKey',
            'name': _API_KEY_PARAM,
            'in': 'query'
        }

      # Derive an OperationId from the method name data
      operation_id = self._construct_operation_id(
          service.__name__, protorpc_meth_name)

      method_map[path][verb] = self.__method_descriptor(
          service, method_info, operation_id, protorpc_meth_info,
          security_definitions)

      # Make sure the same method name isn't repeated.
      if method_id in method_collision_tracker:
        raise api_exceptions.ApiConfigurationError(
            'Method %s used multiple times, in classes %s and %s' %
            (method_id, method_collision_tracker[method_id],
             service.__name__))
      else:
        method_collision_tracker[method_id] = service.__name__

      # Make sure the same HTTP method & path aren't repeated.
      rest_identifier = (method_info.http_method,
                         method_info.get_path(service.api_info))
      if rest_identifier in rest_collision_tracker:
        raise api_exceptions.ApiConfigurationError(
            '%s path "%s" used multiple times, in classes %s and %s' %
            (method_info.http_method, method_info.get_path(service.api_info),
             rest_collision_tracker[rest_identifier],
             service.__name__))
      else:
        rest_collision_tracker[rest_identifier] = service.__name__

  if method_map:
    descriptor['paths'] = method_map

  # Add request and/or response definitions, if any
  definitions = self.__definitions_descriptor()
  if definitions:
    descriptor['definitions'] = definitions

  descriptor['securityDefinitions'] = security_definitions

  # Add quota limit metric definitions, if any
  limit_definitions = self.__x_google_quota_definitions_descriptor(
      merged_api_info.limit_definitions)
  if limit_definitions:
    descriptor['x-google-management'] = limit_definitions

  return descriptor
def get_descriptor_defaults(self, api_info, hostname=None,
                            x_google_api_name=False):
  """Gets a default configuration for a service.

  Args:
    api_info: _ApiInfo object for this service.
    hostname: string, Hostname of the API, to override the value set on the
      current service. Defaults to None.
    x_google_api_name: bool, whether to add the x-google-api-name extension.

  Returns:
    A dictionary with the default configuration.
  """
  hostname = hostname or util.get_app_hostname() or api_info.hostname
  # Local development (localhost or the devserver) is served over HTTP;
  # everything else uses HTTPS.
  if ((hostname and hostname.startswith('localhost')) or
      util.is_running_on_devserver()):
    protocol = 'http'
  else:
    protocol = 'https'
  base_path = api_info.base_path
  if base_path != '/':
    base_path = base_path.rstrip('/')
  defaults = {
      'swagger': '2.0',
      'info': {
          'version': api_info.api_version,
          'title': api_info.name
      },
      'host': hostname,
      'consumes': ['application/json'],
      'produces': ['application/json'],
      'schemes': [protocol],
      'basePath': base_path,
  }
  if x_google_api_name:
    defaults['x-google-api-name'] = _validate_api_name(api_info.name)
  return defaults
def get_openapi_dict(self, services, hostname=None, x_google_api_name=False):
  """JSON dict description of a protorpc.remote.Service in OpenAPI format.

  Args:
    services: Either a single protorpc.remote.Service or a list of them
      that implements an api/version.
    hostname: string, Hostname of the API, to override the value set on the
      current service. Defaults to None.
    x_google_api_name: bool, whether to add the x-google-api-name extension.

  Returns:
    dict, The OpenAPI descriptor document as a JSON dict.
  """
  if not isinstance(services, (tuple, list)):
    services = [services]

  # remote.Service subclasses are instances of remote._ServiceClass thanks
  # to metaclass strangeness, hence the protected access below.
  # pylint: disable=protected-access
  util.check_list_type(services, remote._ServiceClass, 'services',
                       allow_none=False)

  return self.__api_openapi_descriptor(
      services, hostname=hostname, x_google_api_name=x_google_api_name)
def pretty_print_config_to_json(self, services, hostname=None,
                                x_google_api_name=False):
  """JSON string description of a protorpc.remote.Service in OpenAPI format.

  Args:
    services: Either a single protorpc.remote.Service or a list of them
      that implements an api/version.
    hostname: string, Hostname of the API, to override the value set on the
      current service. Defaults to None.
    x_google_api_name: bool, whether to add the x-google-api-name extension.

  Returns:
    string, The OpenAPI descriptor document as a JSON string.
  """
  descriptor = self.get_openapi_dict(
      services, hostname, x_google_api_name=x_google_api_name)
  return json.dumps(descriptor, sort_keys=True, indent=2,
                    separators=(',', ': '))
def hashfunc(string):
  """Return a short, stable 8-character hex digest of *string*.

  Args:
    string: str or bytes to hash. Text input is encoded as UTF-8 first so
      the function also works on Python 3, where hashlib.md5 requires
      bytes; bytes input behaves exactly as before.

  Returns:
    string, the first 8 hex characters of the MD5 digest.
  """
  if isinstance(string, str):
    string = string.encode('utf-8')
  return hashlib.md5(string).hexdigest()[:8]
| |
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
from urllib.parse import urlparse
import charms_openstack.adapters
import charms_openstack.charm
import charmhelpers.core as ch_core
import charmhelpers.fetch as fetch
import charmhelpers.core.unitdata as unitdata
import charmhelpers.contrib.openstack.utils as os_utils
import charms.reactive as reactive
TV_MOUNTS = "/var/triliovault-mounts"
# Location of the certificate file to use when talking to S3 endpoint.
S3_SSL_CERT_FILE = '/usr/share/ca-certificates/charm-s3.cert'
# Used to store the discovered release version for caching between invocations
TRILIO_RELEASE_KEY = 'charmers.trilio-release-version'
# _trilio_releases{} is a dictionary of release -> class that is instantiated
# according to the release that is being requested. i.e. a charm can
# handle more than one release. The BaseOpenStackCharm() derived class sets the
# `release` variable to indicate which OpenStack release that the charm
# supports and `trilio_release` to indicate which Trilio release the charm
# supports. Any subsequent releases that need a different/specialised charm
# use the `release` and `trilio_release` class properties to indicate that
# they handle those releases onwards.
_trilio_releases = {}
@charms_openstack.adapters.config_property
def trilio_properties(cls):
    """Trilio properties additions for config adapter.

    :param cls: Configuration Adapter class
    :type cls: charms_openstack.adapters.DefaultConfigurationAdapter
    """
    current_version = cls.charm_instance.release_pkg_version()
    # Trilio 4.1 and later use a dedicated database and the dmapi
    # transport; earlier releases use the legacy arrangements.
    if fetch.apt_pkg.version_compare(current_version, '4.1') >= 0:
        return {
            'db_type': 'dedicated',
            'transport_type': 'dmapi'}
    return {
        'db_type': 'legacy',
        'transport_type': 'legacy'}
@charms_openstack.adapters.config_property
def trilio_s3_cert_config(cls):
    """Trilio S3 certificate config.

    :param cls: Configuration Adapter class
    :type cls: charms_openstack.adapters.DefaultConfigurationAdapter
    """
    cert_b64 = ch_core.hookenv.config('tv-s3-ssl-cert')
    if not cert_b64:
        return {}
    # The charm config holds the certificate base64-encoded; decode it for
    # writing to S3_SSL_CERT_FILE.
    return {
        'cert_file': S3_SSL_CERT_FILE,
        'cert_data': base64.b64decode(cert_b64).decode('utf-8')}
class AptPkgVersion():
    """Allow package version to be compared using apt's version semantics."""

    def __init__(self, version):
        # Raw version string, compared via apt_pkg.version_compare.
        self.version = version

    def _compare(self, other):
        """Return apt's three-way comparison result against *other*."""
        return fetch.apt_pkg.version_compare(self.version, other.version)

    def __lt__(self, other):
        return self._compare(other) == -1

    def __eq__(self, other):
        return self._compare(other) == 0

    def __gt__(self, other):
        return self._compare(other) == 1

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return self.version

    def __hash__(self):
        return hash(repr(self))
class NFSShareNotMountedException(Exception):
    """Signal that the trilio nfs share is not mounted."""
class UnitNotLeaderException(Exception):
    """Signal that the unit is not the application leader."""
class GhostShareAlreadyMountedException(Exception):
    """Signal that a ghost share is already mounted."""
class MismatchedConfigurationException(Exception):
    """Signal that nfs-shares and ghost-shares are mismatched."""
def _configure_triliovault_source():
    """Configure triliovault specific package sources in addition to
    any general openstack package sources (via openstack-origin)
    """
    source = ch_core.hookenv.config("triliovault-pkg-source")
    target = "/etc/apt/sources.list.d/trilio-gemfury-sources.list"
    with open(target, "w") as tsources:
        tsources.write(source)
def _install_triliovault(charm):
    """Install packages dealing with Trilio nuances for upgrades as well.

    When the 'upgrade.triliovault' flag is set, all Trilio packages are
    (re)installed so that upgrades are applied; otherwise only the packages
    not yet installed are fetched.
    """
    upgrading = reactive.is_flag_set("upgrade.triliovault")
    if upgrading:
        packages = charm.all_packages
    else:
        packages = fetch.filter_installed_packages(charm.all_packages)

    if packages:
        ch_core.hookenv.status_set('maintenance',
                                   'Installing/upgrading packages')
        fetch.apt_install(packages, fatal=True)

    # AJK: we set this as charms can use it to detect installed state
    charm.set_state('{}-installed'.format(charm.name))
    charm.update_api_ports()

    # NOTE(jamespage): clear upgrade flag if set
    if upgrading:
        reactive.clear_flag('upgrade.triliovault')
def get_trilio_codename_install_source(trilio_source):
    """Derive codename from trilio source string.

    Try and derive a trilio version from a deb string like:

        'deb [trusted=yes] https://apt.fury.io/triliodata-4-0/ /'

    :param trilio_source: Trilio source
    :type trilio_source: str
    :returns: Trilio version
    :rtype: str
    :raises: AssertionError
    """
    # The repository URL is the second-to-last whitespace-separated token.
    deb_url = trilio_source.split()[-2]
    # Version appears in the URL path as e.g. '-4-0'; map it to '4.0'.
    matches = re.findall(r'-(\d*-\d*)', urlparse(deb_url).path)
    assert len(matches) == 1, "Cannot derive release from {}".format(deb_url)
    return matches[0].replace('-', '.')
def make_trilio_get_charm_instance_handler():
    """Register the Trilio-aware ``get_charm_instance`` factory.

    The registered function resolves a charm class from the module-level
    ``_trilio_releases`` registry, which is keyed by OpenStack release,
    then Trilio release, then package type.
    """
    @charms_openstack.charm.core.register_get_charm_instance
    def get_trilio_charm_instance(release=None, package_type='deb', *args,
                                  **kwargs):
        """Get an instance of the charm based on the release (or use the
        default if release is None).

        Note that it passes args and kwargs to the class __init__() method.

        :param release: String representing release wanted. Should be of the
                        form '<openstack_release>_<trilio_release>'
                        eg 'queens_4.0'
        :type release: str
        :param package_type: The package type required
        :type package_type: str
        :returns: Charm class
        :rtype: BaseOpenStackCharm() derived class according to cls.releases
        :raises RuntimeError: if the release is unknown or unsupported
        """
        cls = None
        known_os_releases = sorted(_trilio_releases.keys())
        if release is None:
            # If release is None then select the class(es) which supports the
            # most recent OpenStack release, from within this set select the
            # class that supports the most recent Trilio release.
            os_release = known_os_releases[-1]
            known_trilio_releases = sorted(_trilio_releases[os_release].keys())
            trilio_release = known_trilio_releases[-1]
            cls = _trilio_releases[os_release][trilio_release][package_type]
        else:
            # Split '<openstack>_<trilio>' into its two components.
            os_release, trilio_release = release.split('_')
            trilio_release = AptPkgVersion(trilio_release)
            if os_release not in os_utils.OPENSTACK_RELEASES:
                raise RuntimeError(
                    "Release {} is not a known OpenStack release?".format(
                        os_release))
            os_release_index = os_utils.OPENSTACK_RELEASES.index(os_release)
            if (os_release_index <
                    os_utils.OPENSTACK_RELEASES.index(known_os_releases[0])):
                raise RuntimeError(
                    "Release {} is not supported by this charm. Earliest "
                    "support is {} release".format(
                        os_release,
                        known_os_releases[0]))
            else:
                known_trilio_releases = []
                # Search through the dictionary of registered charm classes
                # looking for the most recent group which can support
                # `os_release`
                for known_os_release in reversed(known_os_releases):
                    _idx = os_utils.OPENSTACK_RELEASES.index(known_os_release)
                    if os_release_index >= _idx:
                        trilio_classes = _trilio_releases[known_os_release]
                        known_trilio_releases = sorted(trilio_classes.keys())
                        break
                # Search through the dictionary of registered charm classes
                # that support `known_os_release` onwards and look for the
                # class which supports the most recent trilio release which
                # is <= `trilio_release`
                for known_trilio_release in reversed(known_trilio_releases):
                    if known_trilio_release <= trilio_release:
                        cls = trilio_classes[known_trilio_release][
                            package_type]
                        # Found a class so exit loop
                        break
        if cls is None:
            raise RuntimeError("Release {} is not supported".format(release))
        return cls(release=os_release, *args, **kwargs)
def make_trilio_handlers():
    """Register both Trilio release-handling hooks.

    Installs the Trilio-aware ``get_charm_instance`` factory and the
    combined OpenStack/Trilio release selector.
    """
    make_trilio_get_charm_instance_handler()
    make_trilio_select_release_handler()
def make_trilio_select_release_handler():
    """Register the combined OpenStack/Trilio release selector function."""
    @charms_openstack.charm.core.register_os_release_selector
    def select_trilio_release():
        """Determine the OpenStack and Trilio release.

        Determine the OpenStack release based on the `singleton.os_release_pkg`
        that is installed. If it is not installed look for and examine other
        semantic versioned packages. If both those tactics fail fall back to
        checking the charm `openstack-origin` option.

        Determine the Trilio release based on the `singleton.version_package`
        that is installed. If it is not installed fall back to checking the
        charm `triliovault-pkg-source` option.

        Note that this function caches the release after the first install so
        that it doesn't need to keep going and getting it from the package
        information.

        :returns: combined release string '<openstack>_<trilio>'
        :rtype: str
        """
        singleton = None
        # Search for target OpenStack Release (cached in unit kv store)
        os_release_version = unitdata.kv().get(
            charms_openstack.charm.core.OPENSTACK_RELEASE_KEY,
            None)
        if os_release_version is None:
            try:
                # First make an attempt of determining release from a charm
                # instance defined package codename dictionary.
                singleton = charms_openstack.charm.core.get_charm_instance()
                if singleton.release_pkg is None:
                    raise RuntimeError("release_pkg is not set")
                os_release_version = singleton.get_os_codename_package(
                    singleton.os_release_pkg, singleton.package_codenames,
                    apt_cache_sufficient=(not singleton.source_config_key))
                if os_release_version is None:
                    # Surprisingly get_os_codename_package called with
                    # ``Fatal=True`` does not raise an error when the charm
                    # class ``package_codenames`` map does not contain package
                    # or major version. We'll handle it here instead of
                    # changing the API of the method.
                    raise ValueError
            except (AttributeError, ValueError):
                try:
                    pkgs = os_utils.get_installed_semantic_versioned_packages()
                    pkg = pkgs[0]
                except IndexError:
                    # A non-existent package will cause os_release to try other
                    # tactics for deriving the release.
                    pkg = 'dummy-package'
                # NOTE(review): if get_charm_instance() itself raised
                # AttributeError above, `singleton` is still None here and
                # `singleton.source_config_key` would raise — presumably
                # get_charm_instance() never raises AttributeError; confirm.
                os_release_version = os_utils.os_release(
                    pkg, source_key=singleton.source_config_key)
            unitdata.kv().set(
                charms_openstack.charm.core.OPENSTACK_RELEASE_KEY,
                os_release_version)
            unitdata.kv().flush()
        # Search for target Trilio Release (cached in unit kv store)
        trilio_release_version = unitdata.kv().get(TRILIO_RELEASE_KEY, None)
        if trilio_release_version is None:
            if not singleton:
                singleton = charms_openstack.charm.core.get_charm_instance()
            if singleton.version_package is None:
                raise RuntimeError("version_package is not set")
            try:
                trilio_release_version = singleton.get_package_version(
                    singleton.version_package)
            except (AttributeError, ValueError):
                # Package not installed yet: derive the release from the
                # configured package source instead.
                trilio_release_version = get_trilio_codename_install_source(
                    singleton.trilio_source)
            unitdata.kv().set(TRILIO_RELEASE_KEY, trilio_release_version)
            unitdata.kv().flush()
        return '{}_{}'.format(os_release_version, trilio_release_version)
class BaseTrilioCharmMeta(charms_openstack.charm.core.BaseOpenStackCharmMeta):
    """Metaclass to handle registering charm classes by their supported
    OpenStack release, Trilio release and package type.

    _trilio_releases has the form::

        {
            'OpenStack Code Name': {
                'Trilio Package Version': {
                    'Package Type': <charm class>}},
    """

    def __init__(cls, name, mro, members):
        """Receive the BaseOpenStackCharm() (derived) class and store the
        release that it works against. Each class defines a 'release' which
        corresponds to the OpenStack release that it handles. The class should
        also specify 'trilio_release' which defines the Trilio releases it can
        handle.

        :param name: string for class name.
        :param mro: tuple of base classes.
        :param members: dictionary of name to class attribute (f, p, a, etc.)
        :raises RuntimeError: on unknown package type or release, a missing
            'release'/'trilio_release' attribute, or a duplicate registration
        """
        # Do not attempt to calculate the release for an abstract class
        if members.get('abstract_class', False):
            return
        if all(key in members.keys() for key in ['release', 'trilio_release']):
            package_type = members.get('package_type', 'deb')
            if package_type not in ('deb', 'snap'):
                raise RuntimeError(
                    "Package type {} is not a known type"
                    .format(package_type))
            release = members['release']
            trilio_release = AptPkgVersion(members['trilio_release'])
            if release not in os_utils.OPENSTACK_RELEASES:
                raise RuntimeError(
                    "Release {} is not a known OpenStack release"
                    .format(release))
            try:
                _pre = _trilio_releases[release][trilio_release][package_type]
            except KeyError:
                # All good, this combination has not been registered yet.
                pass
            else:
                raise RuntimeError(
                    "Release {} + {} defined more than once in classes {} and "
                    "{} (at least)"
                    .format(release,
                            trilio_release,
                            _pre.__name__,
                            name))
            # store the class against the release.
            if release not in _trilio_releases:
                _trilio_releases[release] = {}
            if trilio_release not in _trilio_releases[release]:
                _trilio_releases[release][trilio_release] = {}
            _trilio_releases[release][trilio_release][package_type] = cls
        else:
            raise RuntimeError(
                "class '{}' must define both the release it supports using "
                "the 'release' class property and the trilio release it "
                "supports using the 'trilio_release' class property.".format(
                    name))
class TrilioVaultCharmMixin():
    """The TrilioVaultCharm class provides common specialisation of certain
    functions for the Trilio charm set and is designed for use alongside
    other base charms.openstack classes.
    """
    abstract_class = True

    def __init__(self, **kwargs):
        """Drop the 'trilio_release' class-selector key before delegating.

        'trilio_release' is only used by the metaclass/registry, not by the
        base class constructors, so it must not be passed through.
        """
        try:
            del kwargs['trilio_release']
        except KeyError:
            pass
        super().__init__(**kwargs)

    def configure_source(self):
        """Configure triliovault specific package sources in addition to
        any general openstack package sources (via openstack-origin)
        """
        _configure_triliovault_source()
        super().configure_source()

    def install(self):
        """Install packages dealing with Trilio nuances for upgrades as well
        """
        self.configure_source()
        _install_triliovault(self)

    def series_upgrade_complete(self):
        """Re-configure sources post series upgrade"""
        super().series_upgrade_complete()
        self.configure_source()

    @property
    def trilio_source(self):
        """Trilio source config option ('triliovault-pkg-source')."""
        return self.config.get("triliovault-pkg-source")

    def do_trilio_pkg_upgrade(self):
        """Upgrade Trilio packages via apt, then remove obsolete packages."""
        new_os_rel = get_trilio_codename_install_source(
            self.trilio_source)
        ch_core.hookenv.log('Performing Trilio upgrade to %s.' % (new_os_rel))
        # Keep existing config files where modified, otherwise take the
        # package maintainer's version.
        dpkg_opts = [
            '--option', 'Dpkg::Options::=--force-confnew',
            '--option', 'Dpkg::Options::=--force-confdef',
        ]
        fetch.apt_update()
        fetch.apt_install(
            packages=self.all_packages,
            options=dpkg_opts,
            fatal=True)
        self.remove_obsolete_packages()

    def do_trilio_upgrade_db_migration(self):
        """Run Trilio DB sync.

        Trilio charms' sync_cmd refers to a trilio db sync, so the generic
        OpenStack db migration of the base class is reused here.
        """
        super().do_openstack_upgrade_db_migration()

    def run_trilio_upgrade(self, interfaces_list=None):
        """Upgrade the unit to the Trilio release named by the configured
        package source, re-render config and run the db migration.

        :param interfaces_list: List of instances of interface classes
        :returns: None
        """
        ch_core.hookenv.status_set('maintenance', 'Running openstack upgrade')
        cur_os_release = self.get_os_codename_package(
            self.os_release_pkg,
            self.package_codenames)
        new_trilio_release = get_trilio_codename_install_source(
            self.trilio_source)
        new_release = '{}_{}'.format(cur_os_release, new_trilio_release)
        # Record the new Trilio release so the release selector picks it up.
        unitdata.kv().set(TRILIO_RELEASE_KEY, new_trilio_release)
        _configure_triliovault_source()
        # Perform the upgrade through the charm class registered for the
        # target release.
        target_charm = charms_openstack.charm.core.get_charm_instance(
            new_release)
        target_charm.do_trilio_pkg_upgrade()
        target_charm.render_with_interfaces(interfaces_list)
        target_charm.do_trilio_upgrade_db_migration()

    def trilio_upgrade_available(self, package=None):
        """Check if a Trilio upgrade is available.

        :param package: str Package name to use to check upgrade availability
        :returns: bool
        """
        cur_vers = self.get_package_version(package)
        avail_vers = get_trilio_codename_install_source(
            self.trilio_source)
        # version_compare returns 1 when avail_vers is newer than cur_vers.
        return fetch.apt_pkg.version_compare(avail_vers, cur_vers) == 1

    def upgrade_if_available(self, interfaces_list):
        """Run OpenStack and/or Trilio upgrades when available, unless
        action-managed-upgrade is enabled.

        :param interfaces_list: List of instances of interface classes
        """
        if self.openstack_upgrade_available(self.os_release_pkg):
            if self.config.get('action-managed-upgrade', False):
                ch_core.hookenv.log('Not performing OpenStack upgrade as '
                                    'action-managed-upgrade is enabled')
            else:
                self.run_upgrade(interfaces_list=interfaces_list)
        if self.trilio_upgrade_available(
                package=self.trilio_version_package()):
            if self.config.get('action-managed-upgrade', False):
                ch_core.hookenv.log('Not performing Trilio upgrade as '
                                    'action-managed-upgrade is enabled')
            else:
                self.run_trilio_upgrade(interfaces_list=interfaces_list)

    @classmethod
    def trilio_version_package(cls):
        """Name of the package used to determine the Trilio version.

        Concrete charm classes must override this.
        """
        raise NotImplementedError

    @property
    def version_package(self):
        # Trilio charms version against the trilio version package.
        return self.trilio_version_package()

    @property
    def release_pkg(self):
        # Likewise the release package is the trilio version package.
        return self.trilio_version_package()

    @classmethod
    def release_pkg_version(cls):
        """Installed version of the trilio version package."""
        return cls.get_package_version(cls.trilio_version_package())
class TrilioVaultCharm(TrilioVaultCharmMixin,
                       charms_openstack.charm.HAOpenStackCharm,
                       metaclass=BaseTrilioCharmMeta):
    """Abstract base class for TrilioVault principal (HA-capable) charms."""

    abstract_class = True
class TrilioVaultSubordinateCharm(TrilioVaultCharmMixin,
                                  charms_openstack.charm.OpenStackCharm,
                                  metaclass=BaseTrilioCharmMeta):
    """Abstract base class for TrilioVault subordinate charms."""

    abstract_class = True

    def configure_source(self):
        """Configure TrilioVault specific package sources.

        NOTE(review): unlike the mixin's configure_source, this deliberately
        does not call super().configure_source() — subordinates only manage
        the Trilio source — presumably intentional; confirm.
        """
        _configure_triliovault_source()
        fetch.apt_update(fatal=True)
class TrilioVaultCharmGhostAction(object):
    """Shared 'ghost share' action for TrilioVault charms.

    It is designed as a mixin, and is separated out so that it is easier to
    maintain.

    i.e.

        class TrilioWLMCharm(TrilioVaultCharm,
                             TrilioVaultCharmGhostAction):
            ... stuff ...
    """

    def _encode_endpoint(self, backup_endpoint):
        """base64 encode a backup endpoint for cross mounting support.

        :param backup_endpoint: NFS share URL
        :type backup_endpoint: str
        :returns: base64 representation used as the mount directory name
        :rtype: str
        """
        return base64.b64encode(backup_endpoint.encode()).decode()

    def ghost_nfs_share(self, ghost_shares):
        """Bind mount local NFS shares to remote NFS paths.

        :param ghost_shares: Comma separated NFS shares URL to ghost
        :type ghost_shares: str
        :raises MismatchedConfigurationException: when the number of
            ghost shares differs from the number of configured nfs-shares
        """
        ghost_shares = ghost_shares.split(',')
        nfs_shares = ch_core.hookenv.config("nfs-shares").split(',')
        # BUG FIX: the previous IndexError-based check only detected the
        # case where nfs-shares was longer than ghost-shares; an explicit
        # length comparison also catches the converse mismatch.
        if len(nfs_shares) != len(ghost_shares):
            raise MismatchedConfigurationException(
                "ghost-shares and nfs-shares are different lengths"
            )
        for local_share, ghost_share in zip(nfs_shares, ghost_shares):
            self._ghost_nfs_share(local_share, ghost_share)

    def _ghost_nfs_share(self, local_share, ghost_share):
        """Bind mount a local unit NFS share to another site's location.

        :param local_share: Local NFS share URL
        :type local_share: str
        :param ghost_share: NFS share URL to ghost
        :type ghost_share: str
        :raises NFSShareNotMountedException: when the local share is not
            mounted by Trilio yet
        :raises GhostShareAlreadyMountedException: when the bind mount is
            already in place
        """
        nfs_share_path = os.path.join(
            TV_MOUNTS,
            self._encode_endpoint(local_share)
        )
        ghost_share_path = os.path.join(
            TV_MOUNTS, self._encode_endpoint(ghost_share)
        )
        current_mounts = [mount[0] for mount in ch_core.host.mounts()]
        if nfs_share_path not in current_mounts:
            # Trilio has not mounted the NFS share so return
            raise NFSShareNotMountedException(
                "nfs-share ({}) not mounted".format(
                    local_share
                )
            )
        if ghost_share_path in current_mounts:
            # bind mount already setup so return
            raise GhostShareAlreadyMountedException(
                "ghost mountpoint ({}) already bound".format(ghost_share_path)
            )
        if not os.path.exists(ghost_share_path):
            os.mkdir(ghost_share_path)
        ch_core.host.mount(nfs_share_path, ghost_share_path, options="bind")
| |
"""fips verb to build the oryol samples webpage"""
import os
import yaml
import shutil
import subprocess
import glob
from string import Template
from mod import log, util, project, emscripten, android
from tools import texexport
# what to build (feature toggles for the generated samples page)
BuildEmscripten = True
BuildWasm = True
ExportAssets = True
ExtensionSamples = True

# webpage template arguments (substituted into web/index.html and style.css)
GitHubSamplesURL = 'https://github.com/floooh/oryol/tree/master/code/Samples/'
DocTitle = 'Oryol Core Samples'
Title = 'Oryol'
Subtitle = 'core samples'
# alternative theme (kept for reference):
# Separator = 'rainbow-separator'
# GameSeparator = 'game-rainbow-separator'
# BackgroundColor = '#19A3FF' # this is the original bright blue
Separator = 'simple-separator'
GameSeparator = 'simple-separator'
BackgroundColor = '#42A5F5'

# build configuration names passed to fips project gen/build
EmscConfig = 'webgl2-emsc-ninja-release'
WasmConfig = 'webgl2-wasm-ninja-release'
#-------------------------------------------------------------------------------
def deploy_webpage(fips_dir, proj_dir, webpage_dir) :
    """Build the final webpage under fips-deploy/oryol-webpage.

    Reads websamples.yml (created by the last build), renders the index and
    CSS templates, copies static files, generates per-sample asm.js/wasm
    HTML pages and copies sample screenshots.

    :param fips_dir: fips installation directory
    :param proj_dir: project directory containing the web/ templates
    :param webpage_dir: output directory for the generated page
    """
    ws_dir = util.get_workspace_dir(fips_dir)
    # load the websamples.yml file, should have been created during the last
    # build; safe_load is used since the file is plain data (yaml.load
    # without an explicit Loader is deprecated and unsafe).
    with open(webpage_dir + '/websamples.yml', 'r') as f :
        samples = yaml.safe_load(f.read())
    # create output directories for both build targets
    for platform in ['asmjs', 'wasm'] :
        platform_dir = '{}/{}'.format(webpage_dir, platform)
        if not os.path.isdir(platform_dir) :
            os.makedirs(platform_dir)
    # link to the Extension Samples
    content = ''
    if ExtensionSamples :
        content = '<div class="thumb">\n'
        content += '  <div class="thumb-title">To Extension Samples...</div>\n'
        content += '  <div class="img-frame"><a href="http://floooh.github.com/oryol-samples/index.html"><img class="image" src="ext_samples.jpg"></img></a></div>\n'
        content += '</div>\n'
    # build the thumbnail gallery ('__end__' is a sentinel entry)
    for sample in samples :
        if sample['name'] != '__end__' :
            log.info('> adding thumbnail for {}'.format(sample['name']))
            name = sample['name']
            imgPath = sample['image']
            types = sample['type']
            desc = sample['desc']
            head, tail = os.path.split(imgPath)
            if tail == 'none' :
                imgFileName = 'dummy.jpg'
            else :
                imgFileName = tail
            content += '<div class="thumb">\n'
            content += '  <div class="thumb-title">{}</div>\n'.format(name)
            content += '  <div class="img-frame"><a href="asmjs/{}.html"><img class="image" src="{}" title="{}"></img></a></div>\n'.format(name,imgFileName,desc)
            content += '  <div class="thumb-bar">\n'
            content += '    <ul class="thumb-list">\n'
            if BuildEmscripten and 'emscripten' in types :
                content += '      <li class="thumb-item"><a class="thumb-link" href="asmjs/{}.html">asm.js</a></li>\n'.format(name)
            if BuildWasm and 'emscripten' in types :
                content += '      <li class="thumb-item"><a class="thumb-link" href="wasm/{}.html">wasm</a></li>\n'.format(name)
            content += '    </ul>\n'
            content += '  </div>\n'
            content += '</div>\n'
    # populate the html template, and write to the build directory
    with open(proj_dir + '/web/index.html', 'r') as f :
        templ = Template(f.read())
    html = templ.safe_substitute(doctitle=DocTitle, title=Title, subtitle=Subtitle, samples=content, separator=Separator)
    with open(webpage_dir + '/index.html', 'w') as f :
        f.write(html)
    # and the same with the CSS template
    with open(proj_dir + '/web/style.css', 'r') as f :
        templ = Template(f.read())
    css = templ.safe_substitute(background=BackgroundColor)
    with open(webpage_dir +'/style.css', 'w') as f :
        f.write(css)
    # copy other required files
    for name in ['dummy.jpg', 'emsc.js', 'wasm.js', 'about.html', 'favicon.png', 'ext_samples.jpg'] :
        log.info('> copy file: {}'.format(name))
        shutil.copy(proj_dir + '/web/' + name, webpage_dir + '/' + name)
    # generate emscripten HTML pages
    if BuildEmscripten and emscripten.check_exists(fips_dir) :
        emsc_deploy_dir = '{}/fips-deploy/oryol/{}'.format(ws_dir, EmscConfig)
        for sample in samples :
            name = sample['name']
            if name != '__end__' and 'emscripten' in sample['type'] :
                log.info('> generate emscripten HTML page: {}'.format(name))
                for ext in ['js', 'html.mem'] :
                    src_path = '{}/{}.{}'.format(emsc_deploy_dir, name, ext)
                    if os.path.isfile(src_path) :
                        shutil.copy(src_path, '{}/asmjs/'.format(webpage_dir))
                with open(proj_dir + '/web/emsc.html', 'r') as f :
                    templ = Template(f.read())
                src_url = GitHubSamplesURL + sample['src']
                html = templ.safe_substitute(name=name, source=src_url, separator=GameSeparator)
                # fixed: the format string takes two arguments, a redundant
                # third 'name' argument was previously passed
                with open('{}/asmjs/{}.html'.format(webpage_dir, name), 'w') as f :
                    f.write(html)
    # generate WebAssembly HTML pages
    if BuildWasm and emscripten.check_exists(fips_dir) :
        wasm_deploy_dir = '{}/fips-deploy/oryol/{}'.format(ws_dir, WasmConfig)
        for sample in samples :
            name = sample['name']
            if name != '__end__' and 'emscripten' in sample['type'] :
                log.info('> generate wasm HTML page: {}'.format(name))
                for ext in ['js', 'wasm.mappedGlobals'] :
                    src_path = '{}/{}.{}'.format(wasm_deploy_dir, name, ext)
                    if os.path.isfile(src_path) :
                        shutil.copy(src_path, '{}/wasm/'.format(webpage_dir))
                # these files are renamed with a .txt suffix on deploy
                for ext in ['html.mem', 'wasm'] :
                    src_path = '{}/{}.{}'.format(wasm_deploy_dir, name, ext)
                    if os.path.isfile(src_path) :
                        shutil.copy(src_path, '{}/wasm/{}.{}.txt'.format(webpage_dir, name, ext))
                with open(proj_dir + '/web/wasm.html', 'r') as f :
                    templ = Template(f.read())
                src_url = GitHubSamplesURL + sample['src']
                html = templ.safe_substitute(name=name, source=src_url, separator=GameSeparator)
                with open('{}/wasm/{}.html'.format(webpage_dir, name), 'w') as f :
                    f.write(html)
    # copy the screenshots
    for sample in samples :
        if sample['name'] != '__end__' :
            img_path = sample['image']
            head, tail = os.path.split(img_path)
            if tail != 'none' :
                log.info('> copy screenshot: {}'.format(tail))
                shutil.copy(img_path, webpage_dir + '/' + tail)
#-------------------------------------------------------------------------------
def export_assets(fips_dir, proj_dir, webpage_dir) :
    """Export sample textures and copy auxiliary data files to the webpage."""
    tex_srcdir = proj_dir + '/data'
    tex_dstdir = webpage_dir + '/data'
    texexport.configure(proj_dir, tex_srcdir, tex_dstdir)
    texexport.exportSampleTextures()
    dst_dir = '{}/data/'.format(webpage_dir)
    for ext in ['txt'] :
        pattern = proj_dir + '/data/*.{}'.format(ext)
        for data_file in glob.glob(pattern) :
            shutil.copy(data_file, dst_dir)
#-------------------------------------------------------------------------------
def build_deploy_webpage(fips_dir, proj_dir, rebuild) :
    """Compile the samples, export assets and deploy the samples webpage.

    :param rebuild: when True, wipe any existing webpage dir first
    """
    ws_dir = util.get_workspace_dir(fips_dir)
    webpage_dir = '{}/fips-deploy/oryol-webpage'.format(ws_dir)
    # start from a clean output directory when rebuilding
    if rebuild and os.path.isdir(webpage_dir) :
        shutil.rmtree(webpage_dir)
    if not os.path.isdir(webpage_dir) :
        os.makedirs(webpage_dir)
    # compile samples for each enabled emscripten target
    if BuildEmscripten and emscripten.check_exists(fips_dir) :
        project.gen(fips_dir, proj_dir, EmscConfig)
        project.build(fips_dir, proj_dir, EmscConfig)
    if BuildWasm and emscripten.check_exists(fips_dir) :
        project.gen(fips_dir, proj_dir, WasmConfig)
        project.build(fips_dir, proj_dir, WasmConfig)
    # export sample assets
    if ExportAssets :
        export_assets(fips_dir, proj_dir, webpage_dir)
    # deploy the webpage
    deploy_webpage(fips_dir, proj_dir, webpage_dir)
    log.colored(log.GREEN, 'Generated Samples web page under {}.'.format(webpage_dir))
#-------------------------------------------------------------------------------
def serve_webpage(fips_dir, proj_dir) :
    """Serve the generated webpage locally and open it in a browser.

    Opens http://localhost:8000 with the host platform's launcher and then
    runs the fips http server until interrupted with Ctrl-C.  The three
    previously duplicated per-platform branches are collapsed into a
    command table.
    """
    ws_dir = util.get_workspace_dir(fips_dir)
    webpage_dir = '{}/fips-deploy/oryol-webpage'.format(ws_dir)
    # per-platform prefix that opens the browser before the server starts
    open_cmds = {
        'osx':   'open http://localhost:8000 ;',
        'win':   'cmd /c start http://localhost:8000 &&',
        'linux': 'xdg-open http://localhost:8000;',
    }
    p = util.get_host_platform()
    if p in open_cmds :
        try :
            subprocess.call(
                '{} python {}/mod/httpserver.py'.format(open_cmds[p], fips_dir),
                cwd = webpage_dir, shell=True)
        except KeyboardInterrupt :
            pass
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
    """Entry point for the 'webpage' fips verb."""
    if not args :
        log.error("Param 'build' or 'serve' expected")
        return
    verb = args[0]
    if verb == 'build' :
        build_deploy_webpage(fips_dir, proj_dir, False)
    elif verb == 'rebuild' :
        build_deploy_webpage(fips_dir, proj_dir, True)
    elif verb == 'serve' :
        serve_webpage(fips_dir, proj_dir)
    else :
        log.error("Invalid param '{}', expected 'build' or 'serve'".format(verb))
#-------------------------------------------------------------------------------
def help() :
    """Print usage help for the webpage verb."""
    usage = ('fips webpage build\n'
             'fips webpage rebuild\n'
             'fips webpage serve\n')
    log.info(log.YELLOW + usage + log.DEF +
             '    build oryol samples webpage')
| |
"""
sqmpy.job.saga_helper
~~~~~
Provides ways to interact with saga classes
"""
import os
import pwd
import time
import base64
import hashlib
from threading import Thread
import saga
import flask
from flask_login import current_user
from sqmpy.job import helpers
from sqmpy.job.helpers import send_state_change_email
from sqmpy.job.constants import FileRelation, ScriptType, HPCBackend
from sqmpy.job.exceptions import JobManagerException
from sqmpy.job.models import StagingFile, Job
from sqmpy.job.callback import JobStateChangeCallback
from sqmpy.database import db
__author__ = 'Mehdi Sadeghi'
class SagaJobWrapper(object):
    """
    To wrap, initialize and run a saga job.
    """
    def __init__(self, job):
        """
        Init

        :param job: an instance of job model class
        :return:
        """
        self._job_id = job.id
        self._job = job
        # If job has remote_job_id it is already running: re-attach to it
        # through the stored resource endpoint.
        if job.remote_job_id:
            self._job_service = self.make_job_service(job.resource_endpoint)
            self._saga_job = self._job_service.get_job(job.remote_job_id)
        else:
            # Creating the job service object i.e. the connection
            job.resource_endpoint = \
                get_resource_endpoint(job.resource.url, job.hpc_backend)
            self._job_service = self.make_job_service(job.resource_endpoint)

    def make_job_service(self, endpoint):
        """Create a saga job service for the endpoint.

        Builds a saga session with either a username/password context
        (when SSH_WITH_LOGIN_INFO is configured) or the default ssh
        context, and returns a job service bound to it.

        :param endpoint: resource URL, e.g. 'ssh://host'
        :return: saga.job.Service instance
        """
        # Create ssh security context
        ctx = None
        session = None
        if not flask.current_app.config.get('LOGIN_DISABLED') and\
                flask.current_app.config.get('SSH_WITH_LOGIN_INFO'):
            # Do not load default security contexts (user ssh keys)
            session = saga.Session(False)
            ctx = saga.Context('userpass')
            ctx.user_id = current_user.username
            # Password is stored base64-encoded in the flask session
            ctx.user_pass =\
                base64.b64decode(flask.session['password'].decode('utf-8'))
        else:
            session = saga.Session()
            ctx = saga.Context('ssh')
        # Explicitely add the only desired security context
        session.add_context(ctx)
        js = saga.job.Service(endpoint, session=session)
        # TODO: Fix in upstream. Service does not populate adaptor's session.
        js._adaptor._set_session(session)
        return js

    def _register_callbacks(self):
        """
        Register callback functions for the saga job

        :return:
        """
        # This callback will locally store output files and new states in db
        self._saga_job.add_callback(saga.STATE,
                                    JobStateChangeCallback(self._job, self))

    def get_job(self):
        """
        Returns the inner job

        :return:
        """
        return self._job

    def run(self):
        """
        Run the job on remote resource

        :return:
        :raises JobManagerException: if the remote working directory is
            not empty
        """
        # Set remote job working directory
        remote_job_dir = \
            get_job_endpoint(self._job.id, self._job_service.get_session())
        # Make sure the working directory is empty
        if remote_job_dir.list():
            raise JobManagerException('Remote directory is not empty')
        flask.current_app.logger.debug('Going to transfer files')
        # transfer job files to remote directory
        transfer_job_files(self._job.id, remote_job_dir,
                           self._job_service.get_session())
        flask.current_app.logger.debug('File transfer done.')
        # Create saga job description
        jd = make_job_description(self._job, remote_job_dir)
        # Create saga job
        self._saga_job = self._job_service.create_job(jd)
        # Register call backs. SAGA callbacks are not reliable and
        # we don't use them unless they are fixed first. Instead,
        # we use a monitoring thread. The flask.copy_current_request_context
        # decorator is very important here, if not used, the other
        # thread will not have access to the context data of the original
        # one.
        # self._register_callbacks()
        # Run the job eventually
        flask.current_app.logger.debug("...starting job[%s]..." % self._job.id)
        self._saga_job.run()
        # Store remote pid
        self._job.remote_job_id = self._saga_job.get_id()
        db.session.commit()
        # Make sure to start monitoring after starting the job
        #flask.current_app.logger.debug(
        #    'creating monitoring thread and passing %s to it' %
        #    (flask.current_app._get_current_object()))
        flask.current_app.monitor.send((self._job.id,
                                        self._job_service))
        flask.current_app.logger.debug(
            "Remote Job ID    : %s" % self._saga_job.id)
        flask.current_app.logger.debug(
            "Remote Job State : %s" % self._saga_job.state)

    def cancel(self):
        """
        Cancel the job
        """
        self._saga_job.cancel()
def get_resource_endpoint(host, hpc_backend):
    """
    Get ssh URI of remote host

    :param host: host to make url for it
    :param hpc_backend: hpc_backend integer value according to HPCBackend enum
    :return: endpoint URL string, e.g. 'ssh://host'
    """
    # Select the SAGA adaptor: local fork, SGE over ssh, or plain ssh.
    if helpers.is_localhost(host):
        adaptor = 'fork'
    elif hpc_backend == HPCBackend.sge.value:
        adaptor = 'sge+ssh'
    else:
        adaptor = 'ssh'
    return '{adaptor}://{remote_host}'.format(adaptor=adaptor,
                                              remote_host=host)
def _get_remote_home(session):
"""
Return homde directory on target resource based on
the current security context.
"""
user_id = None
for ctx in session.list_contexts():
if ctx.type == 'userpass':
return '/home/{0}'.format(ctx.user_id)
elif ctx.type == 'ssh':
user_id = ctx.user_id
# If user_id is not in the session object consider the user which
# is running the application
# This might not work on Windows, not tried.
user_id = pwd.getpwuid(os.getuid()).pw_name
if not user_id:
import getpass
user_id = getpass.getuser()
if not user_id:
raise Exception("Can't find the right username for SSH connection.")
return '/home/{0}'.format(user_id)
def get_job_endpoint(job_id, session):
    """
    Returns the remote job working directory. Creates the parent
    folders if they don't exist.

    :param job_id: job id
    :param session: saga session to be used
    :return: saga.filesystem.Directory for the remote working directory
    :raises Exception: if a preset remote_dir is not an absolute path
    """
    job = Job.query.get(job_id)
    # We use a combination of job id and a random string to make the
    # directory name unique and meanwhile human readable
    # Use the staging directory name as remote directory name as well,
    # otherwise decide for a new unique name
    # however since directories normally resied on different machines we
    # don't need to do that. It only makes things more human.
    dir_name = None
    if job.staging_dir:
        # Get the last part of path, i.e. job directory. See os.path.split
        if job.staging_dir.endswith('/'):
            dir_name = os.path.split(job.staging_dir[:-1])[-1]
        else:
            dir_name = os.path.split(job.staging_dir)[-1]
    else:
        # If staging directory is not set make a random name
        dir_name = "{0}_{1}".format(job_id,
                                    base64.urlsafe_b64encode(os.urandom(6)))
    if not job.remote_dir:
        # Default remote location is ~/.sqmpy/<dir_name>
        job.remote_dir =\
            '{home_directory}/.sqmpy/{path}'.format(
                home_directory=_get_remote_home(session),
                path=dir_name)
    elif not os.path.isabs(job.remote_dir):
        raise Exception('Working directory should be absolute path.')
    # Local targets use the 'file' adaptor, remote ones sftp.
    adapter = 'sftp'
    if helpers.is_localhost(job.resource.url):
        adapter = 'file'
    adaptor_string = '{adapter}://{remote_host}{working_directory}'
    remote_address = \
        adaptor_string.format(adapter=adapter,
                              remote_host=job.resource.url,
                              working_directory=job.remote_dir)
    # Appropriate folders will be created
    return \
        saga.filesystem.Directory(remote_address,
                                  saga.filesystem.CREATE_PARENTS,
                                  session=session)
def make_job_description(job, remote_job_dir):
    """
    Creates saga job description

    :param job: job instance
    :param remote_job_dir: saga remote job directory instance
    :return: a populated saga.job.Description
    """
    # The uploaded script file associated with this job
    script_file = \
        StagingFile.query.filter(
            StagingFile.parent_id == job.id,
            StagingFile.relation == FileRelation.script.value).first()
    jd = saga.job.Description()
    # TODO: Add queue name, project and other params
    jd.working_directory = remote_job_dir.get_url().path
    jd.total_cpu_count = job.total_cpu_count
    jd.wall_time_limit = job.walltime_limit
    jd.spmd_variation = job.spmd_variation
    jd.queue = job.queue or None
    jd.total_physical_memory = job.total_physical_memory or None
    jd.project = job.project or None
    # TODO: Use script handler instead, see issue #13 on github
    # Select the interpreter by script type; the two conditions are
    # mutually exclusive so elif makes that explicit.
    if job.script_type == ScriptType.python.value:
        jd.executable = '/usr/bin/python'
    elif job.script_type == ScriptType.shell.value:
        jd.executable = '/bin/sh'
    # The single argument is the absolute remote path of the script.
    # (A redundant earlier assignment of jd.arguments with only the script
    # name was dead code — it was always overwritten here — and is removed.)
    script_abs_path = '{dir}/{file}'.format(dir=remote_job_dir.get_url().path,
                                            file=script_file.name)
    jd.arguments = [script_abs_path]
    jd.output = '{script_name}.out.txt'.format(script_name=script_file.name)
    jd.error = '{script_name}.err.txt'.format(script_name=script_file.name)
    return jd
def download_job_files(job_id, job_description, session, wipe=True):
    """
    Copies output and error files along with any other output files back to the
    current machine.

    :param job_id: job id
    :param job_description: saga job description the job was submitted with
    :param session: saga session to remote resource
    :param wipe: if set to True will wipe files from remote machine.
    :return:
    """
    # Get staging file names for this job which are already uploaded
    # we don't need to download them since we have them already
    staged_files = \
        StagingFile.query.filter(StagingFile.parent_id == Job.id,
                                 Job.id == job_id).all()
    # Convert tuple result to list
    excluded = [os.path.join(sf.location, sf.name) for sf in staged_files]
    # Get or create job directory
    job_staging_folder = helpers.get_job_staging_folder(job_id)
    # Get the working directory instance
    remote_dir = get_job_endpoint(job_id, session)
    # Find all files in the working directory and its subdirectories
    # Files are as a dict of full_path/saga.filesystem.Url objects
    remote_files, sub_direcotires = _traverse_directory(remote_dir)
    # Copy/move files and create corresponding records in db
    # Note: we can recursively move everything back but the reason
    # behind traversing through all directories is that we want to
    # collect some information about them upon adding.
    # NOTE(review): .iteritems() is Python 2 only — this module appears to
    # target Python 2; use .items() if ever ported to Python 3.
    for remote_abspath, remote_url in remote_files.iteritems():
        # Copy physical file to local directory. Since paths are absolote
        # we need to make them relative to the job's staging folder
        relative_path =\
            _make_relative_path(remote_dir.get_url().get_path(),
                                remote_abspath)
        # Join the relative path and local job staging directory
        local_path =\
            os.path.join('sftp://localhost{job_path}'.format(
                job_path=job_staging_folder), relative_path)
        # No sftp in path
        local_abspath = os.path.join(job_staging_folder, relative_path)
        # Do nothing if the file is already downloaded or belongs
        # to initially uploaded files.
        if local_abspath in excluded:
            flask.current_app.logger.debug('Excluding %s' % local_abspath)
            continue
        if wipe:
            # Move the file and create parents if required
            remote_dir.move(remote_abspath, local_path,
                            saga.filesystem.CREATE_PARENTS)
        else:
            # Copy the file and create parents if required
            remote_dir.copy(remote_abspath, local_path,
                            saga.filesystem.CREATE_PARENTS)
        # Insert appropriate record into db
        sf = StagingFile()
        sf.name = remote_url.path
        sf.original_name = remote_url.path
        sf.location = os.path.dirname(local_abspath)
        sf.relative_path = relative_path.lstrip(os.sep)
        sf.relation = _get_file_relation_to_job(job_description, remote_url)
        # md5 checksum of the downloaded file content
        sf.checksum = hashlib.md5(open(local_abspath).read()).hexdigest()
        sf.parent_id = job_id
        db.session.add(sf)
    # Persist changes
    db.session.commit()
def _get_file_relation_to_job(job_description, file_url):
    """
    Classify a remote file's relation to its job.

    Returns the matching ``FileRelation`` value: stdout/stderr when the
    path equals the job's declared output/error stream, script when the
    path appears in the job's first argument, otherwise generated output.
    """
    path = file_url.path
    if path == job_description.output:
        return FileRelation.stdout.value
    if path == job_description.error:
        return FileRelation.stderr.value
    if path in job_description.arguments[0]:
        return FileRelation.script.value
    return FileRelation.output.value
def _make_relative_path(base_path, full_path):
    """
    Return *full_path* expressed relative to *base_path*.

    Logs both inputs, strips their common prefix and a leading separator.
    Returns ``None`` when *base_path* does not occur in *full_path*.
    """
    flask.current_app.logger.debug(
        'got base_path: %s and full_path: %s' % (base_path, full_path))
    if base_path not in full_path:
        return None
    # Get the common prefix
    prefix = os.path.commonprefix([base_path, full_path])
    relative = full_path[len(prefix):]
    # Remove '/' from the beginning
    if os.path.isabs(relative):
        relative = relative[1:]
    return relative
def _traverse_directory(directory,
                        collected_files=None,
                        collected_directories=None):
    """
    Walk through subdirectories and collect files.

    :param directory: instance of saga.filesystem.Directory
    :param collected_files: accumulator dict mapping absolute file path
        to its saga.filesystem.Url entry; a new dict is created when falsy
    :param collected_directories: accumulator list of opened
        saga.filesystem.Directory instances, one per visited sub-directory
    :return: tuple (collected_files, collected_directories)
    """
    # NOTE(review): falsy (not only None) accumulators are replaced here,
    # so an explicitly passed empty dict/list is ignored -- confirm intended.
    if not collected_files:
        collected_files = {}
    if not collected_directories:
        collected_directories = []
    # Find all files in the working directory and its subdirectories
    for entry in directory.list():
        # Add entry only if it's a file
        if directory.is_file(entry):
            # Generate full path to each file
            file_path =\
                os.path.join(directory.get_url().get_path(),
                             entry.path)
            collected_files[file_path] = entry
        # Go through sub-directories
        elif directory.is_dir(entry):
            # Fixme: currently saga fails to populate child Urls,
            # therefore we have to fill it manually. See #483 on Github
            # Desired format is like: {scheme}://{host}/{path}/{sub_path}
            path_template =\
                '{scheme}://{host}/{job_dir}/{job_rel_dir}'
            job_staging_folder = directory.get_url().get_path().lstrip(os.sep)
            sub_dir_path =\
                path_template.format(scheme=directory.get_url().get_scheme(),
                                     host=directory.get_url().get_host(),
                                     job_dir=job_staging_folder,
                                     job_rel_dir=entry.path.lstrip(os.sep))
            flask.current_app.logger.debug(
                'Made this path for sub_dir: %s' % sub_dir_path)
            sub_dir = directory.open_dir(sub_dir_path)
            collected_directories.append(sub_dir)
            # Recurse into the sub-directory, threading both accumulators.
            collected_files, collected_directories =\
                _traverse_directory(sub_dir,
                                    collected_files,
                                    collected_directories)
        else:
            flask.current_app.logger.debug(
                'Omitting non-file and non-directory entry: %s' % entry)
    # Return collected information
    return collected_files, collected_directories
def transfer_job_files(job_id, remote_job_dir, session):
    """
    Upload job files to remote resource.

    Copies every staged file of the job whose relation is ``input`` or
    ``script`` into the remote job directory.

    :param job_id: job id
    :param remote_job_dir: saga.filesystem.Directory instance
        of remote job directory
    :param session: saga.Session instance for this transfer
    :return: None
    """
    # Copy script and input files to remote host
    uploading_files = \
        StagingFile.query.filter(
            StagingFile.parent_id == job_id,
            StagingFile.relation.in_([FileRelation.input.value,
                                      FileRelation.script.value])).all()
    for file_to_upload in uploading_files:
        # If we don't pass correct session object, saga will create default
        # session object which will not reflect correct security context.
        # it would be useful only for a local run and not multi-user run
        # session and remote_job_dir._adaptor.session should be the same
        file_wrapper = \
            saga.filesystem.File('file://localhost/{file_path}'
                                 .format(file_path=file_to_upload.get_path()),
                                 session=session)
        # TODO: This is a workaround for bug #480 remove it later
        file_wrapper._adaptor._set_session(session)
        file_wrapper.copy(remote_job_dir.get_url(), saga.filesystem.RECURSIVE)
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
from __future__ import unicode_literals
import frappe, json
from frappe.utils import cstr, cint
from frappe.model import integer_docfield_properties, default_fields, no_value_fields, optional_fields
from frappe.model.document import Document
from frappe.model.base_document import BaseDocument
from frappe.model.db_schema import type_map
def get_meta(doctype, cached=True):
    """Return the Meta object for *doctype*, from cache unless disabled."""
    if not cached:
        return Meta(doctype)
    return frappe.cache().hget("meta", doctype, lambda: Meta(doctype))
def get_table_columns(doctype):
    """Return the database columns of *doctype*'s table (cached)."""
    fetch = lambda: frappe.db.get_table_columns(doctype)
    return frappe.cache().hget("table_columns", doctype, fetch)
def load_doctype_from_file(doctype):
    """Load a DocType definition from its bundled JSON file.

    Field and permission rows are wrapped in BaseDocument instances with
    their child doctype set, mirroring what a DB load would produce.
    """
    fname = frappe.scrub(doctype)
    json_path = frappe.get_app_path("frappe", "core", "doctype", fname, fname + ".json")
    with open(json_path, "r") as f:
        txt = json.loads(f.read())

    for d in txt.get("fields", []):
        d["doctype"] = "DocField"
    for d in txt.get("permissions", []):
        d["doctype"] = "DocPerm"

    txt["fields"] = [BaseDocument(d) for d in txt["fields"]]
    if "permissions" in txt:
        txt["permissions"] = [BaseDocument(d) for d in txt["permissions"]]
    return txt
class Meta(Document):
    """Processed, cached metadata for a DocType.

    Loads the DocType document, overlays Custom Fields and Property
    Setters, sorts fields and exposes convenience accessors. Instances
    are created (and cached) through `get_meta`.
    """
    _metaclass = True
    # All standard fields except the first entry.
    default_fields = list(default_fields)[1:]
    # Core doctypes that can be loaded from bundled JSON files before the
    # database tables exist.
    special_doctypes = ("DocField", "DocPerm", "Role", "DocType", "Module Def")

    def __init__(self, doctype):
        self._fields = {}
        super(Meta, self).__init__("DocType", doctype)
        self.process()

    def load_from_db(self):
        """Load from DB, falling back to JSON files for core doctypes."""
        try:
            super(Meta, self).load_from_db()
        except frappe.DoesNotExistError:
            if self.doctype=="DocType" and self.name in self.special_doctypes:
                self.__dict__.update(load_doctype_from_file(self.name))
            else:
                raise

    def get_link_fields(self):
        """Link fields, excluding '[Select]' placeholders."""
        return self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]})

    def get_dynamic_link_fields(self):
        return self.get("fields", {"fieldtype": "Dynamic Link"})

    def get_select_fields(self):
        """Select fields that carry real options."""
        return self.get("fields", {"fieldtype": "Select", "options":["not in",
            ["[Select]", "Loading..."]]})

    def get_table_fields(self):
        """Child-table fields; a static list when the doctype is DocType."""
        if not hasattr(self, "_table_fields"):
            if self.name!="DocType":
                self._table_fields = self.get('fields', {"fieldtype":"Table"})
            else:
                self._table_fields = doctype_table_fields
        return self._table_fields

    def get_valid_columns(self):
        """Names of columns that exist in this doctype's table (cached)."""
        if not hasattr(self, "_valid_columns"):
            if self.name in ("DocType", "DocField", "DocPerm", "Property Setter"):
                self._valid_columns = get_table_columns(self.name)
            else:
                self._valid_columns = self.default_fields + \
                    [df.fieldname for df in self.get("fields") if df.fieldtype in type_map]
        return self._valid_columns

    def get_table_field_doctype(self, fieldname):
        return { "fields": "DocField", "permissions": "DocPerm"}.get(fieldname)

    def get_field(self, fieldname):
        """Return the DocField for *fieldname* (index built lazily)."""
        if not self._fields:
            for f in self.get("fields"):
                self._fields[f.fieldname] = f
        return self._fields.get(fieldname)

    def get_label(self, fieldname):
        return self.get_field(fieldname).label

    def get_options(self, fieldname):
        return self.get_field(fieldname).options

    def get_link_doctype(self, fieldname):
        """Doctype a (Dynamic) Link field points to, or None."""
        df = self.get_field(fieldname)
        if df.fieldtype == "Link":
            return df.options
        elif df.fieldtype == "Dynamic Link":
            return self.get_options(df.options)
        else:
            return None

    def get_search_fields(self):
        """Fields used for searching; always includes 'name'."""
        search_fields = self.search_fields or "name"
        search_fields = [d.strip() for d in search_fields.split(",")]
        if "name" not in search_fields:
            search_fields.append("name")
        return search_fields

    def get_list_fields(self):
        """'name', all in-list-view fields with a DB column, and title."""
        list_fields = ["name"] + [d.fieldname \
            for d in self.fields if (d.in_list_view and d.fieldtype in type_map)]
        if self.title_field and self.title_field not in list_fields:
            list_fields.append(self.title_field)
        return list_fields

    def get_custom_fields(self):
        return [d for d in self.fields if d.get('is_custom_field')]

    def get_title_field(self):
        return self.title_field or "name"

    def process(self):
        """Overlay customisations and pre-compute caches."""
        # don't process for special doctypes
        # prevents circular dependency
        if self.name in self.special_doctypes:
            return
        self.add_custom_fields()
        self.apply_property_setters()
        self.sort_fields()
        self.get_valid_columns()

    def add_custom_fields(self):
        """Append Custom Field rows, flagged with is_custom_field."""
        try:
            self.extend("fields", frappe.db.sql("""SELECT * FROM `tabCustom Field`
                WHERE dt = %s AND docstatus < 2""", (self.name,), as_dict=1,
                update={"is_custom_field": 1}))
        # `except ... as` works on Python 2.6+ and 3 (the old comma form
        # was Python-2-only syntax).
        except Exception as e:
            # 1146: table doesn't exist yet (fresh install / migration).
            if e.args[0]==1146:
                return
            else:
                raise

    def apply_property_setters(self):
        """Apply Property Setter overrides to the doctype or its fields."""
        for ps in frappe.db.sql("""select * from `tabProperty Setter` where
            doc_type=%s""", (self.name,), as_dict=1):
            if ps.doctype_or_field=='DocType':
                if ps.property_type in ('Int', 'Check'):
                    ps.value = cint(ps.value)
                self.set(ps.property, ps.value)
            else:
                docfield = self.get("fields", {"fieldname":ps.field_name}, limit=1)
                if docfield:
                    docfield = docfield[0]
                else:
                    continue
                if ps.property in integer_docfield_properties:
                    ps.value = cint(ps.value)
                docfield.set(ps.property, ps.value)

    def sort_fields(self):
        """sort on basis of previous_field (stored _idx order), renumbering idx"""
        if self.get("_idx"):
            newlist = []
            pending = self.get("fields")
            for fieldname in json.loads(self.get("_idx")):
                d = self.get("fields", {"fieldname": fieldname}, limit=1)
                if d:
                    newlist.append(d[0])
                    pending.remove(d[0])
            # fields not mentioned in _idx keep their order, at the end
            if pending:
                newlist += pending
            # renum
            idx = 1
            for d in newlist:
                d.idx = idx
                idx += 1
            self.set("fields", newlist)

    def get_fields_to_check_permissions(self, user_permission_doctypes):
        """Link fields whose options fall under user permissions."""
        fields = self.get("fields", {
            "fieldtype":"Link",
            "parent": self.name,
            "ignore_user_permissions":("!=", 1),
            "options":("in", user_permission_doctypes)
        })
        if self.name in user_permission_doctypes:
            fields.append(frappe._dict({
                "label":"Name",
                "fieldname":"name",
                "options": self.name
            }))
        return fields

    def get_high_permlevel_fields(self):
        """Build list of fields with high perm level and all the higher perm levels defined."""
        if not hasattr(self, "high_permlevel_fields"):
            self.high_permlevel_fields = []
            for df in self.fields:
                if df.permlevel > 0:
                    self.high_permlevel_fields.append(df)
        return self.high_permlevel_fields
# Static stand-in for Meta.get_table_fields() when the doctype is
# "DocType" itself (its child tables are fixed: fields and permissions).
doctype_table_fields = [
    frappe._dict({"fieldname": "fields", "options": "DocField"}),
    frappe._dict({"fieldname": "permissions", "options": "DocPerm"})
]
#######
def is_single(doctype):
    """Return the `issingle` flag of *doctype*.

    Raises Exception when the DocType row cannot be found.
    """
    try:
        return frappe.db.get_value("DocType", doctype, "issingle")
    except IndexError:
        # Call-style raise works on Python 2 and 3; the old
        # `raise Exception, msg` form was Python-2-only syntax.
        raise Exception('Cannot determine whether %s is single' % doctype)
def get_parent_dt(dt):
    """Return the first parent doctype that embeds *dt* as a child table,
    or '' when there is none."""
    rows = frappe.db.sql("""select parent from tabDocField
        where fieldtype="Table" and options=%s and (parent not like "old_parent:%%")
        limit 1""", dt)
    if rows:
        return rows[0][0] or ''
    return ''
def set_fieldname(field_id, fieldname):
    """Persist a new fieldname on the given DocField row."""
    frappe.db.set_value('DocField', field_id, 'fieldname', fieldname)
def get_field_currency(df, doc=None):
    """get currency based on DocField options and fieldvalue in doc"""
    currency = None
    if not df.get("options"):
        return None
    if not doc:
        return None
    # Per-request memoization: frappe.local.field_currency maps
    # (doctype, parent-or-name) -> {fieldname: currency}.
    if not getattr(frappe.local, "field_currency", None):
        frappe.local.field_currency = frappe._dict()
    if not frappe.local.field_currency.get((doc.doctype, doc.parent or doc.name), {}).get(df.fieldname):
        if ":" in cstr(df.get("options")):
            # Options of the form "DocType:link_fieldname:currency_fieldname"
            # resolve the currency from a linked document.
            split_opts = df.get("options").split(":")
            if len(split_opts)==3:
                currency = frappe.db.get_value(split_opts[0], doc.get(split_opts[1]), split_opts[2])
        else:
            # Plain options: read the named field from the doc itself,
            # falling back to the same field on the parent document.
            currency = doc.get(df.get("options"))
            if not currency and doc.parent:
                currency = frappe.db.get_value(doc.parenttype, doc.parent, df.get("options"))
        if currency:
            # Cache the resolved currency for subsequent lookups.
            frappe.local.field_currency.setdefault((doc.doctype, doc.parent or doc.name), frappe._dict())\
                .setdefault(df.fieldname, currency)
    return frappe.local.field_currency.get((doc.doctype, doc.parent or doc.name), {}).get(df.fieldname)
def get_field_precision(df, doc=None, currency=None):
    """get precision based on DocField options and fieldvalue in doc"""
    from frappe.utils import get_number_format_info
    if cint(df.precision):
        # An explicit precision on the DocField wins.
        precision = cint(df.precision)
    elif df.fieldtype == "Currency":
        # Derive precision from the currency's number format.
        number_format = None
        if not currency and doc:
            currency = get_field_currency(df, doc)
        if not currency:
            # use default currency
            currency = frappe.db.get_default("currency")
        if currency:
            number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
        if not number_format:
            number_format = frappe.db.get_default("number_format") or "#,###.##"
        # Only the precision part of the tuple is used here.
        decimal_str, comma_str, precision = get_number_format_info(number_format)
    else:
        # Float / Percent etc.: fall back to the global float precision.
        precision = cint(frappe.db.get_default("float_precision")) or 3
    return precision
def get_default_df(fieldname):
    """Return a synthetic DocField dict for a standard (default) field,
    or None when *fieldname* is not a default field."""
    if fieldname not in default_fields:
        return None
    fieldtype = "Datetime" if fieldname in ("creation", "modified") else "Data"
    return frappe._dict(
        fieldname = fieldname,
        fieldtype = fieldtype
    )
def trim_tables():
    """Use this to remove columns that don't exist in meta.

    Standard, optional and internal (underscore-prefixed) columns are
    always kept; non-single doctypes only.
    """
    ignore_fields = default_fields + optional_fields
    for doctype in frappe.db.get_all("DocType", filters={"issingle": 0}):
        doctype = doctype.name
        columns = frappe.db.get_table_columns(doctype)
        fields = [df.fieldname for df in frappe.get_meta(doctype).fields if df.fieldtype not in no_value_fields]
        columns_to_remove = [f for f in list(set(columns) - set(fields)) if f not in ignore_fields
            and not f.startswith("_")]
        if columns_to_remove:
            # Single-argument print() yields identical output on Python 2
            # and 3 (the bare print statement was Python-2-only).
            print("{0} columns removed: {1}".format(doctype, columns_to_remove))
            columns_to_remove = ", ".join(["drop `{0}`".format(c) for c in columns_to_remove])
            query = """alter table `tab{doctype}` {columns}""".format(
                doctype=doctype, columns=columns_to_remove)
            frappe.db.sql_ddl(query)
def clear_cache(doctype=None):
    """Clear cached metadata for *doctype* and its parent doctypes, or
    for all doctypes when *doctype* is None."""
    cache = frappe.cache()
    cache.delete_value("is_table")
    cache.delete_value("doctype_modules")
    groups = ["meta", "form_meta", "table_columns", "last_modified", "linked_doctypes"]

    def clear_single(dt):
        for name in groups:
            cache.hdel(name, dt)
        # also clear linked_with list cache
        # Bug fix: key off the doctype being cleared (`dt`), not the outer
        # `doctype` -- otherwise parent doctypes' linked_with keys were
        # never cleared.
        cache.delete_keys("user:*:linked_with:{doctype}:".format(doctype=dt))

    if doctype:
        clear_single(doctype)
        # clear all parent doctypes
        for dt in frappe.db.sql("""select parent from tabDocField
            where fieldtype="Table" and options=%s""", (doctype,)):
            clear_single(dt[0])
        # clear all notifications
        from frappe.desk.notifications import delete_notification_count_for
        delete_notification_count_for(doctype)
    else:
        # clear all
        for name in groups:
            cache.delete_value(name)
| |
import logging
from gluon import A
from gluon import DIV
from gluon import H3
from gluon import H4
from gluon import H5
from gluon import I
from gluon import IS_IN_SET
from gluon import LI
from gluon import P
from gluon import MARKMIN
from gluon import SQLFORM
from gluon import SPAN
from gluon import TAG
from gluon import UL
from gluon import URL
from gluon import XML
from gluon import xmlescape
# Publication date display format, e.g. "January 2000".
date_format = '%B %Y'
# Bootstrap grid classes for index thumbnails and single-poem columns.
index_class = 'col-xs-12 col-sm-6 col-md-4'
poem_class = 'col-xs-12 col-sm-10 col-md-8'
def _thumb(row, cls, title=None):
    """ Return a column DIV thumbnail linking to the poem's chapter. """
    caption_parts = [
        H3(row.chapter.title),
        H4('Chapter %i' % row.chapter.number),
        H5(row.published.strftime(date_format)),
        H3(row.intro_hanzi),
        H4(row.intro_en),
    ]
    caption = DIV(
        *caption_parts,
        _class='caption',
        _role='button',
        _title=title)
    target = URL('poems', 'chapter', args=[row.chapter.number])
    anchor = A(caption, _class='ddj-thumbnail', _href=target)
    return DIV(DIV(anchor, _class='thumbnail'), _class=cls)
def chapter(poem, db, uhdb):
    """ Return a bootstrap row for a poem row. """
    if not poem:
        raise Exception('No such poem')
    verse_query = ((db.verse.book==1) & (db.verse.chapter==poem.chapter))
    verse = db(verse_query).select().first()
    heading = [
        H3(poem.chapter.title),
        H4('Chapter %i' % poem.chapter.number),
        H5(poem.published.strftime(date_format)),
    ]
    # One paragraph per stanza; line breaks become <br />.
    body = [P(XML(stanza.replace('\r\n', '<br />')))
            for stanza in verse.en.split('\r\n\r\n')]
    study_link = P(
        A(
            I('Go to the study version'),
            _href=URL('studies', 'chapter', args=[poem.chapter.number]),
            _style='color:inherit;',
            _title='Study version'),
        _style='font-size:0.9em;padding-top:1em')
    body.append(P(study_link))
    column = DIV(*(heading + body), _class=poem_class)
    return DIV(
        column, _class='row',
        _style='font-size:1.12em;white-space:nowrap;')
def chapter_range(page_number):
    """ Return the (low, high) chapter numbers shown on a 9-poem page. """
    if not 1 <= page_number <= 9:
        raise Exception('No such page')
    high = page_number * 9
    low = high - 8
    return low, high
def decache(chapter, db):
    """ Clear study chapter cache data.

    Invalidates the cached poem, its study version, the prev/next link
    rows of the neighbouring poems, and the index page containing it.
    """
    import studies
    from gluon import current
    # Decache the associated study.
    studies.decache(chapter, db)
    # Decache the poem itself.
    current.cache.ram('poem-%d' % chapter, None)
    # Decache links in the next poem.
    qry = db.poem.chapter > int(chapter)
    nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
    if nxt:
        current.cache.ram('links-%d' % nxt.first().chapter, None)
    # Decache links in the previous poem.
    qry = db.poem.chapter < chapter
    prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
    if prev:
        current.cache.ram('links-%d' % prev.first().chapter, None)
    # Decache the page containing the poem (9 chapters per page).
    # NOTE(review): relies on integer (floor) division -- Python 2 `/`.
    page = (chapter + 8) / 9
    current.cache.ram('poems-%d' % page, None)
def grid(db, deletable=False):
    """ Return an SQLFORM.grid to manage poems. """
    # One shared args dict for create/edit/view forms.
    shared_args = {
        'fields': [
            'chapter', 'published', 'intro_hanzi', 'intro_en']}
    field_list = [
        db.poem.chapter,
        db.poem.published,
        db.poem.intro_hanzi,
        db.poem.intro_en]
    on_change = lambda form: decache(int(form.vars.chapter), db)
    db.poem.published.represent = lambda value, row: value.strftime(date_format)
    db.poem.chapter.requires = IS_IN_SET(range(1, 82), zero=None)
    return SQLFORM.grid(
        db.poem,
        createargs=shared_args,
        csv=False,
        deletable=deletable,
        details=False,
        editargs=shared_args,
        fields=field_list,
        maxtextlengths={'poem.published': 50},
        oncreate=on_change,
        onupdate=on_change,
        orderby=db.poem.chapter,
        paginate=None,
        searchable=False,
        viewargs=shared_args)
def index(page_number, db):
    """ Return a row DIV of a page of poems. """
    low, high = chapter_range(page_number)
    rows = db((db.poem.chapter>=low) & (db.poem.chapter<=high)).select(
        orderby=db.poem.chapter)
    thumbs = [_thumb(row, index_class) for row in rows]
    return DIV(thumbs, _class='row display-flex')
def links(poem, db):
    """ Return a row DIV of prev/next poems. """
    def first_match(qry, order):
        # Single-row select honouring the given ordering.
        return db(qry).select(limitby=(0,1), orderby=order)

    thumbs = []
    # Next poem, wrapping around to chapter 1 after the last.
    nxt = first_match(db.poem.chapter > poem.chapter, db.poem.chapter)
    if not nxt:
        nxt = first_match(db.poem.chapter >= 1, db.poem.chapter)
    if nxt:
        thumbs.append(_thumb(nxt.first(), poem_class, 'Next'))
    # Previous poem, wrapping around to chapter 81 before the first.
    prev = first_match(db.poem.chapter < poem.chapter, ~db.poem.chapter)
    if not prev:
        prev = first_match(db.poem.chapter <= 81, ~db.poem.chapter)
    if prev:
        thumbs.append(_thumb(prev.first(), poem_class, 'Previous'))
    # Bootstrap.
    return DIV(
        thumbs,
        _class='row',
        _style='padding-top: 2.5em;')
def pager(db):
    """ Return a row DIV for a pager. """
    from gluon import current
    # Previous/current/next page (page 1 when no arg given).
    arg0 = current.request.args(0)
    current_page = int(arg0) if arg0 else 1
    prev_page = current_page - 1
    next_page = current_page + 1
    pages = []
    # Previous/left arrow.
    if prev_page < 1:
        li_class, href = 'disabled', '#'
    elif prev_page == 1:
        li_class, href = '', URL('poems', 'index')
    else:
        li_class, href = '', URL('poems', 'page', args=[str(prev_page)])
    span = SPAN(xmlescape(u'\u4e0a'), **{'_aria-hidden': 'true'})
    anchor = A(span, _href=href, **{'_aria-label': 'Previous'})
    pages.append(LI(anchor, _class=li_class, _title='Previous Page'))
    # One numbered link per nine-chapter page.
    for page in range(1, 10):
        li_class = ''
        if page == 1:
            href = URL('poems', 'index')
        else:
            href = URL('poems', 'page', args=[str(page)])
        page_range = ['%d-%d' % (((page-1)*9)+1, page*9)]
        if page == current_page:
            li_class = 'active'
            page_range.append(SPAN('(current)', _class='sr-only'))
        pages.append(LI(A(page_range, _href=href), _class=li_class))
    # Next/right arrow.
    if next_page > 9:
        li_class, href = 'disabled', '#'
    else:
        li_class, href = '', URL('poems', 'page', args=[str(next_page)])
    span = SPAN(xmlescape(u'\u4e0b'), **{'_aria-hidden': 'true'})
    anchor = A(span, _href=href, **{'_aria-label': 'Next'})
    pages.append(LI(anchor, _class=li_class, _title='Next Page'))
    # Together.
    return UL(pages, _class='pagination')
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import mimetypes
from io import BytesIO
from flask import flash, jsonify, redirect, request, session
from werkzeug.exceptions import Forbidden, NotFound
from indico.core.db import db
from indico.core.errors import NoReportError
from indico.modules.auth.util import redirect_to_login
from indico.modules.events.agreements.forms import AgreementAnswerSubmissionForm, AgreementEmailForm, AgreementForm
from indico.modules.events.agreements.models.agreements import Agreement, AgreementState
from indico.modules.events.agreements.notifications import notify_agreement_reminder, notify_new_signature_to_manager
from indico.modules.events.agreements.util import get_agreement_definitions, send_new_agreements
from indico.modules.events.agreements.views import (WPAgreementFormConference, WPAgreementFormSimpleEvent,
WPAgreementManager)
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.models.events import EventType
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
from indico.web.forms.base import FormDefaults
from indico.web.views import WPJinjaMixin
class RHAgreementManagerBase(RHManageEventBase):
    """Base class for agreement management RHs (event-management area)."""
class RHAgreementForm(RHDisplayEventBase):
    """Agreement form page.

    Public page, reached via a tokenized (uuid) URL, where a person
    accepts or rejects an agreement.
    """
    normalize_url_spec = {
        'locators': {
            lambda self: self.agreement
        },
        'preserved_args': {'uuid'}
    }

    def _process_args(self):
        RHDisplayEventBase._process_args(self)
        self.agreement = Agreement.get_one(request.view_args['id'])
        if self.agreement.is_orphan():
            raise NotFound('The agreement is not active anymore')

    def _require_user(self):
        # The agreement is bound to a specific account: force a login and
        # make sure it is the right user who signs.
        if session.user is None:
            raise Forbidden(response=redirect_to_login(reason=_('You are trying to sign an agreement that requires '
                                                                'you to be logged in')))
        if self.agreement.user != session.user:
            raise Forbidden(_('Please log in as {name} to sign this agreement.')
                            .format(name=self.agreement.user.full_name))

    def _check_access(self):
        # XXX: Not checking event protection here - if you get the agreement link
        # you need to be able to sign it no matter if you have access to the event
        # or not. Speakers might not even have an Indico account...
        if self.agreement.uuid != request.view_args['uuid']:
            raise Forbidden(_("The URL for this agreement is invalid."))
        if self.agreement.user:
            self._require_user()

    def _process(self):
        form = AgreementForm()
        if form.validate_on_submit() and self.agreement.pending:
            # Only rejections carry a reason.
            reason = form.reason.data if not form.agreed.data else None
            func = self.agreement.accept if form.agreed.data else self.agreement.reject
            func(from_ip=request.remote_addr, reason=reason)
            if self.agreement.definition.event_settings.get(self.event, 'manager_notifications_enabled'):
                notify_new_signature_to_manager(self.agreement)
            # Redirect back to the same page (post/redirect/get).
            return redirect(url_for('.agreement_form', self.agreement, uuid=self.agreement.uuid))
        html = self.agreement.render(form)
        view_class = (WPAgreementFormConference
                      if self.event.type_ == EventType.conference else
                      WPAgreementFormSimpleEvent)
        return view_class.render_template('form_page.html', self.event, agreement=self.agreement, html=html)
class RHAgreementManager(RHAgreementManagerBase):
    """Agreements types page (admin)"""

    def _process(self):
        # One entry per registered agreement definition.
        definitions = get_agreement_definitions().values()
        return WPAgreementManager.render_template('agreement_types.html', self.event, definitions=definitions)
class RHAgreementManagerDetails(RHAgreementManagerBase):
    """Management page for all agreements of a certain type (admin)"""

    def _process_args(self):
        RHAgreementManagerBase._process_args(self)
        definition_name = request.view_args['definition']
        self.definition = get_agreement_definitions().get(definition_name)
        if self.definition is None:
            raise NotFound("Agreement type '{}' does not exist".format(definition_name))
        if not self.definition.is_active(self.event):
            flash(_("The '{}' agreement is not used in this event.").format(self.definition.title), 'error')
            # NOTE(review): returning a redirect from _process_args --
            # confirm the RH framework honours return values here;
            # _process_args usually only sets attributes.
            return redirect(url_for('.event_agreements', self.event))

    def _process(self):
        # All people the definition applies to, with their agreements (if
        # any) of this type.
        people = self.definition.get_people(self.event)
        agreements = (self.event.agreements
                      .filter(Agreement.type == self.definition.name,
                              Agreement.identifier.in_(people))
                      .all())
        return WPAgreementManager.render_template('agreement_type_details.html', self.event,
                                                  definition=self.definition, agreements=agreements)
class RHAgreementManagerDetailsToggleNotifications(RHAgreementManagerDetails):
    """Toggles notifications to managers for an agreement type on an event"""

    def _process(self):
        # The checkbox state arrives as '1'/'0' in the POSTed form.
        new_state = request.form['enabled'] == '1'
        self.definition.event_settings.set(self.event, 'manager_notifications_enabled', new_state)
        return jsonify(success=True, enabled=new_state)
class RHAgreementManagerDetailsEmailBase(RHAgreementManagerDetails):
    """Base for dialogs that e-mail agreement people.

    Subclasses set `dialog_template` and implement `_success_handler`.
    (A redundant `_process_args` override that merely delegated to the
    parent class was removed; inheritance already provides it.)
    """
    dialog_template = None

    def _success_handler(self, form):
        raise NotImplementedError

    def _get_form(self):
        # Pre-fill the body with the definition's default e-mail template.
        template = self.definition.get_email_body_template(self.event)
        form_defaults = FormDefaults(body=template.get_html_body())
        return AgreementEmailForm(obj=form_defaults, definition=self.definition, event=self.event)

    def _process(self):
        form = self._get_form()
        if form.validate_on_submit():
            self._success_handler(form)
            return jsonify(success=True)
        return WPJinjaMixin.render_template(self.dialog_template, event=self.event, form=form,
                                            definition=self.definition)
class RHAgreementManagerDetailsSend(RHAgreementManagerDetailsEmailBase):
    dialog_template = 'events/agreements/dialogs/agreement_email_form_send.html'

    def _get_people(self):
        """Selected, not-yet-notified people who have an e-mail address."""
        selected = set(request.form.getlist('references'))
        candidates = self.definition.get_people_not_notified(self.event)
        return {identifier: person
                for identifier, person in candidates.iteritems()
                if person.email and person.identifier in selected}

    def _success_handler(self, form):
        send_new_agreements(self.event, self.definition.name, self._get_people(),
                            form.body.data, form.cc_addresses.data, form.from_address.data)
class RHAgreementManagerDetailsRemind(RHAgreementManagerDetailsEmailBase):
    dialog_template = 'events/agreements/dialogs/agreement_email_form_remind.html'

    def _get_agreements(self):
        """Selected agreements that have a person e-mail on file."""
        selected_ids = set(request.form.getlist('references'))
        query = self.event.agreements.filter(Agreement.id.in_(selected_ids),
                                             Agreement.person_email != None)  # noqa
        return query.all()

    def _success_handler(self, form):
        for agreement in self._get_agreements():
            notify_agreement_reminder(agreement, form.body.data,
                                      form.cc_addresses.data, form.from_address.data)
        flash(_("Reminders sent"), 'success')
class RHAgreementManagerDetailsSendAll(RHAgreementManagerDetailsSend):
    dialog_template = 'events/agreements/dialogs/agreement_email_form_send_all.html'

    def _get_people(self):
        """All not-yet-notified people with an e-mail (no selection filter)."""
        candidates = self.definition.get_people_not_notified(self.event)
        return {identifier: person
                for identifier, person in candidates.iteritems()
                if person.email}
class RHAgreementManagerDetailsRemindAll(RHAgreementManagerDetailsRemind):
    dialog_template = 'events/agreements/dialogs/agreement_email_form_remind_all.html'

    def _get_agreements(self):
        """All pending, non-orphaned agreements of this type with an e-mail."""
        pending = self.event.agreements.filter(Agreement.pending,
                                               Agreement.person_email != None,  # noqa
                                               Agreement.type == self.definition.name)
        return [agreement for agreement in pending.all() if not agreement.is_orphan()]
class RHAgreementManagerDetailsAgreementBase(RHAgreementManagerDetails):
    """Base for management RHs that operate on one specific agreement."""
    normalize_url_spec = {
        'locators': {
            lambda self: self.agreement
        },
        'args': {
            'definition': lambda self: self.agreement.type,
            'filename': lambda self: self.agreement.attachment_filename
        }
    }

    def _process_args(self):
        RHAgreementManagerDetails._process_args(self)
        self.agreement = Agreement.get_one(request.view_args['id'])
class RHAgreementManagerDetailsSubmitAnswer(RHAgreementManagerDetails):
    """Submits the answer of an agreement on behalf of the person"""

    def _process_args(self):
        RHAgreementManagerDetails._process_args(self)
        if 'id' in request.view_args:
            # Answering an existing (pending) agreement.
            self.agreement = Agreement.get_one(request.view_args['id'])
            if self.event != self.agreement.event:
                raise NotFound
            if not self.agreement.pending:
                raise NoReportError(_("The agreement is already signed"))
        else:
            # No agreement exists yet: identify the person; an agreement
            # is created on submission.
            self.agreement = None
            identifier = request.args['identifier']
            try:
                self.person = self.definition.get_people(self.event)[identifier]
            except KeyError:
                raise NotFound

    def _process(self):
        agreement = self.agreement
        form = AgreementAnswerSubmissionForm()
        if form.validate_on_submit():
            if agreement is None:
                agreement = Agreement.create_from_data(event=self.event, type_=self.definition.name,
                                                       person=self.person)
                db.session.add(agreement)
                db.session.flush()
            if form.answer.data:
                agreement.accept(from_ip=request.remote_addr, on_behalf=True)
                agreement.attachment_filename = form.document.data.filename
                agreement.attachment = form.document.data.read()
            else:
                agreement.reject(from_ip=request.remote_addr, on_behalf=True)
            # Bug fix: translate the literal first, then format. Formatting
            # inside `_()` produced a per-person msgid that could never
            # match the translation catalog.
            flash(_("Agreement answered on behalf of {0}").format(agreement.person_name), 'success')
            return jsonify(success=True)
        return WPJinjaMixin.render_template('events/agreements/dialogs/agreement_submit_answer_form.html', form=form,
                                            event=self.event, agreement=agreement)
class RHAgreementManagerDetailsDownloadAgreement(RHAgreementManagerDetailsAgreementBase):
    def _process_args(self):
        RHAgreementManagerDetailsAgreementBase._process_args(self)
        # Only agreements accepted on behalf of someone carry an uploaded file.
        if self.agreement.state != AgreementState.accepted_on_behalf:
            raise NoReportError("The agreement was not accepted manually by an admin")

    def _process(self):
        attachment = BytesIO(self.agreement.attachment)
        guessed = mimetypes.guess_type(self.agreement.attachment_filename)[0]
        return send_file(self.agreement.attachment_filename, attachment,
                         guessed or 'application/octet-stream')
| |
import re
import frappe
import psycopg2
import psycopg2.extensions
from six import string_types
from frappe.utils import cstr
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from frappe.database.database import Database
from frappe.database.postgres.schema import PostgresTable
# cast decimals as floats
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
'DEC2FLOAT',
lambda value, curs: float(value) if value is not None else None)
psycopg2.extensions.register_type(DEC2FLOAT)
class PostgresDatabase(Database):
    """Postgres implementation of frappe's Database interface."""
    # Map the driver-agnostic error names used by frappe onto the
    # corresponding psycopg2 exception classes.
    ProgrammingError = psycopg2.ProgrammingError
    TableMissingError = psycopg2.ProgrammingError
    OperationalError = psycopg2.OperationalError
    InternalError = psycopg2.InternalError
    SQLError = psycopg2.ProgrammingError
    DataError = psycopg2.DataError
    InterfaceError = psycopg2.InterfaceError
    # Operator used for regex matching in queries.
    REGEX_CHARACTER = '~'
def setup_type_map(self):
    """Define the mapping from frappe fieldtypes to Postgres column
    types as (type name, default length/precision spec) tuples."""
    self.db_type = 'postgres'
    self.type_map = {
        'Currency': ('decimal', '18,6'),
        'Int': ('bigint', None),
        'Long Int': ('bigint', None),
        'Float': ('decimal', '18,6'),
        'Percent': ('decimal', '18,6'),
        'Check': ('smallint', None),
        'Small Text': ('text', ''),
        'Long Text': ('text', ''),
        'Code': ('text', ''),
        'Text Editor': ('text', ''),
        'Markdown Editor': ('text', ''),
        'HTML Editor': ('text', ''),
        'Date': ('date', ''),
        'Datetime': ('timestamp', None),
        'Time': ('time', '6'),
        'Text': ('text', ''),
        'Data': ('varchar', self.VARCHAR_LEN),
        'Link': ('varchar', self.VARCHAR_LEN),
        'Dynamic Link': ('varchar', self.VARCHAR_LEN),
        'Password': ('text', ''),
        'Select': ('varchar', self.VARCHAR_LEN),
        'Rating': ('smallint', None),
        'Read Only': ('varchar', self.VARCHAR_LEN),
        'Attach': ('text', ''),
        'Attach Image': ('text', ''),
        'Signature': ('text', ''),
        'Color': ('varchar', self.VARCHAR_LEN),
        'Barcode': ('text', ''),
        'Geolocation': ('text', ''),
        'Duration': ('decimal', '18,6')
    }
def get_connection(self):
    """Open a new psycopg2 connection in autocommit isolation.

    NOTE(review): `dbname` is filled from self.user -- presumably the
    database name equals the user name by convention here; confirm,
    since self.db_name exists and is used elsewhere in this class.
    """
    conn = psycopg2.connect("host='{}' dbname='{}' user='{}' password='{}' port={}".format(
        self.host, self.user, self.user, self.password, self.port
    ))
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)  # TODO: Remove this
    return conn
def escape(self, s, percent=True):
    """Escape quotes and percent in given string."""
    # QuotedString handles quote escaping; percent signs are doubled so
    # the result survives later %-style interpolation.
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    if percent:
        s = s.replace("%", "%%")
    s = s.encode('utf-8')
    return str(psycopg2.extensions.QuotedString(s))
def get_database_size(self):
    '''Returns database size in MB'''
    db_size = self.sql("SELECT (pg_database_size(%s) / 1024 / 1024) as database_size",
        self.db_name, as_dict=True)
    return db_size[0].get('database_size')
# pylint: disable=W0221
def sql(self, *args, **kwargs):
    """Rewrite the query for Postgres, then delegate to Database.sql."""
    if args:
        # Rebuild the immutable args tuple with the rewritten query first.
        args = (modify_query(args[0]),) + tuple(args[1:])
    elif kwargs.get('query'):
        kwargs['query'] = modify_query(kwargs.get('query'))
    return super(PostgresDatabase, self).sql(*args, **kwargs)
def get_tables(self):
    """Return the names of all base tables in the configured schema.

    Reads the catalog/schema names from ``frappe.conf`` (schema defaults
    to ``public``); views are excluded by the BASE TABLE filter.
    """
    return [d[0] for d in self.sql("""select table_name
        from information_schema.tables
        where table_catalog='{0}'
            and table_type = 'BASE TABLE'
            and table_schema='{1}'""".format(frappe.conf.db_name, frappe.conf.get("db_schema", "public")))]
def format_date(self, date):
    """Render ``date`` as an ISO (YYYY-MM-DD) string.

    Falsy values map to the sentinel '0001-01-01'; strings are returned
    unchanged, date/datetime objects are formatted.
    """
    if not date:
        return '0001-01-01'
    return date if isinstance(date, str) else date.strftime('%Y-%m-%d')
# column type classification (cursor description type_code helpers)
@staticmethod
def is_type_number(code):
    """Whether the cursor ``type_code`` describes a numeric column."""
    return code == psycopg2.NUMBER

@staticmethod
def is_type_datetime(code):
    """Whether the cursor ``type_code`` describes a date/time column."""
    return code == psycopg2.DATETIME
# exception type
@staticmethod
def is_deadlocked(e):
return e.pgcode == '40P01'
@staticmethod
def is_timedout(e):
# http://initd.org/psycopg/docs/extensions.html?highlight=datatype#psycopg2.extensions.QueryCanceledError
return isinstance(e, psycopg2.extensions.QueryCanceledError)
@staticmethod
def is_table_missing(e):
return getattr(e, 'pgcode', None) == '42P01'
@staticmethod
def is_missing_column(e):
return getattr(e, 'pgcode', None) == '42703'
@staticmethod
def is_access_denied(e):
return e.pgcode == '42501'
@staticmethod
def cant_drop_field_or_key(e):
return e.pgcode.startswith('23')
@staticmethod
def is_duplicate_entry(e):
return e.pgcode == '23505'
@staticmethod
def is_primary_key_violation(e):
return e.pgcode == '23505' and '_pkey' in cstr(e.args[0])
@staticmethod
def is_unique_key_violation(e):
return e.pgcode == '23505' and '_key' in cstr(e.args[0])
@staticmethod
def is_duplicate_fieldname(e):
return e.pgcode == '42701'
@staticmethod
def is_data_too_long(e):
return e.pgcode == '22001'
def create_auth_table(self):
    """Create the password-storage table ``__Auth`` if it does not exist."""
    self.sql_ddl("""create table if not exists "__Auth" (
        "doctype" VARCHAR(140) NOT NULL,
        "name" VARCHAR(255) NOT NULL,
        "fieldname" VARCHAR(140) NOT NULL,
        "password" TEXT NOT NULL,
        "encrypted" INT NOT NULL DEFAULT 0,
        PRIMARY KEY ("doctype", "name", "fieldname")
        )""")
def create_global_search_table(self):
    """Create the ``__global_search`` table if it is not present yet."""
    # PEP 8 idiom: `x not in y` rather than `not x in y`; behaviour unchanged
    if '__global_search' not in self.get_tables():
        self.sql('''create table "__global_search"(
            doctype varchar(100),
            name varchar({0}),
            title varchar({0}),
            content text,
            route varchar({0}),
            published int not null default 0,
            unique (doctype, name))'''.format(self.VARCHAR_LEN))
def create_user_settings_table(self):
    """Create the per-user settings table ``__UserSettings`` if missing."""
    self.sql_ddl("""create table if not exists "__UserSettings" (
        "user" VARCHAR(180) NOT NULL,
        "doctype" VARCHAR(180) NOT NULL,
        "data" TEXT,
        UNIQUE ("user", "doctype")
        )""")
def create_help_table(self):
    """Create the ``help`` table and its path index.

    NOTE(review): unlike the other create_* helpers this one has no
    "if not exists" guard on the table itself — it will raise if the
    table already exists; only the index creation is idempotent.
    """
    self.sql('''CREATE TABLE "help"(
        "path" varchar(255),
        "content" text,
        "title" text,
        "intro" text,
        "full_path" text)''')
    self.sql('''CREATE INDEX IF NOT EXISTS "help_index" ON "help" ("path")''')
def updatedb(self, doctype, meta=None):
    """
    Syncs a `DocType` to the table
    * creates if required
    * updates columns
    * updates indices
    """
    # NOTE(review): doctype is interpolated directly into the query —
    # assumes doctype names are trusted/validated upstream.
    res = self.sql("select issingle from `tabDocType` where name='{}'".format(doctype))
    if not res:
        raise Exception('Wrong doctype {0} in updatedb'.format(doctype))
    # "single" doctypes have no backing table; only sync real tables
    if not res[0][0]:
        db_table = PostgresTable(doctype, meta)
        db_table.validate()
        # close the open transaction before the DDL in sync(), then
        # start a fresh one afterwards
        self.commit()
        db_table.sync()
        self.begin()
@staticmethod
def get_on_duplicate_update(key='name'):
if isinstance(key, list):
key = '", "'.join(key)
return 'ON CONFLICT ("{key}") DO UPDATE SET '.format(
key=key
)
def check_transaction_status(self, query):
    """No-op for postgres (interface hook; other backends may validate
    writes per-transaction here)."""
    pass
def has_index(self, table_name, index_name):
    """Return a truthy result if the named index exists on the table.

    NOTE(review): names are interpolated into the query — assumes both
    come from trusted schema metadata, not user input.
    """
    return self.sql("""SELECT 1 FROM pg_indexes WHERE tablename='{table_name}'
        and indexname='{index_name}' limit 1""".format(table_name=table_name, index_name=index_name))
def add_index(self, doctype, fields, index_name=None):
    """Creates an index with given fields if not already created.
    Index name will be `fieldname1_fieldname2_index`"""
    index_name = index_name or self.get_index_name(fields)
    table_name = 'tab' + doctype
    # close the open transaction before DDL
    self.commit()
    # backquotes around the table name are rewritten to double quotes
    # by modify_query() in self.sql()
    self.sql("""CREATE INDEX IF NOT EXISTS "{}" ON `{}`("{}")""".format(index_name, table_name, '", "'.join(fields)))
def add_unique(self, doctype, fields, constraint_name=None):
    """Add a UNIQUE constraint over ``fields`` if it does not exist.

    ``fields`` may be a single fieldname or a list; the default
    constraint name is ``unique_field1_field2``.
    """
    if isinstance(fields, string_types):
        fields = [fields]
    if not constraint_name:
        constraint_name = "unique_" + "_".join(fields)
    # only add the constraint when it is not already present
    if not self.sql("""
        SELECT CONSTRAINT_NAME
        FROM information_schema.TABLE_CONSTRAINTS
        WHERE table_name=%s
        AND constraint_type='UNIQUE'
        AND CONSTRAINT_NAME=%s""",
        ('tab' + doctype, constraint_name)):
        # close the open transaction before DDL
        self.commit()
        # NOTE(review): %-interpolation here — assumes doctype/fields come
        # from trusted schema metadata, not user input.
        self.sql("""ALTER TABLE `tab%s`
            ADD CONSTRAINT %s UNIQUE (%s)""" % (doctype, constraint_name, ", ".join(fields)))
def get_table_columns_description(self, table_name):
    """Returns list of column and its description.

    Each row carries: name, type (normalized, e.g. varchar(n)/timestamp),
    index (count of indexes touching the column), default (cast suffix
    stripped) and unique (whether any covering index is UNIQUE).
    """
    # pylint: disable=W1401
    # the \(.*\) below is a postgres regex matching the indexed column
    # list inside the indexdef text
    return self.sql('''
        SELECT a.column_name AS name,
        CASE LOWER(a.data_type)
        WHEN 'character varying' THEN CONCAT('varchar(', a.character_maximum_length ,')')
        WHEN 'timestamp without time zone' THEN 'timestamp'
        ELSE a.data_type
        END AS type,
        COUNT(b.indexdef) AS Index,
        SPLIT_PART(COALESCE(a.column_default, NULL), '::', 1) AS default,
        BOOL_OR(b.unique) AS unique
        FROM information_schema.columns a
        LEFT JOIN
        (SELECT indexdef, tablename, indexdef LIKE '%UNIQUE INDEX%' AS unique
        FROM pg_indexes
        WHERE tablename='{table_name}') b
        ON SUBSTRING(b.indexdef, '\(.*\)') LIKE CONCAT('%', a.column_name, '%')
        WHERE a.table_name = '{table_name}'
        GROUP BY a.column_name, a.data_type, a.column_default, a.character_maximum_length;'''
        .format(table_name=table_name), as_dict=1)
def get_database_list(self, target):
    """Return the names of all databases on the server.

    NOTE(review): ``target`` is unused here — presumably kept for
    signature compatibility with other backends; confirm against callers.
    """
    return [d[0] for d in self.sql("SELECT datname FROM pg_database;")]
def modify_query(query):
    """Rewrite a MariaDB-flavoured query for postgres.

    Replaces identifier backquotes with double quotes, converts
    ``locate()`` calls to ``strpos()``, and double-quotes bare
    ``from tabXyz`` table references.
    """
    # postgres uses double quotes for identifiers
    query = replace_locate_with_strpos(query.replace('`', '"'))
    # bare "from tabXyz" references need identifier quoting
    if re.search('from tab', query, flags=re.IGNORECASE):
        query = re.sub('from tab([a-zA-Z]*)', r'from "tab\1"', query, flags=re.IGNORECASE)
    return query

def replace_locate_with_strpos(query):
    """Convert ``locate(needle, haystack)`` to postgres ``strpos(haystack, needle)``."""
    if re.search(r'locate\(', query, flags=re.IGNORECASE):
        query = re.sub(r'locate\(([^,]+),([^)]+)\)', r'strpos(\2, \1)', query, flags=re.IGNORECASE)
    return query
| |
# Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines ranking metrics as TF ops.
The metrics here are meant to be used during the TF training. That is, a batch
of instances in the Tensor format are evaluated by ops. It works with listwise
Tensors only.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import tensorflow as tf
from tensorflow_ranking.python import metrics_impl
from tensorflow_ranking.python import utils
# Default label gain for (N)DCG: 2^label - 1.
_DEFAULT_GAIN_FN = lambda label: tf.pow(2.0, label) - 1

# Default rank discount for (N)DCG: log(2) / log(1 + rank), i.e.
# 1 / log2(rank + 1).
_DEFAULT_RANK_DISCOUNT_FN = lambda rank: tf.math.log(2.) / tf.math.log1p(rank)
class RankingMetricKey(object):
  """Ranking metric key strings.

  These keys select a metric in `compute_mean` and
  `make_ranking_metric_fn`.
  """
  # Mean Reciprocal Rank. For binary relevance.
  MRR = 'mrr'
  # Average Relevance Position.
  ARP = 'arp'
  # Normalized Discounted Cumulative Gain.
  NDCG = 'ndcg'
  # Discounted Cumulative Gain.
  DCG = 'dcg'
  # Precision. For binary relevance.
  PRECISION = 'precision'
  # Recall. For binary relevance.
  RECALL = 'recall'
  # Mean Average Precision. For binary relevance.
  MAP = 'map'
  # PrecisionIA. For binary relevance of subtopics (3-D labels).
  PRECISION_IA = 'precision_ia'
  # Ordered Pair Accuracy.
  ORDERED_PAIR_ACCURACY = 'ordered_pair_accuracy'
  # Alpha Discounted Cumulative Gain (diversity-aware).
  ALPHA_DCG = 'alpha_dcg'
  # Binary Preference.
  BPREF = 'bpref'
def compute_mean(metric_key,
                 labels,
                 predictions,
                 weights=None,
                 topn=None,
                 name=None):
  """Returns the mean of the specified metric given the inputs.

  Args:
    metric_key: A key in `RankingMetricKey`.
    labels: A `Tensor` of the same shape as `predictions` representing
      relevance.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
      former case is per-example and the latter case is per-list.
    topn: An `integer` specifying the cutoff of how many items are considered in
      the metric.
    name: A `string` used as the name for this metric.

  Returns:
    A scalar as the computed metric.
  """
  metric_dict = {
      # `name` (not `metric_key`) is passed as the metric name for every
      # entry, consistent with the rest of the constructors in this module.
      RankingMetricKey.ARP: metrics_impl.ARPMetric(name),
      RankingMetricKey.MRR: metrics_impl.MRRMetric(name, topn),
      RankingMetricKey.NDCG: metrics_impl.NDCGMetric(name, topn),
      RankingMetricKey.DCG: metrics_impl.DCGMetric(name, topn),
      RankingMetricKey.PRECISION: metrics_impl.PrecisionMetric(name, topn),
      RankingMetricKey.RECALL: metrics_impl.RecallMetric(name, topn),
      RankingMetricKey.MAP: metrics_impl.MeanAveragePrecisionMetric(name, topn),
      RankingMetricKey.ORDERED_PAIR_ACCURACY: metrics_impl.OPAMetric(name),
      RankingMetricKey.BPREF: metrics_impl.BPrefMetric(name, topn),
  }
  assert metric_key in metric_dict, ('metric_key %s not supported.' %
                                     metric_key)
  # TODO: Add mask argument for metric.compute() call
  metric, weight = metric_dict[metric_key].compute(labels, predictions, weights)
  # Weighted mean over the whole batch; zero weight sum yields 0.
  return tf.compat.v1.div_no_nan(
      tf.reduce_sum(input_tensor=metric * weight),
      tf.reduce_sum(input_tensor=weight))
def make_ranking_metric_fn(metric_key,
                           weights_feature_name=None,
                           topn=None,
                           name=None,
                           gain_fn=_DEFAULT_GAIN_FN,
                           rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN,
                           **kwargs):
  """Factory method to create a ranking metric function.

  Args:
    metric_key: A key in `RankingMetricKey`.
    weights_feature_name: A `string` specifying the name of the weights feature
      in `features` dict.
    topn: An `integer` specifying the cutoff of how many items are considered in
      the metric.
    name: A `string` used as the name for this metric.
    gain_fn: (function) Transforms labels. A method to calculate gain parameters
      used in the definitions of the DCG and NDCG metrics, where the input is
      the relevance label of the item. The gain is often defined to be of the
      form 2^label-1.
    rank_discount_fn: (function) The rank discount function. A method to define
      the discount parameters used in the definitions of DCG and NDCG metrics,
      where the input is the rank of item. The discount function is commonly
      defined to be of the form log(rank+1).
    **kwargs: Other keyword arguments (e.g. alpha, seed).

  Returns:
    A metric fn with the following Args:
    * `labels`: A `Tensor` of the same shape as `predictions` representing
      graded relevance.
    * `predictions`: A `Tensor` with shape [batch_size, list_size]. Each value
      is the ranking score of the corresponding example.
    * `features`: A dict of `Tensor`s that contains all features.
  """

  def _get_weights(features):
    """Get weights tensor from features and reshape it to 2-D if necessary."""
    weights = None
    if weights_feature_name:
      weights = tf.convert_to_tensor(value=features[weights_feature_name])
      # Convert weights to a 2-D Tensor.
      weights = utils.reshape_to_2d(weights)
    return weights

  # Each closure below adapts one module-level metric wrapper to the common
  # (labels, predictions, features) signature, capturing topn/name/etc.
  def _average_relevance_position_fn(labels, predictions, features):
    """Returns average relevance position as the metric."""
    return average_relevance_position(
        labels, predictions, weights=_get_weights(features), name=name)

  def _mean_reciprocal_rank_fn(labels, predictions, features):
    """Returns mean reciprocal rank as the metric."""
    return mean_reciprocal_rank(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _normalized_discounted_cumulative_gain_fn(labels, predictions, features):
    """Returns normalized discounted cumulative gain as the metric."""
    return normalized_discounted_cumulative_gain(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        gain_fn=gain_fn,
        rank_discount_fn=rank_discount_fn)

  def _discounted_cumulative_gain_fn(labels, predictions, features):
    """Returns discounted cumulative gain as the metric."""
    return discounted_cumulative_gain(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        gain_fn=gain_fn,
        rank_discount_fn=rank_discount_fn)

  def _precision_fn(labels, predictions, features):
    """Returns precision as the metric."""
    return precision(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _recall_fn(labels, predictions, features):
    """Returns recall as the metric."""
    return recall(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _mean_average_precision_fn(labels, predictions, features):
    """Returns mean average precision as the metric."""
    return mean_average_precision(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _precision_ia_fn(labels, predictions, features):
    """Returns an intent-aware precision as the metric."""
    return precision_ia(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _ordered_pair_accuracy_fn(labels, predictions, features):
    """Returns ordered pair accuracy as the metric."""
    return ordered_pair_accuracy(
        labels, predictions, weights=_get_weights(features), name=name)

  def _alpha_discounted_cumulative_gain_fn(labels, predictions, features):
    """Returns alpha discounted cumulative gain as the metric."""
    return alpha_discounted_cumulative_gain(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        rank_discount_fn=rank_discount_fn,
        **kwargs)

  def _binary_preference_fn(labels, predictions, features):
    """Returns binary preference as the metric."""
    return binary_preference(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        **kwargs)

  # Dispatch table from metric key to the adapted metric fn.
  metric_fn_dict = {
      RankingMetricKey.ARP: _average_relevance_position_fn,
      RankingMetricKey.MRR: _mean_reciprocal_rank_fn,
      RankingMetricKey.NDCG: _normalized_discounted_cumulative_gain_fn,
      RankingMetricKey.DCG: _discounted_cumulative_gain_fn,
      RankingMetricKey.RECALL: _recall_fn,
      RankingMetricKey.PRECISION: _precision_fn,
      RankingMetricKey.MAP: _mean_average_precision_fn,
      RankingMetricKey.PRECISION_IA: _precision_ia_fn,
      RankingMetricKey.ORDERED_PAIR_ACCURACY: _ordered_pair_accuracy_fn,
      RankingMetricKey.ALPHA_DCG: _alpha_discounted_cumulative_gain_fn,
      RankingMetricKey.BPREF: _binary_preference_fn,
  }
  assert metric_key in metric_fn_dict, ('metric_key %s not supported.' %
                                        metric_key)
  return metric_fn_dict[metric_key]
def mean_reciprocal_rank(labels,
                         predictions,
                         weights=None,
                         topn=None,
                         name=None):
  """Computes mean reciprocal rank (MRR).

  Args:
    labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means
      a relevant example.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: An integer cutoff specifying how many examples to consider for this
      metric. If None, the whole list is considered.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted mean reciprocal rank of the batch.
  """
  impl = metrics_impl.MRRMetric(name, topn)
  with tf.compat.v1.name_scope(impl.name, 'mean_reciprocal_rank',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_mrr, per_list_weights = impl.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(per_list_mrr, per_list_weights)
def average_relevance_position(labels, predictions, weights=None, name=None):
  """Computes average relevance position (ARP).

  This can also be named as average_relevance_rank, but this can be confusing
  with mean_reciprocal_rank in acronyms. This name is more distinguishing and
  has been used historically for binary relevance as average_click_position.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted average relevance position.
  """
  impl = metrics_impl.ARPMetric(name)
  with tf.compat.v1.name_scope(impl.name, 'average_relevance_position',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_arp, per_list_weights = impl.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(per_list_arp, per_list_weights)
def precision(labels, predictions, weights=None, topn=None, name=None):
  """Computes precision as weighted average of relevant examples.

  Args:
    labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means
      a relevant example.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted precision of the batch.
  """
  impl = metrics_impl.PrecisionMetric(name, topn)
  with tf.compat.v1.name_scope(impl.name, 'precision',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_precision, per_list_weights = impl.compute(labels, predictions,
                                                        weights)
    return tf.compat.v1.metrics.mean(per_list_precision, per_list_weights)
def recall(labels, predictions, weights=None, topn=None, name=None):
  """Computes recall as weighted average of relevant examples.

  Args:
    labels: A `Tensor` as the same shape as `predictions`. A value >= 1 means
      a relevant example.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` as the same shape as predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted recall of the batch.
  """
  impl = metrics_impl.RecallMetric(name, topn)
  with tf.compat.v1.name_scope(impl.name, 'recall',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_recall, per_list_weights = impl.compute(labels, predictions,
                                                     weights)
    return tf.compat.v1.metrics.mean(per_list_recall, per_list_weights)
def mean_average_precision(labels,
                           predictions,
                           weights=None,
                           topn=None,
                           name=None):
  """Computes mean average precision (MAP).

  The implementation of MAP is based on Equation (1.7) in the following:
  Liu, T-Y "Learning to Rank for Information Retrieval" found at
  https://www.nowpublishers.com/article/DownloadSummary/INR-016

  Args:
    labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means
      a relevant example.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the mean average precision.
  """
  impl = metrics_impl.MeanAveragePrecisionMetric(name, topn)
  with tf.compat.v1.name_scope(impl.name, 'mean_average_precision',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_map, per_list_weights = impl.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(per_list_map, per_list_weights)
def precision_ia(labels, predictions, weights=None, topn=None, name=None):
  """Computes Intent-Aware Precision as weighted average of relevant examples.

  Args:
    labels: A `Tensor` with shape [batch_size, list_size, subtopic_size]. A
      nonzero value means that the example covers the corresponding subtopic.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted precision of the batch.
  """
  impl = metrics_impl.PrecisionIAMetric(name, topn)
  with tf.compat.v1.name_scope(impl.name, 'precision_ia',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_precision, per_list_weights = impl.compute(labels, predictions,
                                                        weights)
    return tf.compat.v1.metrics.mean(per_list_precision, per_list_weights)
def normalized_discounted_cumulative_gain(
    labels,
    predictions,
    weights=None,
    topn=None,
    name=None,
    gain_fn=_DEFAULT_GAIN_FN,
    rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN):
  """Computes normalized discounted cumulative gain (NDCG).

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.
    gain_fn: (function) Transforms labels. Note that this implementation of
      NDCG assumes that this function is *increasing* as a function of its
      input.
    rank_discount_fn: (function) The rank discount function. Note that this
      implementation of NDCG assumes that this function is *decreasing* as a
      function of its input.

  Returns:
    A metric for the weighted normalized discounted cumulative gain of the
    batch.
  """
  impl = metrics_impl.NDCGMetric(name, topn, gain_fn, rank_discount_fn)
  with tf.compat.v1.name_scope(impl.name,
                               'normalized_discounted_cumulative_gain',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_ndcg, per_list_weights = impl.compute(labels, predictions,
                                                   weights)
    return tf.compat.v1.metrics.mean(per_list_ndcg, per_list_weights)
def discounted_cumulative_gain(labels,
                               predictions,
                               weights=None,
                               topn=None,
                               name=None,
                               gain_fn=_DEFAULT_GAIN_FN,
                               rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN):
  """Computes discounted cumulative gain (DCG).

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.
    gain_fn: (function) Transforms labels.
    rank_discount_fn: (function) The rank discount function.

  Returns:
    A metric for the weighted discounted cumulative gain of the batch.
  """
  metric = metrics_impl.DCGMetric(name, topn, gain_fn, rank_discount_fn)
  # Use `metric.name` for the scope like every other wrapper in this module
  # (assumes `metric.name` echoes the `name` passed to the constructor, as
  # with the sibling metrics_impl classes, so the resulting scope name is
  # unchanged).
  with tf.compat.v1.name_scope(metric.name, 'discounted_cumulative_gain',
                               (labels, predictions, weights)):
    # TODO: Add mask argument for metric.compute() call
    dcg, per_list_weights = metric.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(dcg, per_list_weights)
def alpha_discounted_cumulative_gain(
    labels,
    predictions,
    weights=None,
    topn=None,
    name=None,
    rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN,
    alpha=0.5,
    seed=None):
  """Computes alpha discounted cumulative gain (alpha-DCG).

  Args:
    labels: A `Tensor` with shape [batch_size, list_size, subtopic_size]. Each
      value represents graded relevance to a subtopic: 1 for relevent subtopic,
      0 for irrelevant, and -1 for paddings. When the actual subtopic number
      of a query is smaller than the `subtopic_size`, `labels` will be padded
      to `subtopic_size` with -1, similar to the paddings used for queries
      with doc number less then list_size.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of shape [batch_size, list_size] or [batch_size, 1].
      They are per-example and per-list, respectively.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.
    rank_discount_fn: A function of rank discounts. Default is set to
      discount = 1 / log2(rank+1).
    alpha: A float between 0 and 1. Originally introduced as an assessor error
      in judging whether a document is covering a subtopic of the query. It
      can also be interpreted as the inverse number of documents covering the
      same subtopic reader needs to get and confirm the subtopic information
      of a query.
    seed: The ops-level random seed used in shuffle ties in `sort_by_scores`.

  Returns:
    A metric for the weighted alpha discounted cumulative gain of the batch.
  """
  metric = metrics_impl.AlphaDCGMetric(name, topn, alpha=alpha,
                                       rank_discount_fn=rank_discount_fn,
                                       seed=seed)
  # Use `metric.name` for the scope like every other wrapper in this module
  # (assumes `metric.name` echoes the constructor's `name`, so the resulting
  # scope name is unchanged).
  with tf.compat.v1.name_scope(metric.name, 'alpha_discounted_cumulative_gain',
                               (labels, predictions, weights)):
    # TODO: Add mask argument for metric.compute() call
    alpha_dcg, per_list_weights = metric.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(alpha_dcg, per_list_weights)
def ordered_pair_accuracy(labels, predictions, weights=None, name=None):
  """Computes the percentage of correctly ordered pair.

  For any pair of examples, we compare their orders determined by `labels` and
  `predictions`. They are correctly ordered if the two orders are compatible.
  That is, labels l_i > l_j and predictions s_i > s_j and the weight for this
  pair is the weight from the l_i.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    name: A string used as the name for this metric.

  Returns:
    A metric for the accuracy or ordered pairs.
  """
  impl = metrics_impl.OPAMetric(name)
  with tf.compat.v1.name_scope(impl.name, 'ordered_pair_accuracy',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_opa, per_list_weights = impl.compute(labels, predictions, weights)
    return tf.compat.v1.metrics.mean(per_list_opa, per_list_weights)
def binary_preference(labels,
                      predictions,
                      weights=None,
                      topn=None,
                      name=None,
                      use_trec_version=True):
  """Computes binary preference (BPref).

  The implementation of BPref is based on the description in the following:
  https://trec.nist.gov/pubs/trec15/appendices/CE.MEASURES06.pdf
  BPref = 1 / R SUM_r(1 - |n ranked higher than r| / min(R, N))

  Args:
    labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means
      a relevant example.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.
    use_trec_version: A boolean to choose the version of the formula to use.
      If False, then the alternative BPref formula will be used:
      BPref = 1 / R SUM_r(1 - |n ranked higher than r| / R)

  Returns:
    A metric for binary preference metric of the batch.
  """
  impl = metrics_impl.BPrefMetric(
      name, topn, use_trec_version=use_trec_version)
  with tf.compat.v1.name_scope(impl.name, 'binary_preference',
                               (labels, predictions, weights)):
    # TODO: pass a mask argument once metric.compute() supports one.
    per_list_bpref, per_list_weights = impl.compute(labels, predictions,
                                                    weights)
    return tf.compat.v1.metrics.mean(per_list_bpref, per_list_weights)
def eval_metric(metric_fn, **kwargs):
  """A stand-alone method to evaluate metrics on ranked results.

  Note that this method requires for the arguments of the metric to called
  explicitly. So, the correct usage is of the following form:
    tfr.metrics.eval_metric(tfr.metrics.mean_reciprocal_rank,
                            labels=my_labels,
                            predictions=my_scores).
  Here is a simple example showing how to use this method:
    import tensorflow_ranking as tfr
    scores = [[1., 3., 2.], [1., 2., 3.]]
    labels = [[0., 0., 1.], [0., 1., 2.]]
    weights = [[1., 2., 3.], [4., 5., 6.]]
    tfr.metrics.eval_metric(
        metric_fn=tfr.metrics.mean_reciprocal_rank,
        labels=labels,
        predictions=scores,
        weights=weights)

  Args:
    metric_fn: (function) Metric definition. A metric appearing in the
      TF-Ranking metrics module, e.g. tfr.metrics.mean_reciprocal_rank
    **kwargs: A collection of argument values to be passed to the metric, e.g.
      labels and predictions. See `_RankingMetric` and the various metric
      definitions in tfr.metrics for the specifics.

  Returns:
    The evaluation of the metric on the input ranked lists.

  Raises:
    ValueError: One of the arguments required by the metric is not provided in
      the list of arguments included in kwargs.
  """
  # `inspect.getargspec` was removed in Python 3.11; `getfullargspec` is the
  # drop-in replacement for plain positional/keyword signatures.
  metric_spec = inspect.getfullargspec(metric_fn)
  metric_args = metric_spec.args
  # Arguments without a default value are required. Guard against an empty
  # defaults tuple/None: `metric_args[:-0]` would wrongly be `[]`.
  num_defaults = len(metric_spec.defaults) if metric_spec.defaults else 0
  required_metric_args = metric_args[:len(metric_args) - num_defaults]
  for arg in required_metric_args:
    if arg not in kwargs:
      raise ValueError('Metric %s requires argument %s.' %
                       (metric_fn.__name__, arg))
  args = {}
  for arg in kwargs:
    if arg not in metric_args:
      raise ValueError('Metric %s does not accept argument %s.' %
                       (metric_fn.__name__, arg))
    args[arg] = kwargs[arg]
  with tf.compat.v1.Session() as sess:
    metric_op, update_op = metric_fn(**args)
    sess.run(tf.compat.v1.local_variables_initializer())
    sess.run([metric_op, update_op])
    return sess.run(metric_op)
| |
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import itertools
import uuid
import mock
from oslo_policy import policy as oslo_policy
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_virtual_interface
from nova.tests import uuidsentinel as uuids
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'

# Minimal instance-info-cache fixture: empty network info plus the
# standard soft-delete bookkeeping columns.
fake_info_cache = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'instance_uuid': uuids.instance,
    'network_info': '[]',
    }
class NetworkPolicyTestCase(test.TestCase):
    """Checks that nova-network API calls are gated by policy."""

    def setUp(self):
        super(NetworkPolicyTestCase, self).setUp()
        # start from a pristine policy state for every test
        policy.reset()
        policy.init()
        self.context = context.get_admin_context()

    def tearDown(self):
        super(NetworkPolicyTestCase, self).tearDown()
        policy.reset()

    @mock.patch.object(policy, 'enforce')
    def test_check_policy(self, mock_enforce):
        """check_policy forwards to policy.enforce with a scoped target."""
        target = {
            'project_id': self.context.project_id,
            'user_id': self.context.user_id,
        }
        api.check_policy(self.context, 'get_all')
        mock_enforce.assert_called_once_with(
            self.context, 'network:get_all', target)

    def test_skip_policy(self):
        """skip_policy_check=True bypasses an otherwise-denying rule."""
        policy.reset()
        # '!' denies the action for everyone
        rules = {'network:get_all': '!'}
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
        # NOTE(review): this local `api` shadows the imported `api` module
        # used by test_check_policy above.
        api = network.API()
        self.assertRaises(exception.PolicyNotAuthorized,
                          api.get_all, self.context)
        api = network.API(skip_policy_check=True)
        api.get_all(self.context)
class ApiTestCase(test.TestCase):
    """Tests for nova.network.api.API with mocked objects/DB/RPC layers."""

    def setUp(self):
        super(ApiTestCase, self).setUp()
        self.network_api = network.API()
        self.context = context.RequestContext('fake-user',
                                              fakes.FAKE_PROJECT_ID)

    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all(self, mock_get_all):
        # Default lookup is scoped to the caller's project.
        mock_get_all.return_value = mock.sentinel.get_all
        self.assertEqual(mock.sentinel.get_all,
                         self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only=True)

    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all_liberal(self, mock_get_all):
        # NOTE(review): the manager name is misspelled ('Manaager') —
        # presumably deliberate so it does not match the stock manager and
        # the project filter is loosened; confirm against API.get_all().
        self.flags(network_manager='nova.network.manager.FlatDHCPManaager')
        mock_get_all.return_value = mock.sentinel.get_all
        self.assertEqual(mock.sentinel.get_all,
                         self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only="allow_none")

    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all_no_networks(self, mock_get_all):
        # NoNetworksFound is swallowed and surfaced as an empty list.
        mock_get_all.side_effect = exception.NoNetworksFound
        self.assertEqual([], self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only=True)

    @mock.patch('nova.objects.Network.get_by_uuid')
    def test_get(self, mock_get):
        mock_get.return_value = mock.sentinel.get_by_uuid
        self.assertEqual(mock.sentinel.get_by_uuid,
                         self.network_api.get(self.context, uuids.instance))

    @mock.patch('nova.objects.Network.get_by_id')
    @mock.patch('nova.db.virtual_interface_get_by_instance')
    def test_get_vifs_by_instance(self, mock_get_by_instance,
                                  mock_get_by_id):
        # One VIF attached to network id 123; the API must resolve the
        # network's uuid onto the returned VIF as net_uuid.
        mock_get_by_instance.return_value = [
            dict(test_virtual_interface.fake_vif,
                 network_id=123)]
        mock_get_by_id.return_value = objects.Network()
        mock_get_by_id.return_value.uuid = uuids.network_1
        instance = objects.Instance(uuid=uuids.instance)
        vifs = self.network_api.get_vifs_by_instance(self.context,
                                                     instance)
        self.assertEqual(1, len(vifs))
        self.assertEqual(123, vifs[0].network_id)
        self.assertEqual(uuids.network_1, vifs[0].net_uuid)
        mock_get_by_instance.assert_called_once_with(
            self.context, uuids.instance)
        # Network lookup must not be project-scoped here.
        mock_get_by_id.assert_called_once_with(self.context, 123,
                                               project_only='allow_none')

    @mock.patch('nova.objects.Network.get_by_id')
    @mock.patch('nova.db.virtual_interface_get_by_address')
    def test_get_vif_by_mac_address(self, mock_get_by_address,
                                    mock_get_by_id):
        mock_get_by_address.return_value = dict(
            test_virtual_interface.fake_vif, network_id=123)
        mock_get_by_id.return_value = objects.Network(
            uuid=uuids.network_1)
        vif = self.network_api.get_vif_by_mac_address(self.context,
                                                      mock.sentinel.mac)
        self.assertEqual(123, vif.network_id)
        self.assertEqual(uuids.network_1, vif.net_uuid)
        mock_get_by_address.assert_called_once_with(self.context,
                                                    mock.sentinel.mac)
        mock_get_by_id.assert_called_once_with(self.context, 123,
                                               project_only='allow_none')

    def test_allocate_for_instance_handles_macs_passed(self):
        # If a macs argument is supplied to the 'nova-network' API, it is just
        # ignored. This test checks that the call down to the rpcapi layer
        # doesn't pass macs down: nova-network doesn't support hypervisor
        # mac address limits (today anyhow).
        macs = set(['ab:cd:ef:01:23:34'])
        with mock.patch.object(self.network_api.network_rpcapi,
                               "allocate_for_instance") as mock_alloc:
            # Expect every rpcapi kwarg, each matched loosely with mock.ANY;
            # crucially 'macs' is absent from this expected set.
            kwargs = dict(zip(['host', 'instance_id', 'project_id',
                               'requested_networks', 'rxtx_factor', 'vpn',
                               'macs', 'dhcp_options'],
                              itertools.repeat(mock.ANY)))
            mock_alloc.return_value = []
            flavor = flavors.get_default_flavor()
            flavor['rxtx_factor'] = 0
            instance = objects.Instance(id=1, uuid=uuids.instance,
                                        project_id='project_id',
                                        host='host', system_metadata={},
                                        flavor=flavor)
            self.network_api.allocate_for_instance(
                self.context, instance, 'vpn', 'requested_networks', macs=macs)
            mock_alloc.assert_called_once_with(self.context, **kwargs)

    def _do_test_associate_floating_ip(self, orig_instance_uuid):
        """Test post-association logic.

        When orig_instance_uuid is set, the IP was previously associated
        with another instance, and both instances' info caches must be
        refreshed; otherwise only the new instance's cache is updated.
        """
        new_instance = objects.Instance(uuid=FAKE_UUID)

        def fake_associate(*args, **kwargs):
            return orig_instance_uuid

        def fake_instance_get_by_uuid(context, instance_uuid,
                                      columns_to_join=None,
                                      use_slave=None):
            if instance_uuid == orig_instance_uuid:
                self.assertIn('extra.flavor', columns_to_join)
            return fake_instance.fake_db_instance(uuid=instance_uuid)

        def fake_get_nw_info(ctxt, instance):
            class FakeNWInfo(object):
                def json(self):
                    pass
            return FakeNWInfo()

        if orig_instance_uuid:
            expected_updated_instances = [new_instance.uuid,
                                          orig_instance_uuid]
        else:
            expected_updated_instances = [new_instance.uuid]

        def fake_instance_info_cache_update(context, instance_uuid, cache):
            # Caches are updated in reverse order of the expected list.
            self.assertEqual(instance_uuid,
                             expected_updated_instances.pop())
            return fake_info_cache

        def fake_update_instance_cache_with_nw_info(api, context, instance,
                                                    nw_info=None,
                                                    update_cells=True):
            return

        with test.nested(
            mock.patch.object(floating_ips.FloatingIP, 'associate_floating_ip',
                              fake_associate),
            mock.patch.object(self.network_api.db, 'instance_get_by_uuid',
                              fake_instance_get_by_uuid),
            mock.patch.object(self.network_api, '_get_instance_nw_info',
                              fake_get_nw_info),
            mock.patch.object(self.network_api.db,
                              'instance_info_cache_update',
                              fake_instance_info_cache_update),
            mock.patch.object(base_api, "update_instance_cache_with_nw_info",
                              fake_update_instance_cache_with_nw_info)
        ):
            self.network_api.associate_floating_ip(self.context,
                                                   new_instance,
                                                   '172.24.4.225',
                                                   '10.0.0.2')

    def test_associate_preassociated_floating_ip(self):
        self._do_test_associate_floating_ip(uuids.orig_uuid)

    def test_associate_unassociated_floating_ip(self):
        self._do_test_associate_floating_ip(None)

    def test_get_floating_ip_invalid_id(self):
        # Non-numeric id must be rejected before any DB lookup.
        self.assertRaises(exception.InvalidID,
                          self.network_api.get_floating_ip,
                          self.context, '123zzz')

    @mock.patch('nova.objects.FloatingIP.get_by_id')
    def test_get_floating_ip(self, mock_get):
        floating = mock.sentinel.floating
        mock_get.return_value = floating
        self.assertEqual(floating,
                         self.network_api.get_floating_ip(self.context, 123))
        mock_get.assert_called_once_with(self.context, 123)

    @mock.patch('nova.objects.FloatingIP.get_pool_names')
    def test_get_floating_ip_pools(self, mock_get):
        pools = ['foo', 'bar']
        mock_get.return_value = pools
        self.assertEqual(pools,
                         self.network_api.get_floating_ip_pools(
                             self.context))

    @mock.patch('nova.objects.FloatingIP.get_by_address')
    def test_get_floating_ip_by_address(self, mock_get):
        floating = mock.sentinel.floating
        mock_get.return_value = floating
        self.assertEqual(floating,
                         self.network_api.get_floating_ip_by_address(
                             self.context, mock.sentinel.address))
        mock_get.assert_called_once_with(self.context,
                                         mock.sentinel.address)

    @mock.patch('nova.objects.FloatingIPList.get_by_project')
    def test_get_floating_ips_by_project(self, mock_get):
        floatings = mock.sentinel.floating_ips
        mock_get.return_value = floatings
        self.assertEqual(floatings,
                         self.network_api.get_floating_ips_by_project(
                             self.context))
        mock_get.assert_called_once_with(self.context,
                                         self.context.project_id)

    def _stub_migrate_instance_calls(self, method, multi_host, info):
        """Stub the rpcapi migrate method and the multi-address lookup.

        :param method: rpcapi method name to capture kwargs from
        :param multi_host: value fake _get_multi_addresses should report
        :param info: dict the captured rpcapi kwargs are written into
        :returns: (fake_instance, fake_migration, expected kwargs dict)
        """
        fake_flavor = flavors.get_default_flavor()
        fake_flavor['rxtx_factor'] = 1.21
        fake_instance = objects.Instance(
            uuid=uuid.uuid4().hex,
            project_id='fake_project_id',
            instance_type_id=fake_flavor['id'],
            flavor=fake_flavor,
            system_metadata={})
        fake_migration = {'source_compute': 'fake_compute_source',
                          'dest_compute': 'fake_compute_dest'}

        def fake_mig_inst_method(*args, **kwargs):
            info['kwargs'] = kwargs

        def fake_get_multi_addresses(*args, **kwargs):
            return multi_host, ['fake_float1', 'fake_float2']

        self.stub_out('nova.network.rpcapi.NetworkAPI.' + method,
                      fake_mig_inst_method)
        self.stub_out('nova.network.api.API._get_multi_addresses',
                      fake_get_multi_addresses)
        expected = {'instance_uuid': fake_instance.uuid,
                    'source_compute': 'fake_compute_source',
                    'dest_compute': 'fake_compute_dest',
                    'rxtx_factor': 1.21,
                    'project_id': 'fake_project_id',
                    'floating_addresses': None}
        if multi_host:
            # Floating addresses only ride along for multi-host networks.
            expected['floating_addresses'] = ['fake_float1', 'fake_float2']
        return fake_instance, fake_migration, expected

    def test_migrate_instance_start_with_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_start', True, info)
        # On start, work happens on the source host.
        expected['host'] = 'fake_compute_source'
        self.network_api.migrate_instance_start(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_migrate_instance_start_without_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_start', False, info)
        self.network_api.migrate_instance_start(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_migrate_instance_finish_with_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_finish', True, info)
        # On finish, work happens on the destination host.
        expected['host'] = 'fake_compute_dest'
        self.network_api.migrate_instance_finish(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_migrate_instance_finish_without_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_finish', False, info)
        self.network_api.migrate_instance_finish(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_is_multi_host_instance_has_no_fixed_ip(self):
        # NOTE(review): 'instance_uuid=uuid' passes the uuid *module*; it
        # only feeds the exception's message, so this appears harmless —
        # confirm before relying on it.
        with mock.patch.object(self.network_api.db, 'fixed_ip_get_by_instance',
                               side_effect=exception.FixedIpNotFoundForInstance(
                                   instance_uuid=uuid)):
            instance = objects.Instance(uuid=FAKE_UUID)
            result, floats = (
                self.network_api._get_multi_addresses(self.context, instance))
            self.assertFalse(result)

    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
                                                      fip_get):
        network = objects.Network(
            id=123, project_id=None,
            multi_host=is_multi_host)
        fip_get.return_value = [
            objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
                            floating_ips=objects.FloatingIPList())]
        instance = objects.Instance(uuid=FAKE_UUID)
        result, floats = self.network_api._get_multi_addresses(self.context,
                                                               instance)
        self.assertEqual(is_multi_host, result)

    def test_is_multi_host_network_has_no_project_id_multi(self):
        self._test_is_multi_host_network_has_no_project_id(True)

    def test_is_multi_host_network_has_no_project_id_non_multi(self):
        self._test_is_multi_host_network_has_no_project_id(False)

    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    def _test_is_multi_host_network_has_project_id(self, is_multi_host,
                                                   fip_get):
        network = objects.Network(
            id=123, project_id=self.context.project_id,
            multi_host=is_multi_host)
        fip_get.return_value = [
            objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
                            floating_ips=objects.FloatingIPList())]
        instance = objects.Instance(uuid=FAKE_UUID)
        result, floats = self.network_api._get_multi_addresses(self.context,
                                                               instance)
        self.assertEqual(is_multi_host, result)

    def test_is_multi_host_network_has_project_id_multi(self):
        self._test_is_multi_host_network_has_project_id(True)

    def test_is_multi_host_network_has_project_id_non_multi(self):
        self._test_is_multi_host_network_has_project_id(False)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate_project(self, mock_disassociate, mock_get):
        # associate(project=None) is treated as a project disassociation.
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        self.network_api.associate(self.context, FAKE_UUID, project=None)
        mock_disassociate.assert_called_once_with(self.context, net_obj.id,
                                                  host=False, project=True)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate_host(self, mock_disassociate, mock_get):
        # associate(host=None) is treated as a host disassociation.
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        self.network_api.associate(self.context, FAKE_UUID, host=None)
        mock_disassociate.assert_called_once_with(self.context, net_obj.id,
                                                  host=True, project=False)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.associate')
    def test_network_associate_project(self, mock_associate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        project = mock.sentinel.project
        self.network_api.associate(self.context, FAKE_UUID, project=project)
        mock_associate.assert_called_once_with(self.context, project,
                                               network_id=net_obj.id,
                                               force=True)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.save')
    def test_network_associate_host(self, mock_save, mock_get):
        # Host association sets the attribute and persists the network.
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        host = str(mock.sentinel.host)
        self.network_api.associate(self.context, FAKE_UUID, host=host)
        mock_save.assert_called_once_with()
        self.assertEqual(host, net_obj.host)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate(self, mock_disassociate, mock_get):
        mock_get.return_value = objects.Network(context=self.context, id=123)
        self.network_api.disassociate(self.context, FAKE_UUID)
        mock_disassociate.assert_called_once_with(self.context, 123,
                                                  project=True, host=True)

    def _test_refresh_cache(self, method, *args, **kwargs):
        # This test verifies that no call to get_instance_nw_info() is made
        # from the @refresh_cache decorator for the tested method.
        with test.nested(
            mock.patch.object(self.network_api.network_rpcapi, method),
            mock.patch.object(self.network_api.network_rpcapi,
                              'get_instance_nw_info'),
            mock.patch.object(network_model.NetworkInfo, 'hydrate'),
            mock.patch.object(objects.InstanceInfoCache, 'save'),
        ) as (
            method_mock, nwinfo_mock, hydrate_mock, save_mock
        ):
            nw_info = network_model.NetworkInfo([])
            method_mock.return_value = nw_info
            hydrate_mock.return_value = nw_info
            getattr(self.network_api, method)(*args, **kwargs)
            hydrate_mock.assert_called_once_with(nw_info)
            self.assertFalse(nwinfo_mock.called)

    def test_allocate_for_instance_refresh_cache(self):
        instance = fake_instance.fake_instance_obj(self.context)
        vpn = 'fake-vpn'
        requested_networks = 'fake-networks'
        self._test_refresh_cache('allocate_for_instance', self.context,
                                 instance, vpn, requested_networks)

    def test_add_fixed_ip_to_instance_refresh_cache(self):
        instance = fake_instance.fake_instance_obj(self.context)
        network_id = 'fake-network-id'
        self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
                                 instance, network_id)

    def test_remove_fixed_ip_from_instance_refresh_cache(self):
        instance = fake_instance.fake_instance_obj(self.context)
        address = 'fake-address'
        self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
                                 instance, address)

    @mock.patch('nova.db.fixed_ip_get_by_address')
    def test_get_fixed_ip_by_address(self, fip_get):
        # The raw DB dict must come back wrapped in a FixedIP object.
        fip_get.return_value = test_fixed_ip.fake_fixed_ip
        fip = self.network_api.get_fixed_ip_by_address(self.context,
                                                       'fake-addr')
        self.assertIsInstance(fip, objects.FixedIP)

    @mock.patch('nova.objects.FixedIP.get_by_id')
    def test_get_fixed_ip(self, mock_get_by_id):
        mock_get_by_id.return_value = mock.sentinel.fixed_ip
        self.assertEqual(mock.sentinel.fixed_ip,
                         self.network_api.get_fixed_ip(self.context,
                                                       mock.sentinel.id))
        mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)

    @mock.patch('nova.objects.FixedIP.get_by_floating_address')
    def test_get_instance_by_floating_address(self, mock_get_by_floating):
        mock_get_by_floating.return_value = objects.FixedIP(
            instance_uuid = uuids.instance)
        self.assertEqual(uuids.instance,
                         self.network_api.get_instance_id_by_floating_address(
                             self.context, mock.sentinel.floating))
        mock_get_by_floating.assert_called_once_with(self.context,
                                                     mock.sentinel.floating)

    @mock.patch('nova.objects.FixedIP.get_by_floating_address')
    def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
        # Unassociated floating address maps to no instance at all.
        mock_get_by_floating.return_value = None
        self.assertIsNone(
            self.network_api.get_instance_id_by_floating_address(
                self.context, mock.sentinel.floating))
        mock_get_by_floating.assert_called_once_with(self.context,
                                                     mock.sentinel.floating)

    @mock.patch('nova.network.api.API.migrate_instance_start')
    def test_cleanup_instance_network_on_host(self, fake_migrate_start):
        # Cleanup is implemented as a migrate-start with no destination.
        instance = fake_instance.fake_instance_obj(self.context)
        self.network_api.cleanup_instance_network_on_host(
            self.context, instance, 'fake_compute_source')
        fake_migrate_start.assert_called_once_with(
            self.context, instance,
            {'source_compute': 'fake_compute_source', 'dest_compute': None})

    @mock.patch('nova.network.api.API.migrate_instance_finish')
    def test_setup_instance_network_on_host(self, fake_migrate_finish):
        # Setup is implemented as a migrate-finish with no source.
        instance = fake_instance.fake_instance_obj(self.context)
        self.network_api.setup_instance_network_on_host(
            self.context, instance, 'fake_compute_source')
        fake_migrate_finish.assert_called_once_with(
            self.context, instance,
            {'source_compute': None, 'dest_compute': 'fake_compute_source'})

    @mock.patch('oslo_concurrency.lockutils.lock')
    @mock.patch.object(api.API, '_get_instance_nw_info')
    @mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
    def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock):
        fake_result = mock.sentinel.get_nw_info_result
        mock_get.return_value = fake_result
        instance = fake_instance.fake_instance_obj(self.context)
        result = self.network_api.get_instance_nw_info(self.context, instance)
        mock_get.assert_called_once_with(self.context, instance)
        # The freshly fetched nw_info must be pushed into the cache, but
        # without propagating to cells.
        mock_update.assert_called_once_with(self.network_api, self.context,
                                            instance, nw_info=fake_result,
                                            update_cells=False)
        self.assertEqual(fake_result, result)
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.instance_info_cache_update', return_value=fake_info_cache)
class TestUpdateInstanceCache(test.NoDBTestCase):
    """Tests for base_api.update_instance_cache_with_nw_info and the
    @refresh_cache decorator built on top of it."""

    def setUp(self):
        super(TestUpdateInstanceCache, self).setUp()
        self.context = context.get_admin_context()
        self.instance = objects.Instance(uuid=FAKE_UUID)
        vifs = [network_model.VIF(id='super_vif')]
        self.nw_info = network_model.NetworkInfo(vifs)
        # JSON form of nw_info as it is stored in the info cache.
        # NOTE(review): 'self' (the test case) is passed as the obj
        # argument of to_primitive — presumably ignored by the field;
        # confirm against nova.objects.fields.NetworkModel.
        self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
                                                        self.nw_info)

    def test_update_nw_info_none(self, db_mock, api_mock):
        # nw_info=None forces a fresh _get_instance_nw_info() fetch.
        api_mock._get_instance_nw_info.return_value = self.nw_info
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance, None)
        api_mock._get_instance_nw_info.assert_called_once_with(self.context,
                                                               self.instance)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': self.nw_json})

    def test_update_nw_info_one_network(self, db_mock, api_mock):
        # A supplied nw_info is cached as-is, with no refetch.
        api_mock._get_instance_nw_info.return_value = self.nw_info
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance, self.nw_info)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': self.nw_json})

    def test_update_nw_info_empty_list(self, db_mock, api_mock):
        # An empty NetworkInfo is still an explicit value: cache '[]'.
        api_mock._get_instance_nw_info.return_value = self.nw_info
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance,
                                                    network_model.NetworkInfo([]))
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': '[]'})

    def test_decorator_return_object(self, db_mock, api_mock):
        # refresh_cache reuses the decorated function's return value.
        @base_api.refresh_cache
        def func(self, context, instance):
            return network_model.NetworkInfo([])
        func(api_mock, self.context, self.instance)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': '[]'})

    def test_decorator_return_none(self, db_mock, api_mock):
        # A None return makes refresh_cache fetch nw_info itself.
        @base_api.refresh_cache
        def func(self, context, instance):
            pass
        api_mock._get_instance_nw_info.return_value = self.nw_info
        func(api_mock, self.context, self.instance)
        api_mock._get_instance_nw_info.assert_called_once_with(self.context,
                                                               self.instance)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': self.nw_json})
class NetworkHooksTestCase(test.BaseHookTestCase):
    """Verify the network-info hook is wired up to the cache updater."""

    def test_instance_network_info_hook(self):
        expected_target = base_api.update_instance_cache_with_nw_info
        self.assert_has_hook('instance_network_info', expected_target)
| |
"""
@package mi.instrument.um.thsph.thsph.test.test_driver
@file marine-integrations/mi/instrument/um/thsph/ooicore/driver.py
@author Richard Han
@brief Test cases for thsph driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Richard Han'
__license__ = 'Apache 2.0'
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey, DriverStartupConfigKey
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import AgentCapabilityType
from mi.core.exceptions import SampleException
from mi.core.instrument.chunker import StringChunker
from mi.instrument.um.thsph.ooicore.driver import InstrumentDriver, THSPHDataParticleKey, THSPHParticle
from mi.instrument.um.thsph.ooicore.driver import DataParticleType
from mi.instrument.um.thsph.ooicore.driver import Command
from mi.instrument.um.thsph.ooicore.driver import ProtocolState
from mi.instrument.um.thsph.ooicore.driver import ProtocolEvent
from mi.instrument.um.thsph.ooicore.driver import Capability
from mi.instrument.um.thsph.ooicore.driver import Parameter
from mi.instrument.um.thsph.ooicore.driver import THSPHProtocol
from mi.instrument.um.thsph.ooicore.driver import Prompt
from mi.instrument.um.thsph.ooicore.driver import NEWLINE
from mi.core.instrument.instrument_driver import ResourceAgentState
###
# Driver parameters for the tests
###
# Register the driver under test and its startup configuration with the
# IDK test harness; every test class below inherits this configuration.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.um.thsph.ooicore.driver',
    driver_class="InstrumentDriver",

    instrument_agent_resource_id='WHSSRV',
    instrument_agent_name='um_thsph_ooicore',
    instrument_agent_packet_config=DataParticleType(),

    driver_startup_config={
        DriverStartupConfigKey.PARAMETERS: {
            # Autosample interval (seconds) and instrument series applied
            # at driver startup.
            Parameter.INTERVAL: 6,
            Parameter.INSTRUMENT_SERIES: 'A',
        }
    }
)
# Timeout (seconds) for the agent go-active transition.
GO_ACTIVE_TIMEOUT = 180
# Values used by the set-parameter tests below.
TEST_POLLED_INTERVAL = 12
TEST_INSTRUMENT_SERIES = 'A'
# Out-of-range interval expected to be rejected by the driver.
TEST_INVALID_POLLED_INTERVAL = 601
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
# Expected instrument reply to a communication-test command.
COMM_TEST_RESPONSE = "AP*" + NEWLINE
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class THSPHMixinSub(DriverTestMixin):
    """
    Mixin class used for storing data particle constants and common data
    assertion methods.
    """
    # NOTE: the docstring above was previously placed after this
    # assignment, making it a no-op string expression rather than the
    # class docstring; it must be the first statement in the class body.
    InstrumentDriver = InstrumentDriver

    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    # Malformed instrument output used to exercise the failure path.
    INVALID_SAMPLE_01 = "This is an invalid sample; it had better cause an exception."
    INVALID_SAMPLE_02 = "GG200A200720DE20AA10883FFF2211225E?"
    # Well-formed 'aH...#' samples whose hex fields match the expected
    # particle values in _sample_parameters / _sample_parameters_2.
    VALID_SAMPLE_01 = "aH200A200720DE20AA10883FFF2211225E#"
    VALID_SAMPLE_02 = "aH200A200720E120AB108A3FFF21FF2420#"

    ###
    # Parameter and Type Definitions
    ###
    _driver_parameters = {
        # Parameters defined in the IOS
        Parameter.INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
        Parameter.INSTRUMENT_SERIES: {TYPE: str, READONLY: False, DA: False, STARTUP: True},
    }

    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE]},
        Capability.GET: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.SET: {STATES: [ProtocolState.COMMAND]},
    }

    # Expected particle field values decoded from VALID_SAMPLE_01.
    _sample_parameters = {
        THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_1: {TYPE: int, VALUE: 8202, REQUIRED: True},
        THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_2: {TYPE: int, VALUE: 8199, REQUIRED: True},
        THSPHDataParticleKey.H2_ELECTRODE: {TYPE: int, VALUE: 8414, REQUIRED: True},
        THSPHDataParticleKey.S2_ELECTRODE: {TYPE: int, VALUE: 8362, REQUIRED: True},
        THSPHDataParticleKey.THERMOCOUPLE1: {TYPE: int, VALUE: 4232, REQUIRED: True},
        THSPHDataParticleKey.THERMOCOUPLE2: {TYPE: int, VALUE: 16383, REQUIRED: True},
        THSPHDataParticleKey.REFERENCE_THERMISTOR: {TYPE: int, VALUE: 8721, REQUIRED: True},
        THSPHDataParticleKey.BOARD_THERMISTOR: {TYPE: int, VALUE: 8798, REQUIRED: True},
    }

    # Expected particle field values decoded from VALID_SAMPLE_02.
    _sample_parameters_2 = {
        THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_1: {TYPE: int, VALUE: 8202, REQUIRED: True},
        THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_2: {TYPE: int, VALUE: 8199, REQUIRED: True},
        THSPHDataParticleKey.H2_ELECTRODE: {TYPE: int, VALUE: 8417, REQUIRED: True},
        THSPHDataParticleKey.S2_ELECTRODE: {TYPE: int, VALUE: 8363, REQUIRED: True},
        THSPHDataParticleKey.THERMOCOUPLE1: {TYPE: int, VALUE: 4234, REQUIRED: True},
        THSPHDataParticleKey.THERMOCOUPLE2: {TYPE: int, VALUE: 16383, REQUIRED: True},
        THSPHDataParticleKey.REFERENCE_THERMISTOR: {TYPE: int, VALUE: 8703, REQUIRED: True},
        THSPHDataParticleKey.BOARD_THERMISTOR: {TYPE: int, VALUE: 9248, REQUIRED: True},
    }

    def assert_particle_sample(self, data_particle, verify_values=False):
        """
        Verify sample particle
        @param data_particle: THSPHDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(THSPHDataParticleKey, self._sample_parameters)
        self.assert_data_particle_header(data_particle, DataParticleType.THSPH_PARSED,
                                         require_instrument_timestamp=False)
        self.assert_data_particle_parameters(data_particle, self._sample_parameters, verify_values)

    def assert_particle_sample2(self, data_particle, verify_values=False):
        """
        Verify sample particle
        @param data_particle: THSPHDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(THSPHDataParticleKey, self._sample_parameters_2)
        self.assert_data_particle_header(data_particle, DataParticleType.THSPH_PARSED,
                                         require_instrument_timestamp=False)
        self.assert_data_particle_parameters(data_particle, self._sample_parameters_2, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, THSPHMixinSub):
    """Unit tests for the THSPH driver: enums, chunker, particles, FSM."""

    def setUp(self):
        InstrumentDriverUnitTestCase.setUp(self)

    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
        """
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(Command())

        # Test capabilities for duplicates, then verify that capabilities is a subset of protocol events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        driver = self.InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(THSPHProtocol.sieve_function)

        self.assert_chunker_sample(chunker, self.VALID_SAMPLE_01)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_SAMPLE_01)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_SAMPLE_01)
        self.assert_chunker_combined_sample(chunker, self.VALID_SAMPLE_01)

        self.assert_chunker_sample(chunker, self.VALID_SAMPLE_02)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_SAMPLE_02)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_SAMPLE_02)
        self.assert_chunker_combined_sample(chunker, self.VALID_SAMPLE_02)

    def test_corrupt_data_sample(self):
        """
        Verify malformed samples raise SampleException when generated.
        """
        for particle in (THSPHParticle(self.INVALID_SAMPLE_01),
                         THSPHParticle(self.INVALID_SAMPLE_02)):
            with self.assertRaises(SampleException):
                particle.generate()

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent.
        # Use self.InstrumentDriver (from the mixin) for consistency with
        # test_driver_schema and test_capabilities.
        driver = self.InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)

        self.assert_raw_particle_published(driver, True)

        # Start validating data particles
        self.assert_particle_published(driver, self.VALID_SAMPLE_01, self.assert_particle_sample, True)
        self.assert_particle_published(driver, self.VALID_SAMPLE_02, self.assert_particle_sample2, True)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        mock_callback = Mock()
        protocol = THSPHProtocol(Prompt, NEWLINE, mock_callback)
        driver_capabilities = Capability.list()
        test_capabilities = Capability.list()

        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")

        # Verify "BOGUS_CAPABILITY was filtered out
        # (assertEquals is a deprecated alias; use assertEqual)
        self.assertEqual(sorted(driver_capabilities),
                         sorted(protocol._filter_capabilities(test_capabilities)))

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected. All states defined in this dict must
        also be defined in the protocol FSM.
        """
        capabilities = {
            ProtocolState.COMMAND: [ProtocolEvent.GET,
                                    ProtocolEvent.SET,
                                    ProtocolEvent.START_DIRECT,
                                    ProtocolEvent.START_AUTOSAMPLE,
                                    ProtocolEvent.ACQUIRE_SAMPLE],
            ProtocolState.AUTOSAMPLE: [ProtocolEvent.STOP_AUTOSAMPLE,
                                       ProtocolEvent.SCHEDULE_ACQUIRE_SAMPLE],
            ProtocolState.DIRECT_ACCESS: [ProtocolEvent.STOP_DIRECT,
                                          ProtocolEvent.EXECUTE_DIRECT],
            ProtocolState.UNKNOWN: [ProtocolEvent.DISCOVER]
        }

        driver = self.InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, THSPHMixinSub):
    """Integration tests: exercise the driver directly via zeromq calls."""

    def setUp(self):
        InstrumentDriverIntegrationTestCase.setUp(self)

    def test_connection(self):
        # Driver can be brought up into an initialized, connected state.
        self.assert_initialize_driver()

    def test_get(self):
        # Both configurable parameters are readable.
        self.assert_initialize_driver()
        self.assert_get(Parameter.INTERVAL)
        self.assert_get(Parameter.INSTRUMENT_SERIES)

    def test_set(self):
        """
        Test all set commands. Verify all exception cases.
        """
        self.assert_initialize_driver()
        self.assert_set(Parameter.INTERVAL, TEST_POLLED_INTERVAL)
        self.assert_set(Parameter.INSTRUMENT_SERIES, TEST_INSTRUMENT_SERIES)
        # An invalid interval must be rejected with an exception.
        self.assert_set_exception(Parameter.INTERVAL, TEST_INVALID_POLLED_INTERVAL)

    def test_data_on(self):
        """
        @brief Test for acquiring a single (polled) sample particle.
        """
        self.assert_initialize_driver()
        self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE,
                                        DataParticleType.THSPH_PARSED,
                                        self.assert_particle_sample,
                                        delay=15)

    def test_autosample_on(self):
        """
        @brief Test for turning autosample on and streaming particles.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        # Expect at least two parsed particles to stream within the timeout.
        self.assert_async_particle_generation(DataParticleType.THSPH_PARSED,
                                              self.assert_particle_sample,
                                              particle_count=2,
                                              timeout=20)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_direct_access(self):
        """
        Verify we can enter the direct access state
        """
        self.assert_initialize_driver(ProtocolState.COMMAND)
        self.assert_state_change(ProtocolState.COMMAND, 5)
        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.START_DIRECT)
        self.assert_state_change(ProtocolState.DIRECT_ACCESS, 5)
###############################################################################
#                            QUALIFICATION TESTS                              #
# Device-specific qualification tests are for doing final testing of ion      #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete                #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, THSPHMixinSub):
    """Qualification tests: exercise the driver through the instrument agent."""

    def setUp(self):
        InstrumentDriverQualificationTestCase.setUp(self)

    def test_direct_access_telnet_mode(self):
        """
        @brief This test manually tests that the Instrument Driver properly supports direct access to the physical
        instrument. (telnet mode)
        """
        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)
        # Send a known command series straight to the instrument and verify
        # the expected response comes back over the telnet session.
        self.tcp_client.send_data(THSPHProtocol.COMM_TEST_SERIES_A + NEWLINE)
        self.tcp_client.expect(COMM_TEST_RESPONSE)
        self.assert_direct_access_stop_telnet()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 10)

    def test_sample_particles(self):
        # Autosample must publish the expected parsed particle type.
        self.assert_sample_autosample(self.assert_particle_sample, DataParticleType.THSPH_PARSED)

    def test_get_capabilities(self):
        """
        @brief Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        """
        self.assert_enter_command_mode()
        ##################
        # Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.GET,
                ProtocolEvent.SET,
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.ACQUIRE_SAMPLE,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_capabilities(capabilities)
        ##################
        # Streaming Mode
        ##################
        # While streaming, only STOP_AUTOSAMPLE should be offered.
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.STOP_AUTOSAMPLE
        ]
        self.assert_start_autosample()
        self.assert_capabilities(capabilities)
        self.assert_stop_autosample()

    def test_discover(self):
        """
        over-ridden because instrument doesn't actually have a autosample mode and therefore
        driver will always go to command mode during the discover process after a reset.
        """
        # Verify the agent is in command mode
        self.assert_enter_command_mode()
        # Now reset and try to discover. This will stop the driver and cause it to re-discover which
        # will always go back to command for this instrument
        self.assert_reset()
        self.assert_discover(ResourceAgentState.COMMAND)
        # Repeat starting from autosample: discovery still lands in COMMAND.
        self.assert_enter_command_mode()
        self.assert_start_autosample()
        self.assert_reset()
        self.assert_discover(ResourceAgentState.COMMAND)

    def test_get_set_parameters(self):
        """
        verify that all parameters can be get set properly, this includes
        ensuring that read only parameters fail on set.
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.INTERVAL, TEST_POLLED_INTERVAL, verify=True)
        self.assert_set_parameter(Parameter.INSTRUMENT_SERIES, TEST_INSTRUMENT_SERIES, verify=True)
| |
# -*- coding: utf-8 -*-
'''Base TestCase class for OSF unittests. Uses a temporary MongoDB database.'''
import os
import re
import shutil
import logging
import unittest
import functools
import datetime as dt
from flask import g
from json import dumps
import blinker
import httpretty
from webtest_plus import TestApp
from webtest.utils import NoDefault
import mock
from faker import Factory
from nose.tools import * # noqa (PEP8 asserts)
from pymongo.errors import OperationFailure
from modularodm import storage
from api.base.wsgi import application as django_app
from framework.mongo import set_up_storage
from framework.auth import User
from framework.sessions.model import Session
from framework.guid.model import Guid
from framework.mongo import client as client_proxy
from framework.mongo import database as database_proxy
from framework.transactions import commands, messages, utils
from website.project.model import (
Node, NodeLog, Tag, WatchConfig,
)
from website import settings
from website.addons.wiki.model import NodeWikiPage
import website.models
from website.signals import ALL_SIGNALS
from website.project.signals import contributor_added
from website.app import init_app
from website.addons.base import AddonConfig
from website.project.views.contributor import notify_added_contributor
# App with routes wired up but no backends (set_backends=False); storage
# backends are set up per test class by DbTestCase instead.
test_app = init_app(
    settings_module='website.settings', routes=True, set_backends=False,
)
test_app.testing = True
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
    'factory.generate',
    'factory.containers',
    'website.search.elastic_search',
    'framework.auth.core',
    'website.mails',
    'website.search_migration.migrate',
    'website.util.paths',
]
for logger_name in SILENT_LOGGERS:
    logging.getLogger(logger_name).setLevel(logging.CRITICAL)
# Fake factory used to generate test data (names, emails, ...).
fake = Factory.create()
# All Models
MODELS = (User, Node, NodeLog, NodeWikiPage,
          Tag, WatchConfig, Session, Guid)
def teardown_database(client=None, database=None):
    """Roll back any pending transaction on *database*, then drop it.

    Falls back to the module-level client/database proxies when the
    arguments are not supplied.
    """
    client = client or client_proxy
    database = database or database_proxy
    try:
        commands.rollback(database)
    except OperationFailure as error:
        # "No transaction in progress" just means there is nothing to roll
        # back; anything else is a real failure and must propagate.
        if messages.NO_TRANSACTION_ERROR not in utils.get_error_message(error):
            raise
    client.drop_database(database)
class DbTestCase(unittest.TestCase):
    """Base `TestCase` for tests that require a scratch database.
    """
    DB_NAME = getattr(settings, 'TEST_DB_NAME', 'osf_test')
    # dict of addons to inject into the app.
    ADDONS_UNDER_TEST = {}
    # format: {
    #     <addon shortname>: {
    #         'user_settings': <AddonUserSettingsBase instance>,
    #         'node_settings': <AddonNodeSettingsBase instance>,
    #     },
    # }
    # list of AddonConfig instances of injected addons.
    # NOTE(review): due to name mangling this list lives on DbTestCase itself
    # and is therefore shared by every subclass — confirm that's intended.
    __ADDONS_UNDER_TEST = []
    @classmethod
    def setUpClass(cls):
        super(DbTestCase, cls).setUpClass()
        # Inject each requested mock addon before storage is set up.
        for (short_name, options) in cls.ADDONS_UNDER_TEST.iteritems():
            cls.__ADDONS_UNDER_TEST.append(
                init_mock_addon(short_name, **options)
            )
        # Point the app at the scratch DB and cheapen expensive settings
        # (no Piwik, no subscription emails, minimal bcrypt rounds);
        # originals are saved so tearDownClass can restore them.
        cls._original_db_name = settings.DB_NAME
        settings.DB_NAME = cls.DB_NAME
        cls._original_piwik_host = settings.PIWIK_HOST
        settings.PIWIK_HOST = None
        cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
        cls._original_bcrypt_log_rounds = settings.BCRYPT_LOG_ROUNDS
        settings.BCRYPT_LOG_ROUNDS = 1
        # Start every class from an empty database.
        teardown_database(database=database_proxy._get_current_object())
        # TODO: With `database` as a `LocalProxy`, we should be able to simplify
        # this logic
        set_up_storage(
            website.models.MODELS,
            storage.MongoStorage,
            addons=settings.ADDONS_AVAILABLE,
        )
        cls.db = database_proxy
    @classmethod
    def tearDownClass(cls):
        super(DbTestCase, cls).tearDownClass()
        # Remove injected addons, drop the scratch DB, and restore every
        # setting changed in setUpClass.
        for addon in cls.__ADDONS_UNDER_TEST:
            remove_mock_addon(addon)
        teardown_database(database=database_proxy._get_current_object())
        settings.DB_NAME = cls._original_db_name
        settings.PIWIK_HOST = cls._original_piwik_host
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
        settings.BCRYPT_LOG_ROUNDS = cls._original_bcrypt_log_rounds
class AppTestCase(unittest.TestCase):
    """Base `TestCase` for OSF tests that require the WSGI app (but no database).
    """
    DISCONNECTED_SIGNALS = {
        # disconnect notify_add_contributor so that add_contributor does not send "fake" emails in tests
        contributor_added: [notify_added_contributor]
    }

    def setUp(self):
        super(AppTestCase, self).setUp()
        self.app = TestApp(test_app)
        self.context = test_app.test_request_context()
        self.context.push()
        with self.context:
            g._celery_tasks = []
        # Detach the noisy receivers for the duration of the test.
        for signal, receivers in self.DISCONNECTED_SIGNALS.items():
            for receiver in receivers:
                signal.disconnect(receiver)

    def tearDown(self):
        super(AppTestCase, self).tearDown()
        with mock.patch('website.mailchimp_utils.get_mailchimp_api'):
            self.context.pop()
        # Reattach everything disconnected in setUp.
        for signal, receivers in self.DISCONNECTED_SIGNALS.items():
            for receiver in receivers:
                signal.connect(receiver)
class ApiAppTestCase(unittest.TestCase):
    """Base `TestCase` for OSF API tests that require the WSGI app (but no database).
    """
    def setUp(self):
        super(ApiAppTestCase, self).setUp()
        # JSON-API-aware test client wrapped around the Django WSGI app.
        self.app = TestAppJSONAPI(django_app)
class TestAppJSONAPI(TestApp):
    """
    Extends TestApp to add json_api_methods(post, put, patch, and delete)
    which put content_type 'application/vnd.api+json' in header. Adheres to
    JSON API spec.
    """
    def __init__(self, app, *args, **kwargs):
        super(TestAppJSONAPI, self).__init__(app, *args, **kwargs)
        # No credentials by default; tests assign these for authed requests.
        self.auth = None
        self.auth_type = 'basic'
    def json_api_method(method):
        # Factory run at class-definition time: builds a request helper
        # (e.g. `post_json_api`) that JSON-serializes `params` and sends the
        # JSON API content type.
        def wrapper(self, url, params=NoDefault, **kw):
            content_type = 'application/vnd.api+json'
            if params is not NoDefault:
                # self.JSONEncoder comes from the TestApp base class
                # (presumably webtest_plus — confirm).
                params = dumps(params, cls=self.JSONEncoder)
            kw.update(
                params=params,
                content_type=content_type,
                upload_files=None,
            )
            return self._gen_request(method, url, **kw)
        subst = dict(lmethod=method.lower(), method=method)
        # Give the helper a readable name for tracebacks, e.g. 'post_json_api'.
        wrapper.__name__ = str('%(lmethod)s_json_api' % subst)
        return wrapper
    post_json_api = json_api_method('POST')
    put_json_api = json_api_method('PUT')
    patch_json_api = json_api_method('PATCH')
    delete_json_api = json_api_method('DELETE')
class UploadTestCase(unittest.TestCase):
    """Redirects ``settings.UPLOADS_PATH`` to a temp directory for the class."""
    @classmethod
    def setUpClass(cls):
        """Store uploads in temp directory.
        """
        super(UploadTestCase, cls).setUpClass()
        cls._old_uploads_path = settings.UPLOADS_PATH
        cls._uploads_path = os.path.join('/tmp', 'osf', 'uploads')
        try:
            os.makedirs(cls._uploads_path)
        except OSError:  # Path already exists
            pass
        settings.UPLOADS_PATH = cls._uploads_path
    @classmethod
    def tearDownClass(cls):
        """Restore uploads path.
        """
        super(UploadTestCase, cls).tearDownClass()
        # Delete everything written during the tests, then restore.
        shutil.rmtree(cls._uploads_path)
        settings.UPLOADS_PATH = cls._old_uploads_path
# All HTTP verbs httpretty can intercept; MockRequestTestCase registers a
# catch-all mock for each of them.
methods = [
    httpretty.GET,
    httpretty.PUT,
    httpretty.HEAD,
    httpretty.POST,
    httpretty.PATCH,
    httpretty.DELETE,
]
def kill(*args, **kwargs):
    # Catch-all response body: any request not explicitly mocked by a test
    # raises instead of silently hitting the real network.
    raise httpretty.errors.UnmockedError
class MockRequestTestCase(unittest.TestCase):
    """Fails any test that makes an un-mocked outbound HTTP request."""
    @classmethod
    def setUpClass(cls):
        super(MockRequestTestCase, cls).setUpClass()
        httpretty.enable()
        for method in methods:
            # priority=-1 makes this a fallback: test-specific mocks win.
            httpretty.register_uri(
                method,
                re.compile(r'.*'),
                body=kill,
                priority=-1,
            )
    def tearDown(self):
        super(MockRequestTestCase, self).tearDown()
        # NOTE(review): reset() also clears the catch-all registrations made
        # in setUpClass — confirm this per-test reset is intended.
        httpretty.reset()
    @classmethod
    def tearDownClass(cls):
        super(MockRequestTestCase, cls).tearDownClass()
        httpretty.reset()
        httpretty.disable()
class OsfTestCase(DbTestCase, AppTestCase, UploadTestCase, MockRequestTestCase):
    """Base `TestCase` for tests that require both scratch databases and the OSF
    application. Note: subclasses must call `super` in order for all setup and
    teardown methods to be called correctly.
    """
    pass
class ApiTestCase(DbTestCase, ApiAppTestCase, UploadTestCase, MockRequestTestCase):
    """Base `TestCase` for tests that require both scratch databases and the OSF
    API application. Note: subclasses must call `super` in order for all setup and
    teardown methods to be called correctly.
    """
    def setUp(self):
        super(ApiTestCase, self).setUp()
        # Never send real email from API tests.
        settings.USE_EMAIL = False
# From Flask-Security: https://github.com/mattupstate/flask-security/blob/develop/flask_security/utils.py
class CaptureSignals(object):
    """Testing utility for capturing blinker signals.

    Context manager which mocks out selected signals and registers which
    are `sent` on and what arguments were sent. Instantiate with a list of
    blinker `NamedSignals` to patch. Each signal has its `send` mocked out.
    """
    def __init__(self, signals):
        """Patch all given signals and make them available as attributes.

        :param signals: list of signals
        """
        # Maps each signal to the list of (args, kwargs) captured for it.
        self._records = {}
        # Maps each signal to the receiver that records its sends.
        self._receivers = {}
        for signal in signals:
            self._records[signal] = []
            self._receivers[signal] = functools.partial(self._record, signal)

    def __getitem__(self, signal):
        """All captured signals are available via `ctxt[signal]`.
        """
        if isinstance(signal, blinker.base.NamedSignal):
            return self._records[signal]
        # Bug fix: the original fell through to
        # `super(CaptureSignals, self).__setitem__(signal)`, which raised a
        # confusing AttributeError (object has no __setitem__, and this is a
        # *get* anyway). Raise a clear TypeError instead.
        raise TypeError('Signal must be a blinker NamedSignal, not %r' % (signal,))

    def _record(self, signal, *args, **kwargs):
        # Receiver connected to each patched signal; stores the call.
        self._records[signal].append((args, kwargs))

    def __enter__(self):
        for signal, receiver in self._receivers.items():
            signal.connect(receiver)
        return self

    def __exit__(self, type, value, traceback):
        for signal, receiver in self._receivers.items():
            signal.disconnect(receiver)

    def signals_sent(self):
        """Return a set of the signals sent.
        :rtype: list of blinker `NamedSignals`.
        """
        return set([signal for signal, _ in self._records.items() if self._records[signal]])
def capture_signals():
    """Factory method that creates a ``CaptureSignals`` with all OSF signals.

    Use as a context manager around code expected to emit signals.
    """
    return CaptureSignals(ALL_SIGNALS)
def assert_is_redirect(response, msg="Response is a redirect."):
    """Assert that *response* carries a 3xx (redirect) status code."""
    status = response.status_code
    assert status >= 300 and status < 400, msg
def assert_before(lst, item1, item2):
    """Assert that item1 appears before item2 in lst.

    Raises ValueError if either item is missing from lst.
    """
    # Bug fix: the failure message previously claimed the items *were* in
    # order; it now describes the actual failure.
    assert_less(lst.index(item1), lst.index(item2),
                '{0!r} does not appear before {1!r}'.format(item1, item2))
def assert_datetime_equal(dt1, dt2, allowance=500):
    """Assert that two datetimes are within *allowance* milliseconds of each other."""
    # Bug fix: compare the *absolute* difference. The original checked only
    # dt1 - dt2 < allowance, which passes for ANY dt1 earlier than dt2
    # (a negative timedelta is always less than a positive one).
    assert_less(abs(dt1 - dt2), dt.timedelta(milliseconds=allowance))
def init_mock_addon(short_name, user_settings=None, node_settings=None):
    """Register a mock addon in the settings so it is ready for app init.

    Used to inject addons into the application context for tests.
    Returns the created ``AddonConfig``.
    """
    # Imported inside the function to avoid a circular import at load time.
    import factories
    if user_settings is None:
        user_settings = factories.MockAddonUserSettings
    if node_settings is None:
        node_settings = factories.MockAddonNodeSettings
    settings.ADDONS_REQUESTED.append(short_name)
    addon_config = AddonConfig(
        short_name=short_name,
        full_name=short_name,
        owners=['User', 'Node'],
        categories=['Storage'],
        user_settings_model=user_settings,
        node_settings_model=node_settings,
        models=[user_settings, node_settings],
    )
    settings.ADDONS_AVAILABLE_DICT[addon_config.short_name] = addon_config
    settings.ADDONS_AVAILABLE.append(addon_config)
    return addon_config
def remove_mock_addon(addon_config):
    """Given an AddonConfig instance, remove that addon from the settings.

    Inverse of ``init_mock_addon``; missing entries are ignored.
    """
    short_name = addon_config.short_name
    settings.ADDONS_AVAILABLE_DICT.pop(short_name, None)
    if addon_config in settings.ADDONS_AVAILABLE:
        settings.ADDONS_AVAILABLE.remove(addon_config)
    if short_name in settings.ADDONS_REQUESTED:
        settings.ADDONS_REQUESTED.remove(short_name)
| |
# -*- coding: utf-8 -*-
"""
Django settings for Congress project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
    """Base settings shared by every environment (see `Local` / `Production`)."""
    ########## APP CONFIGURATION
    DJANGO_APPS = (
        # Default Django apps:
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        # Useful template tags:
        # 'django.contrib.humanize',
        # Admin
        'django.contrib.admin',
    )
    THIRD_PARTY_APPS = (
        'south', # Database migration helpers:
        'crispy_forms', # Form layouts
        'avatar', # for user avatars
        'haystack', # search
    )
    # Apps specific for this project go here.
    LOCAL_APPS = (
        'users', # custom users app
        # Your stuff: custom apps go here
        'conference',
    )
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
    INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
    INSTALLED_APPS += (
        # Needs to come last for now because of a weird edge case between
        # South and allauth
        'allauth', # registration
        'allauth.account', # registration
        'allauth.socialaccount', # registration
    )
    ########## END APP CONFIGURATION
    ########## MIDDLEWARE CONFIGURATION
    MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    )
    ########## END MIDDLEWARE CONFIGURATION
    ########## DEBUG
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
    DEBUG = values.BooleanValue(False)
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
    TEMPLATE_DEBUG = DEBUG
    ########## END DEBUG
    ########## SECRET CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
    # Note: This key only used for development and testing.
    # In production, this is changed to a values.SecretValue() setting
    SECRET_KEY = "+05$p*y!f97!@f4h8mir4^--cuma4)9-3^0*ah&xq(-#^f)+1e"
    ########## END SECRET CONFIGURATION
    ########## FIXTURE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
    FIXTURE_DIRS = (
        join(BASE_DIR, 'fixtures'),
    )
    ########## END FIXTURE CONFIGURATION
    ########## EMAIL CONFIGURATION
    EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
    ########## END EMAIL CONFIGURATION
    ########## MANAGER CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
    ADMINS = (
        ('Ray Besiga', 'raybesiga@gmail.com'),
    )
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
    MANAGERS = ADMINS
    ########## END MANAGER CONFIGURATION
    ########## DATABASE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
    DATABASES = values.DatabaseURLValue('postgres://localhost/congress')
    ########## END DATABASE CONFIGURATION
    ########## CACHING
    # Do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
    # memcacheify is what's used in Production
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': ''
        }
    }
    ########## END CACHING
    ########## GENERAL CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
    TIME_ZONE = 'Africa/Nairobi'
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
    LANGUAGE_CODE = 'en-us'
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
    SITE_ID = 1
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
    USE_I18N = True
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
    USE_L10N = True
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
    USE_TZ = True
    ########## END GENERAL CONFIGURATION
    ########## TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.contrib.auth.context_processors.auth',
        "allauth.account.context_processors.account",
        "allauth.socialaccount.context_processors.socialaccount",
        'django.core.context_processors.debug',
        'django.core.context_processors.i18n',
        'django.core.context_processors.media',
        'django.core.context_processors.static',
        'django.core.context_processors.tz',
        'django.contrib.messages.context_processors.messages',
        'django.core.context_processors.request',
        # Your stuff: custom template context processers go here
    )
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_DIRS = (
        join(BASE_DIR, 'templates'),
    )
    TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )
    # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
    CRISPY_TEMPLATE_PACK = 'bootstrap3'
    ########## END TEMPLATE CONFIGURATION
    ########## STATIC FILE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
    STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = '/static/'
    # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
    STATICFILES_DIRS = (
        join(BASE_DIR, 'static'),
    )
    # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    )
    ########## END STATIC FILE CONFIGURATION
    ########## MEDIA CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
    MEDIA_ROOT = join(BASE_DIR, 'media')
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
    MEDIA_URL = '/media/'
    ########## END MEDIA CONFIGURATION
    ########## URL Configuration
    ROOT_URLCONF = 'config.urls'
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
    WSGI_APPLICATION = 'config.wsgi.application'
    ########## End URL Configuration
    ########## AUTHENTICATION CONFIGURATION
    AUTHENTICATION_BACKENDS = (
        "django.contrib.auth.backends.ModelBackend",
        "allauth.account.auth_backends.AuthenticationBackend",
    )
    # Some really nice defaults
    ACCOUNT_AUTHENTICATION_METHOD = "username"
    ACCOUNT_EMAIL_REQUIRED = True
    ACCOUNT_EMAIL_VERIFICATION = "mandatory"
    ########## END AUTHENTICATION CONFIGURATION
    ########## Custom user app defaults
    # Select the correct user model
    AUTH_USER_MODEL = "users.User"
    LOGIN_REDIRECT_URL = "users:redirect"
    ########## END Custom user app defaults
    ########## SLUGLIFIER
    AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
    ########## END SLUGLIFIER
    ########## HAYSTACK
    # Simple backend: no external search engine required.
    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
        },
    }
    ########## END HAYSTACK
    ########## LOGGING CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
    # A sample logging configuration. The only tangible logging
    # performed by this configuration is to send an email to
    # the site admins on every HTTP 500 error when DEBUG=False.
    # See http://docs.djangoproject.com/en/dev/topics/logging for
    # more details on how to customize your logging configuration.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            }
        },
        'handlers': {
            'mail_admins': {
                'level': 'ERROR',
                'filters': ['require_debug_false'],
                'class': 'django.utils.log.AdminEmailHandler'
            }
        },
        'loggers': {
            'django.request': {
                'handlers': ['mail_admins'],
                'level': 'ERROR',
                'propagate': True,
            },
        }
    }
    ########## END LOGGING CONFIGURATION
    ########## Your common stuff: Below this line define 3rd party library settings
class Local(Common):
    """Development settings: debug on, console email, debug toolbar."""
    ########## DEBUG
    DEBUG = values.BooleanValue(True)
    TEMPLATE_DEBUG = DEBUG
    ########## END DEBUG
    ########## INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    ########## END INSTALLED_APPS
    ########## Mail settings
    EMAIL_HOST = "localhost"
    EMAIL_PORT = 1025
    # Print outgoing mail to the console instead of sending it.
    EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
    ########## End mail settings
    ########## django-debug-toolbar
    MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
    INSTALLED_APPS += ('debug_toolbar',)
    INTERNAL_IPS = ('127.0.0.1',)
    DEBUG_TOOLBAR_CONFIG = {
        'INTERCEPT_REDIRECTS': False,
        'SHOW_TEMPLATE_CONTEXT': True,
    }
    ########## end django-debug-toolbar
    ########## Your local stuff: Below this line define 3rd party library settings
class Production(Common):
    """Production settings: secrets from env, S3 static files, SendGrid email."""
    ########## INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    ########## END INSTALLED_APPS
    ########## SECRET KEY
    # Read from the environment (DJANGO_SECRET_KEY) instead of source.
    SECRET_KEY = values.SecretValue()
    ########## END SECRET KEY
    ########## django-secure
    INSTALLED_APPS += ("djangosecure", )
    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    # NOTE(review): SESSION_COOKIE_SECURE defaults to False even though SSL
    # redirect is enabled below — confirm this is intentional.
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    ########## end django-secure
    ########## SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]
    ########## END SITE CONFIGURATION
    INSTALLED_APPS += ("gunicorn", )
    ########## STORAGE CONFIGURATION
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += (
        'storages',
    )
    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = values.SecretValue()
    AWS_SECRET_ACCESS_KEY = values.SecretValue()
    AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    # see: https://github.com/antonagestam/collectfast
    AWS_PRELOAD_METADATA = True
    INSTALLED_APPS += ("collectfast", )
    # AWS cache settings, don't change unless you know what you're doing:
    # (one week, in seconds; note the attribute name is misspelled upstream)
    AWS_EXPIREY = 60 * 60 * 24 * 7
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
            AWS_EXPIREY)
    }
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    ########## END STORAGE CONFIGURATION
    ########## EMAIL
    DEFAULT_FROM_EMAIL = values.Value(
        'Congress <noreply@congress.ug>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[Congress] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    SERVER_EMAIL = EMAIL_HOST_USER
    ########## END EMAIL
    ########## TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    # Cached loader: templates are compiled once per process.
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    ########## END TEMPLATE CONFIGURATION
    ########## CACHING
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
    try:
        # See: https://github.com/rdegges/django-heroku-memcacheify
        from memcacheify import memcacheify
        CACHES = memcacheify()
    except ImportError:
        CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    ########## END CACHING
    ########## Your production stuff: Below this line define 3rd party library settings
| |
'''
FileChooser
===========
.. versionadded:: 1.0.5
.. versionchanged:: 1.2.0
In the chooser template, the `controller` is not a direct reference anymore
but a weak-reference.
You must update all the notation `root.controller.xxx` to
`root.controller().xxx`.
Simple example
--------------
main.py
.. include:: ../../examples/RST_Editor/main.py
:literal:
editor.kv
.. highlight:: kv
.. include:: ../../examples/RST_Editor/editor.kv
:literal:
'''
__all__ = ('FileChooserListView', 'FileChooserIconView',
'FileChooserController', 'FileChooserProgressBase',
'FileSystemAbstract', 'FileSystemLocal')
from weakref import ref
from time import time
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.utils import platform as core_platform
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import (
StringProperty, ListProperty, BooleanProperty, ObjectProperty,
NumericProperty)
from os import listdir
from os.path import (
basename, join, sep, normpath, expanduser, altsep,
splitdrive, realpath, getsize, isdir)
from fnmatch import fnmatch
import collections
# Module-local alias for the detected platform string.
platform = core_platform
# Unit suffixes for humanized file sizes.
filesize_units = ('B', 'KB', 'MB', 'GB', 'TB')
# True only when win32file imported successfully on Windows.
_have_win32file = False
if platform == 'win':
    # Import that module here as it's not available on non-windows machines.
    # See http://bit.ly/i9klJE except that the attributes are defined in
    # win32file not win32com (bug on page).
    # Note: For some reason this doesn't work after a os.chdir(), no matter to
    # what directory you change from where. Windows weirdness.
    try:
        from win32file import FILE_ATTRIBUTE_HIDDEN, GetFileAttributesEx, error
        _have_win32file = True
    except ImportError:
        Logger.error('filechooser: win32file module is missing')
        Logger.error('filechooser: we cant check if a file is hidden or not')
def alphanumeric_folders_first(files, filesystem):
    '''Return *files* sorted alphabetically, with directories listed first.

    `filesystem` must provide an ``is_dir(fn)`` predicate.
    '''
    directories = sorted(f for f in files if filesystem.is_dir(f))
    regular_files = sorted(f for f in files if not filesystem.is_dir(f))
    return directories + regular_files
class FileSystemAbstract(object):
    '''Class for implementing a File System view that can be used with the
    :class:`FileChooser`.:data:`~FileChooser.file_system`.
    .. versionadded:: 1.8.0
    '''

    def listdir(self, fn):
        '''Return the list of files in the directory `fn`.
        '''
        pass

    def getsize(self, fn):
        '''Return the size in bytes of the file `fn`.
        '''
        pass

    def is_hidden(self, fn):
        '''Return True if the file `fn` is hidden.
        '''
        pass

    def is_dir(self, fn):
        '''Return True if the path `fn` is a directory.
        '''
        pass
class FileSystemLocal(FileSystemAbstract):
    '''Implementation of :class:`FileSystemAbstract` for local files.
    .. versionadded:: 1.8.0
    '''

    def listdir(self, fn):
        return listdir(fn)

    def getsize(self, fn):
        return getsize(fn)

    def is_hidden(self, fn):
        if platform == 'win':
            if not _have_win32file:
                # Without win32file we cannot read attributes, so no file
                # is reported as hidden.
                return False
            try:
                return GetFileAttributesEx(fn)[0] & FILE_ATTRIBUTE_HIDDEN
            except error:
                # Reading attributes can fail when the file is locked by
                # another process; report it as hidden since we probably
                # can't do anything useful with it anyway.
                Logger.exception('unable to access to <%s>' % fn)
                return True
        # On non-Windows platforms, dotfiles are hidden by convention.
        return basename(fn).startswith('.')

    def is_dir(self, fn):
        return isdir(fn)
# Raised when a filename cannot be coerced to unicode (presumably used by
# the FileChooserController below — confirm at the usage site).
class ForceUnicodeError(Exception):
    pass
class FileChooserProgressBase(FloatLayout):
    '''Base for implementing a progress view. This view is used when too many
    entries need to be created and are delayed over multiple frames.
    .. versionadded:: 1.2.0
    '''

    path = StringProperty('')
    '''Current path of the FileChooser, read-only.
    '''

    index = NumericProperty(0)
    '''Current index of :data:`total` entries to be loaded.
    '''

    total = NumericProperty(1)
    '''Total number of entries to load.
    '''

    def cancel(self, *largs):
        '''Cancel any action from the FileChooserController.
        '''
        if self.parent:
            self.parent.cancel()

    def on_touch_down(self, touch):
        # Consume touches that land on the progress view so they don't
        # reach the widgets underneath while entries are loading.
        if self.collide_point(*touch.pos):
            super(FileChooserProgressBase, self).on_touch_down(touch)
            return True

    def on_touch_move(self, touch):
        if self.collide_point(*touch.pos):
            super(FileChooserProgressBase, self).on_touch_move(touch)
            return True

    def on_touch_up(self, touch):
        if self.collide_point(*touch.pos):
            super(FileChooserProgressBase, self).on_touch_up(touch)
            return True
class FileChooserProgress(FileChooserProgressBase):
    '''Default progress view, used as the default value of
    :attr:`FileChooserController.progress_cls`. Its layout presumably comes
    from a kv rule elsewhere — confirm against the style file.
    '''
    pass
class FileChooserController(FloatLayout):
    '''Base for implementing a FileChooser. Don't use this class directly, but
    prefer using an implementation such as the :class:`FileChooserListView` or
    :class:`FileChooserIconView`.

    :Events:
        `on_entry_added`: entry, parent
            Fired when a root-level entry is added to the file list.
        `on_entries_cleared`
            Fired when the entries list is cleared, usually when the
            root is refreshed.
        `on_subentry_to_entry`: entry, parent
            Fired when a sub-entry is added to an existing entry.
        `on_remove_subentry`: entry, parent
            Fired when entries are removed from an entry, usually when
            a node is closed.
        `on_submit`: selection, touch
            Fired when a file has been selected with a double-tap.
    '''

    # Name of the kv template used to build one entry widget; set by concrete
    # subclasses ('FileListEntry', 'FileIconEntry').
    _ENTRY_TEMPLATE = None

    path = StringProperty('/')
    '''
    :class:`~kivy.properties.StringProperty`, defaults to the current working
    directory as a unicode string. It specifies the path on the filesystem that
    this controller should refer to.
    '''

    filters = ListProperty([])
    ''':class:`~kivy.properties.ListProperty`, defaults to [], equal to '\*'.
    Specifies the filters to be applied to the files in the directory.
    The filters are not reset when the path changes. You need to do that
    yourself if desired.

    There are two kinds of filters: patterns and callbacks.

    #. Patterns
        e.g. ['\*.png'].
        You can use the following patterns:

        ========== =================================
        Pattern     Meaning
        ========== =================================
        \*         matches everything
        ?          matches any single character
        [seq]      matches any character in seq
        [!seq]     matches any character not in seq
        ========== =================================

    #. Callbacks
        You can specify a function that will be called for each file. The
        callback will be passed the folder and file name as the first and
        second parameters respectively. It should return True to indicate a
        match and False otherwise.

    .. versionchanged:: 1.4.0
        If the filter is a callable (function or method), it will be called
        with the path and the file name as arguments for each file in the
        directory.
        The callable should return True to indicate a match and False
        otherwise.
    '''

    filter_dirs = BooleanProperty(False)
    '''
    :class:`~kivy.properties.BooleanProperty`, defaults to False.
    Indicates whether filters should also apply to directories.
    '''

    sort_func = ObjectProperty(alphanumeric_folders_first)
    '''
    :class:`~kivy.properties.ObjectProperty`.
    Provides a function to be called with a list of filenames, and the
    filesystem implementation as the second argument.
    Returns a list of filenames sorted for display in the view.

    .. versionchanged:: 1.8.0
        The signature needs now 2 arguments: first the list of files, second
        the filesystem class to use.
    '''

    files = ListProperty([])
    '''
    Read-only :class:`~kivy.properties.ListProperty`.
    The list of files in the directory specified by path after applying the
    filters.
    '''

    show_hidden = BooleanProperty(False)
    '''
    :class:`~kivy.properties.BooleanProperty`, defaults to False.
    Determines whether hidden files and folders should be shown.
    '''

    selection = ListProperty([])
    '''
    Read-only :class:`~kivy.properties.ListProperty`.
    Contains the list of files that are currently selected.
    '''

    multiselect = BooleanProperty(False)
    '''
    :class:`~kivy.properties.BooleanProperty`, defaults to False.
    Determines whether the user is able to select multiple files or not.
    '''

    dirselect = BooleanProperty(False)
    '''
    :class:`~kivy.properties.BooleanProperty`, defaults to False.
    Determines whether directories are valid selections or not.

    .. versionadded:: 1.1.0
    '''

    rootpath = StringProperty(None, allownone=True)
    '''
    Root path to use instead of the system root path. If set, it will not show
    a ".." directory to go up to the root path. For example, if you set
    rootpath to /users/foo, the user will be unable to go to /users or to any
    other directory not starting with /users/foo.

    .. versionadded:: 1.2.0

    :class:`~kivy.properties.StringProperty`, defaults to None.
    '''

    progress_cls = ObjectProperty(FileChooserProgress)
    '''Class to use for displaying a progress indicator for filechooser
    loading.

    .. versionadded:: 1.2.0

    :class:`~kivy.properties.ObjectProperty`, defaults to
    :class:`FileChooserProgress`.
    '''

    file_encodings = ListProperty(['utf-8', 'latin1', 'cp1252'])
    '''Possible encodings for decoding a filename to unicode. In the case that
    the user has a weird filename, undecodable without knowing its
    initial encoding, we have no other choice than to guess it.

    Please note that if you encounter an issue because of a missing encoding
    here, we'll be glad to add it to this list.

    .. versionadded:: 1.3.0

    :class:`~kivy.properties.ListProperty`, defaults to ['utf-8', 'latin1',
    'cp1252']
    '''

    file_system = ObjectProperty(FileSystemLocal(),
                                 baseclass=FileSystemAbstract)
    '''Implementation to access the file system. Must be an instance of
    FileSystemAbstract.

    .. versionadded:: 1.8.0

    :class:`~kivy.properties.ObjectProperty`, defaults to
    :class:`FileSystemLocal()`
    '''

    __events__ = ('on_entry_added', 'on_entries_cleared',
                  'on_subentry_to_entry', 'on_remove_subentry', 'on_submit')

    def __init__(self, **kwargs):
        self._progress = None
        super(FileChooserController, self).__init__(**kwargs)

        # Entry widgets currently displayed; kept to mirror `selection` into
        # each widget's `selected` flag.
        self._items = []
        self.bind(selection=self._update_item_selection)

        # Short path history (last two entries) so cancel() can restore the
        # previous path.
        self._previous_path = [self.path]
        self.bind(path=self._save_previous_path)
        self.bind(path=self._trigger_update,
                  filters=self._trigger_update,
                  rootpath=self._trigger_update)
        self._trigger_update()

    def on_touch_down(self, touch):
        # don't respond to touches outside self
        if not self.collide_point(*touch.pos):
            return
        if self.disabled:
            # swallow the touch entirely while disabled
            return True
        return super(FileChooserController, self).on_touch_down(touch)

    def on_touch_up(self, touch):
        # NOTE(review): unlike on_touch_down, touches outside self are
        # claimed (True) here rather than ignored — confirm this asymmetry
        # is intentional.
        if not self.collide_point(*touch.pos):
            return True
        if self.disabled:
            return True
        return super(FileChooserController, self).on_touch_up(touch)

    def _update_item_selection(self, *args):
        # Reflect the selection list into each entry widget.
        for item in self._items:
            item.selected = item.path in self.selection

    def _save_previous_path(self, instance, value):
        # Normalize the new path; if normalization changed it, re-assign and
        # let this handler run again with the normalized value.
        path = expanduser(value)
        path = realpath(path)
        if path != value:
            self.path = path
            return
        self._previous_path.append(value)
        # Only the last two paths are ever needed (see cancel()).
        self._previous_path = self._previous_path[-2:]

    def _trigger_update(self, *args):
        # Debounce: replace any pending update with a fresh one next frame.
        Clock.unschedule(self._update_files)
        Clock.schedule_once(self._update_files)

    def on_entry_added(self, node, parent=None):
        '''Default handler for the `on_entry_added` event.'''
        pass

    def on_entries_cleared(self):
        '''Default handler for the `on_entries_cleared` event.'''
        pass

    def on_subentry_to_entry(self, subentry, entry):
        '''Default handler for the `on_subentry_to_entry` event.'''
        pass

    def on_remove_subentry(self, subentry, entry):
        '''Default handler for the `on_remove_subentry` event.'''
        pass

    def on_submit(self, selected, touch=None):
        '''Default handler for the `on_submit` event.'''
        pass

    def entry_touched(self, entry, touch):
        '''(internal) This method must be called by the template when an entry
        is touched by the user.
        '''
        if (
            'button' in touch.profile and touch.button in (
                'scrollup', 'scrolldown', 'scrollleft', 'scrollright')):
            # Mouse-wheel events arrive as touches; ignore them here.
            return False

        if self.multiselect:
            if self.file_system.is_dir(entry.path) and touch.is_double_tap:
                self.open_entry(entry)
            else:
                # Toggle membership of the entry in the current selection.
                if entry.path in self.selection:
                    self.selection.remove(entry.path)
                else:
                    self.selection.append(entry.path)
        else:
            if self.file_system.is_dir(entry.path):
                # Directories are only selectable when dirselect is set.
                if self.dirselect:
                    self.selection = [entry.path, ]
            else:
                self.selection = [entry.path, ]

    def entry_released(self, entry, touch):
        '''(internal) This method must be called by the template when an entry
        is released by the user.

        .. versionadded:: 1.1.0
        '''
        if (
            'button' in touch.profile and touch.button in (
                'scrollup', 'scrolldown', 'scrollleft', 'scrollright')):
            return False
        if not self.multiselect:
            if self.file_system.is_dir(entry.path) and not self.dirselect:
                # Plain single-select: a directory tap navigates into it.
                self.open_entry(entry)
            elif touch.is_double_tap:
                if self.dirselect and self.file_system.is_dir(entry.path):
                    self.open_entry(entry)
                else:
                    self.dispatch('on_submit', self.selection, touch)

    def open_entry(self, entry):
        '''Change :attr:`path` to the directory represented by *entry*, or
        flag the entry as locked if the directory cannot be listed.
        '''
        try:
            # Just check if we can list the directory. This is also what
            # _add_files does, so if it fails here, it would also fail later
            # on. Do the check here to prevent setting path to an invalid
            # directory that we cannot list.
            self.file_system.listdir(entry.path)
        except OSError:
            entry.locked = True
        else:
            self.path = join(self.path, entry.path)
            self.selection = []

    def _apply_filters(self, files):
        '''Return *files* reduced to those matching :attr:`filters`.

        A callable filter is invoked as filt(path, filename) and keeps the
        file when it returns a truthy value; any other filter is treated as
        an fnmatch pattern. Directories bypass filtering unless
        :attr:`filter_dirs` is set.
        '''
        if not self.filters:
            return files
        filtered = []
        for filt in self.filters:
            if callable(filt):
                # FIX: the previous code wrapped the callback result in
                # list(...) — a 2to3 artifact from the loop variable being
                # named `filter` — which raised TypeError for the documented
                # bool return value.
                filtered.extend([fn for fn in files if filt(self.path, fn)])
            else:
                filtered.extend([fn for fn in files if fnmatch(fn, filt)])
        if not self.filter_dirs:
            dirs = [fn for fn in files if self.file_system.is_dir(fn)]
            filtered.extend(dirs)
        # De-duplicate; note this does not preserve order (the caller sorts
        # afterwards via sort_func).
        return list(set(filtered))

    def get_nice_size(self, fn):
        '''Pass the filepath. Returns the size in the best human readable
        format or '' if it is a directory (Doesn't recursively calculate
        size).
        '''
        if self.file_system.is_dir(fn):
            return ''
        try:
            size = self.file_system.getsize(fn)
        except OSError:
            return '--'

        for unit in filesize_units:
            if size < 1024.0:
                return '%1.0f %s' % (size, unit)
            size /= 1024.0

    def _update_files(self, *args, **kwargs):
        # trigger to start gathering the files in the new directory
        # we'll start a timer that will do the job, 10 times per frames
        # (default)
        self._gitems = []
        self._gitems_parent = kwargs.get('parent', None)
        self._gitems_gen = self._generate_file_entries(
            path=kwargs.get('path', self.path),
            parent=self._gitems_parent)

        # cancel any previous clock if exist
        Clock.unschedule(self._create_files_entries)

        # close any progress view left over from a previous run
        self._hide_progress()

        if self._create_files_entries():
            # not enough time for creating all the entries in one pass:
            # schedule a clock to continue every 100 ms (a progress view is
            # shown meanwhile by _create_files_entries).
            Clock.schedule_interval(self._create_files_entries, .1)

    def _create_files_entries(self, *args):
        # create maximum entries during 50ms max, or 10 minimum (slow system)
        # (on a "fast system" (core i7 2700K), we can create up to 40 entries
        # in 50 ms. So 10 is fine for a slow system.)
        start = time()
        finished = False
        index = total = count = 1
        while time() - start < 0.05 or count < 10:
            try:
                index, total, item = next(self._gitems_gen)
                self._gitems.append(item)
                count += 1
            except StopIteration:
                finished = True
                break

        # if this wasn't enough for creating all the entries, show a progress
        # bar, and report the activity to the user.
        if not finished:
            self._show_progress()
            self._progress.total = total
            self._progress.index = index
            # keep the scheduled interval running
            return True

        # we created all the files, now push them on the view
        self._items = items = self._gitems
        parent = self._gitems_parent
        if parent is None:
            self.dispatch('on_entries_cleared')
            for entry in items:
                self.dispatch('on_entry_added', entry, parent)
        else:
            parent.entries[:] = items
            for entry in items:
                self.dispatch('on_subentry_to_entry', entry, parent)
        # (renamed loop variable: `file` shadowed the builtin)
        self.files[:] = [item.path for item in items]

        # stop the progression / creation
        self._hide_progress()
        self._gitems = None
        self._gitems_gen = None
        Clock.unschedule(self._create_files_entries)
        return False

    def cancel(self, *largs):
        '''Cancel any background action started by filechooser, such as
        loading a new directory.

        .. versionadded:: 1.2.0
        '''
        Clock.unschedule(self._create_files_entries)
        self._hide_progress()
        if len(self._previous_path) > 1:
            # if we cancel any action, the path will be set same as the
            # previous one, so we can safely cancel the update of the previous
            # path.
            self.path = self._previous_path[-2]
            Clock.unschedule(self._update_files)

    def _show_progress(self):
        if self._progress:
            return
        self._progress = self.progress_cls(path=self.path)
        # NOTE(review): progress_cls declares index/total but no `value`
        # property; this sets a plain attribute — possibly vestigial.
        self._progress.value = 0
        self.add_widget(self._progress)

    def _hide_progress(self):
        if self._progress:
            self.remove_widget(self._progress)
            self._progress = None

    def _generate_file_entries(self, *args, **kwargs):
        # Generator that will create all the files entries.
        # the generator is used via _update_files() and
        # _create_files_entries(); don't use it directly.
        is_root = False
        path = kwargs.get('path', self.path)
        have_parent = kwargs.get('parent', None) is not None

        # Add the components that are always needed
        if self.rootpath:
            rootpath = realpath(self.rootpath)
            path = realpath(path)
            if not path.startswith(rootpath):
                # escaped the sandbox: snap back to the root
                self.path = rootpath
                return
            elif path == rootpath:
                is_root = True
        else:
            if platform == 'win':
                is_root = splitdrive(path)[1] in (sep, altsep)
            elif platform in ('macosx', 'linux', 'android', 'ios'):
                is_root = normpath(expanduser(path)) == sep
            else:
                # Unknown fs, just always add the .. entry but also log
                Logger.warning('Filechooser: Unsupported OS: %r' % platform)
        # generate an entry to go back to the previous directory
        if not is_root and not have_parent:
            back = '..' + sep
            pardir = Builder.template(self._ENTRY_TEMPLATE, **dict(
                name=back, size='', path=back, controller=ref(self),
                isdir=True, parent=None, sep=sep, get_nice_size=lambda: ''))
            yield 0, 1, pardir

        # generate all the entries for files
        try:
            for index, total, item in self._add_files(path):
                yield index, total, item
        except OSError:
            Logger.exception('Unable to open directory <%s>' % self.path)
            self.files[:] = []

    def _add_files(self, path, parent=None):
        force_unicode = self._force_unicode
        # Make sure we're using unicode in case of non-ascii chars in
        # filenames. listdir() returns unicode if you pass it unicode.
        try:
            path = expanduser(path)
            path = force_unicode(path)
        except ForceUnicodeError:
            pass

        files = []
        fappend = files.append
        for fn in self.file_system.listdir(path):
            try:
                fappend(force_unicode(fn))
            except ForceUnicodeError:
                # Skip filenames we cannot decode at all.
                pass
        # In the following, use fully qualified filenames
        files = [normpath(join(path, f)) for f in files]
        # Apply filename filters
        files = self._apply_filters(files)
        # Sort the list of files
        files = self.sort_func(files, self.file_system)
        is_hidden = self.file_system.is_hidden
        if not self.show_hidden:
            files = [x for x in files if not is_hidden(x)]
        self.files[:] = files
        total = len(files)
        wself = ref(self)
        for index, fn in enumerate(files):

            def get_nice_size():
                # Use a closure for lazy-loading here
                return self.get_nice_size(fn)

            ctx = {'name': basename(fn),
                   'get_nice_size': get_nice_size,
                   'path': fn,
                   'controller': wself,
                   'isdir': self.file_system.is_dir(fn),
                   'parent': parent,
                   'sep': sep}
            entry = Builder.template(self._ENTRY_TEMPLATE, **ctx)
            yield index, total, entry

    def _force_unicode(self, s):
        # the idea is, whatever is the filename, unicode or str, even if the
        # str can't be directly returned as a unicode, return something.
        # (On Python 3 every str passes straight through; the decode loop
        # only runs for bytes.)
        if type(s) is str:
            return s
        encodings = self.file_encodings
        for encoding in encodings:
            try:
                return s.decode(encoding, 'strict')
            except UnicodeDecodeError:
                pass
            except UnicodeEncodeError:
                pass
        raise ForceUnicodeError('Unable to decode %r' % s)

    def entry_subselect(self, entry):
        '''Load the content of the directory *entry* as its sub-entries.'''
        if not self.file_system.is_dir(entry.path):
            return
        self._update_files(path=entry.path, parent=entry)

    def close_subselection(self, entry):
        '''Remove all sub-entries of *entry* from the view.'''
        for subentry in entry.entries:
            self.dispatch('on_remove_subentry', subentry, entry)
class FileChooserListView(FileChooserController):
    '''Implementation of :class:`FileChooserController` using a list view.
    '''
    # kv template used by the controller to build each entry widget.
    _ENTRY_TEMPLATE = 'FileListEntry'
class FileChooserIconView(FileChooserController):
    '''Implementation of :class:`FileChooserController` using an icon view.
    '''
    # kv template used by the controller to build each entry widget.
    _ENTRY_TEMPLATE = 'FileIconEntry'
if __name__ == '__main__':
    from kivy.app import App
    from pprint import pprint
    import sys

    class FileChooserApp(App):
        '''Tiny manual-test app: shows a FileChooserListView and prints every
        selection/path change to stdout.
        '''

        def build(self):
            view = FileChooserListView
            # An optional command-line argument sets the starting path.
            if len(sys.argv) > 1:
                v = view(path=sys.argv[1])
            else:
                v = view()
            v.bind(selection=lambda *x: pprint("selection: %s" % x[1:]))
            v.bind(path=lambda *x: pprint("path: %s" % x[1:]))
            return v
    FileChooserApp().run()
| |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class DataPacketReceived(object):
    """
    Notification model for a data packet received by the Manager.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        DataPacketReceived - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps python attribute name -> swagger type name (used by to_dict).
        self.swagger_types = {
            'sys_time': 'datetime',
            'src_port': 'int',
            'latency': 'int',
            'payload': 'bytearray',
            'gen_net_time': 'datetime',
            'mac_address': 'str',
            'dest_port': 'int',
            'type': 'str',
            'hops': 'int'
        }
        # Maps python attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'sys_time': 'sysTime',
            'src_port': 'srcPort',
            'latency': 'latency',
            'payload': 'payload',
            'gen_net_time': 'genNetTime',
            'mac_address': 'macAddress',
            'dest_port': 'destPort',
            'type': 'type',
            'hops': 'hops'
        }
        # Backing fields for the properties below.
        self._sys_time = None
        self._src_port = None
        self._latency = None
        self._payload = None
        self._gen_net_time = None
        self._mac_address = None
        self._dest_port = None
        self._type = None
        self._hops = None

    @property
    def sys_time(self):
        """
        Gets the sys_time of this DataPacketReceived.
        Time of notification

        :return: The sys_time of this DataPacketReceived.
        :rtype: datetime
        """
        return self._sys_time

    @sys_time.setter
    def sys_time(self, sys_time):
        """
        Sets the sys_time of this DataPacketReceived.
        Time of notification

        :param sys_time: The sys_time of this DataPacketReceived.
        :type: datetime
        """
        self._sys_time = sys_time

    @property
    def src_port(self):
        """
        Gets the src_port of this DataPacketReceived.
        UDP source port

        :return: The src_port of this DataPacketReceived.
        :rtype: int
        """
        return self._src_port

    @src_port.setter
    def src_port(self, src_port):
        """
        Sets the src_port of this DataPacketReceived.
        UDP source port

        :param src_port: The src_port of this DataPacketReceived.
        :type: int
        """
        self._src_port = src_port

    @property
    def latency(self):
        """
        Gets the latency of this DataPacketReceived.
        Time it took for the packet to reach the Manager, in milliseconds

        :return: The latency of this DataPacketReceived.
        :rtype: int
        """
        return self._latency

    @latency.setter
    def latency(self, latency):
        """
        Sets the latency of this DataPacketReceived.
        Time it took for the packet to reach the Manager, in milliseconds

        :param latency: The latency of this DataPacketReceived.
        :type: int
        """
        self._latency = latency

    @property
    def payload(self):
        """
        Gets the payload of this DataPacketReceived.
        The payload data of the packet, in base64 format

        :return: The payload of this DataPacketReceived.
        :rtype: bytearray
        """
        return self._payload

    @payload.setter
    def payload(self, payload):
        """
        Sets the payload of this DataPacketReceived.
        The payload data of the packet, in base64 format

        :param payload: The payload of this DataPacketReceived.
        :type: bytearray
        """
        self._payload = payload

    @property
    def gen_net_time(self):
        """
        Gets the gen_net_time of this DataPacketReceived.
        Timestamp the packet was generated, in ISO 8601 format

        :return: The gen_net_time of this DataPacketReceived.
        :rtype: datetime
        """
        return self._gen_net_time

    @gen_net_time.setter
    def gen_net_time(self, gen_net_time):
        """
        Sets the gen_net_time of this DataPacketReceived.
        Timestamp the packet was generated, in ISO 8601 format

        :param gen_net_time: The gen_net_time of this DataPacketReceived.
        :type: datetime
        """
        self._gen_net_time = gen_net_time

    @property
    def mac_address(self):
        """
        Gets the mac_address of this DataPacketReceived.
        MAC address of the mote that sent this packet

        :return: The mac_address of this DataPacketReceived.
        :rtype: str
        """
        return self._mac_address

    @mac_address.setter
    def mac_address(self, mac_address):
        """
        Sets the mac_address of this DataPacketReceived.
        MAC address of the mote that sent this packet

        :param mac_address: The mac_address of this DataPacketReceived.
        :type: str
        """
        self._mac_address = mac_address

    @property
    def dest_port(self):
        """
        Gets the dest_port of this DataPacketReceived.
        UDP destination port

        :return: The dest_port of this DataPacketReceived.
        :rtype: int
        """
        return self._dest_port

    @dest_port.setter
    def dest_port(self, dest_port):
        """
        Sets the dest_port of this DataPacketReceived.
        UDP destination port

        :param dest_port: The dest_port of this DataPacketReceived.
        :type: int
        """
        self._dest_port = dest_port

    @property
    def type(self):
        """
        Gets the type of this DataPacketReceived.
        Notification type

        :return: The type of this DataPacketReceived.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this DataPacketReceived.
        Notification type

        :param type: The type of this DataPacketReceived.
        :type: str
        :raises ValueError: if `type` is not one of the allowed values.
        """
        # FIX: removed the duplicated "pathAlert" entry from the original
        # generated list; membership is unchanged.
        allowed_values = ["netStarted", "pathStateChanged", "pathAlert", "moteStateChanged", "joinFailed", "pingResponse", "invalidMIC", "dataPacketReceived", "ipPacketReceived", "packetSent", "cmdFinished", "configChanged", "configLoaded", "alarmOpened", "alarmClosed", "deviceHealthReport", "neighborHealthReport", "discoveryHealthReport", "rawMoteNotification", "serviceChanged", "apStateChanged", "managerStarted", "managerStopping", "optPhase", "moteTrace", "frameCapacity", "apGpsSyncChanged"]
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type`, must be one of {0}"
                .format(allowed_values)
            )
        self._type = type

    @property
    def hops(self):
        """
        Gets the hops of this DataPacketReceived.
        Number of hops the packet took from the mote to the Manager

        :return: The hops of this DataPacketReceived.
        :rtype: int
        """
        return self._hops

    @hops.setter
    def hops(self, hops):
        """
        Sets the hops of this DataPacketReceived.
        Number of hops the packet took from the mote to the Manager

        :param hops: The hops of this DataPacketReceived.
        :type: int
        """
        self._hops = hops

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Plain dict iteration instead of six.iteritems: works identically
        # on Python 2 and 3 without the external dependency.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        FIX: the generated code compared self.__dict__ == other.__dict__
        unconditionally, which raised AttributeError for objects without a
        __dict__ (e.g. `model == 1`). Foreign types now yield NotImplemented
        so Python falls back to the reflected comparison.
        """
        if not isinstance(other, DataPacketReceived):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| |
"""
=========================================================
Connectivity Matrices, ROI Intersections and Density Maps
=========================================================
This example is meant to be an introduction to some of the streamline tools
available in dipy. Some of the functions covered in this example are
``target``, ``connectivity_matrix`` and ``density_map``. ``target`` allows one
to filter streamlines that either pass through or do not pass through some
region of the brain, ``connectivity_matrix`` groups and counts streamlines
based on where in the brain they begin and end, and finally, density map counts
the number of streamlines that pass through every voxel of some image.
To get started we'll need to have a set of streamlines to work with. We'll use
EuDX along with the CsaOdfModel to make some streamlines. Let's import the
modules and download the data we'll be using.
"""
from dipy.tracking.eudx import EuDX
from dipy.reconst import peaks, shm
from dipy.tracking import utils
from dipy.data import read_stanford_labels, fetch_stanford_t1, read_stanford_t1
# Load the Stanford HARDI dataset: diffusion image, gradient table and an
# integer label map (one anatomical label per voxel).
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
# Fetch (download if needed) and load the matching T1 image, used later for
# anatomical context in the visualization.
fetch_stanford_t1()
t1 = read_stanford_t1()
t1_data = t1.get_data()
"""
We've loaded an image called ``labels_img`` which is a map of tissue types such
that every integer value in the array ``labels`` represents an anatomical
structure or tissue type [#]_. For this example, the image was created so that
white matter voxels have values of either 1 or 2. We'll use
``peaks_from_model`` to apply the ``CsaOdfModel`` to each white matter voxel
and estimate fiber orientations which we can use for tracking.
"""
# White matter was assigned labels 1 and 2 when the label image was created.
white_matter = (labels == 1) | (labels == 2)
# Fit the CSA ODF model (spherical-harmonic order 6) and extract fiber
# orientation peaks inside the white-matter mask only.
csamodel = shm.CsaOdfModel(gtab, 6)
csapeaks = peaks.peaks_from_model(model=csamodel,
                                  data=data,
                                  sphere=peaks.default_sphere,
                                  relative_peak_threshold=.8,
                                  min_separation_angle=45,
                                  mask=white_matter)
"""
Now we can use EuDX to track all of the white matter. To keep things reasonably
fast we use ``density=2`` which will result in 8 seeds per voxel. We'll set
``a_low`` (the parameter which determines the threshold of FA/QA under which
tracking stops) to be very low because we've already applied a white matter
mask.
"""
# density=2 means 2 seeds per dimension per voxel, i.e. 8 seeds per voxel.
seeds = utils.seeds_from_mask(white_matter, density=2)
# a_low (the FA/QA stopping threshold) can be very low here because the
# white-matter mask already restricts where tracking happens.
streamline_generator = EuDX(csapeaks.peak_values, csapeaks.peak_indices,
                            odf_vertices=peaks.default_sphere.vertices,
                            a_low=.05, step_sz=.5, seeds=seeds)
# Affine relating streamline point coordinates to the input voxel indices.
affine = streamline_generator.affine
streamlines = list(streamline_generator)
"""
The first of the tracking utilities we'll cover here is ``target``. This
function takes a set of streamlines and a region of interest (ROI) and returns
only those streamlines that pass through the ROI. The ROI should be an array
such that the voxels that belong to the ROI are ``True`` and all other voxels
are ``False`` (this type of binary array is sometimes called a mask). This
function can also exclude all the streamlines that pass through an ROI by
setting the ``include`` flag to ``False``. In this example we'll target the
streamlines of the corpus callosum. Our ``labels`` array has a sagittal slice
of the corpus callosum identified by the label value 2. We'll create an ROI
mask from that label and create two sets of streamlines, those that intersect
with the ROI and those that don't.
"""
# Label 2 marks a sagittal slice of the corpus callosum; use it as the ROI.
cc_slice = labels == 2
cc_streamlines = utils.target(streamlines, cc_slice, affine=affine)
cc_streamlines = list(cc_streamlines)
# include=False inverts the test: keep only streamlines that avoid the ROI.
other_streamlines = utils.target(streamlines, cc_slice, affine=affine,
                                 include=False)
other_streamlines = list(other_streamlines)
# Sanity check: every streamline lands in exactly one of the two groups.
assert len(other_streamlines) + len(cc_streamlines) == len(streamlines)
"""
We can use some of dipy's visualization tools to display the ROI we targeted
above and all the streamlines that pass through that ROI. The ROI is the yellow
region near the center of the axial image.
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
# Make display objects
# NOTE(review): `color` is computed but never used below — line_colors is
# called again directly in the fvtk.line(...) call.
color = line_colors(cc_streamlines)
cc_streamlines_actor = fvtk.line(cc_streamlines, line_colors(cc_streamlines))
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
                            opacities=[1.])
# Two orthogonal slices of the T1 volume for anatomical context.
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, vol_actor)
fvtk.add(r, vol_actor2)
fvtk.add(r, cc_streamlines_actor)
fvtk.add(r, cc_ROI_actor)
# Save figures
fvtk.record(r, n_frames=1, out_path='corpuscallosum_axial.png',
            size=(800, 800))
# Reposition the camera for the second, sagittal snapshot.
fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
fvtk.record(r, n_frames=1, out_path='corpuscallosum_sagittal.png',
            size=(800, 800))
"""
.. figure:: corpuscallosum_axial.png
:align: center
**Corpus Callosum Axial**
.. include:: ../links_names.inc
.. figure:: corpuscallosum_sagittal.png
:align: center
**Corpus Callosum Sagittal**
"""
"""
Once we've targeted on the corpus callosum ROI, we might want to find out which
regions of the brain are connected by these streamlines. To do this we can use
the ``connectivity_matrix`` function. This function takes a set of streamlines
and an array of labels as arguments. It returns the number of streamlines that
start and end at each pair of labels and it can return the streamlines grouped
by their endpoints. Notice that this function only considers the endpoints of
each streamline.
"""
# Count streamlines per pair of endpoint labels; also return the streamlines
# grouped by their endpoint label pair.
M, grouping = utils.connectivity_matrix(cc_streamlines, labels, affine=affine,
                                        return_mapping=True,
                                        mapping_as_streamlines=True)
# Labels 0 (background), 1 and 2 (white matter) are not gray-matter regions,
# so zero out their rows and columns.
M[:3, :] = 0
M[:, :3] = 0
"""
We've set ``return_mapping`` and ``mapping_as_streamlines`` to ``True`` so that
``connectivity_matrix`` returns all the streamlines in ``cc_streamlines``
grouped by their endpoint.
Because we're typically only interested in connections between gray matter
regions, and because the label 0 represents background and the labels 1 and 2
represent white matter, we discard the first three rows and columns of the
connectivity matrix.
We can now display this matrix using matplotlib, we display it using a log
scale to make small values in the matrix easier to see.
"""
import numpy as np
import matplotlib.pyplot as plt
# log1p compresses the dynamic range so sparse connections remain visible.
plt.imshow(np.log1p(M), interpolation='nearest')
plt.savefig("connectivity.png")
"""
.. figure:: connectivity.png
:align: center
**Connectivity of Corpus Callosum**
.. include:: ../links_names.inc
"""
"""
In our example track there are more streamlines connecting regions 11 and
54 than any other pair of regions. These labels represent the left and right
superior frontal gyrus respectively. These two regions are large, close
together, have lots of corpus callosum fibers and are easy to track so this
result should not be a surprise to anyone.
However, the interpretation of streamline counts can be tricky. The
relationship between the underlying biology and the streamline counts will
depend on several factors, including how the tracking was done, and the correct
way to interpret these kinds of connectivity matrices is still an open question
in the diffusion imaging literature.
The next function we'll demonstrate is ``density_map``. This function allows
one to represent the spatial distribution of a track by counting the density of
streamlines in each voxel. For example, let's take the track connecting the
left and right superior frontal gyrus.
"""
# Streamlines connecting label 11 to label 54 (left/right superior frontal
# gyrus per the surrounding text — confirm against the label table).
lr_superiorfrontal_track = grouping[11, 54]
shape = labels.shape
# Per-voxel count of streamlines in this bundle.
dm = utils.density_map(lr_superiorfrontal_track, shape, affine=affine)
"""
Let's save this density map and the streamlines so that they can be
visualized together. In order to save the streamlines in a ".trk" file we'll
need to move them to "trackvis space", or the representation of streamlines
specified by the trackvis Track File format.
To do that, we will use tools available in [nibabel](http://nipy.org/nibabel)
"""
import nibabel as nib
# Save density map, reusing the diffusion image's affine so the saved
# volume stays registered with the original data.
dm_img = nib.Nifti1Image(dm.astype("int16"), hardi_img.affine)
dm_img.to_filename("lr-superiorfrontal-dm.nii.gz")
# Make a trackvis header so we can save streamlines.
# NOTE(review): nib.trackvis is the legacy trackvis interface; newer nibabel
# versions use nib.streamlines instead — confirm the nibabel version pinned
# for this tutorial.
voxel_size = labels_img.header.get_zooms()
trackvis_header = nib.trackvis.empty_header()
trackvis_header['voxel_size'] = voxel_size
trackvis_header['dim'] = shape
trackvis_header['voxel_order'] = "RAS"
# Move streamlines to "trackvis space" (see the explanation below).
trackvis_point_space = utils.affine_for_trackvis(voxel_size)
lr_sf_trk = utils.move_streamlines(lr_superiorfrontal_track,
                                   trackvis_point_space, input_space=affine)
lr_sf_trk = list(lr_sf_trk)
# Save streamlines; the trackvis format stores (points, scalars, properties)
# triples, and we have no per-point scalars or per-track properties.
for_save = [(sl, None, None) for sl in lr_sf_trk]
nib.trackvis.write("lr-superiorfrontal.trk", for_save, trackvis_header)
"""
Let's take a moment here to consider the representation of streamlines used in
dipy. Streamlines are a path through the 3D space of an image represented by a
set of points. For these points to have a meaningful interpretation, these
points must be given in a known coordinate system. The ``affine`` attribute of
the ``streamline_generator`` object specifies the coordinate system of the
points with respect to the voxel indices of the input data.
``trackvis_point_space`` specifies the trackvis coordinate system with respect
to the same indices. The ``move_streamlines`` function returns a new set of
streamlines from an existing set of streamlines in the target space. The
target space and the input space must be specified as affine transformations
with respect to the same reference [#]_. If no input space is given, the input
space will be the same as the current representation of the streamlines, in
other words the input space is assumed to be ``np.eye(4)``, the 4-by-4 identity
matrix.
All of the functions above that allow streamlines to interact with volumes take
an affine argument. This argument allows these functions to work with
streamlines regardless of their coordinate system. For example even though we
moved our streamlines to "trackvis space", we can still compute the density map
as long as we specify the right coordinate system.
"""
# Recompute the density map from the trackvis-space copy of the streamlines;
# passing the matching affine yields the identical result.
dm_trackvis = utils.density_map(lr_sf_trk, shape, affine=trackvis_point_space)
assert np.all(dm == dm_trackvis)
"""
This means that streamlines can interact with any image volume, for example a
high resolution structural image, as long as one can register that image to
the diffusion images and calculate the coordinate system with respect to that
image.
"""
"""
.. rubric:: Footnotes
.. [#] The image `aparc-reduced.nii.gz`, which we load as ``labels_img``, is a
modified version of label map `aparc+aseg.mgz` created by freesurfer. The
corpus callosum region is a combination of the freesurfer labels 251-255.
The remaining freesurfer labels were re-mapped and reduced so that they lie
between 0 and 88. To see the freesurfer region, label and name, represented
by each value see `label_info.txt` in `~/.dipy/stanford_hardi`.
.. [#] An affine transformation is a mapping between two coordinate systems
that can represent scaling, rotation, shear, translation and reflection.
Affine transformations are often represented using a 4x4 matrix where the
last row of the matrix is ``[0, 0, 0, 1]``.
"""
| |
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .config import DEFAULT_CONFIG as CONFIG
from .metric_mock import MetricMock
from .sys_info import get_distinct_id, get_sys_info, get_version, get_isgpu, get_build_number
import logging
import pprint
import threading
import copy as _copy
import requests as _requests
import sys as _sys
if _sys.version_info.major == 3:
import queue as Queue
from urllib.parse import quote_plus as _quote_plus
else:
import Queue
from urllib import quote_plus as _quote_plus
# Public API of this module. Fixed from ``__ALL__``, which Python ignores:
# only the lowercase ``__all__`` controls ``from module import *``.
__all__ = ['MetricTracker']
# Global producer/consumer objects for background metrics publishing.
# The queue is bounded so a stalled worker cannot grow memory without limit.
METRICS_QUEUE = Queue.Queue(maxsize=100)
METRICS_THREAD = None
# Sentinel event name that tells the worker thread to exit its loop.
SHUTDOWN_MESSAGE = 'SHUTTING_DOWN'
class _MetricsWorkerThread(threading.Thread):
    """Worker thread that drains METRICS_QUEUE and publishes each metric.

    ``MetricTracker`` enqueues metric dicts; this consumer posts them to the
    configured metrics endpoint so tracking calls never block on network I/O.
    Delivery is strictly best-effort: every failure is swallowed.
    """
    def __init__(self, mode, source):
        threading.Thread.__init__(self, name='metrics-worker')
        # version and is_gpu from version_info
        self._version = get_version()
        self._build_number = get_build_number()
        self._isgpu = get_isgpu()
        self._mode = mode
        self._source = source
        try:
            # product key
            from .. import product_key
            self._product_key = product_key.get_product_key()
        except Exception:
            # Not fatal: _track() retries the lookup at submit time.
            self._product_key = None
        self.queue = METRICS_QUEUE
        root_package_name = __import__(__name__.split('.')[0]).__name__
        self.logger = logging.getLogger(root_package_name + '.metrics')
        self._sys_info_set = False
        self._usable = False
        try:
            self._metrics_url = CONFIG.metrics_url
            self._requests = _requests  # support mocking out requests library in unit-tests
        except Exception as e:
            self.logger.warning("Unexpected exception connecting to Metrics service, disabling metrics, exception %s" % e)
        else:
            self._usable = True
        self._distinct_id = 'unknown'
        self._distinct_id = get_distinct_id(self._distinct_id)

    def run(self):
        # Consumer loop: block on the queue until the shutdown sentinel arrives.
        while True:
            try:
                metric = self.queue.get()  # block until something received
                if metric['event_name'] == SHUTDOWN_MESSAGE:
                    # shutting down
                    self.queue.task_done()
                    break
                self._track(metric['event_name'], metric['value'], metric['type'],
                            metric['properties'], metric['meta'], metric['send_sys_info'])
                self.queue.task_done()
            except Exception:
                # Metrics are best-effort; never let a bad payload kill the thread.
                pass

    def _set_sys_info(self):
        """Lazily collect system info once; reused for every later metric."""
        if self._sys_info_set:
            return
        self._sys_info = get_sys_info()
        self._sys_info_set = True

    def _print_sys_info(self):
        # Debug helper only; assumes _set_sys_info() already ran.
        pp = pprint.PrettyPrinter(indent=2)
        pp.pprint(self._sys_info)

    def _track(self, event_name, value=1, type="gauge", properties=None, meta=None, send_sys_info=False):
        """
        Internal method to actually send metrics, expected to be called from background thread only.

        ``properties``/``meta`` default to ``None`` (treated as empty dicts)
        instead of ``{}`` to avoid the shared-mutable-default pitfall.
        """
        if not self._usable:
            return
        properties = properties if properties is not None else {}
        meta = meta if meta is not None else {}
        the_properties = {}
        if send_sys_info:
            if not self._sys_info_set:
                self._set_sys_info()
            the_properties.update(self._sys_info)
        the_properties.update(properties)
        try:
            # homebrew metrics - cloudfront
            if self._metrics_url != '':
                cloudfront_props = {}
                props = _copy.deepcopy(the_properties)
                props.update(meta)
                cloudfront_props['event_name'] = event_name
                cloudfront_props['value'] = value
                cloudfront_props['distinct_id'] = self._distinct_id
                cloudfront_props['version'] = self._version
                cloudfront_props['isgpu'] = self._isgpu
                cloudfront_props['build_number'] = self._build_number
                cloudfront_props['properties'] = _quote_plus(str(props))
                # if product key is not set, then try to get it now when submitting
                if not self._product_key:
                    try:
                        from .. import product_key
                        self._product_key = product_key.get_product_key()
                    except Exception:
                        self._product_key = 'Unknown'
                cloudfront_props['product_key'] = self._product_key
                # Quiet the requests library so each metrics GET is not logged.
                logging.getLogger('requests').setLevel(logging.CRITICAL)
                self._requests.get(self._metrics_url, params=cloudfront_props)
        except Exception:
            # Best-effort delivery: swallow network/serialization errors.
            pass
class MetricTracker:
    """Facade for publishing metrics.

    :meth:`track` enqueues metric dicts onto the bounded module-level queue;
    a background ``_MetricsWorkerThread`` drains it and does the actual
    network submission, keeping tracking calls non-blocking.
    """
    def __init__(self, mode='UNIT', background_thread=True):
        # setup logging
        root_package_name = __import__(__name__.split('.')[0]).__name__
        self.logger = logging.getLogger(root_package_name + '.metrics')
        self._mode = mode
        self._queue = METRICS_QUEUE
        self._source = ("%s-%s" % (self._mode, get_version()))
        self.logger.debug("Running with metric source: %s" % self._source)
        # background thread for metrics
        self._thread = None
        if background_thread:
            self._start_queue_thread()

    def __del__(self):
        try:
            self._stop_queue_thread()
        except:
            # Deliberately bare: many strange exceptions can happen during
            # interpreter teardown, and nothing useful can be done about them.
            pass

    def _stop_queue_thread(self):
        # Send the shutdown sentinel, then wait (bounded) for the thread to exit.
        if self._thread is not None:
            self.track(SHUTDOWN_MESSAGE)
            self._thread.join(2.0)

    def track(self, event_name, value=1, type="gauge", properties=None, meta=None, send_sys_info=False):
        """
        Publishes event / metric to metrics providers.

        This method is a facade / proxy, queuing up this metric for a background thread to process.

        ``properties``/``meta`` default to ``None`` (treated as empty dicts)
        to avoid the shared-mutable-default pitfall.
        """
        # Outside PROD we fail fast on non-numeric values to surface caller bugs.
        if self._mode != 'PROD' and not isinstance(value, (int, float)):
            raise Exception("Metrics attempted with value being not a number, unsupported.")
        try:
            item = dict(event_name=event_name,
                        value=value,
                        type=type,
                        properties=properties if properties is not None else {},
                        meta=meta if meta is not None else {},
                        send_sys_info=send_sys_info)
            self._queue.put_nowait(item)  # don't wait if Queue is full, just silently ignore
        except Queue.Full:
            if not self._thread or not self._thread.is_alive():
                self.logger.debug("Queue is full and background thread is no longer alive, trying to restart")
                self._restart_queue_thread()
            else:
                self.logger.debug("Queue is full, doing nothing.")
        except Exception as e:
            self.logger.debug("Unexpected exception in queueing metrics, %s" % e)

    def _start_queue_thread(self):
        global METRICS_THREAD
        if self._thread is None:
            self.logger.debug("Starting background thread")
            self._thread = _MetricsWorkerThread(self._mode, self._source)
            METRICS_THREAD = self._thread
            self._thread.daemon = True
            self._thread.start()

    def _restart_queue_thread(self):
        global METRICS_THREAD
        if self._thread is not None and self._thread.is_alive():
            # Old worker still alive: shut it down before replacing it.
            self._stop_queue_thread()
        METRICS_THREAD = None
        del self._thread
        self._thread = None
        self._start_queue_thread()
| |
"""
Custom report definitions - control display of reports.
The BaseReport is somewhat general, but it's
currently specific to monthly reports. It would be pretty simple to make
this more general and subclass for monthly reports, but I'm holding off on
that until we actually have another use case for it.
"""
from collections import defaultdict, OrderedDict
import datetime
import logging
import simplejson
import re
from dateutil import parser
from django.http import HttpResponse, HttpRequest, QueryDict
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_noop, ugettext as _
from couchexport.models import Format
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.dates import DateSpan
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import json_request
from sqlagg.base import AliasColumn
from sqlagg.columns import SimpleColumn, SumColumn
from corehq.apps.es import cases as case_es, filters as es_filters
from corehq.apps.reports.cache import request_cache
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.filters.select import SelectOpenCloseFilter, MonthFilter, YearFilter
from corehq.apps.reports.generic import ElasticTabularReport, GetParamsMixin
from corehq.apps.reports.sqlreport import DatabaseColumn, SqlData, AggregateColumn, DataFormatter, DictDataFormat
from corehq.apps.reports.standard import CustomProjectReport, MonthYearMixin
from corehq.apps.reports.standard.maps import ElasticSearchMapReport
from corehq.apps.users.models import CommCareCase, CouchUser, CommCareUser
from corehq.elastic import es_query
from corehq.pillows.mappings.user_mapping import USER_INDEX
from corehq.util.translation import localize
from custom.opm import BaseMixin, normal_format, format_percent
from ..opm_tasks.models import OpmReportSnapshot
from .beneficiary import Beneficiary, ConditionsMet
from .health_status import HealthStatus
from .incentive import Worker
from .filters import SnapshotFilter, HierarchyFilter, MetHierarchyFilter
from .constants import *
# SQL date-window filter fragments, parameterized with :startdate/:enddate
# bind parameters.
DATE_FILTER = "date between :startdate and :enddate"
# A case counts as active in the window if it was opened before the window
# ends and either never closed (closed_on = '') or closed during/after it.
# NOTE(fix): the outer quotes must be double so the embedded '' survives as
# an SQL empty-string literal.  With single outer quotes, Python's implicit
# string concatenation silently dropped the quotes, producing the invalid
# fragment "closed_on = ))".  This now matches the inline copy used by
# OpmCaseSqlData.filters.
DATE_FILTER_EXTENDED = "(opened_on <= :enddate AND (closed_on >= :enddate OR closed_on = '')) OR (opened_on <= :enddate AND (closed_on >= :startdate or closed_on <= :enddate))"
def ret_val(value):
    """Identity formatter: hand *value* back unchanged.

    Used as a ``format_fn`` for report columns that need no extra formatting.
    """
    return value
class OpmCaseSqlData(SqlData):
    """Per-user case registration counts from the OPM case fluff table."""
    table_name = "fluff_OpmCaseFluff"

    def __init__(self, domain, user_id, datespan):
        self.domain = domain
        self.user_id = user_id
        self.datespan = datespan

    @property
    def filter_values(self):
        # Bind parameters for the SQL filter fragments below.
        span = self.datespan
        return {
            'domain': self.domain,
            'user_id': self.user_id,
            'startdate': str(span.startdate_utc.date()),
            'enddate': str(span.enddate_utc.date()),
        }

    @property
    def group_by(self):
        return ['user_id']

    @property
    def filters(self):
        # Restrict to this domain/user, and to cases active in the window:
        # opened before the window ends and not closed before it starts
        # (closed_on = '' marks a still-open case).
        return [
            "domain = :domain",
            "user_id = :user_id",
            "(opened_on <= :enddate AND (closed_on >= :enddate OR closed_on = '')) OR (opened_on <= :enddate AND (closed_on >= :startdate or closed_on <= :enddate))",
        ]

    @property
    def columns(self):
        return [
            DatabaseColumn("User ID", SimpleColumn("user_id")),
            DatabaseColumn("Women registered", SumColumn("women_registered_total")),
            DatabaseColumn("Children registered", SumColumn("children_registered_total")),
        ]

    @property
    def data(self):
        # SqlData.data is keyed by user_id (the group_by column); return just
        # this user's row, or None when there are no matching rows.
        rows = super(OpmCaseSqlData, self).data
        return rows[self.user_id] if self.user_id in rows else None
class OpmFormSqlData(SqlData):
    """Form-level aggregates (growth monitoring / service forms) per case."""
    table_name = "fluff_OpmFormFluff"

    def __init__(self, domain, case_id, datespan):
        self.domain = domain
        self.case_id = case_id
        self.datespan = datespan

    @property
    def filter_values(self):
        return {
            'domain': self.domain,
            'case_id': self.case_id,
            'startdate': self.datespan.startdate_utc.date(),
            'enddate': self.datespan.enddate_utc.date(),
        }

    @property
    def group_by(self):
        return ['case_id']

    @property
    def filters(self):
        # Always filter by domain and date window; narrow to one case only
        # when a case_id was supplied (None means "all cases").
        result = [
            "domain = :domain",
            "date between :startdate and :enddate",
        ]
        if self.case_id:
            result.append("case_id = :case_id")
        return result

    @property
    def columns(self):
        return [
            DatabaseColumn("Case ID", SimpleColumn("case_id")),
            DatabaseColumn("Growth Monitoring Total", SumColumn("growth_monitoring_total")),
            DatabaseColumn("Service Forms Total", SumColumn("service_forms_total")),
        ]

    @property
    def data(self):
        # Without a case_id, hand back the whole mapping; otherwise just this
        # case's row (None when the case has no rows).
        all_rows = super(OpmFormSqlData, self).data
        if self.case_id is None:
            return all_rows
        return all_rows.get(self.case_id)
# Per-VHND (Village Health and Nutrition Day) availability flags summed by
# VhndAvailabilitySqlData below.  'vhnd_available' marks whether the session
# happened at all; the rest apparently track individual supplies/equipment
# (IFA tablets, scales, ORS, zinc, measles vaccine) — confirm against the
# fluff indicator definitions.
VHND_PROPERTIES = [
    "vhnd_available",
    "vhnd_ifa_available",
    "vhnd_adult_scale_available",
    "vhnd_child_scale_available",
    "vhnd_ors_available",
    "vhnd_zn_available",
    "vhnd_measles_vacc_available",
]
class VhndAvailabilitySqlData(SqlData):
    """Unfiltered availability flags for every VHND, grouped by owner and date."""
    table_name = "fluff_VhndAvailabilityFluff"

    @property
    def filter_values(self):
        return {}

    @property
    def group_by(self):
        return ['owner_id', 'date']

    @property
    def filters(self):
        # Intentionally unfiltered: callers slice the result themselves.
        return []

    @property
    def columns(self):
        cols = [DatabaseColumn('date', SimpleColumn("date"))]
        cols.extend(DatabaseColumn("", SumColumn(prop)) for prop in VHND_PROPERTIES)
        return cols
class OpmHealthStatusSqlData(SqlData):
    """Health-status aggregates for a single user over the report window.

    BUG FIX (review): the original passed ``filters=self.filters.append(X)``
    to every SumColumn.  ``list.append`` returns ``None``, so every column
    was silently built with ``filters=None`` and the intended date filter
    was dropped.  Columns now receive ``self.filters + [X]``.
    """
    table_name = 'fluff_OpmHealthStatusAllInfoFluff'

    def __init__(self, domain, user_id, datespan):
        self.domain = domain
        self.user_id = user_id
        self.datespan = datespan

    @property
    def filter_values(self):
        return dict(
            domain=self.domain,
            user_id=self.user_id,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )

    @property
    def group_by(self):
        return ['user_id']

    @property
    def filters(self):
        return [
            "domain = :domain",
            "user_id = :user_id"
        ]

    def _filters_with(self, date_filter):
        # self.filters builds a fresh list on every access, so + is safe.
        return self.filters + [date_filter]

    def _percent_column(self, title, column, slug, base='beneficiaries',
                        date_filter=DATE_FILTER, alias=None):
        # Share of `base` (an aliased SumColumn defined earlier in the column
        # list) represented by `column`, within the date window.
        return AggregateColumn(
            title, format_percent,
            [AliasColumn(base),
             SumColumn(column, alias=alias, filters=self._filters_with(date_filter))],
            slug=slug, format_fn=ret_val)

    @property
    def columns(self):
        percent = self._percent_column
        cols = [
            DatabaseColumn(
                '# of Beneficiaries Registered',
                SumColumn('beneficiaries_registered_total', alias="beneficiaries",
                          filters=self._filters_with(DATE_FILTER_EXTENDED)),
                format_fn=normal_format),
            percent('# of Pregnant Women Registered', 'lmp_total', 'lmp',
                    date_filter=DATE_FILTER_EXTENDED),
            percent('# of Mothers of Children Aged 3 Years and Below Registered',
                    'lactating_total', 'mother_reg',
                    date_filter=DATE_FILTER_EXTENDED, alias='mothers'),
            DatabaseColumn(
                '# of Children Between 0 and 3 Years of Age Registered',
                SumColumn('children_total', alias="childrens",
                          filters=self._filters_with(DATE_FILTER_EXTENDED)),
                format_fn=normal_format),
            percent('# of Beneficiaries Attending VHND Monthly',
                    'vhnd_monthly_total', 'vhnd_monthly'),
            percent('# of Pregnant Women Who Have Received at least 30 IFA Tablets',
                    'ifa_tablets_total', 'ifa_tablets'),
            percent('# of Pregnant Women Whose Weight Gain Was Monitored At Least Once',
                    'weight_once_total', 'weight_once'),
            percent('# of Pregnant Women Whose Weight Gain Was Monitored Twice',
                    'weight_twice_total', 'weight_twice'),
            percent('# of Children Whose Weight Was Monitored at Birth',
                    'children_monitored_at_birth_total', 'children_monitored_at_birth',
                    base='childrens'),
            percent('# of Children Whose Birth Was Registered',
                    'children_registered_total', 'children_registered',
                    base='childrens'),
        ]
        # Growth monitoring sessions 1-12 follow an identical pattern.
        for i in range(1, 13):
            noun = 'Session' if i == 1 else 'Sessions'
            cols.append(percent(
                '# of Children Who Have Attended At Least %d Growth Monitoring %s' % (i, noun),
                'growth_monitoring_session_%d_total' % i,
                'growth_monitoring_session_%d' % i,
                base='childrens'))
        cols.extend([
            percent('# of Children Whose Nutritional Status is Normal',
                    'nutritional_status_normal_total', 'nutritional_status_normal',
                    base='childrens'),
            percent('# of Children Whose Nutritional Status is "MAM"',
                    'nutritional_status_mam_total', 'nutritional_status_mam',
                    base='childrens'),
            percent('# of Children Whose Nutritional Status is "SAM"',
                    'nutritional_status_sam_total', 'nutritional_status_sam',
                    base='childrens'),
            # Treated-vs-suffering ratio rather than a share of a base column.
            AggregateColumn(
                '# of Children Who Have Received ORS and Zinc Treatment if He/She Contracts Diarrhea',
                format_percent,
                [SumColumn('treated_total', filters=self._filters_with(DATE_FILTER)),
                 SumColumn('suffering_total', filters=self._filters_with(DATE_FILTER))],
                slug='ors_zinc', format_fn=ret_val),
            percent('# of Mothers of Children Aged 3 Years and Below Who Reported to Have Exclusively Breastfed Their Children for First 6 Months',
                    'excbreastfed_total', 'breastfed', base='mothers'),
            percent('# of Children Who Received Measles Vaccine',
                    'measlesvacc_total', 'measlesvacc', base='childrens'),
        ])
        return cols
class SharedDataProvider(object):
    """
    Data provider for report data that can be shared across rows in an instance
    of a report.
    """
    @property
    @memoized
    def _service_dates(self):
        """
        Dates on which each VHND service was available, per owner.

        returns {
            u'df5123010b24fc35260a84547148af06': {
                'ifa_available': {datetime.date(2013, 1, 14),
                datetime.date(2013, 8, 23)}
                'zn_available': {datetime.date(2013, 1, 14)}
            }
        }
        """
        # TODO: this will load one row per every VHND in history.
        # If this gets too big we'll have to make the queries more targeted
        # (which would be easy to do in the get_dates_in_range function), but
        # while the dataset is small, loading it all at once and memoizing
        # saves a large number of DB trips.
        # If things start getting slow or memory intensive this would be a good place to look.
        data = VhndAvailabilitySqlData().data
        results = defaultdict(lambda: defaultdict(lambda: set()))
        for (owner_id, date), row in data.iteritems():
            # Only sessions that actually took place count.
            if row['vhnd_available'] > 0:
                for prop in VHND_PROPERTIES:
                    # NOTE(review): flags are compared against the string '1'
                    # — presumably fluff returns these sums as strings;
                    # confirm against the fluff table schema.
                    if row[prop] == '1' or prop == 'vhnd_available':
                        results[owner_id][prop].add(date)
        return results

    def get_dates_in_range(self, owner_id, startdate, enddate, prop='vhnd_available'):
        """Return the dates in [startdate, enddate) on which ``prop`` was
        available for ``owner_id``."""
        return filter(
            lambda vhnd_date: vhnd_date >= startdate and vhnd_date < enddate,
            [date for date in self._service_dates[owner_id][prop]],
        )
class BaseReport(BaseMixin, GetParamsMixin, MonthYearMixin, CustomProjectReport, ElasticTabularReport):
    """
    Report parent class.  Children must provide a get_rows() method that
    returns a list of the raw data that forms the basis of each row.
    The "model" attribute is an object that can accept raw_data for a row
    and perform the necessary calculations.  It must also provide a
    method_map that is a list of (method_name, "Verbose Title") tuples
    that define the columns in the report.
    """
    name = None
    slug = None
    model = None  # row class; must provide method_map (see class docstring)
    report_template_path = "opm/report.html"
    printable = True
    exportable = True
    exportable_all = False
    export_format_override = Format.UNZIPPED_CSV
    block = ''
    # Whether a precomputed OpmReportSnapshot may be served instead of
    # recalculating; CaseReportMixin overrides this with a request flag.
    load_snapshot = True

    @property
    def show_html(self):
        # False when printing or exporting, so cell renderers can skip markup.
        return getattr(self, 'rendered_as', 'html') not in ('print', 'export')

    @property
    def fields(self):
        return [HierarchyFilter] + super(BaseReport, self).fields

    @property
    def report_subtitles(self):
        # Human-readable summary of the active filters, the date range, and
        # whether the data came from a snapshot; shown under the report title.
        subtitles = ["For filters:",]
        if self.filter_data.get('awc', []):
            subtitles.append("Awc's - %s" % ", ".join(self.awcs))
        if self.filter_data.get('gp', []):
            subtitles.append("Gram Panchayat - %s" % ", ".join(self.gp))
        if self.filter_data.get('block', []):
            subtitles.append("Blocks - %s" % ", ".join(self.blocks))
        startdate = self.datespan.startdate_param_utc
        enddate = self.datespan.enddate_param_utc
        if startdate and enddate:
            sd = parser.parse(startdate)
            ed = parser.parse(enddate)
            subtitles.append(" From %s to %s" % (str(sd.date()), str(ed.date())))
        datetime_format = "%Y-%m-%d %H:%M:%S"
        if self.snapshot is not None:
            snapshot_str = "Loaded from snapshot"
            date = getattr(self.snapshot, 'generated_on', False)
            if date:
                snapshot_str += " generated on %s" % date.strftime(datetime_format)
            subtitles.append(snapshot_str)
            try:
                # Only superusers get to see which snapshot document was used.
                if self.request.user.is_superuser:
                    subtitles.append("Snapshot id: %s" % self.snapshot._id)
            except AttributeError:
                # request may lack a user (e.g. unauthenticated rendering).
                pass
        else:
            subtitles.append("Generated {}".format(
                datetime.datetime.utcnow().strftime(datetime_format)))
        return subtitles

    def filter(self, fn, filter_fields=None):
        """
        This function is to be called by the row constructor to verify that
        the row matches the filters.
        ``fn`` should be a callable that accepts a key, and returns the value
        that should match the filters for a given field.
        Raises InvalidRow when the row does not match.
        I'm not super happy with this implementation, but it beats repeating
        the same logic in incentive, beneficiary, and snapshot.
        """
        if filter_fields is None:
            filter_fields = self.filter_fields
        for key, field in filter_fields:
            keys = self.filter_data.get(field, [])
            value = fn(key) if (fn(key) is not None) else ""
            if field == 'gp':
                # Rows store owner ids, so translate the selected gram
                # panchayats into the ids of users assigned to them.
                keys = [user._id for user in self.users if 'user_data' in user and 'gp' in user.user_data and
                        user.user_data['gp'] and user.user_data['gp'] in keys]
            if keys and value not in keys:
                raise InvalidRow

    @property
    def filter_fields(self):
        # Only the most specific selected hierarchy level is applied.
        filter_by = []
        if self.awcs:
            filter_by = [('awc_name', 'awc')]
        elif self.gp:
            filter_by = [('owner_id', 'gp')]
        elif self.block:
            if isinstance(self, BeneficiaryPaymentReport):
                filter_by = [('block_name', 'block')]
            else:
                filter_by = [('block', 'block')]
        return filter_by

    @property
    @memoized
    def snapshot(self):
        # Don't load snapshot if filtering by current case status,
        # instead, calculate again.
        if self.filter_data.get('is_open', False):
            return None
        snapshot = OpmReportSnapshot.from_view(self)
        if snapshot and self.load_snapshot:
            return snapshot
        else:
            return None

    @property
    def headers(self):
        if self.snapshot is not None:
            # Snapshot path: rebuild headers from the stored names, dropping
            # 'Bank Branch Name' and honoring stored column visibility.
            headers = []
            for i, header in enumerate(self.snapshot.headers):
                if header != 'Bank Branch Name':
                    if self.snapshot.visible_cols:
                        headers.append(DataTablesColumn(name=header, visible=self.snapshot.visible_cols[i]))
                    else:
                        headers.append(DataTablesColumn(name=header))
            return DataTablesHeader(*headers)
        # Live path: headers come from the model's method_map; an optional
        # third tuple element controls column visibility.
        headers = []
        for t in self.model.method_map:
            if len(t) == 3:
                headers.append(DataTablesColumn(name=t[1], visible=t[2]))
            else:
                headers.append(DataTablesColumn(name=t[1]))
        return DataTablesHeader(*headers)

    @property
    def rows(self):
        if self.snapshot is not None:
            return self.snapshot.rows
        # One cell per method_map entry, pulled off each row object.
        rows = []
        for row in self.row_objects:
            data = []
            for t in self.model.method_map:
                data.append(getattr(row, t[0]))
            rows.append(data)
        return rows

    @property
    @memoized
    def filter_data(self):
        # Flatten filter values into {slug: value}; hierarchy filters return
        # a tuple whose first element is a list of per-level dicts.
        fields = []
        for field in self.fields:
            value = field.get_value(self.request, DOMAIN)
            if isinstance(value, tuple):
                for lvl in field.get_value(self.request, DOMAIN)[0]:
                    fields.append((lvl['slug'], lvl['value']))
            else:
                fields.append((field.slug, field.get_value(self.request, DOMAIN)))
        return dict(fields)

    @property
    def row_objects(self):
        """
        Returns a list of objects, each representing a row in the report
        """
        rows = []
        for row in self.get_rows(self.datespan):
            try:
                rows.append(self.get_row_data(row))
            except InvalidRow:
                # Row didn't match the active filters; leave it out.
                pass
        return rows

    def get_row_data(self, row):
        # Hook point: subclasses may wrap this to pass extra kwargs.
        return self.model(row, self)

    @property
    def date_range(self):
        start = self.datespan.startdate_utc
        end = self.datespan.enddate_utc
        now = datetime.datetime.utcnow()
        # if report is run on current month, date range should be
        # this month up till now
        if start.year == now.year and start.month == now.month:
            end = now
        return (start, end)

    def get_model_kwargs(self):
        """
        Override this method to provide a dict of extra kwargs to the
        row constructor
        """
        return {}

    @property
    @request_cache("raw")
    def print_response(self):
        """
        Returns the report for printing.
        """
        self.is_rendered_as_email = True
        self.use_datatables = False
        self.override_template = "opm/print_report.html"
        return HttpResponse(self._async_context()['report'])

    @property
    @memoized
    def users(self):
        # Only needed when filtering by gram panchayat; skip the couch hit otherwise.
        return CouchUser.by_domain(self.domain) if self.filter_data.get('gp', []) else []

    @property
    @memoized
    def data_provider(self):
        return SharedDataProvider()
def _get_terms_list(terms):
"""
>>> terms = ["Sahora", "Kenar Paharpur", " ", " Patear"]
>>> _get_filter_list(terms)
[["sahora"], ["kenar", "paharpur"], ["patear"]]
"""
return filter(None, [term.lower().split() for term in terms])
def get_nested_terms_filter(prop, terms):
    """Build an elasticsearch OR filter matching any of ``terms`` on ``prop``.

    Multi-word terms must match every one of their words (AND of term
    filters); blank terms are ignored.
    """
    def term_filter(word):
        return es_filters.term(prop, word)

    filters = []
    for words in _get_terms_list(terms):
        if len(words) == 1:
            filters.append(term_filter(words[0]))
        elif len(words) > 1:
            filters.append(es_filters.AND(*(term_filter(w) for w in words)))
    return es_filters.OR(*filters)
class CaseReportMixin(object):
default_case_type = "Pregnancy"
extra_row_objects = []
is_rendered_as_email = False
@property
def display_open_cases_only(self):
return self.request_params.get('is_open') == 'open'
@property
def display_closed_cases_only(self):
return self.request_params.get('is_open') == 'closed'
def get_rows(self, datespan):
def get_awc_filter(awcs):
return get_nested_terms_filter("awc_name.#value", awcs)
def get_gp_filter(gp):
owner_ids = [user._id for user in self.users
if getattr(user, 'user_data', {}).get('gp') in self.gp]
return es_filters.term("owner_id", owner_ids)
def get_block_filter(block):
return es_filters.term("block_name.#value", block.lower())
query = case_es.CaseES().domain(self.domain)\
.fields([])\
.opened_range(lte=self.datespan.enddate_utc)\
.term("type.exact", self.default_case_type)
query.index = 'report_cases'
if self.display_open_cases_only:
query = query.filter(es_filters.OR(
case_es.is_closed(False),
case_es.closed_range(gte=self.datespan.enddate_utc)
))
elif self.display_closed_cases_only:
query = query.filter(case_es.closed_range(lte=self.datespan.enddate_utc))
if self.awcs:
query = query.filter(get_awc_filter(self.awcs))
elif self.gp:
query = query.filter(get_gp_filter(self.gp))
elif self.block:
query = query.filter(get_block_filter(self.block))
result = query.run()
return map(CommCareCase, iter_docs(CommCareCase.get_db(), result.ids))
@property
def fields(self):
return [
MetHierarchyFilter,
MonthFilter,
YearFilter,
SelectOpenCloseFilter,
SnapshotFilter
]
@property
def load_snapshot(self):
return self.request.GET.get("load_snapshot", False)
@property
def block(self):
block = self.request_params.get("block")
if block:
return block
else:
return 'atri'
@property
def rows(self):
if self.snapshot is not None:
if 'status' in self.snapshot.slugs:
current_status_index = self.snapshot.slugs.index('status')
for row in self.snapshot.rows:
if self.is_rendered_as_email:
with localize('hin'):
row[current_status_index] = _(row[current_status_index])
return self.snapshot.rows
rows = []
for row in self.row_objects + self.extra_row_objects:
rows.append([getattr(row, method) for
method, header, visible in self.model.method_map])
sorted_rows = sorted(rows, key=lambda item: item[0])
return sorted_rows
    def filter(self, fn, filter_fields=None):
        """Raise ``InvalidRow`` if the case accessed through ``fn`` does not
        satisfy the active filters.

        :fn: callable mapping a case-property key to its value (possibly a
             dict carrying the value under '#value')
        :filter_fields: (case_key, filter_field) pairs; defaults to
             ``self.filter_fields``
        :raises InvalidRow: when any populated filter rejects the row
        """
        # TODO test with snapshots.
        if filter_fields is None:
            filter_fields = self.filter_fields
        for key, field in filter_fields:
            keys = self.filter_data.get(field, [])
            if keys:
                # Unwrap dict-shaped case properties; note fn(key) is
                # evaluated twice here.
                case_key = fn(key)['#value'] if isinstance(fn(key), dict) else fn(key)
                if field == 'is_open':
                    # ``keys`` is the string 'open'/'closed' here; compare the
                    # boolean case_key against "filter wants closed".
                    if case_key != (keys == 'closed'):
                        raise InvalidRow
                else:
                    if field == 'gp':
                        # GP filters match on owner ids, so translate the GP
                        # names into the matching users' ids first.
                        keys = [user._id for user in self.users if 'user_data' in user and 'gp' in user.user_data and user.user_data['gp'] in keys]
                    if case_key not in keys:
                        raise InvalidRow
def set_extra_row_objects(self, row_objects):
self.extra_row_objects = self.extra_row_objects + row_objects
class BeneficiaryPaymentReport(CaseReportMixin, BaseReport):
    """Case-based payment report that consolidates all rows sharing an
    account number into a single row."""
    name = "Beneficiary Payment Report"
    slug = 'beneficiary_payment_report'
    report_template_path = "opm/beneficiary_report.html"
    model = Beneficiary

    @memoized
    def column_index(self, key):
        """Return the position of column ``key`` in the model's method_map,
        or None if it is not present."""
        for i, (k, _, _) in enumerate(self.model.method_map):
            if k == key:
                return i

    @property
    def rows(self):
        """Parent rows, merged so each account number appears exactly once
        (first-seen order preserved)."""
        raw_rows = super(BeneficiaryPaymentReport, self).rows
        # Consolidate rows with the same account number
        accounts = OrderedDict()
        for row in raw_rows:
            account_number = row[self.column_index('account_number')]
            accounts.setdefault(account_number, []).append(row)
        return [self.join_rows(grouped) for grouped in accounts.values()]

    def join_rows(self, rows):
        """Merge several rows for one account into one row: integer cells are
        summed, case-id cells are concatenated (HTML or comma-separated), and
        any other cell takes the greatest value among the rows."""
        # ``def zip_fn((i, values))`` tuple-parameter syntax was removed in
        # Python 3; unpack explicitly instead.
        def merge_column(i, values):
            if isinstance(values[0], int):
                return sum(values)
            elif i == self.column_index('case_id'):
                unique_values = set(v for v in values if v is not None)
                if self.show_html:
                    return ''.join('<p>{}</p>'.format(v) for v in unique_values)
                else:
                    return ','.join(unique_values)
            else:
                return sorted(values)[-1]
        return [merge_column(i, values)
                for i, values in enumerate(zip(*rows))]
class MetReport(CaseReportMixin, BaseReport):
    """Report showing which payment conditions each beneficiary met."""
    name = ugettext_noop("Conditions Met Report")
    report_template_path = "opm/met_report.html"
    slug = "met_report"
    model = ConditionsMet
    exportable = False
    @property
    def headers(self):
        """Table headers: taken from the snapshot when one is loaded,
        otherwise from the model's method_map; translated to Hindi when the
        report is rendered for email."""
        if not self.is_rendered_as_email:
            if self.snapshot is not None:
                # Snapshot stores headers and column visibility separately.
                return DataTablesHeader(*[
                    DataTablesColumn(name=header[0], visible=header[1]) for header in zip(self.snapshot.headers, self.snapshot.visible_cols)
                ])
            return DataTablesHeader(*[
                DataTablesColumn(name=header, visible=visible) for method, header, visible in self.model.method_map
            ])
        else:
            # Email rendering is localized to Hindi.
            with localize('hin'):
                return DataTablesHeader(*[
                    DataTablesColumn(name=_(header), visible=visible) for method, header, visible in self.model.method_map
                ])
    @property
    @request_cache("raw")
    def print_response(self):
        """
        Returns the report for printing.
        """
        # Switch rendering flags before building the async context; the
        # template below is the print-specific variant.
        self.is_rendered_as_email = True
        self.use_datatables = False
        self.override_template = "opm/met_print_report.html"
        return HttpResponse(self._async_context()['report'])
class IncentivePaymentReport(BaseReport):
    """Payment report for AWW (anganwadi worker) incentives."""
    name = "AWW Payment Report"
    slug = 'incentive_payment_report'
    model = Worker
    @property
    def fields(self):
        # NOTE(review): ``super(BaseReport, self)`` skips BaseReport's own
        # ``fields`` and goes to its parent — confirm that is intentional.
        return [HierarchyFilter] + super(BaseReport, self).fields + [SnapshotFilter,]
    @property
    def load_snapshot(self):
        # Request parameter controlling whether a stored snapshot is loaded.
        return self.request.GET.get("load_snapshot", False)
    @property
    @memoized
    def last_month_totals(self):
        """Map account_number -> month_total from last month's stored
        snapshot, or None if no snapshot exists."""
        # Stepping back 4 days from the period start lands inside the
        # previous calendar month.
        last_month = self.datespan.startdate_utc - datetime.timedelta(days=4)
        snapshot = OpmReportSnapshot.by_month(last_month.month, last_month.year,
            "IncentivePaymentReport")
        if snapshot is not None:
            total_index = snapshot.slugs.index('month_total')
            account_index = snapshot.slugs.index('account_number')
            return dict(
                (row[account_index], row[total_index]) for row in snapshot.rows
            )
    def get_model_kwargs(self):
        # Extra kwargs passed through to the row model (Worker).
        return {'last_month_totals': self.last_month_totals}
    def get_rows(self, datespan):
        # One row per mobile worker in the domain.
        return CommCareUser.by_domain(DOMAIN)
    def get_row_data(self, row):
        """Build a Worker model from the user's case and form SQL data."""
        case_sql_data = OpmCaseSqlData(DOMAIN, row._id, self.datespan)
        form_sql_data = OpmFormSqlData(DOMAIN, row._id, self.datespan)
        return self.model(row, self, case_sql_data.data, form_sql_data.data)
def this_month_if_none(month, year):
    """Return ``(month, year)``, defaulting to the current month.

    If ``month`` is supplied, ``year`` must be supplied too; when ``month``
    is None both arguments are ignored and the current month/year is used.

    :raises ValueError: if ``month`` is given without ``year``
    """
    if month is not None:
        # Use a real exception instead of ``assert`` so the validation still
        # runs under ``python -O``.
        if year is None:
            raise ValueError(
                "You must pass either nothing or a month AND a year")
        return month, year
    else:
        this_month = datetime.datetime.now()
        return this_month.month, this_month.year
def get_report(ReportClass, month=None, year=None, block=None, lang=None):
    """
    Utility method to run a report for an arbitrary month without a request.

    Builds an anonymous subclass of ``ReportClass`` whose request-derived
    properties (month, year, block, language, datespan, request params) are
    fixed from the arguments, and returns an instance of it.
    """
    month, year = this_month_if_none(month, year)
    class Report(ReportClass):
        snapshot = None
        report_class = ReportClass
        _visible_cols = []
        def __init__(self, *args, **kwargs):
            # NOTE(review): the parent __init__ is never invoked and
            # args/kwargs are discarded — confirm this is intentional.
            if ReportClass.__name__ in ["MetReport", "BeneficiaryPaymentReport"]:
                # These models carry (slug, header, visible) triples.
                self._slugs, self._headers, self._visible_cols = [
                    list(tup) for tup in zip(*self.model.method_map)
                ]
                # Translate headers into the requested language.
                for idx, val in enumerate(self._headers):
                    with localize(self.lang):
                        self._headers[idx] = _(self._headers[idx])
            elif ReportClass.__name__ == "IncentivePaymentReport":
                self._slugs, self._headers, self._visible_cols = [list(tup) for tup in zip(*self.model.method_map)]
            else:
                # Older models only carry (slug, header) pairs.
                self._slugs, self._headers = [list(tup) for tup in zip(*self.model.method_map)]
        @property
        def domain(self):
            return DOMAIN
        @property
        def slugs(self):
            return self._slugs
        @property
        def visible_cols(self):
            return self._visible_cols
        @property
        def month(self):
            return month
        @property
        def year(self):
            return year
        @property
        def block(self):
            return block
        @property
        def headers(self):
            return self._headers
        @property
        def lang(self):
            return lang
        @property
        def datespan(self):
            return DateSpan.from_month(self.month, self.year)
        @property
        def filter_data(self):
            return {}
        @property
        def request(self):
            # Minimal fake request so request-reading code keeps working.
            request = HttpRequest()
            request.GET = QueryDict(None)
            return request
        @property
        def request_params(self):
            return json_request({})
    return Report()
class HealthStatusReport(BaseReport):
    """Paginated health status report (HSR), driven by an ES user query
    joined with per-user SQL aggregates."""
    ajax_pagination = True
    asynchronous = True
    name = "Health Status Report"
    slug = "health_status"
    fix_left_col = True
    model = HealthStatus
    report_template_path = "opm/hsr_report.html"
    @property
    def rows(self):
        # Materialize the parent rows and derive the totals row from them.
        ret = list(super(HealthStatusReport, self).rows)
        self.total_row = calculate_total_row(ret)
        return ret
    @property
    def fields(self):
        return [HierarchyFilter, SelectOpenCloseFilter, DatespanFilter]
    @property
    @memoized
    def es_results(self):
        """Run the paginated ES user query for the current filters."""
        q = {
            "query": {
                "filtered": {
                    "query": {
                    },
                    "filter": {
                        "bool": {
                            "must": [
                                {"term": {"domain.exact": self.domain}},
                            ]
                        }
                    }
                }
            },
            "size": self.pagination.count,
            "from": self.pagination.start,
        }
        # Local alias into the query dict; note this shadows the module-level
        # ``es_filters`` import within this method.
        es_filters = q["query"]["filtered"]["filter"]
        # Apply only the most specific location filter supplied.
        if self.awcs:
            awc_term = get_nested_terms_filter("user_data.awc", self.awcs)
            es_filters["bool"]["must"].append(awc_term)
        elif self.gp:
            gp_term = get_nested_terms_filter("user_data.gp", self.gp)
            es_filters["bool"]["must"].append(gp_term)
        elif self.blocks:
            block_term = get_nested_terms_filter("user_data.block", self.blocks)
            es_filters["bool"]["must"].append(block_term)
        q["query"]["filtered"]["query"].update({"match_all": {}})
        logging.info("ESlog: [%s.%s] ESquery: %s" % (self.__class__.__name__, self.domain, simplejson.dumps(q)))
        return es_query(q=q, es_url=USER_INDEX + '/_search', dict_only=False,
            start_at=self.pagination.start, size=self.pagination.count)
    def get_rows(self, dataspan):
        # NOTE(review): parameter is spelled ``dataspan`` (not ``datespan``)
        # and is unused; rows come straight from the memoized ES results.
        return self.es_results['hits'].get('hits', [])
    def get_row_data(self, row):
        """Build a HealthStatus model for one ES user hit.

        :raises InvalidRow: when the user has no AWC in its user_data
        """
        if 'user_data' in row['_source'] and 'awc' in row['_source']['user_data']:
            sql_data = OpmHealthStatusSqlData(DOMAIN, row['_id'], self.datespan)
            if sql_data.data:
                formatter = DataFormatter(DictDataFormat(sql_data.columns, no_value=format_percent(0, 0)))
                data = dict(formatter.format(sql_data.data, keys=sql_data.keys, group_by=sql_data.group_by))
                data[row['_id']].update({'awc': row['_source']['user_data']['awc']})
                return HealthStatus(**data[row['_id']])
            else:
                # No SQL aggregates yet: emit an empty row carrying the AWC.
                model = HealthStatus()
                model.awc = row['_source']['user_data']['awc']
                return model
        else:
            raise InvalidRow
    @property
    def fixed_cols_spec(self):
        # Freeze the two leftmost columns at 300px.
        return dict(num=2, width=300)
    @property
    @request_cache("raw")
    def print_response(self):
        """
        Returns the report for printing.
        """
        self.is_rendered_as_email = True
        self.use_datatables = False
        self.override_template = "opm/hsr_print.html"
        self.update_report_context()
        # Effectively disable pagination so the print view shows everything.
        self.pagination.count = 1000000
        self.context['report_table'].update(
            rows=self.rows
        )
        rendered_report = render_to_string(self.template_report, self.context,
            context_instance=RequestContext(self.request)
        )
        return HttpResponse(rendered_report)
    @property
    def export_table(self):
        """
        Exports the report as excel.
        When rendering a complex cell, it will assign a value in the following order:
        1. cell['raw']
        2. cell['sort_key']
        3. str(cell)
        """
        try:
            import xlwt
        except ImportError:
            raise Exception("It doesn't look like this machine is configured for "
                "excel export. To export to excel you have to run the "
                "command: easy_install xlutils")
        headers = self.headers
        formatted_rows = self.rows
        table = headers.as_export_table
        # Strip the HTML wrappers so cells export as plain numbers.
        rows = [_unformat_row(row) for row in formatted_rows]
        table.extend(rows)
        if self.total_row:
            table.append(_unformat_row(self.total_row))
        if self.statistics_rows:
            table.extend([_unformat_row(row) for row in self.statistics_rows])
        return [[self.export_sheet_name, table]]
def calculate_total_row(rows):
    """Build the totals row for the HSR table.

    The first column becomes the label "Total:"; every other column is the
    sum of the integers embedded in its HTML cells (cells shaped like
    ``...>NN<...``). Cells without an embedded number are skipped instead of
    crashing (previously ``match()`` returning None raised AttributeError).
    Returns [] when there are no rows.
    """
    regexp = re.compile('(.*?)>([0-9]+)<.*')
    total_row = []
    if len(rows) > 0:
        num_cols = len(rows[0])
        for i in range(num_cols):
            colrows = [cr[i] for cr in rows]
            if i == 0:
                total_row.append("Total:")
            else:
                matches = (regexp.match(r) for r in colrows)
                columns = [int(m.group(2)) for m in matches if m]
                if columns:
                    total_row.append(
                        "<span style='display: block; text-align:center;'>%s</span>"
                        % sum(columns))
                else:
                    total_row.append('')
    return total_row
def _unformat_row(row):
regexp = re.compile('(.*?)>([0-9]+)(<.*?)>([0-9]*).*')
formatted_row = []
for col in row:
if regexp.match(col):
formated_col = "%s" % (regexp.match(col).group(2))
if regexp.match(col).group(4) != "":
formated_col = "%s - %s%%" % (formated_col, regexp.match(col).group(4))
formatted_row.append(formated_col)
else:
formatted_row.append(col)
return formatted_row
class HealthMapSource(HealthStatusReport):
    """Data source feeding the health-status map: HSR rows prefixed with
    gps/AWW/Block/GP columns taken from the AWW users' metadata."""

    @property
    def snapshot(self):
        # Don't attempt to load a snapshot
        return None

    @property
    @memoized
    def get_users(self):
        """Raw ES user hits backing this report."""
        return super(HealthMapSource, self).es_results['hits'].get('hits', [])

    @property
    def gps_mapping(self):
        """Map each AWC name to its AWW's name and location metadata."""
        mapping = {}
        for user in self.get_users:
            user_src = user['_source']
            aww_name = user_src['first_name'] + " " + user_src['last_name']
            meta_data = user_src['user_data']
            mapping[meta_data.get("awc", "")] = {
                "AWW": aww_name,
                "Block": meta_data.get("block", ""),
                "GP": meta_data.get("gp", ""),
                "gps": meta_data.get("gps", ""),
            }
        return mapping

    @property
    def headers(self):
        ret = super(HealthMapSource, self).headers
        # Prepended one at a time, so the final order is gps, AWW, Block, GP.
        for key in ["GP", "Block", "AWW", "gps"]:
            ret.prepend_column(DataTablesColumn(key))
        return ret

    @property
    def rows(self):
        """Parent rows with location columns prepended and each data cell
        wrapped as {html, sort_key} (sort key = embedded percentage)."""
        pattern = re.compile(r"(\d+)%")
        gps_mapping = self.gps_mapping
        ret = super(HealthMapSource, self).rows
        new_rows = []
        for row in ret:
            awc = row[0]
            # (a previously computed, unused ``gps`` local was removed here)
            awc_map = gps_mapping.get(awc, None) or ""
            extra_columns = ["--"] * 4
            if awc_map:
                extra_columns = [awc_map.get(key, "--")
                                 for key in ["gps", "AWW", "Block", "GP"]]
            escaped_row = [row[0]]
            for cell in row[1:]:
                # _unformat_row([<html>]) => ["N - f%"]; sort on the percent.
                percent = re.findall(pattern, _unformat_row([cell])[0])
                html_cell = {"html": cell, "sort_key": int(percent[0] if percent else 0)}
                escaped_row.append(html_cell)
            new_rows.append(extra_columns + escaped_row)
        return new_rows
class HealthMapReport(BaseMixin, ElasticSearchMapReport, GetParamsMixin, CustomProjectReport):
    """Map view over HealthMapSource: colors each AWC by its health metrics."""
    name = "Health Status (Map)"
    slug = "health_status_map"
    fields = [HierarchyFilter, SelectOpenCloseFilter, DatespanFilter]
    data_source = {
        'adapter': 'legacyreport',
        'geo_column': 'gps',
        'report': 'custom.opm.opm_reports.reports.HealthMapSource',
    }
    @property
    def display_config(self):
        """Configuration dict consumed by the map adapter: which columns to
        show, their display titles, and how each metric is colored."""
        # Higher is better: red below 40, yellow to 70, green to 100.
        colorstops = [
            [40, 'rgba(255, 0, 0, .8)'],
            [70, 'rgba(255, 255, 0, .8)'],
            [100, 'rgba(0, 255, 0, .8)']
        ]
        # Lower is better (used for SAM/MAM counts): green below 40, red at 100.
        reverse_colorstops = [
            [40, 'rgba(0, 255, 0, .8)'],
            [70, 'rgba(255, 255, 0, .8)'],
            [100, 'rgba(255, 0, 0, .8)'],
        ]
        title_mapping = {
            "AWC": "AWC",
            "# of Pregnant Women Registered": "Pregnant Women Registered",
            "# of Children Whose Birth Was Registered": "Children Whose Birth Was Registered",
            "# of Beneficiaries Attending VHND Monthly": "Beneficiaries Attending VHND Monthly",
            '# of Children Whose Nutritional Status is "SAM"': 'Children Whose Nutritional Status is "SAM"',
            '# of Children Whose Nutritional Status is "MAM"': 'Children Whose Nutritional Status is "MAM"',
            '# of Children Whose Nutritional Status is Normal': 'Children Whose Nutritional Status is Normal'
        }
        additional_columns = [
            "Total # of Beneficiaries Registered",
            "# of Mothers of Children Aged 3 Years and Below Registered",
            "# of Children Between 0 and 3 Years of Age Registered",
            "# of Pregnant Women Who Have Received at least 30 IFA Tablets",
            "# of Pregnant Women Whose Weight Gain Was Monitored At Least Once",
            "# of Pregnant Women Whose Weight Gain Was Monitored Twice",
            "# of Children Whose Weight Was Monitored at Birth",
            "# of Children Who Have Attended At Least 1 Growth Monitoring Session",
            "# of Children Who Have Attended At Least 2 Growth Monitoring Sessions",
            "# of Children Who Have Attended At Least 3 Growth Monitoring Sessions",
            "# of Children Who Have Attended At Least 4 Growth Monitoring Sessions",
            '# of Children Who Have Attended At Least 5 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 6 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 7 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 8 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 9 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 10 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 11 Growth Monitoring Sessions',
            '# of Children Who Have Attended At Least 12 Growth Monitoring Sessions',
            '# of Children Who Have Received ORS and Zinc Treatment if He/She Contracts Diarrhea',
            '# of Mothers of Children Aged 3 Years and Below Who Reported to Have Exclusively Breastfed Their Children for First 6 Months',
            '# of Children Who Received Measles Vaccine',
        ]
        columns = ["AWW", "Block", "GP"] + [
            "AWC",
            "# of Pregnant Women Registered",
            "# of Children Whose Birth Was Registered",
            "# of Beneficiaries Attending VHND Monthly",
            '# of Children Whose Nutritional Status is "SAM"',
            '# of Children Whose Nutritional Status is "MAM"',
            '# of Children Whose Nutritional Status is Normal'
        ]
        return {
            "detail_columns": columns[0:5],
            "display_columns": columns[4:],
            "table_columns": columns,
            "column_titles": title_mapping,
            # BUGFIX: was ``columns[-1:0]``, an always-empty slice, which left
            # the last column ("...Normal") with no color metric at all;
            # ``columns[-1:]`` gives it the normal (higher-is-better) scale.
            "metrics": [{"color": {"column": column}} for column in columns[:4]] + [
                {"color": {"column": column, "colorstops": colorstops}} for column in columns[4:-3] + columns[-1:]
            ] + [
                {"color": {"column": column, "colorstops": reverse_colorstops}} for column in columns[-3:-1]
            ] + [
                {"color": {"column": column, "colorstops": colorstops}} for column in additional_columns
            ],
            "numeric_format": {
                title: "return x + ' \%'" for title in additional_columns + columns[4:]
            }
        }
    @property
    def rows(self):
        """Tabular rows for the map's table view, built from the GeoJSON
        feature properties (display columns use their '__disp_' variants)."""
        data = self._get_data()
        columns = self.display_config['table_columns']
        display_columns = self.display_config['display_columns']
        rows = []
        for feature in data['features']:
            row = []
            for column in columns:
                if column in feature['properties'] and column not in display_columns:
                    row.append(feature['properties'][column])
                else:
                    disp_col = '__disp_' + column
                    if disp_col in feature['properties']:
                        row.append(feature['properties'][disp_col])
            rows.append(row)
        return rows
    @property
    def headers(self):
        columns = self.display_config['table_columns']
        headers = DataTablesHeader(*[
            DataTablesColumn(name=name, sortable=False) for name in columns]
        )
        return headers
| |
# -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../lib"))
sys.path.insert(0, os.path.abspath("../..")) # examples
sys.path.insert(0, os.path.abspath("."))
# -- General configuration --------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.5.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "zzzeeksphinx",
    "changelog",
    "sphinx_paramlinks",
]
# Minimum versions required of the third-party extensions above.
needs_extensions = {"zzzeeksphinx": "1.2.1"}
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath("templates")]
# Don't fail/warn on every unresolvable cross-reference.
nitpicky = False
# The suffix of source filenames.
source_suffix = ".rst"
# section names used by the changelog extension.
changelog_sections = [
    "general",
    "platform",
    "orm",
    "orm declarative",
    "orm querying",
    "orm configuration",
    "examples",
    "engine",
    "sql",
    "schema",
    "extensions",
    "typing",
    "mypy",
    "asyncio",
    "postgresql",
    "mysql",
    "mariadb",
    "sqlite",
    "mssql",
    "oracle",
    "tests",
]
# tags to sort on inside of sections
changelog_inner_tag_sort = [
    "feature",
    "improvement",
    "usecase",
    "change",
    "changed",
    "performance",
    "bug",
    "deprecated",
    "removed",
    "renamed",
    "moved",
]
# how to render changelog links
changelog_render_ticket = "https://www.sqlalchemy.org/trac/ticket/%s"
changelog_render_pullreq = {
    "default": "https://github.com/sqlalchemy/sqlalchemy/pull/%s",
    "github": "https://github.com/sqlalchemy/sqlalchemy/pull/%s",
}
changelog_render_changeset = "https://www.sqlalchemy.org/trac/changeset/%s"
# Paths and globs excluded from the documentation build.
exclude_patterns = ["build", "**/unreleased*/*", "*_include.rst"]
# zzzeeksphinx makes these conversions when it is rendering the
# docstrings classes, methods, and functions within the scope of
# Sphinx autodoc
# (i.e. internal module paths are rewritten to their public, documented
# namespaces)
autodocmods_convert_modname = {
    "sqlalchemy.sql.sqltypes": "sqlalchemy.types",
    "sqlalchemy.sql.type_api": "sqlalchemy.types",
    "sqlalchemy.sql.schema": "sqlalchemy.schema",
    "sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.ddl": "sqlalchemy.schema",
    "sqlalchemy.sql.base": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.operators": "sqlalchemy.sql.expression",
    "sqlalchemy.event.base": "sqlalchemy.event",
    "sqlalchemy.engine.base": "sqlalchemy.engine",
    "sqlalchemy.engine.url": "sqlalchemy.engine",
    "sqlalchemy.engine.row": "sqlalchemy.engine",
    "sqlalchemy.engine.cursor": "sqlalchemy.engine",
    "sqlalchemy.engine.result": "sqlalchemy.engine",
    "sqlalchemy.ext.asyncio.result": "sqlalchemy.ext.asyncio",
    "sqlalchemy.ext.asyncio.engine": "sqlalchemy.ext.asyncio",
    "sqlalchemy.ext.asyncio.session": "sqlalchemy.ext.asyncio",
    "sqlalchemy.util._collections": "sqlalchemy.util",
    "sqlalchemy.orm.attributes": "sqlalchemy.orm",
    "sqlalchemy.orm.relationships": "sqlalchemy.orm",
    "sqlalchemy.orm.interfaces": "sqlalchemy.orm",
    "sqlalchemy.orm.query": "sqlalchemy.orm",
    "sqlalchemy.orm.util": "sqlalchemy.orm",
}
# Same idea, but keyed on (module, class) for individually relocated classes.
autodocmods_convert_modname_w_class = {
    ("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
    ("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
# on the referencing side, a newer zzzeeksphinx extension
# applies shorthand symbols to references so that we can have short
# names that are still using absolute references.
zzzeeksphinx_module_prefixes = {
    "_sa": "sqlalchemy",
    "_engine": "sqlalchemy.engine",
    "_url": "sqlalchemy.engine",
    "_result": "sqlalchemy.engine",
    "_row": "sqlalchemy.engine",
    "_schema": "sqlalchemy.schema",
    "_types": "sqlalchemy.types",
    "_sqltypes": "sqlalchemy.types",
    "_asyncio": "sqlalchemy.ext.asyncio",
    "_expression": "sqlalchemy.sql.expression",
    "_sql": "sqlalchemy.sql.expression",
    "_dml": "sqlalchemy.sql.expression",
    "_ddl": "sqlalchemy.schema",
    "_functions": "sqlalchemy.sql.functions",
    "_pool": "sqlalchemy.pool",
    # base event API, like listen() etc.
    "_event": "sqlalchemy.event",
    # core events like PoolEvents, ConnectionEvents
    "_events": "sqlalchemy.events",
    # note Core events are linked as sqlalchemy.event.<cls>
    # ORM is sqlalchemy.orm.<cls>.
    "_ormevent": "sqlalchemy.orm",
    "_ormevents": "sqlalchemy.orm",
    "_scoping": "sqlalchemy.orm.scoping",
    "_exc": "sqlalchemy.exc",
    "_reflection": "sqlalchemy.engine.reflection",
    "_orm": "sqlalchemy.orm",
    "_query": "sqlalchemy.orm",
    "_ormexc": "sqlalchemy.orm.exc",
    "_roles": "sqlalchemy.sql.roles",
    "_baked": "sqlalchemy.ext.baked",
    "_horizontal": "sqlalchemy.ext.horizontal_shard",
    "_associationproxy": "sqlalchemy.ext.associationproxy",
    "_automap": "sqlalchemy.ext.automap",
    "_hybrid": "sqlalchemy.ext.hybrid",
    "_compilerext": "sqlalchemy.ext.compiler",
    "_mutable": "sqlalchemy.ext.mutable",
    "_declarative": "sqlalchemy.ext.declarative",
    "_future": "sqlalchemy.future",
    "_futureorm": "sqlalchemy.future.orm",
    "_postgresql": "sqlalchemy.dialects.postgresql",
    "_mysql": "sqlalchemy.dialects.mysql",
    "_mssql": "sqlalchemy.dialects.mssql",
    "_oracle": "sqlalchemy.dialects.oracle",
    "_sqlite": "sqlalchemy.dialects.sqlite",
    "_util": "sqlalchemy.util",
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
# General information about the project.
project = "SQLAlchemy"
copyright = "2007-2022, the SQLAlchemy authors and contributors"  # noqa
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "2.0"
# The full version, including alpha/beta/rc tags.
release = "2.0.0b1"
# Release date string shown by the site templates; None for unreleased.
release_date = None
site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = "3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# have the "gettext" build generate .pot for each individual
# .rst
gettext_compact = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "zzzeeksphinx"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = "default.css"
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%m/%d/%Y %H:%M:%S"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"notfound": "notfound.html"}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "SQLAlchemydoc"
# autoclass_content = 'both'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        "contents",
        "sqlalchemy_%s.tex" % release.replace(".", "_"),
        "SQLAlchemy Documentation",
        "Mike Bayer",
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 2.
latex_preamble = r"\setcounter{tocdepth}{3}"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# latex_elements = {
#     'papersize': 'letterpaper',
#     'pointsize': '10pt',
# }
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        "index",
        "sqlalchemy",
        "SQLAlchemy Documentation",
        ["SQLAlchemy authors"],
        1,
    )
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "SQLAlchemy"
epub_author = "SQLAlchemy authors"
epub_publisher = "SQLAlchemy authors"
# Keep in sync with the main ``copyright`` setting above; this had gone
# stale at "2007-2015".
epub_copyright = "2007-2022, SQLAlchemy authors"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
| |
"""Wrapper to call Phabricator's Conduit API."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_conduit
#
# Public Classes:
# ConduitException
# Conduit
# .set_act_as_user
# .clear_act_as_user
# .get_act_as_user
# .conduit_uri
# .raw_call
# .ping
# MultiConduit
# .call_as_user
# .conduit_uri
# CallMultiConduitAsUser
#
# Public Functions:
# act_as_user_context
# make_conduit_uri
# make_phab_example_conduit
#
# Public Assignments:
# SESSION_ERROR
# CONDUITPROXY_ERROR_CONNECT
# CONDUITPROXY_ERROR_BADAUTH
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import hashlib
import json
import logging
import time
import urllib
import urllib2
import urlparse
import phldef_conduit
import phlsys_multiprocessing
_URLLIB_TIMEOUT = 600
# TODO: handle re-authentication when the token expires
# TODO: allow connections without specifying user details where possible
@contextlib.contextmanager
def act_as_user_context(conduit, user):
    """Temporarily impersonate 'user' on the supplied conduit.

    When the context exits — normally or via an exception — the previously
    impersonated user is restored; if there was none, impersonation is
    cleared entirely. The context manager itself is exception neutral.

    Usage Example:
        # Impersonate alice
        conduit = make_phab_example_conduit()
        with act_as_user_context(conduit, 'alice'):\
            conduit("user.whoami")["userName"]
        u'alice'
        # Impersonate bob
        conduit = make_phab_example_conduit()
        with act_as_user_context(conduit, 'bob'):\
            conduit("user.whoami")["userName"]
        u'bob'
        # Impersonate bob, revert to phab when context expires
        conduit = make_phab_example_conduit()
        with act_as_user_context(conduit, 'bob'): pass
        conduit("user.whoami")["userName"]
        u'phab'
    """
    previous_user = conduit.get_act_as_user()
    try:
        conduit.set_act_as_user(user)
        yield conduit
    finally:
        # Restore whatever was in effect before entering the context.
        if previous_user:
            conduit.set_act_as_user(previous_user)
        else:
            conduit.clear_act_as_user()
def make_conduit_uri(uri):
    """Return the expected conduit uri based on the supplied 'uri'.

    Usage examples:
        >>> make_conduit_uri('http://127.0.0.1')
        'http://127.0.0.1/api/'
        >>> make_conduit_uri('http://127.0.0.1/')
        'http://127.0.0.1/api/'
        >>> make_conduit_uri('http://127.0.0.1/conduit/')
        'http://127.0.0.1/api/'

    :uri: a uri to the Phabricator instance
    :returns: the expected conduit uri
    """
    # Import locally so this function works on both Python 2 ('urlparse')
    # and Python 3 ('urllib.parse').
    try:
        from urllib.parse import urlparse as parse_url  # Python 3
    except ImportError:
        from urlparse import urlparse as parse_url  # Python 2
    url = parse_url(uri)
    # Keep only scheme and host; any path is replaced by the /api/ endpoint.
    return url.scheme + "://" + url.netloc + "/api/"
def make_phab_example_conduit():
    """Return a Conduit for the test instance, authenticated as 'phab'.

    Connection details (uri, user, certificate) come from the
    phldef_conduit test data.

    :returns: a new Conduit for phldef_conduit's TEST_URI
    """
    phab_account = phldef_conduit.PHAB
    return Conduit(
        phldef_conduit.TEST_URI,
        phab_account.user,
        phab_account.certificate)
class ConduitException(Exception):

    def __init__(self, method, error, errormsg, result, obj, uri, actAsUser):
        """Construct from an error returned by conduit.

        The exception message is a multi-line dump of every field so the
        failure is self-describing in logs; the raw fields are also kept
        as attributes for programmatic inspection.

        :method: the conduit method that was being called
        :error: the type of error
        :errormsg: the error message
        :result: the response field (expected to be empty)
        :obj: the object that was passed to conduit
        :uri: the URI to conduit
        :actAsUser: user that was being impersonated or None
        """
        fields = [
            ("method", method),
            ("error", error),
            ("errormsg", errormsg),
            ("result", result),
            ("object", obj),
            ("uri", uri),
            ("actAsUser", actAsUser),
        ]
        lines = ["phlsys_conduit.Conduit"]
        for label, value in fields:
            lines.append("{0}: '{1}'".format(label, str(value)))
        super(ConduitException, self).__init__("\n".join(lines) + "\n")
        self.method = method
        self.error = error
        self.errormsg = errormsg
        self.result = result
        self.obj = obj
        self.uri = uri
        self.actAsUser = actAsUser
# we would expect this to arise normally from time to time
# (raw_call() below treats it as retryable and re-authenticates)
SESSION_ERROR = "ERR-INVALID-SESSION"
# if we try to conduit.connect to a conduitproxy then we'll get this error,
# this means we should send the full cert every time.
CONDUITPROXY_ERROR_CONNECT = "CONDUITPROXY-ERR-REDUNDANT-CONNECT"
# NOTE(review): not referenced in this file; presumably returned by a
# conduitproxy on failed authentication - confirm against the proxy code.
CONDUITPROXY_ERROR_BADAUTH = "CONDUITPROXY-ERR-BADAUTH"
class Conduit(object):

    """Client for Phabricator's Conduit API (python 2, urllib2-based).

    Authenticates with user/certificate on construction (when supplied)
    and maintains the resulting session token in self._conduit, which is
    attached to every subsequent call.  Instances may impersonate other
    users via set_act_as_user() / act_as_user_context().
    """

    # TODO: make this configurable
    testUri = phldef_conduit.TEST_URI

    def __init__(
            self,
            conduitUri,
            user=None,
            certificate=None,
            actAsUser=None,
            http_proxy=None,
            https_proxy=None):
        """Create a conduit for 'conduitUri', authenticating if possible.

        :conduitUri: base uri of the conduit API (see make_conduit_uri)
        :user: username to authenticate as, or None
        :certificate: conduit certificate for 'user', or None
        :actAsUser: username to impersonate, or None
        :http_proxy: optional http proxy address
        :https_proxy: optional https proxy address
        """
        self._conduit_uri = conduitUri
        self._act_as_user = actAsUser
        # NOTE(review): appears unused - requests use _URLLIB_TIMEOUT
        # instead; confirm before removing.
        self._timeout = 5
        self._username = user
        self._certificate = certificate
        self._client = "phlsys_conduit"
        self._client_version = 1
        self._http_proxy = http_proxy
        self._https_proxy = https_proxy

        # session / auth payload sent with every request as '__conduit__'
        self._conduit = {}
        # only authenticate if both credentials were supplied
        if user and certificate:
            self._authenticate()

    def set_act_as_user(self, user):
        """Impersonate 'user' on all subsequent calls."""
        self._act_as_user = user
        self._conduit["actAsUser"] = self._act_as_user

    def clear_act_as_user(self):
        """Stop impersonating; assumes an impersonation is currently set
        (otherwise the del raises KeyError)."""
        self._act_as_user = None
        del self._conduit["actAsUser"]

    def get_act_as_user(self):
        """Return the currently impersonated username, or None."""
        return self._act_as_user

    @property
    def conduit_uri(self):
        """The uri of the conduit API this client talks to."""
        return self._conduit_uri

    def _authenticate(self):
        """Establish a session via 'conduit.connect'.

        On success populates self._conduit with either a session token or,
        when talking to a conduitproxy, the raw user/cert (proxies have no
        sessions, so the cert is sent with every request).

        :raises ConduitException: on any error other than the expected
            conduitproxy redundant-connect response
        """
        message_dict = self._authenticate_make_message()
        method = "conduit.connect"

        response = self._communicate(method, message_dict)

        error = response["error_code"]
        error_message = response["error_info"]
        result = response["result"]

        is_conduitproxy = False
        if error:
            # a conduitproxy rejects conduit.connect with this specific
            # error; that is expected and identifies the peer as a proxy
            if error == CONDUITPROXY_ERROR_CONNECT:
                is_conduitproxy = True
            else:
                raise ConduitException(
                    method=method,
                    error=error,
                    errormsg=error_message,
                    result=result,
                    obj=message_dict,
                    uri=self._conduit_uri,
                    actAsUser=self._act_as_user)

        if is_conduitproxy:
            # conduit proxies don't have sessions, send the cert every time
            self._conduit = {
                'user': self._username,
                'cert': self._certificate
            }
        else:
            self._conduit = {
                'sessionKey': result["sessionKey"],
                'connectionID': result["connectionID"],
            }

        # re-apply any active impersonation to the fresh session payload
        if self._act_as_user:
            self._conduit["actAsUser"] = self._act_as_user

    def _authenticate_make_message(self):
        """Build the 'conduit.connect' request body.

        The certificate is never sent directly; a sha1 over the current
        unix time token plus the certificate proves possession.
        """
        token = str(int(time.time()))
        # pylint: disable=E1101
        signature = hashlib.sha1(token + self._certificate).hexdigest()
        # pylint: enable=E1101
        return {
            "user": self._username,
            "host": self._conduit_uri,
            "client": self._client,
            "clientVersion": self._client_version,
            "authToken": token,
            "authSignature": signature,
        }

    def _communicate(self, method, message_dict):
        """POST 'message_dict' to conduit 'method'; return decoded JSON.

        Routes through an explicit proxy handler when an http/https proxy
        was configured, otherwise uses a plain urlopen.
        """
        path = self._conduit_uri + method
        params = json.dumps(message_dict)
        body = urllib.urlencode({
            "params": params,
            "output": "json",
        })
        if self._https_proxy or self._http_proxy:
            proxy = {}
            if self._https_proxy:
                proxy['https'] = self._https_proxy
            if self._http_proxy:
                proxy['http'] = self._http_proxy
            proxy_handler = urllib2.ProxyHandler(proxy)
            opener = urllib2.build_opener(proxy_handler)
            data = opener.open(path, body, _URLLIB_TIMEOUT).read()
        else:
            data = urllib2.urlopen(path, body, _URLLIB_TIMEOUT).read()
        return json.loads(data)

    def __call__(self, method, param_dict_in=None):
        """Call conduit 'method' and return just its 'result' field."""
        return self.raw_call(method, param_dict_in)["result"]

    def raw_call(self, method, param_dict_in=None):
        """Call conduit 'method' and return the full response dict.

        Retries up to 3 times when the session has expired
        (SESSION_ERROR), re-authenticating between attempts; any other
        error raises immediately.

        :method: the conduit method name, e.g. 'user.whoami'
        :param_dict_in: optional dict of parameters (not mutated)
        :returns: the decoded response, including 'result'
        :raises ConduitException: on a non-retryable error, or if the
            session error persists after all attempts
        """
        attempts = 3
        for x in range(attempts):
            # copy so retries (and the caller's dict) stay clean
            param_dict = dict(param_dict_in) if param_dict_in else {}
            param_dict["__conduit__"] = self._conduit
            response = self._communicate(method, param_dict)

            error = response["error_code"]
            error_message = response["error_info"]
            result = response["result"]

            if not error:
                break
            else:
                if error == SESSION_ERROR:
                    logging.warning(
                        "phlsys_conduit: SESSION-ERROR (try {0})".format(x))
                    self._authenticate()
                else:
                    raise ConduitException(
                        method=method,
                        error=error,
                        errormsg=error_message,
                        result=result,
                        obj=param_dict,
                        uri=self._conduit_uri,
                        actAsUser=self._act_as_user)

        # still a session error after exhausting all attempts
        if error:
            raise ConduitException(
                method=method,
                error=error,
                errormsg=error_message,
                result=result,
                obj=param_dict,
                uri=self._conduit_uri,
                actAsUser=self._act_as_user)

        return response

    def ping(self):
        """Return the result of 'conduit.ping' (a liveness check)."""
        return self("conduit.ping")
class MultiConduit(object):

    """A conduit that supports multi-processing."""

    def __init__(self, *args, **kwargs):
        """Create a lazily-populated pool of Conduits.

        All arguments are forwarded verbatim to each Conduit as it is
        constructed on demand by the pool.
        """
        # Phabricator supports 5 simultaneous connections per user
        # by default:
        #
        #   conf/default.conf.php: 'auth.sessions.conduit' => 5,
        #
        max_sessions_per_user = 5

        def _make_conduit():
            return Conduit(*args, **kwargs)

        self._conduits = phlsys_multiprocessing.MultiResource(
            max_sessions_per_user, _make_conduit)

    def call_as_user(self, user, *args, **kwargs):
        """Invoke a conduit method while impersonating 'user'."""
        with self._conduits.resource_context() as conduit:
            with act_as_user_context(conduit, user):
                return conduit(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        """Invoke a conduit method without impersonation."""
        with self._conduits.resource_context() as conduit:
            return conduit(*args, **kwargs)

    @property
    def conduit_uri(self):
        """The uri of the conduit API the pooled conduits talk to."""
        with self._conduits.resource_context() as conduit:
            return conduit.conduit_uri
class CallMultiConduitAsUser(object):

    """A proxy for calling a MultiConduit as a particular user."""

    def __init__(self, conduit, as_user):
        """Bind the conduit and the username to impersonate on each call.

        :conduit: a MultiConduit to delegate calls to
        :as_user: the username to impersonate
        """
        super(CallMultiConduitAsUser, self).__init__()
        self._conduit = conduit
        self._as_user = as_user

    def __call__(self, *args, **kwargs):
        """Delegate to the bound conduit, impersonating the bound user."""
        target, user = self._conduit, self._as_user
        return target.call_as_user(user, *args, **kwargs)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| |
#/*+***********************************************************************************
# * The contents of this file are subject to the Vtiger CRM Public License Version 1.0
# * ("License"); You may not use this file except in compliance with the License
# * The Original Code is: Vtiger CRM Open Source
# * The Initial Developer of the Original Code is Vtiger.
# * Portions created by Vtiger are Copyright (C) www.vtiger.com
# * All Rights Reserved.
# *************************************************************************************/
# Setup include path for dependent libraries
import sys, os, string

# Directory of this file, used to locate the bundled third-party python
# libraries.  NOTE: the previous implementation used string.rstrip(),
# which strips a *set of characters* rather than a suffix and could eat
# trailing characters of the directory name (e.g. 'data/array.py' ->
# 'dat'); os.path.dirname() is the correct way to drop the filename.
JSON_DIR = os.path.dirname(__file__)
JSON_DIR += os.sep + '..' + os.sep + 'third-party' + os.sep + 'python'

# Include in sys path (only once)
if JSON_DIR not in sys.path:
    sys.path.append(JSON_DIR)
# Import required libraries
import json, urllib
# Vtiger Webservice Client
class Vtiger_WSClient:
    """Client for the vtiger CRM webservice (webservice.php) API.

    Typical usage::

        client = Vtiger_WSClient('http://crm.example.com/')
        if client.doLogin(username, accesskey):
            modules = client.doListTypes()

    Every operation method returns False on failure; the failure detail
    is then available from lastError().
    """

    def __init__(self, url):
        """Remember the service url, normalized to end in webservice.php.

        :param url: base url of the vtiger installation, with or without
                    a trailing '/' or the webservice.php suffix
        """
        # Webservice file
        self._servicebase = 'webservice.php'
        # Service URL to which client connects to
        self._serviceurl = False
        # Webservice login validity (challenge token and its lifetime)
        self._servicetoken = False
        self._expiretime = False
        self._servertime = False
        # Webservice user credentials
        self._serviceuser = False
        self._servicekey = False
        # Webservice login credentials (set by a successful doLogin)
        self._userid = False
        self._sessionid = False
        # Last operation error information
        self._lasterror = False

        # Check the suffix first: the old order appended '/' and then the
        # basename again when the url already ended in webservice.php,
        # producing '.../webservice.php/webservice.php'.
        if not url.endswith(self._servicebase):
            if not url.endswith('/'):
                url += '/'
            url += self._servicebase
        self._serviceurl = url

    def __doGet(self, url, parameters=False, tojson=True):
        """Perform a GET request and return the response.

        :param url: URL to connect
        :param parameters: parameter map (key, value pairs)
        :param tojson: True if the response should be decoded as JSON
        """
        if not parameters:
            parameters = {}
        useurl = (url + '?%s') % urllib.urlencode(parameters)
        connection = urllib.urlopen(useurl)
        response = connection.read()
        if tojson:
            # decode through the shared helper so GET and POST behave
            # alike (previously this called json.read directly)
            response = self.toJSON(response)
        return response

    def __doPost(self, url, parameters=False, tojson=True):
        """Perform a POST request and return the response.

        :param url: URL to connect
        :param parameters: parameter map (key, value pairs)
        :param tojson: True if the response should be decoded as JSON
        """
        if not parameters:
            parameters = {}
        body = urllib.urlencode(parameters)
        connection = urllib.urlopen(url, body)
        response = connection.read()
        if tojson:
            response = self.toJSON(response)
        return response

    def toJSON(self, indata):
        """Decode a JSON string into python data.

        NOTE(review): json.read is the API of the bundled third-party
        json library (see the sys.path setup above), not the standard
        library json module - confirm which module is actually imported.
        """
        return json.read(indata)

    def toJSONString(self, indata):
        """Encode python data as a JSON string (third-party json API)."""
        return json.write(indata)

    def hasError(self, response):
        """Record and report whether a webservice response failed.

        :param response: decoded response dict, or a falsy value when
                         nothing usable came back
        :returns: True when the response is missing or flags failure; the
                  detail is remembered for lastError()
        """
        if not response:
            # Guard: the previous code dereferenced response['error']
            # here and crashed on an empty/False response.
            self._lasterror = 'No response from server'
            return True
        if response['success'] == False:
            self._lasterror = response['error']
            return True
        self._lasterror = False
        return False

    def lastError(self):
        """Return the error recorded by the last operation, or False."""
        return self._lasterror

    def __checkLogin(self):
        """Return True if doLogin has succeeded on this client."""
        # TODO: Perform Login Again?
        return (self._userid != False)

    def __md5(self, indata):
        """Return the hex md5 digest of 'indata'.

        Uses hashlib (python 2.5+ and python 3) instead of the
        long-deprecated 'md5' module.
        """
        import hashlib
        if not isinstance(indata, bytes):
            indata = indata.encode('utf-8')
        return hashlib.md5(indata).hexdigest()

    def getRecordId(self, record):
        """Extract the record id from a webservice id: '4x123' -> '123'."""
        ids = record.split('x')
        return ids[1]

    def __doChallenge(self, username):
        """Fetch a login challenge token for 'username'.

        :returns: True on success (token and timings are remembered)
        """
        parameters = {
            'operation': 'getchallenge',
            'username': username
        }
        response = self.__doGet(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        result = response['result']
        self._servicetoken = result['token']
        self._expiretime = result['expireTime']
        self._servertime = result['serverTime']
        return True

    def doLogin(self, username, accesskey):
        """Log in with 'username' and webservice 'accesskey'.

        :returns: True on success; session id and user id are remembered
        """
        if not self.__doChallenge(username):
            return False
        parameters = {
            'operation': 'login',
            'username': username,
            # the access key is never sent in the clear; it is combined
            # with the challenge token and hashed
            'accessKey': self.__md5(self._servicetoken + accesskey)
        }
        response = self.__doPost(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        result = response['result']
        self._serviceuser = username
        self._servicekey = accesskey
        self._sessionid = result['sessionName']
        self._userid = result['userId']
        return True

    def doListTypes(self):
        """Return {modulename: {'name': modulename}} for visible modules."""
        if not self.__checkLogin():
            return False
        parameters = {
            'operation': 'listtypes',
            'sessionName': self._sessionid
        }
        response = self.__doGet(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        modulenames = response['result']['types']
        returnvalue = {}
        for modulename in modulenames:
            returnvalue[modulename] = {
                'name': modulename
            }
        return returnvalue

    def doQuery(self, query):
        """Run a vtiger query-language statement; return its result rows."""
        if not self.__checkLogin():
            return False
        # the webservice requires a trailing semicolon
        if not query.endswith(';'):
            query += ';'
        parameters = {
            'operation': 'query',
            'sessionName': self._sessionid,
            'query': query
        }
        response = self.__doGet(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        return response['result']

    def getResultColumns(self, result):
        """Return the column names of a doQuery result, or False if empty."""
        if len(result) > 0:
            return result[0].keys()
        return False

    def doDescribe(self, module):
        """Return the field description of 'module'."""
        if not self.__checkLogin():
            return False
        parameters = {
            'operation': 'describe',
            'sessionName': self._sessionid,
            'elementType': module
        }
        response = self.__doGet(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        return response['result']

    def doRetrieve(self, record):
        """Retrieve the record with webservice id 'record'."""
        if not self.__checkLogin():
            return False
        parameters = {
            'operation': 'retrieve',
            'sessionName': self._sessionid,
            'id': record
        }
        response = self.__doGet(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        return response['result']

    def doCreate(self, module, valuemap):
        """Create a 'module' record from 'valuemap'; return the new record.

        Assigns the record to the logged-in user unless the caller
        supplied 'assigned_user_id'.
        """
        if not self.__checkLogin():
            return False
        if 'assigned_user_id' not in valuemap:
            valuemap['assigned_user_id'] = self._userid
        parameters = {
            'operation': 'create',
            'sessionName': self._sessionid,
            'elementType': module,
            'element': self.toJSONString(valuemap)
        }
        response = self.__doPost(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        return response['result']

    def doInvoke(self, method, params=False, type='POST'):
        """Invoke an arbitrary webservice 'method' via GET or POST.

        :param method: webservice operation name
        :param params: optional extra parameter map
        :param type: 'POST' (default) or anything else for GET
        """
        if not self.__checkLogin():
            return False
        parameters = {
            'operation': method,
            'sessionName': self._sessionid
        }
        if params is not False:
            for key in params:
                # 'in' works on python 2 and 3; dict.has_key() does not
                if key not in parameters:
                    parameters[key] = params[key]
        if type.upper() == 'POST':
            response = self.__doPost(self._serviceurl, parameters)
        else:
            response = self.__doGet(self._serviceurl, parameters)
        if self.hasError(response):
            return False
        return response['result']
| |
"""Cubic spline functions used for interpolation.
"""
import numpy as np
import numpy
from flare.mgp.cubic_splines_numba import *
class PCASplines:
    """
    Build splines for PCA decomposition, mainly used for the mapping of the variance

    :param l_bounds: lower bound for the interpolation. \
        E.g. 1-d for two-body, 3-d for three-body.
    :type l_bounds: numpy array
    :param u_bounds: upper bound for the interpolation.
    :type u_bounds: numpy array
    :param orders: grid numbers in each dimension. E.g, 1-d for two-body, \
        3-d for three-body, should be positive integers.
    :type orders: numpy array
    :param svd_rank: rank for decomposition of variance matrix,\
        also equal to the number of mappings constructed for mapping variance.\
        For two-body `svd_rank<=min(grid_num, train_size*3)`, \
        for three-body `svd_rank<=min(grid_num_in_cube, train_size*3)`
    :type svd_rank: int
    """

    def __init__(self, l_bounds, u_bounds, orders, svd_rank):
        self.svd_rank = svd_rank
        # one spline per retained singular component
        self.models = [
            CubicSpline(l_bounds, u_bounds, orders) for _ in range(svd_rank)
        ]

    @staticmethod
    def _flatten_to_matrix(y):
        # Collapse every leading dimension into rows; keep the trailing
        # dimension as columns.
        n_rows = 1
        for axis in range(len(y.shape) - 1):
            n_rows *= y.shape[axis]
        return np.reshape(y, (n_rows, y.shape[-1]))

    def build_cubic(self, y, u_bounds, l_bounds, orders):
        """Return a fresh list of splines fitted to the columns of y."""
        var_matr = self._flatten_to_matrix(y)
        return [
            CubicSpline(l_bounds, u_bounds, orders, var_matr[:, r])
            for r in range(self.svd_rank)
        ]

    def set_values(self, y):
        """SVD-decompose y and load each spline with one scaled component."""
        var_matr = self._flatten_to_matrix(y)
        U, S, Vh = np.linalg.svd(var_matr, full_matrices=False)
        self.V = Vh[: self.svd_rank, :].T
        for r in range(self.svd_rank):
            self.models[r].set_values(S[r] * U[:, r])

    def __call__(self, x):
        """Evaluate every component spline at x; stack into one array."""
        return np.array([self.models[r](x) for r in range(self.svd_rank)])
class CubicSpline:
    """
    Forked from Github repository: https://github.com/EconForge/interpolation.py.\
    High-level API for cubic splines. \
    Class representing a cubic spline interpolator on a regular cartesian grid.

    Creates a cubic spline interpolator on a regular cartesian grid.

    Args:
        a (numpy array of size d (float)): Lower bounds of the cartesian grid.
        b (numpy array of size d (float)): Upper bounds of the cartesian grid.
        orders (numpy array of size d (int)): Number of nodes along each \
            dimension (=(n1,...,nd) )

    Other Parameters:
        values (numpy array (float)): (optional, (n1 x ... x nd) array). \
            Values on the nodes of the function to interpolate.
    """

    __grid__ = None
    __values__ = None
    __coeffs__ = None

    def __init__(self, a, b, orders, values=None):
        n_dims = len(a)
        assert len(b) == n_dims
        assert len(orders) == n_dims
        self.d = n_dims
        self.a = np.array(a, dtype=float)
        self.b = np.array(b, dtype=float)
        self.orders = np.array(orders, dtype=int)
        self.dtype = self.a.dtype
        self.__coeffs__ = None
        if values is not None:
            self.set_values(values)

    def set_values(self, values):
        """Set values on the nodes for the function to interpolate."""
        values = np.array(values, dtype=float)
        if not np.all(np.isfinite(values)):
            raise Exception("Trying to interpolate non-finite values")
        node_shape = self.orders.tolist()
        values = values.reshape(node_shape)
        self.__values__ = values
        # this should be done without temporary memory allocation
        self.__coeffs__ = filter_coeffs(self.a, self.b, self.orders, values)

    def interpolate(self, points, values=None, with_derivatives=False):
        """
        Interpolate spline at a list of points.

        :param points: (array-like) list of point where the spline is evaluated.
        :param values: (optional) container for inplace computation.
        :return values: (array-like) list of point where the spline is evaluated.
        """
        if not np.all(np.isfinite(points)):
            raise Exception("Spline interpolator evaluated at non-finite points.")

        if with_derivatives:
            N, d = points.shape
            assert d == self.d
            values, dvalues = vec_eval_cubic_splines_G(
                self.a,
                self.b,
                self.orders,
                self.__coeffs__,
                points,
                values,
                dvalues=None,
            )
            return values, dvalues

        if points.ndim == 1:
            # a single point was supplied; evaluate it as a 1-row batch
            points = np.array([points])
        N, d = points.shape
        assert d == self.d
        if values is None:
            values = np.empty(N, dtype=self.dtype)
        vec_eval_cubic_spline(
            self.a, self.b, self.orders, self.__coeffs__, points, values
        )
        return values

    @property
    def grid(self):
        """Cartesian enumeration of all nodes."""
        if self.__grid__ is None:
            self.__grid__ = mlinspace(self.a, self.b, self.orders)
        return self.__grid__

    def __call__(self, s, with_derivatives=False):
        """Interpolate the spline at one or many points"""
        if s.ndim == 1:
            return self.__call__(numpy.atleast_2d(s))[0]
        return self.interpolate(s, with_derivatives=with_derivatives)
def vec_eval_cubic_spline(a, b, orders, coefs, points, values=None):
    """
    Forked from Github repository: https://github.com/EconForge/interpolation.py.\
    Evaluates a cubic spline at many points

    :param a: Lower bounds of the cartesian grid.
    :type a: numpy array of size d (float)
    :param b: Upper bounds of the cartesian grid.
    :type b: numpy array of size d (float)
    :param orders: Number of nodes along each dimension (=(n1,...,nd) )
    :type orders: numpy array of size d (int)
    :param coefs: Filtered coefficients.
    :type coefs: array of dimension d, and size (n1+2, ..., nd+2)
    :param points: List of points where the splines must be interpolated.
    :type points: array of size N x d
    :param values: (optional) If not None, contains the result.
    :type values: array of size N
    :return values: Interpolated values. values[i] contains spline evaluated
        at point points[i,:].
    :raises ValueError: if the grid dimension d is not 1, 2, 3 or 4.
    """
    a = numpy.array(a, dtype=float)
    b = numpy.array(b, dtype=float)
    orders = numpy.array(orders, dtype=int)

    d = a.shape[0]

    if values is None:
        N = points.shape[0]
        values = numpy.empty(N)

    if d == 1:
        vec_eval_cubic_spline_1(a, b, orders, coefs, points, values)
    elif d == 2:
        vec_eval_cubic_spline_2(a, b, orders, coefs, points, values)
    elif d == 3:
        vec_eval_cubic_spline_3(a, b, orders, coefs, points, values)
    elif d == 4:
        vec_eval_cubic_spline_4(a, b, orders, coefs, points, values)
    else:
        # Previously an unsupported dimension fell through silently and
        # returned the uninitialised numpy.empty() buffer.
        raise ValueError(
            "vec_eval_cubic_spline supports 1 <= d <= 4, got d={0}".format(d))

    return values
def vec_eval_cubic_splines_G(a, b, orders, mcoefs, points, values=None, dvalues=None):
    """Evaluate cubic splines and their gradients at many points.

    :param a: Lower bounds of the cartesian grid (size d, float).
    :param b: Upper bounds of the cartesian grid (size d, float).
    :param orders: Number of nodes along each dimension (size d, int).
    :param mcoefs: Filtered spline coefficients.
    :param points: Points to evaluate at (N x d array).
    :param values: (optional) output container of shape (N, n_sp).
    :param dvalues: (optional) gradient output container of shape (N, d, n_sp).
    :return: [values, dvalues] - spline values and their gradients.
    :raises ValueError: if the grid dimension d is not 1, 2, 3 or 4.
    """
    a = numpy.array(a, dtype=float)
    b = numpy.array(b, dtype=float)
    orders = numpy.array(orders, dtype=int)

    d = a.shape[0]
    N = points.shape[0]

    # n_sp = mcoefs.shape[-1]
    # NOTE(review): hardcoded to a single spline per call; the commented
    # line suggests multi-spline support was planned - confirm upstream.
    n_sp = 1

    if values is None:
        values = numpy.empty((N, n_sp))
    if dvalues is None:
        dvalues = numpy.empty((N, d, n_sp))

    if d == 1:
        vec_eval_cubic_splines_G_1(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 2:
        vec_eval_cubic_splines_G_2(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 3:
        vec_eval_cubic_splines_G_3(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 4:
        vec_eval_cubic_splines_G_4(a, b, orders, mcoefs, points, values, dvalues)
    else:
        # Previously an unsupported dimension fell through silently and
        # returned uninitialised numpy.empty() buffers.
        raise ValueError(
            "vec_eval_cubic_splines_G supports 1 <= d <= 4, got d={0}".format(d))

    return [values, dvalues]
| |
#!/usr/bin/env python
# ========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from IntWritable import LongWritable
import SequenceFile
# Names of the two files inside a map directory: a 'data' sequence file
# holding all (key, value) pairs, and a sparse 'index' sequence file
# mapping every INDEX_INTERVAL-th key to its byte position in 'data'
# (see Writer/Reader below).
INDEX_FILE_NAME = 'index'
DATA_FILE_NAME = 'data'
class Writer(object):
    """Write a map directory: a 'data' file plus a sparse 'index' file."""

    # at most one index entry is written per this many appended pairs
    INDEX_INTERVAL = 128

    def __init__(self, dirname, key_class, value_class):
        """Create directory 'dirname' and open its data and index files."""
        os.mkdir(dirname)
        self._data = SequenceFile.createWriter(
            os.path.join(dirname, DATA_FILE_NAME), key_class, value_class)
        self._index = SequenceFile.createBlockWriter(
            os.path.join(dirname, INDEX_FILE_NAME), key_class, LongWritable)
        self._size = 0
        # position/count of the most recent index entry; the sentinels
        # guarantee the very first appended key is always indexed
        self._last_index_pos = -1
        self._last_index_nkeys = -4294967295

    def close(self):
        """Close both underlying sequence files."""
        self._data.close()
        self._index.close()

    def append(self, key, value):
        """Append a (key, value) pair, indexing the key when due."""
        self._checkKey(key)

        position = self._data.getLength()
        index_due = (
            self._size >= self._last_index_nkeys + self.INDEX_INTERVAL)
        if index_due and position > self._last_index_pos:
            self._index.append(key, LongWritable(position))
            self._last_index_pos = position
            self._last_index_nkeys = self._size

        self._data.append(key, value)
        self._size += 1

    def _checkKey(self, key):
        """Hook for key validation; currently accepts every key."""
        pass
class Reader(object):
    """Read a map directory written by Writer, with indexed key seeks."""

    # number of index entries to skip between the ones kept in memory;
    # 0 keeps every entry
    INDEX_SKIP = 0

    def __init__(self, dirname):
        """Open the data and index files of the map directory 'dirname'."""
        self._data = SequenceFile.Reader(os.path.join(dirname, DATA_FILE_NAME))
        self._index = SequenceFile.Reader(os.path.join(dirname, INDEX_FILE_NAME))
        self._first_position = self._data.getPosition()
        # in-memory sparse index, lazily loaded by _readIndex()
        self._positions = []
        self._keys = []
        # default so getIndexInterval() cannot raise AttributeError when
        # called before setIndexInterval()
        self._index_interval = self.INDEX_SKIP

    def close(self):
        """Close both underlying sequence files."""
        self._data.close()
        self._index.close()

    def getIndexInterval(self):
        """Return the configured index interval."""
        return self._index_interval

    def setIndexInterval(self, interval):
        """Set the index interval."""
        self._index_interval = interval

    def reset(self):
        """Rewind the data file to its first record."""
        self._data.seek(self._first_position)

    def midKey(self):
        """Return the key roughly in the middle of the in-memory index,
        or None when the index is empty."""
        self._readIndex()
        count = len(self._keys)
        if count == 0:
            return None
        return self._keys[(count - 1) >> 1]

    def finalKey(self, key):
        """Read the last key of the map into 'key'.

        The data file position is restored afterwards.
        """
        original_position = self._data.getPosition()
        try:
            self._readIndex()
            count = len(self._keys)
            if count > 0:
                # jump close to the end via the last index entry
                self._data.seek(self._positions[count - 1])
            else:
                # no index: scan from the beginning
                # (fixed: previously called self._reset(), a method that
                # does not exist, so this path always raised)
                self.reset()
            while self._data.nextKey(key):
                continue
        finally:
            self._data.seek(original_position)

    def seek(self, key):
        """Position the data file at 'key'; return True on exact match."""
        return self._seekInternal(key) == 0

    def next(self, key, value):
        """Read the next (key, value) pair from the data file."""
        return self._data.next(key, value)

    def get(self, key, value):
        """Fill 'value' for 'key' and return it, or None when absent."""
        if self.seek(key):
            self._data._getCurrentValue(value)
            return value
        return None

    def getClosest(self, key, value, before=False):
        """Fill 'value' for the entry closest to 'key'.

        Returns the matched key, or None when no entry lies at-or-after
        'key' (at-or-before when 'before' is True).
        """
        c = self._seekInternal(key, before)
        if (not before and c > 0) or (before and c < 0):
            return None
        self._data._getCurrentValue(value)
        return self._next_key

    def _readIndex(self):
        """Lazily load the sparse index into _keys/_positions."""
        if self._keys:
            return
        key_class = self._index.getKeyClass()
        skip = self.INDEX_SKIP
        position = LongWritable()
        last_position = None
        while True:
            key = key_class()
            if not self._index.next(key, position):
                break
            # honour INDEX_SKIP by dropping entries between kept ones
            if skip > 0:
                skip -= 1
                continue
            skip = self.INDEX_SKIP
            # collapse duplicate positions
            if position.get() == last_position:
                continue
            self._positions.append(position.get())
            self._keys.append(key)

    def _seekInternal(self, key, before=None):
        """Seek the data file to 'key' (or its neighbour when 'before').

        Returns 0 on exact match, a comparison sign otherwise.
        """
        self._readIndex()

        # find the index entry at or just before 'key'
        seek_index = self._indexSearch(key)
        if seek_index < 0:
            seek_index = -seek_index - 2
        if seek_index == -1:
            seek_position = self._first_position
        else:
            seek_position = self._positions[seek_index]

        prev_position = -1
        curr_position = seek_position

        key_class = self._data.getKeyClass()
        self._next_key = key_class()
        self._data.seek(seek_position)
        # linear scan from the indexed position to the requested key
        while self._data.nextKey(self._next_key):
            cmp = key.compareTo(self._next_key)
            if cmp <= 0:
                if before and cmp != 0:
                    # overshot: step back to the previous record
                    if prev_position == -1:
                        self._data.seek(curr_position)
                    else:
                        self._data.seek(prev_position)
                        self._data.nextKey(self._next_key)
                        return 1
                return cmp
            if before:
                prev_position = curr_position
                curr_position = self._data.getPosition()
        return 1

    def _indexSearch(self, key):
        """Binary-search _keys for 'key'.

        Returns the match index, or -(insertion_point + 1) when absent
        (the same contract as java.util.Arrays.binarySearch).
        """
        high = len(self._keys) - 1
        low = 0
        while low <= high:
            mid = (low + high) >> 1
            cmp = self._keys[mid].compareTo(key)
            if cmp < 0:
                low = mid + 1
            elif cmp > 0:
                high = mid - 1
            else:
                return mid
        return -(low + 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.